aarch64: add new from-scratch self-hosted backend

Jacob Young 2025-06-18 10:59:09 -04:00 committed by Andrew Kelley
parent a023b9b22b
commit 5060ab99c9
167 changed files with 28210 additions and 3730 deletions

View File

@ -550,6 +550,14 @@ set(ZIG_STAGE2_SOURCES
src/clang_options.zig
src/clang_options_data.zig
src/codegen.zig
src/codegen/aarch64.zig
src/codegen/aarch64/abi.zig
src/codegen/aarch64/Assemble.zig
src/codegen/aarch64/Disassemble.zig
src/codegen/aarch64/encoding.zig
src/codegen/aarch64/instructions.zon
src/codegen/aarch64/Mir.zig
src/codegen/aarch64/Select.zig
src/codegen/c.zig
src/codegen/c/Type.zig
src/codegen/llvm.zig

View File

@ -16,6 +16,7 @@ var stdin_buffer: [4096]u8 = undefined;
var stdout_buffer: [4096]u8 = undefined;
const crippled = switch (builtin.zig_backend) {
.stage2_aarch64,
.stage2_powerpc,
.stage2_riscv64,
=> true,
@ -287,13 +288,14 @@ pub fn log(
/// work-in-progress backends can handle it.
pub fn mainSimple() anyerror!void {
@disableInstrumentation();
// is the backend capable of printing to stderr?
const enable_print = switch (builtin.zig_backend) {
// is the backend capable of calling `std.fs.File.writeAll`?
const enable_write = switch (builtin.zig_backend) {
.stage2_aarch64, .stage2_riscv64 => true,
else => false,
};
// is the backend capable of using std.fmt.format to print a summary at the end?
const print_summary = switch (builtin.zig_backend) {
.stage2_riscv64 => true,
// is the backend capable of calling `std.Io.Writer.print`?
const enable_print = switch (builtin.zig_backend) {
.stage2_aarch64, .stage2_riscv64 => true,
else => false,
};
@ -302,34 +304,31 @@ pub fn mainSimple() anyerror!void {
var failed: u64 = 0;
// we don't want to bring in File and Writer if the backend doesn't support it
const stderr = if (comptime enable_print) std.fs.File.stderr() else {};
const stdout = if (enable_write) std.fs.File.stdout() else {};
for (builtin.test_functions) |test_fn| {
if (enable_write) {
stdout.writeAll(test_fn.name) catch {};
stdout.writeAll("... ") catch {};
}
if (test_fn.func()) |_| {
if (enable_print) {
stderr.writeAll(test_fn.name) catch {};
stderr.writeAll("... ") catch {};
stderr.writeAll("PASS\n") catch {};
}
if (enable_write) stdout.writeAll("PASS\n") catch {};
} else |err| {
if (enable_print) {
stderr.writeAll(test_fn.name) catch {};
stderr.writeAll("... ") catch {};
}
if (err != error.SkipZigTest) {
if (enable_print) stderr.writeAll("FAIL\n") catch {};
if (enable_write) stdout.writeAll("FAIL\n") catch {};
failed += 1;
if (!enable_print) return err;
if (!enable_write) return err;
continue;
}
if (enable_print) stderr.writeAll("SKIP\n") catch {};
if (enable_write) stdout.writeAll("SKIP\n") catch {};
skipped += 1;
continue;
}
passed += 1;
}
if (enable_print and print_summary) {
stderr.deprecatedWriter().print("{} passed, {} skipped, {} failed\n", .{ passed, skipped, failed }) catch {};
if (enable_print) {
var stdout_writer = stdout.writer(&.{});
stdout_writer.interface.print("{} passed, {} skipped, {} failed\n", .{ passed, skipped, failed }) catch {};
}
if (failed != 0) std.process.exit(1);
}
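
The summary above goes through the new `std.Io.Writer` interface instead of the deprecated writer. A minimal standalone sketch of that pattern, assuming the same `std.fs.File.writer` API used in the hunk:

const std = @import("std");

pub fn main() void {
    // A zero-length buffer is fine for a one-shot print; bytes drain
    // straight through to the file.
    var stdout_writer = std.fs.File.stdout().writer(&.{});
    stdout_writer.interface.print("{} passed, {} skipped, {} failed\n", .{ 3, 1, 0 }) catch {};
}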

View File

@ -240,7 +240,7 @@ comptime {
_ = @import("compiler_rt/udivmodti4.zig");
// extra
_ = @import("compiler_rt/os_version_check.zig");
if (builtin.zig_backend != .stage2_aarch64) _ = @import("compiler_rt/os_version_check.zig");
_ = @import("compiler_rt/emutls.zig");
_ = @import("compiler_rt/arm.zig");
_ = @import("compiler_rt/aulldiv.zig");
@ -249,12 +249,12 @@ comptime {
_ = @import("compiler_rt/hexagon.zig");
if (@import("builtin").object_format != .c) {
_ = @import("compiler_rt/atomics.zig");
if (builtin.zig_backend != .stage2_aarch64) _ = @import("compiler_rt/atomics.zig");
_ = @import("compiler_rt/stack_probe.zig");
// macOS has these functions inside libSystem.
if (builtin.cpu.arch.isAARCH64() and !builtin.os.tag.isDarwin()) {
_ = @import("compiler_rt/aarch64_outline_atomics.zig");
if (builtin.zig_backend != .stage2_aarch64) _ = @import("compiler_rt/aarch64_outline_atomics.zig");
}
_ = @import("compiler_rt/memcpy.zig");

View File

@ -1,6 +1,4 @@
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const common = @import("./common.zig");
pub const panic = @import("common.zig").panic;
@ -16,7 +14,7 @@ comptime {
// - addoXi4_generic as default
inline fn addoXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
@setRuntimeSafety(builtin.is_test);
@setRuntimeSafety(common.test_safety);
overflow.* = 0;
const sum: ST = a +% b;
// Hacker's Delight: section Overflow Detection, subsection Signed Add/Subtract

View File

@ -1,4 +1,5 @@
const addv = @import("addo.zig");
const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const math = std.math;
@ -23,6 +24,8 @@ fn simple_addoti4(a: i128, b: i128, overflow: *c_int) i128 {
}
test "addoti4" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const min: i128 = math.minInt(i128);
const max: i128 = math.maxInt(i128);
var i: i128 = 1;

View File

@ -97,8 +97,7 @@ fn clear_cache(start: usize, end: usize) callconv(.c) void {
.nbytes = end - start,
.whichcache = 3, // ICACHE | DCACHE
};
asm volatile (
\\ syscall
asm volatile ("syscall"
:
: [_] "{$2}" (165), // nr = SYS_sysarch
[_] "{$4}" (0), // op = MIPS_CACHEFLUSH
@ -116,11 +115,8 @@ fn clear_cache(start: usize, end: usize) callconv(.c) void {
} else if (arm64 and !apple) {
// Get Cache Type Info.
// TODO memoize this?
var ctr_el0: u64 = 0;
asm volatile (
\\mrs %[x], ctr_el0
\\
: [x] "=r" (ctr_el0),
const ctr_el0 = asm volatile ("mrs %[ctr_el0], ctr_el0"
: [ctr_el0] "=r" (-> u64),
);
// The DC and IC instructions must use 64-bit registers so we don't use
// uintptr_t in case this runs in an IPL32 environment.
@ -187,9 +183,7 @@ fn clear_cache(start: usize, end: usize) callconv(.c) void {
exportIt();
} else if (os == .linux and loongarch) {
// See: https://github.com/llvm/llvm-project/blob/cf54cae26b65fc3201eff7200ffb9b0c9e8f9a13/compiler-rt/lib/builtins/clear_cache.c#L94-L95
asm volatile (
\\ ibar 0
);
asm volatile ("ibar 0");
exportIt();
}
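
Both rewrites in this file switch from statement-form asm with an output variable to expression-form asm, where `->` declares the result type inline. A minimal sketch of the expression form (hypothetical helper name, aarch64 only):

fn readCacheTypeRegister() u64 {
    // `->` marks the asm expression's typed result; no out variable needed.
    return asm volatile ("mrs %[ret], ctr_el0"
        : [ret] "=r" (-> u64),
    );
}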

View File

@ -1,6 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const common = @import("common.zig");
pub const panic = common.panic;

View File

@ -102,9 +102,14 @@ pub const gnu_f16_abi = switch (builtin.cpu.arch) {
pub const want_sparc_abi = builtin.cpu.arch.isSPARC();
pub const test_safety = switch (builtin.zig_backend) {
.stage2_aarch64 => false,
else => builtin.is_test,
};
// Avoid dragging in the runtime safety mechanisms into this .o file, unless
// we're trying to test compiler-rt.
pub const panic = if (builtin.is_test) std.debug.FullPanic(std.debug.defaultPanic) else std.debug.no_panic;
pub const panic = if (test_safety) std.debug.FullPanic(std.debug.defaultPanic) else std.debug.no_panic;
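
The rest of compiler-rt consumes this flag through `@setRuntimeSafety`, so one switch disables safety checks for a backend that cannot lower them yet. A self-contained sketch of the pattern:

const builtin = @import("builtin");

const test_safety = switch (builtin.zig_backend) {
    .stage2_aarch64 => false, // backend cannot lower safety checks yet
    else => builtin.is_test, // otherwise: safety on under `zig test` only
};

fn demoAdd(a: u32, b: u32) u32 {
    @setRuntimeSafety(test_safety);
    return a +% b;
}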
/// This seems to mostly correspond to `clang::TargetInfo::HasFloat16`.
pub fn F16T(comptime OtherType: type) type {

View File

@ -4,7 +4,6 @@
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const __eqdf2 = @import("./cmpdf2.zig").__eqdf2;
const __ledf2 = @import("./cmpdf2.zig").__ledf2;

View File

@ -4,7 +4,6 @@
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const __eqsf2 = @import("./cmpsf2.zig").__eqsf2;
const __lesf2 = @import("./cmpsf2.zig").__lesf2;

View File

@ -1,6 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const common = @import("common.zig");
pub const panic = common.panic;

View File

@ -5,7 +5,6 @@
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const is_test = builtin.is_test;
const common = @import("common.zig");
const normalize = common.normalize;

View File

@ -34,7 +34,7 @@ fn divmod(q: ?[]u32, r: ?[]u32, u: []u32, v: []u32) !void {
}
pub fn __divei4(q_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) void {
@setRuntimeSafety(builtin.is_test);
@setRuntimeSafety(common.test_safety);
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const q: []u32 = @ptrCast(@alignCast(q_p[0..byte_size]));
const u: []u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
@ -43,7 +43,7 @@ pub fn __divei4(q_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) vo
}
pub fn __modei4(r_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) void {
@setRuntimeSafety(builtin.is_test);
@setRuntimeSafety(common.test_safety);
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const r: []u32 = @ptrCast(@alignCast(r_p[0..byte_size]));
const u: []u32 = @ptrCast(@alignCast(u_p[0..byte_size]));

View File

@ -1,4 +1,3 @@
const is_test = @import("builtin").is_test;
const std = @import("std");
const math = std.math;
const testing = std.testing;

View File

@ -6,7 +6,6 @@ const testing = std.testing;
const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
const arch = builtin.cpu.arch;
const is_test = builtin.is_test;
const common = @import("common.zig");
const udivmod = @import("udivmod.zig").udivmod;
const __divti3 = @import("divti3.zig").__divti3;

View File

@ -11,7 +11,7 @@ comptime {
.visibility = common.visibility,
};
if (builtin.mode == .ReleaseSmall)
if (builtin.mode == .ReleaseSmall or builtin.zig_backend == .stage2_aarch64)
@export(&memcpySmall, export_options)
else
@export(&memcpyFast, export_options);
@ -195,6 +195,8 @@ inline fn copyRange4(
}
test "memcpy" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
fn testFunc(comptime copy_func: anytype) !void {
const max_len = 1024;

View File

@ -14,7 +14,7 @@ comptime {
.visibility = common.visibility,
};
if (builtin.mode == .ReleaseSmall)
if (builtin.mode == .ReleaseSmall or builtin.zig_backend == .stage2_aarch64)
@export(&memmoveSmall, export_options)
else
@export(&memmoveFast, export_options);
@ -39,7 +39,7 @@ fn memmoveSmall(opt_dest: ?[*]u8, opt_src: ?[*]const u8, len: usize) callconv(.c
}
fn memmoveFast(dest: ?[*]u8, src: ?[*]u8, len: usize) callconv(.c) ?[*]u8 {
@setRuntimeSafety(builtin.is_test);
@setRuntimeSafety(common.test_safety);
const small_limit = @max(2 * @sizeOf(Element), @sizeOf(Element));
if (copySmallLength(small_limit, dest.?, src.?, len)) return dest;
@ -79,7 +79,7 @@ inline fn copyLessThan16(
src: [*]const u8,
len: usize,
) void {
@setRuntimeSafety(builtin.is_test);
@setRuntimeSafety(common.test_safety);
if (len < 4) {
if (len == 0) return;
const b = len / 2;
@ -100,7 +100,7 @@ inline fn copy16ToSmallLimit(
src: [*]const u8,
len: usize,
) bool {
@setRuntimeSafety(builtin.is_test);
@setRuntimeSafety(common.test_safety);
inline for (2..(std.math.log2(small_limit) + 1) / 2 + 1) |p| {
const limit = 1 << (2 * p);
if (len < limit) {
@ -119,7 +119,7 @@ inline fn copyRange4(
src: [*]const u8,
len: usize,
) void {
@setRuntimeSafety(builtin.is_test);
@setRuntimeSafety(common.test_safety);
comptime assert(std.math.isPowerOfTwo(copy_len));
assert(len >= copy_len);
assert(len < 4 * copy_len);
@ -147,7 +147,7 @@ inline fn copyForwards(
src: [*]const u8,
len: usize,
) void {
@setRuntimeSafety(builtin.is_test);
@setRuntimeSafety(common.test_safety);
assert(len >= 2 * @sizeOf(Element));
const head = src[0..@sizeOf(Element)].*;
@ -181,7 +181,7 @@ inline fn copyBlocks(
src: anytype,
max_bytes: usize,
) void {
@setRuntimeSafety(builtin.is_test);
@setRuntimeSafety(common.test_safety);
const T = @typeInfo(@TypeOf(dest)).pointer.child;
comptime assert(T == @typeInfo(@TypeOf(src)).pointer.child);
@ -217,6 +217,8 @@ inline fn copyBackwards(
}
test memmoveFast {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const max_len = 1024;
var buffer: [max_len + @alignOf(Element) - 1]u8 = undefined;
for (&buffer, 0..) |*b, i| {

View File

@ -6,7 +6,7 @@ const common = @import("./common.zig");
/// Ported from:
/// https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/lib/builtins/fp_mul_impl.inc
pub inline fn mulf3(comptime T: type, a: T, b: T) T {
@setRuntimeSafety(builtin.is_test);
@setRuntimeSafety(common.test_safety);
const typeWidth = @typeInfo(T).float.bits;
const significandBits = math.floatMantissaBits(T);
const fractionalBits = math.floatFractionalBits(T);
@ -163,7 +163,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T {
///
/// This is analogous to an shr version of `@shlWithOverflow`
fn wideShrWithTruncation(comptime Z: type, hi: *Z, lo: *Z, count: u32) bool {
@setRuntimeSafety(builtin.is_test);
@setRuntimeSafety(common.test_safety);
const typeWidth = @typeInfo(Z).int.bits;
var inexact = false;
if (count < typeWidth) {

View File

@ -251,7 +251,7 @@ const PIo2 = [_]f64{
/// compiler will convert from decimal to binary accurately enough
/// to produce the hexadecimal values shown.
///
pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 {
pub fn rem_pio2_large(x: []const f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 {
var jz: i32 = undefined;
var jx: i32 = undefined;
var jv: i32 = undefined;

View File

@ -4,7 +4,6 @@ const common = @import("common.zig");
const os_tag = builtin.os.tag;
const arch = builtin.cpu.arch;
const abi = builtin.abi;
const is_test = builtin.is_test;
pub const panic = common.panic;

View File

@ -1,4 +1,5 @@
const subo = @import("subo.zig");
const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const math = std.math;
@ -27,6 +28,8 @@ pub fn simple_suboti4(a: i128, b: i128, overflow: *c_int) i128 {
}
test "suboti3" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const min: i128 = math.minInt(i128);
const max: i128 = math.maxInt(i128);
var i: i128 = 1;

View File

@ -1,8 +1,8 @@
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const Log2Int = std.math.Log2Int;
const HalveInt = @import("common.zig").HalveInt;
const common = @import("common.zig");
const HalveInt = common.HalveInt;
const lo = switch (builtin.cpu.arch.endian()) {
.big => 1,
@ -14,7 +14,7 @@ const hi = 1 - lo;
// Returns U / v_ and sets r = U % v_.
fn divwide_generic(comptime T: type, _u1: T, _u0: T, v_: T, r: *T) T {
const HalfT = HalveInt(T, false).HalfT;
@setRuntimeSafety(is_test);
@setRuntimeSafety(common.test_safety);
var v = v_;
const b = @as(T, 1) << (@bitSizeOf(T) / 2);
@ -70,7 +70,7 @@ fn divwide_generic(comptime T: type, _u1: T, _u0: T, v_: T, r: *T) T {
}
fn divwide(comptime T: type, _u1: T, _u0: T, v: T, r: *T) T {
@setRuntimeSafety(is_test);
@setRuntimeSafety(common.test_safety);
if (T == u64 and builtin.target.cpu.arch == .x86_64 and builtin.target.os.tag != .windows) {
var rem: T = undefined;
const quo = asm (
@ -90,7 +90,7 @@ fn divwide(comptime T: type, _u1: T, _u0: T, v: T, r: *T) T {
// Returns a_ / b_ and sets maybe_rem = a_ % b_.
pub fn udivmod(comptime T: type, a_: T, b_: T, maybe_rem: ?*T) T {
@setRuntimeSafety(is_test);
@setRuntimeSafety(common.test_safety);
const HalfT = HalveInt(T, false).HalfT;
const SignedT = std.meta.Int(.signed, @bitSizeOf(T));

View File

@ -113,7 +113,7 @@ pub fn divmod(q: ?[]u32, r: ?[]u32, u: []const u32, v: []const u32) !void {
}
pub fn __udivei4(q_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) callconv(.c) void {
@setRuntimeSafety(builtin.is_test);
@setRuntimeSafety(common.test_safety);
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const q: []u32 = @ptrCast(@alignCast(q_p[0..byte_size]));
const u: []const u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
@ -122,7 +122,7 @@ pub fn __udivei4(q_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) ca
}
pub fn __umodei4(r_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) callconv(.c) void {
@setRuntimeSafety(builtin.is_test);
@setRuntimeSafety(common.test_safety);
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const r: []u32 = @ptrCast(@alignCast(r_p[0..byte_size]));
const u: []const u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
@ -131,6 +131,7 @@ pub fn __umodei4(r_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) ca
}
test "__udivei4/__umodei4" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;

View File

@ -2239,6 +2239,10 @@ pub const Discarding = struct {
pub fn sendFile(w: *Writer, file_reader: *File.Reader, limit: Limit) FileError!usize {
if (File.Handle == void) return error.Unimplemented;
switch (builtin.zig_backend) {
else => {},
.stage2_aarch64 => return error.Unimplemented,
}
const d: *Discarding = @alignCast(@fieldParentPtr("writer", w));
d.count += w.end;
w.end = 0;

View File

@ -408,6 +408,9 @@ pub const have_ipc = switch (builtin.os.tag) {
const noop_impl = builtin.single_threaded or switch (builtin.os.tag) {
.wasi, .freestanding => true,
else => false,
} or switch (builtin.zig_backend) {
.stage2_aarch64 => true,
else => false,
};
/// Initializes a global Progress instance.
@ -754,7 +757,7 @@ fn appendTreeSymbol(symbol: TreeSymbol, buf: []u8, start_i: usize) usize {
}
fn clearWrittenWithEscapeCodes() anyerror!void {
if (!global_progress.need_clear) return;
if (noop_impl or !global_progress.need_clear) return;
global_progress.need_clear = false;
try write(clear);

View File

@ -772,7 +772,7 @@ pub const Endian = enum {
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const Signedness = enum {
pub const Signedness = enum(u1) {
signed,
unsigned,
};
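
Giving the enum an explicit `u1` tag pins its size and tag values, which the code generation mentioned in the doc comment can then rely on. A small illustration (not from the commit):

const std = @import("std");

comptime {
    std.debug.assert(@bitSizeOf(std.builtin.Signedness) == 1);
    std.debug.assert(@intFromEnum(std.builtin.Signedness.signed) == 0);
    std.debug.assert(@intFromEnum(std.builtin.Signedness.unsigned) == 1);
}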
@ -894,7 +894,10 @@ pub const VaList = switch (builtin.cpu.arch) {
.aarch64, .aarch64_be => switch (builtin.os.tag) {
.windows => *u8,
.ios, .macos, .tvos, .watchos, .visionos => *u8,
else => @compileError("disabled due to miscompilations"), // VaListAarch64,
else => switch (builtin.zig_backend) {
.stage2_aarch64 => VaListAarch64,
else => @compileError("disabled due to miscompilations"),
},
},
.arm, .armeb, .thumb, .thumbeb => switch (builtin.os.tag) {
.ios, .macos, .tvos, .watchos, .visionos => *u8,

View File

@ -2001,7 +2001,7 @@ pub const R_AARCH64 = enum(u32) {
TLSLE_LDST64_TPREL_LO12 = 558,
/// Likewise; no check.
TLSLE_LDST64_TPREL_LO12_NC = 559,
/// PC-rel. load immediate 20:2.
TLSDESC_LD_PREL19 = 560,
/// PC-rel. ADR immediate 20:0.
TLSDESC_ADR_PREL21 = 561,

View File

@ -1554,7 +1554,10 @@ pub const Writer = struct {
return .{
.vtable = &.{
.drain = drain,
.sendFile = sendFile,
.sendFile = switch (builtin.zig_backend) {
else => sendFile,
.stage2_aarch64 => std.io.Writer.unimplementedSendFile,
},
},
.buffer = buffer,
};

View File

@ -45,6 +45,7 @@ pub const rad_per_deg = 0.017453292519943295769236907684886127134428718885417254
/// 180.0/pi
pub const deg_per_rad = 57.295779513082320876798154814105170332405472466564321549160243861;
pub const Sign = enum(u1) { positive, negative };
pub const FloatRepr = float.FloatRepr;
pub const floatExponentBits = float.floatExponentBits;
pub const floatMantissaBits = float.floatMantissaBits;
@ -594,27 +595,30 @@ pub fn shlExact(comptime T: type, a: T, shift_amt: Log2Int(T)) !T {
/// Shifts left. Overflowed bits are truncated.
/// A negative shift amount results in a right shift.
pub fn shl(comptime T: type, a: T, shift_amt: anytype) T {
const is_shl = shift_amt >= 0;
const abs_shift_amt = @abs(shift_amt);
const casted_shift_amt = blk: {
if (@typeInfo(T) == .vector) {
const C = @typeInfo(T).vector.child;
const len = @typeInfo(T).vector.len;
if (abs_shift_amt >= @typeInfo(C).int.bits) return @splat(0);
break :blk @as(@Vector(len, Log2Int(C)), @splat(@as(Log2Int(C), @intCast(abs_shift_amt))));
} else {
if (abs_shift_amt >= @typeInfo(T).int.bits) return 0;
break :blk @as(Log2Int(T), @intCast(abs_shift_amt));
}
const casted_shift_amt = casted_shift_amt: switch (@typeInfo(T)) {
.int => |info| {
if (abs_shift_amt < info.bits) break :casted_shift_amt @as(
Log2Int(T),
@intCast(abs_shift_amt),
);
if (info.signedness == .unsigned or is_shl) return 0;
return a >> (info.bits - 1);
},
.vector => |info| {
const Child = info.child;
const child_info = @typeInfo(Child).int;
if (abs_shift_amt < child_info.bits) break :casted_shift_amt @as(
@Vector(info.len, Log2Int(Child)),
@splat(@as(Log2Int(Child), @intCast(abs_shift_amt))),
);
if (child_info.signedness == .unsigned or is_shl) return @splat(0);
return a >> @splat(child_info.bits - 1);
},
else => comptime unreachable,
};
if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).int.signedness == .signed) {
if (shift_amt < 0) {
return a >> casted_shift_amt;
}
}
return a << casted_shift_amt;
return if (is_shl) a << casted_shift_amt else a >> casted_shift_amt;
}
test shl {
@ -629,32 +633,40 @@ test shl {
try testing.expect(shl(@Vector(1, u32), @Vector(1, u32){42}, @as(usize, 1))[0] == @as(u32, 42) << 1);
try testing.expect(shl(@Vector(1, u32), @Vector(1, u32){42}, @as(isize, -1))[0] == @as(u32, 42) >> 1);
try testing.expect(shl(@Vector(1, u32), @Vector(1, u32){42}, 33)[0] == 0);
try testing.expect(shl(i8, -1, -100) == -1);
try testing.expect(shl(i8, -1, 100) == 0);
try testing.expect(@reduce(.And, shl(@Vector(2, i8), .{ -1, 1 }, -100) == @Vector(2, i8){ -1, 0 }));
try testing.expect(@reduce(.And, shl(@Vector(2, i8), .{ -1, 1 }, 100) == @Vector(2, i8){ 0, 0 }));
}
/// Shifts right. Overflowed bits are truncated.
/// A negative shift amount results in a left shift.
pub fn shr(comptime T: type, a: T, shift_amt: anytype) T {
const is_shl = shift_amt < 0;
const abs_shift_amt = @abs(shift_amt);
const casted_shift_amt = blk: {
if (@typeInfo(T) == .vector) {
const C = @typeInfo(T).vector.child;
const len = @typeInfo(T).vector.len;
if (abs_shift_amt >= @typeInfo(C).int.bits) return @splat(0);
break :blk @as(@Vector(len, Log2Int(C)), @splat(@as(Log2Int(C), @intCast(abs_shift_amt))));
} else {
if (abs_shift_amt >= @typeInfo(T).int.bits) return 0;
break :blk @as(Log2Int(T), @intCast(abs_shift_amt));
}
const casted_shift_amt = casted_shift_amt: switch (@typeInfo(T)) {
.int => |info| {
if (abs_shift_amt < info.bits) break :casted_shift_amt @as(
Log2Int(T),
@intCast(abs_shift_amt),
);
if (info.signedness == .unsigned or is_shl) return 0;
return a >> (info.bits - 1);
},
.vector => |info| {
const Child = info.child;
const child_info = @typeInfo(Child).int;
if (abs_shift_amt < child_info.bits) break :casted_shift_amt @as(
@Vector(info.len, Log2Int(Child)),
@splat(@as(Log2Int(Child), @intCast(abs_shift_amt))),
);
if (child_info.signedness == .unsigned or is_shl) return @splat(0);
return a >> @splat(child_info.bits - 1);
},
else => comptime unreachable,
};
if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).int.signedness == .signed) {
if (shift_amt < 0) {
return a << casted_shift_amt;
}
}
return a >> casted_shift_amt;
return if (is_shl) a << casted_shift_amt else a >> casted_shift_amt;
}
test shr {
@ -669,6 +681,11 @@ test shr {
try testing.expect(shr(@Vector(1, u32), @Vector(1, u32){42}, @as(usize, 1))[0] == @as(u32, 42) >> 1);
try testing.expect(shr(@Vector(1, u32), @Vector(1, u32){42}, @as(isize, -1))[0] == @as(u32, 42) << 1);
try testing.expect(shr(@Vector(1, u32), @Vector(1, u32){42}, 33)[0] == 0);
try testing.expect(shr(i8, -1, -100) == 0);
try testing.expect(shr(i8, -1, 100) == -1);
try testing.expect(@reduce(.And, shr(@Vector(2, i8), .{ -1, 1 }, -100) == @Vector(2, i8){ 0, 0 }));
try testing.expect(@reduce(.And, shr(@Vector(2, i8), .{ -1, 1 }, 100) == @Vector(2, i8){ -1, 0 }));
}
/// Rotates right. Only unsigned values can be rotated. Negative shift

View File

@ -2774,7 +2774,6 @@ test "bitNotWrap more than two limbs" {
// This test requires int sizes greater than 128 bits.
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
// LLVM: unexpected runtime library name: __umodei4
if (builtin.zig_backend == .stage2_llvm and comptime builtin.target.cpu.arch.isWasm()) return error.SkipZigTest; // TODO

View File

@ -4,8 +4,6 @@ const assert = std.debug.assert;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
pub const Sign = enum(u1) { positive, negative };
pub fn FloatRepr(comptime Float: type) type {
const fractional_bits = floatFractionalBits(Float);
const exponent_bits = floatExponentBits(Float);
@ -14,7 +12,7 @@ pub fn FloatRepr(comptime Float: type) type {
mantissa: StoredMantissa,
exponent: BiasedExponent,
sign: Sign,
sign: std.math.Sign,
pub const StoredMantissa = @Type(.{ .int = .{
.signedness = .unsigned,
@ -69,7 +67,7 @@ pub fn FloatRepr(comptime Float: type) type {
/// This currently truncates denormal values, which needs to be fixed before this can be used to
/// produce a rounded value.
pub fn reconstruct(normalized: Normalized, sign: Sign) Float {
pub fn reconstruct(normalized: Normalized, sign: std.math.Sign) Float {
if (normalized.exponent > BiasedExponent.max_normal.unbias()) return @bitCast(Repr{
.mantissa = 0,
.exponent = .infinite,

View File

@ -132,7 +132,6 @@ inline fn less_than_5(x: u32) u32 {
test log10_int {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm and comptime builtin.target.cpu.arch.isWasm()) return error.SkipZigTest; // TODO

View File

@ -676,6 +676,7 @@ test lessThan {
const eqlBytes_allowed = switch (builtin.zig_backend) {
// These backends don't support vectors yet.
.stage2_aarch64,
.stage2_powerpc,
.stage2_riscv64,
=> false,
@ -4482,7 +4483,7 @@ pub fn doNotOptimizeAway(val: anytype) void {
);
asm volatile (""
:
: [val2] "r" (val2),
: [_] "r" (val2),
);
} else doNotOptimizeAway(&val);
},
@ -4490,7 +4491,7 @@ pub fn doNotOptimizeAway(val: anytype) void {
if ((t.float.bits == 32 or t.float.bits == 64) and builtin.zig_backend != .stage2_c) {
asm volatile (""
:
: [val] "rm" (val),
: [_] "rm" (val),
);
} else doNotOptimizeAway(&val);
},
@ -4500,7 +4501,7 @@ pub fn doNotOptimizeAway(val: anytype) void {
} else {
asm volatile (""
:
: [val] "m" (val),
: [_] "m" (val),
: .{ .memory = true });
}
},
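
The empty templates never reference these inputs by name, so the operands can be bound anonymously. A sketch of the `[_]` placeholder form:

fn keepAlive(x: u32) void {
    // `[_]` binds an input that the template text never names.
    asm volatile (""
        :
        : [_] "r" (x),
    );
}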

View File

@ -503,7 +503,6 @@ pub var elf_aux_maybe: ?[*]std.elf.Auxv = null;
/// Whether an external or internal getauxval implementation is used.
const extern_getauxval = switch (builtin.zig_backend) {
// Calling extern functions is not yet supported with these backends
.stage2_aarch64,
.stage2_arm,
.stage2_powerpc,
.stage2_riscv64,

View File

@ -101,17 +101,11 @@ comptime {
// Simplified start code for stage2 until it supports more language features ///
fn main2() callconv(.c) c_int {
root.main();
return 0;
return callMain();
}
fn _start2() callconv(.withStackAlign(.c, 1)) noreturn {
callMain2();
}
fn callMain2() noreturn {
root.main();
exit2(0);
std.posix.exit(callMain());
}
fn spirvMain2() callconv(.kernel) void {
@ -119,51 +113,7 @@ fn spirvMain2() callconv(.kernel) void {
}
fn wWinMainCRTStartup2() callconv(.c) noreturn {
root.main();
exit2(0);
}
fn exit2(code: usize) noreturn {
switch (native_os) {
.linux => switch (builtin.cpu.arch) {
.x86_64 => {
asm volatile ("syscall"
:
: [number] "{rax}" (231),
[arg1] "{rdi}" (code),
: .{ .rcx = true, .r11 = true, .memory = true });
},
.arm => {
asm volatile ("svc #0"
:
: [number] "{r7}" (1),
[arg1] "{r0}" (code),
: .{ .memory = true });
},
.aarch64 => {
asm volatile ("svc #0"
:
: [number] "{x8}" (93),
[arg1] "{x0}" (code),
: .{ .memory = true });
},
.sparc64 => {
asm volatile ("ta 0x6d"
:
: [number] "{g1}" (1),
[arg1] "{o0}" (code),
: .{ .o0 = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o6 = true, .o7 = true, .memory = true });
},
else => @compileError("TODO"),
},
// exits(0)
.plan9 => std.os.plan9.exits(null),
.windows => {
std.os.windows.ntdll.RtlExitUserProcess(@truncate(code));
},
else => @compileError("TODO"),
}
unreachable;
std.posix.exit(callMain());
}
////////////////////////////////////////////////////////////////////////////////
@ -676,10 +626,11 @@ pub inline fn callMain() u8 {
const result = root.main() catch |err| {
switch (builtin.zig_backend) {
.stage2_aarch64,
.stage2_powerpc,
.stage2_riscv64,
=> {
std.debug.print("error: failed with error\n", .{});
_ = std.posix.write(std.posix.STDERR_FILENO, "error: failed with error\n") catch {};
return 1;
},
else => {},

View File

@ -33,6 +33,7 @@ pub var log_level = std.log.Level.warn;
// Disable printing in tests for simple backends.
pub const backend_can_print = switch (builtin.zig_backend) {
.stage2_aarch64,
.stage2_powerpc,
.stage2_riscv64,
.stage2_spirv,

View File

@ -1850,7 +1850,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
// approach, since the ubsan runtime uses quite a lot of the standard library
// and this reduces unnecessary bloat.
const ubsan_rt_strat: RtStrat = s: {
const can_build_ubsan_rt = target_util.canBuildLibUbsanRt(target);
const can_build_ubsan_rt = target_util.canBuildLibUbsanRt(target, use_llvm, build_options.have_llvm);
const want_ubsan_rt = options.want_ubsan_rt orelse (can_build_ubsan_rt and any_sanitize_c == .full and is_exe_or_dyn_lib);
if (!want_ubsan_rt) break :s .none;
if (options.skip_linker_dependencies) break :s .none;

View File

@ -7556,12 +7556,18 @@ fn extraFuncCoerced(ip: *const InternPool, extra: Local.Extra, extra_index: u32)
fn indexToKeyBigInt(ip: *const InternPool, tid: Zcu.PerThread.Id, limb_index: u32, positive: bool) Key {
const limbs_items = ip.getLocalShared(tid).getLimbs().view().items(.@"0");
const int: Int = @bitCast(limbs_items[limb_index..][0..Int.limbs_items_len].*);
const big_int: BigIntConst = .{
.limbs = limbs_items[limb_index + Int.limbs_items_len ..][0..int.limbs_len],
.positive = positive,
};
return .{ .int = .{
.ty = int.ty,
.storage = .{ .big_int = .{
.limbs = limbs_items[limb_index + Int.limbs_items_len ..][0..int.limbs_len],
.positive = positive,
} },
.storage = if (big_int.toInt(u64)) |x|
.{ .u64 = x }
else |_| if (big_int.toInt(i64)) |x|
.{ .i64 = x }
else |_|
.{ .big_int = big_int },
} };
}
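
Interned integers are now canonicalized to the compact `u64`/`i64` storage whenever the value fits, falling back to big-int storage only when it does not. A self-contained sketch of the same cascade using `std.math.big` (illustrative values):

const std = @import("std");

test "prefer compact int storage" {
    var limbs: [4]std.math.big.Limb = undefined;
    const big = std.math.big.int.Mutable.init(&limbs, -12345).toConst();
    if (big.toInt(u64)) |x| {
        _ = x; // taken for values in [0, maxInt(u64)]
    } else |_| if (big.toInt(i64)) |x| {
        try std.testing.expectEqual(@as(i64, -12345), x);
    } else |_| {
        unreachable; // -12345 fits in i64
    }
}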

View File

@ -16522,7 +16522,7 @@ fn zirAsm(
break :empty try sema.structInitEmpty(block, clobbers_ty, src, src);
} else try sema.resolveInst(extra.data.clobbers); // Already coerced by AstGen.
const clobbers_val = try sema.resolveConstDefinedValue(block, src, clobbers, .{ .simple = .clobber });
needed_capacity += (asm_source.len + 3) / 4;
needed_capacity += asm_source.len / 4 + 1;
const gpa = sema.gpa;
try sema.air_extra.ensureUnusedCapacity(gpa, needed_capacity);
@ -16562,7 +16562,8 @@ fn zirAsm(
{
const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
@memcpy(buffer[0..asm_source.len], asm_source);
sema.air_extra.items.len += (asm_source.len + 3) / 4;
buffer[asm_source.len] = 0;
sema.air_extra.items.len += asm_source.len / 4 + 1;
}
return asm_air;
}
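
The capacity formula changed because a NUL terminator is now stored after the asm source, and the old ceiling division left no spare byte whenever the length was a multiple of four. A quick check of both formulas (illustrative only):

const std = @import("std");

comptime {
    const len = 8; // a length that is a multiple of 4
    // old: (8 + 3) / 4 == 2 words ==  8 bytes; exactly full, no room for the 0
    std.debug.assert((len + 3) / 4 * 4 == 8);
    // new:  8 / 4 + 1  == 3 words == 12 bytes; byte 8 holds the terminator
    std.debug.assert((len / 4 + 1) * 4 == 12);
}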
@ -24846,7 +24847,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
},
.@"packed" => {
const byte_offset = std.math.divExact(u32, @abs(@as(i32, actual_parent_ptr_info.packed_offset.bit_offset) +
(if (zcu.typeToStruct(parent_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, field_index) else 0) -
(if (zcu.typeToStruct(parent_ty)) |struct_obj| zcu.structPackedFieldBitOffset(struct_obj, field_index) else 0) -
actual_field_ptr_info.packed_offset.bit_offset), 8) catch
return sema.fail(block, inst_src, "pointer bit-offset mismatch", .{});
actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(if (byte_offset > 0)
@ -24873,7 +24874,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
// Logic lifted from type computation above - I'm just assuming it's correct.
// `catch unreachable` since error case handled above.
const byte_offset = std.math.divExact(u32, @abs(@as(i32, actual_parent_ptr_info.packed_offset.bit_offset) +
pt.structPackedFieldBitOffset(zcu.typeToStruct(parent_ty).?, field_index) -
zcu.structPackedFieldBitOffset(zcu.typeToStruct(parent_ty).?, field_index) -
actual_field_ptr_info.packed_offset.bit_offset), 8) catch unreachable;
const parent_ptr_val = try sema.ptrSubtract(block, field_ptr_src, field_ptr_val, byte_offset, actual_parent_ptr_ty);
break :result Air.internedToRef(parent_ptr_val.toIntern());

View File

@ -4166,7 +4166,7 @@ pub const generic_poison: Type = .{ .ip_index = .generic_poison_type };
pub fn smallestUnsignedBits(max: u64) u16 {
return switch (max) {
0 => 0,
else => 1 + std.math.log2_int(u64, max),
else => @as(u16, 1) + std.math.log2_int(u64, max),
};
}
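
The widening matters because `log2_int(u64, max)` returns a `u6`, so the bare `1 + ...` would be evaluated in `u6` and overflow for `max = maxInt(u64)`, where the answer is 64:

const std = @import("std");

comptime {
    std.debug.assert(std.math.Log2Int(u64) == u6); // result type spans only 0..63
    // With the literal widened first, the addition happens in u16:
    std.debug.assert(@as(u16, 1) + std.math.log2_int(u64, std.math.maxInt(u64)) == 64);
}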

View File

@ -3891,6 +3891,29 @@ pub fn typeToPackedStruct(zcu: *const Zcu, ty: Type) ?InternPool.LoadedStructTyp
return s;
}
/// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
/// into the packed struct InternPool data rather than computing this on the
/// fly, however it was found to perform worse when measured on real world
/// projects.
pub fn structPackedFieldBitOffset(
zcu: *Zcu,
struct_type: InternPool.LoadedStructType,
field_index: u32,
) u16 {
const ip = &zcu.intern_pool;
assert(struct_type.layout == .@"packed");
assert(struct_type.haveLayout(ip));
var bit_sum: u64 = 0;
for (0..struct_type.field_types.len) |i| {
if (i == field_index) {
return @intCast(bit_sum);
}
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
bit_sum += field_ty.bitSize(zcu);
}
unreachable; // index out of bounds
}
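
What the function computes is the LSB-first running sum of the preceding fields' bit sizes, the same quantity `@bitOffsetOf` reports for an ordinary packed struct (hypothetical type for illustration):

const std = @import("std");

const P = packed struct { a: u3, b: u7, c: u6 };

comptime {
    // c's bit offset is @bitSizeOf(u3) + @bitSizeOf(u7) == 10.
    std.debug.assert(@bitOffsetOf(P, "c") == 10);
}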
pub fn typeToUnion(zcu: *const Zcu, ty: Type) ?InternPool.LoadedUnionType {
if (ty.ip_index == .none) return null;
const ip = &zcu.intern_pool;
@ -4436,11 +4459,7 @@ pub fn callconvSupported(zcu: *Zcu, cc: std.builtin.CallingConvention) union(enu
else => false,
},
.stage2_aarch64 => switch (cc) {
.aarch64_aapcs,
.aarch64_aapcs_darwin,
.aarch64_aapcs_win,
=> |opts| opts.incoming_stack_alignment == null,
.naked => true,
.aarch64_aapcs, .aarch64_aapcs_darwin, .naked => true,
else => false,
},
.stage2_x86 => switch (cc) {

View File

@ -3737,30 +3737,6 @@ pub fn intBitsForValue(pt: Zcu.PerThread, val: Value, sign: bool) u16 {
}
}
/// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
/// into the packed struct InternPool data rather than computing this on the
/// fly, however it was found to perform worse when measured on real world
/// projects.
pub fn structPackedFieldBitOffset(
pt: Zcu.PerThread,
struct_type: InternPool.LoadedStructType,
field_index: u32,
) u16 {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
assert(struct_type.layout == .@"packed");
assert(struct_type.haveLayout(ip));
var bit_sum: u64 = 0;
for (0..struct_type.field_types.len) |i| {
if (i == field_index) {
return @intCast(bit_sum);
}
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
bit_sum += field_ty.bitSize(zcu);
}
unreachable; // index out of bounds
}
pub fn navPtrType(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Allocator.Error!Type {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
@ -4381,8 +4357,11 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
try air.legalize(pt, features);
}
var liveness: Air.Liveness = try .analyze(zcu, air.*, ip);
defer liveness.deinit(gpa);
var liveness: ?Air.Liveness = if (codegen.wantsLiveness(pt, nav))
try .analyze(zcu, air.*, ip)
else
null;
defer if (liveness) |*l| l.deinit(gpa);
if (build_options.enable_debug_extensions and comp.verbose_air) {
const stderr = std.debug.lockStderrWriter(&.{});
@ -4392,12 +4371,12 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
stderr.print("# End Function AIR: {f}\n\n", .{fqn.fmt(ip)}) catch {};
}
if (std.debug.runtime_safety) {
if (std.debug.runtime_safety) verify_liveness: {
var verify: Air.Liveness.Verify = .{
.gpa = gpa,
.zcu = zcu,
.air = air.*,
.liveness = liveness,
.liveness = liveness orelse break :verify_liveness,
.intern_pool = ip,
};
defer verify.deinit();

File diff suppressed because it is too large.

View File

@ -744,7 +744,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
liveness: *const Air.Liveness,
liveness: *const ?Air.Liveness,
) CodeGenError!Mir {
const zcu = pt.zcu;
const gpa = zcu.gpa;
@ -767,7 +767,7 @@ pub fn generate(
.pt = pt,
.mod = mod,
.bin_file = bin_file,
.liveness = liveness.*,
.liveness = liveness.*.?,
.target = &mod.resolved_target.result,
.owner = .{ .nav_index = func.owner_nav },
.args = undefined, // populated after `resolveCallingConventionValues`
@ -4584,7 +4584,7 @@ fn structFieldPtr(func: *Func, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
const field_offset: i32 = switch (container_ty.containerLayout(zcu)) {
.auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, zcu)),
.@"packed" => @divExact(@as(i32, ptr_container_ty.ptrInfo(zcu).packed_offset.bit_offset) +
(if (zcu.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, index) else 0) -
(if (zcu.typeToStruct(container_ty)) |struct_obj| zcu.structPackedFieldBitOffset(struct_obj, index) else 0) -
ptr_field_ty.ptrInfo(zcu).packed_offset.bit_offset, 8),
};
@ -4615,7 +4615,7 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void {
const field_off: u32 = switch (struct_ty.containerLayout(zcu)) {
.auto, .@"extern" => @intCast(struct_ty.structFieldOffset(index, zcu) * 8),
.@"packed" => if (zcu.typeToStruct(struct_ty)) |struct_type|
pt.structPackedFieldBitOffset(struct_type, index)
zcu.structPackedFieldBitOffset(struct_type, index)
else
0,
};
@ -8059,7 +8059,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
const elem_abi_bits = elem_abi_size * 8;
const elem_off = pt.structPackedFieldBitOffset(struct_obj, elem_i);
const elem_off = zcu.structPackedFieldBitOffset(struct_obj, elem_i);
const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
const elem_bit_off = elem_off % elem_abi_bits;
const elem_mcv = try func.resolveInst(elem);

View File

@ -267,7 +267,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
liveness: *const Air.Liveness,
liveness: *const ?Air.Liveness,
) CodeGenError!Mir {
const zcu = pt.zcu;
const gpa = zcu.gpa;
@ -288,7 +288,7 @@ pub fn generate(
.gpa = gpa,
.pt = pt,
.air = air.*,
.liveness = liveness.*,
.liveness = liveness.*.?,
.target = target,
.bin_file = lf,
.func_index = func_index,

View File

@ -1173,7 +1173,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
liveness: *const Air.Liveness,
liveness: *const ?Air.Liveness,
) Error!Mir {
_ = src_loc;
_ = bin_file;
@ -1194,7 +1194,7 @@ pub fn generate(
.gpa = gpa,
.pt = pt,
.air = air.*,
.liveness = liveness.*,
.liveness = liveness.*.?,
.owner_nav = cg.owner_nav,
.target = target,
.ptr_size = switch (target.cpu.arch) {
@ -3776,7 +3776,7 @@ fn structFieldPtr(
break :offset @as(u32, 0);
}
const struct_type = zcu.typeToStruct(struct_ty).?;
break :offset @divExact(pt.structPackedFieldBitOffset(struct_type, index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
break :offset @divExact(zcu.structPackedFieldBitOffset(struct_type, index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
},
.@"union" => 0,
else => unreachable,
@ -3812,7 +3812,7 @@ fn airStructFieldVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.@"packed" => switch (struct_ty.zigTypeTag(zcu)) {
.@"struct" => result: {
const packed_struct = zcu.typeToPackedStruct(struct_ty).?;
const offset = pt.structPackedFieldBitOffset(packed_struct, field_index);
const offset = zcu.structPackedFieldBitOffset(packed_struct, field_index);
const backing_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
const host_bits = backing_ty.intInfo(zcu).bits;
@ -5696,7 +5696,7 @@ fn airFieldParentPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.auto, .@"extern" => parent_ty.structFieldOffset(field_index, zcu),
.@"packed" => offset: {
const parent_ptr_offset = parent_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset;
const field_offset = if (zcu.typeToStruct(parent_ty)) |loaded_struct| pt.structPackedFieldBitOffset(loaded_struct, field_index) else 0;
const field_offset = if (zcu.typeToStruct(parent_ty)) |loaded_struct| zcu.structPackedFieldBitOffset(loaded_struct, field_index) else 0;
const field_ptr_offset = field_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset;
break :offset @divExact(parent_ptr_offset + field_offset - field_ptr_offset, 8);
},

View File

@ -878,7 +878,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
liveness: *const Air.Liveness,
liveness: *const ?Air.Liveness,
) codegen.CodeGenError!Mir {
_ = bin_file;
const zcu = pt.zcu;
@ -894,7 +894,7 @@ pub fn generate(
.gpa = gpa,
.pt = pt,
.air = air.*,
.liveness = liveness.*,
.liveness = liveness.*.?,
.target = &mod.resolved_target.result,
.mod = mod,
.owner = .{ .nav_index = func.owner_nav },
@ -100674,11 +100674,12 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const struct_field = cg.air.extraData(Air.StructField, ty_pl.payload).data;
var ops = try cg.tempsFromOperands(inst, .{struct_field.struct_operand});
try ops[0].toOffset(cg.fieldOffset(
try ops[0].toOffset(@intCast(codegen.fieldOffset(
cg.typeOf(struct_field.struct_operand),
ty_pl.ty.toType(),
struct_field.field_index,
), cg);
zcu,
)), cg);
try ops[0].finish(inst, &.{struct_field.struct_operand}, &ops, cg);
},
.struct_field_ptr_index_0,
@ -100688,7 +100689,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
=> |air_tag| {
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
try ops[0].toOffset(cg.fieldOffset(
try ops[0].toOffset(@intCast(codegen.fieldOffset(
cg.typeOf(ty_op.operand),
ty_op.ty.toType(),
switch (air_tag) {
@ -100698,7 +100699,8 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.struct_field_ptr_index_2 => 2,
.struct_field_ptr_index_3 => 3,
},
), cg);
zcu,
)), cg);
try ops[0].finish(inst, &.{ty_op.operand}, &ops, cg);
},
.struct_field_val => {
@ -168108,11 +168110,12 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const field_parent_ptr = cg.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
var ops = try cg.tempsFromOperands(inst, .{field_parent_ptr.field_ptr});
try ops[0].toOffset(-cg.fieldOffset(
try ops[0].toOffset(-@as(i32, @intCast(codegen.fieldOffset(
ty_pl.ty.toType(),
cg.typeOf(field_parent_ptr.field_ptr),
field_parent_ptr.field_index,
), cg);
zcu,
))), cg);
try ops[0].finish(inst, &.{field_parent_ptr.field_ptr}, &ops, cg);
},
.wasm_memory_size, .wasm_memory_grow => unreachable,
@ -174809,18 +174812,6 @@ fn airStore(self: *CodeGen, inst: Air.Inst.Index, safety: bool) !void {
return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn fieldOffset(self: *CodeGen, ptr_agg_ty: Type, ptr_field_ty: Type, field_index: u32) i32 {
const pt = self.pt;
const zcu = pt.zcu;
const agg_ty = ptr_agg_ty.childType(zcu);
return switch (agg_ty.containerLayout(zcu)) {
.auto, .@"extern" => @intCast(agg_ty.structFieldOffset(field_index, zcu)),
.@"packed" => @divExact(@as(i32, ptr_agg_ty.ptrInfo(zcu).packed_offset.bit_offset) +
(if (zcu.typeToStruct(agg_ty)) |loaded_struct| pt.structPackedFieldBitOffset(loaded_struct, field_index) else 0) -
ptr_field_ty.ptrInfo(zcu).packed_offset.bit_offset, 8),
};
}
fn genUnOp(self: *CodeGen, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue {
const pt = self.pt;
const zcu = pt.zcu;
@ -184575,7 +184566,7 @@ fn airAggregateInit(self: *CodeGen, inst: Air.Inst.Index) !void {
}
const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
const elem_abi_bits = elem_abi_size * 8;
const elem_off = pt.structPackedFieldBitOffset(loaded_struct, elem_i);
const elem_off = zcu.structPackedFieldBitOffset(loaded_struct, elem_i);
const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
const elem_bit_off = elem_off % elem_abi_bits;
const elem_mcv = try self.resolveInst(elem);
@ -185625,21 +185616,19 @@ fn resolveCallingConventionValues(
fn fail(cg: *CodeGen, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
const zcu = cg.pt.zcu;
switch (cg.owner) {
.nav_index => |i| return zcu.codegenFail(i, format, args),
.lazy_sym => |s| return zcu.codegenFailType(s.ty, format, args),
}
return error.CodegenFail;
return switch (cg.owner) {
.nav_index => |i| zcu.codegenFail(i, format, args),
.lazy_sym => |s| zcu.codegenFailType(s.ty, format, args),
};
}
fn failMsg(cg: *CodeGen, msg: *Zcu.ErrorMsg) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
const zcu = cg.pt.zcu;
switch (cg.owner) {
.nav_index => |i| return zcu.codegenFailMsg(i, msg),
.lazy_sym => |s| return zcu.codegenFailTypeMsg(s.ty, msg),
}
return error.CodegenFail;
return switch (cg.owner) {
.nav_index => |i| zcu.codegenFailMsg(i, msg),
.lazy_sym => |s| zcu.codegenFailTypeMsg(s.ty, msg),
};
}
fn parseRegName(name: []const u8) ?Register {

View File

@ -22,6 +22,8 @@ const Zir = std.zig.Zir;
const Alignment = InternPool.Alignment;
const dev = @import("dev.zig");
pub const aarch64 = @import("codegen/aarch64.zig");
pub const CodeGenError = GenerateSymbolError || error{
/// Indicates the error is already stored in Zcu `failed_codegen`.
CodegenFail,
@ -48,7 +50,7 @@ fn devFeatureForBackend(backend: std.builtin.CompilerBackend) dev.Feature {
fn importBackend(comptime backend: std.builtin.CompilerBackend) type {
return switch (backend) {
.other, .stage1 => unreachable,
.stage2_aarch64 => unreachable,
.stage2_aarch64 => aarch64,
.stage2_arm => unreachable,
.stage2_c => @import("codegen/c.zig"),
.stage2_llvm => @import("codegen/llvm.zig"),
@ -71,6 +73,7 @@ pub fn legalizeFeatures(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) ?*co
.stage2_c,
.stage2_wasm,
.stage2_x86_64,
.stage2_aarch64,
.stage2_x86,
.stage2_riscv64,
.stage2_sparc64,
@ -82,10 +85,20 @@ pub fn legalizeFeatures(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) ?*co
}
}
pub fn wantsLiveness(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) bool {
const zcu = pt.zcu;
const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
return switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
else => true,
.stage2_aarch64 => false,
};
}
/// Every code generation backend has a different MIR representation. However, we want to pass
/// MIR from codegen to the linker *regardless* of which backend is in use. So, we use this: a
/// union of all MIR types. The active tag is known from the backend in use; see `AnyMir.tag`.
pub const AnyMir = union {
aarch64: @import("codegen/aarch64/Mir.zig"),
riscv64: @import("arch/riscv64/Mir.zig"),
sparc64: @import("arch/sparc64/Mir.zig"),
x86_64: @import("arch/x86_64/Mir.zig"),
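
The same idea in miniature: a bare union stores no runtime tag, and the active field is known from external context (hypothetical types):

const std = @import("std");

const AnyNum = union { int: i64, float: f64 };

test "externally discriminated union" {
    const n: AnyNum = .{ .int = 42 };
    // The caller knows `.int` is active; no tag is consulted at runtime.
    try std.testing.expectEqual(@as(i64, 42), n.int);
}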
@ -95,7 +108,6 @@ pub const AnyMir = union {
pub inline fn tag(comptime backend: std.builtin.CompilerBackend) []const u8 {
return switch (backend) {
.stage2_aarch64 => "aarch64",
.stage2_arm => "arm",
.stage2_riscv64 => "riscv64",
.stage2_sparc64 => "sparc64",
.stage2_x86_64 => "x86_64",
@ -110,7 +122,8 @@ pub const AnyMir = union {
const backend = target_util.zigBackend(&zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
switch (backend) {
else => unreachable,
inline .stage2_riscv64,
inline .stage2_aarch64,
.stage2_riscv64,
.stage2_sparc64,
.stage2_x86_64,
.stage2_wasm,
@ -131,14 +144,15 @@ pub fn generateFunction(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
liveness: *const Air.Liveness,
liveness: *const ?Air.Liveness,
) CodeGenError!AnyMir {
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
const target = &zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
switch (target_util.zigBackend(target, false)) {
else => unreachable,
inline .stage2_riscv64,
inline .stage2_aarch64,
.stage2_riscv64,
.stage2_sparc64,
.stage2_x86_64,
.stage2_wasm,
@ -173,7 +187,8 @@ pub fn emitFunction(
const target = &zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
else => unreachable,
inline .stage2_riscv64,
inline .stage2_aarch64,
.stage2_riscv64,
.stage2_sparc64,
.stage2_x86_64,
=> |backend| {
@ -420,7 +435,7 @@ pub fn generateSymbol(
const int_tag_ty = ty.intTagType(zcu);
try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, reloc_parent);
},
.float => |float| switch (float.storage) {
.float => |float| storage: switch (float.storage) {
.f16 => |f16_val| writeFloat(f16, f16_val, target, endian, try code.addManyAsArray(gpa, 2)),
.f32 => |f32_val| writeFloat(f32, f32_val, target, endian, try code.addManyAsArray(gpa, 4)),
.f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(gpa, 8)),
@ -429,7 +444,13 @@ pub fn generateSymbol(
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
try code.appendNTimes(gpa, 0, abi_size - 10);
},
.f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(gpa, 16)),
.f128 => |f128_val| switch (Type.fromInterned(float.ty).floatBits(target)) {
else => unreachable,
16 => continue :storage .{ .f16 = @floatCast(f128_val) },
32 => continue :storage .{ .f32 = @floatCast(f128_val) },
64 => continue :storage .{ .f64 = @floatCast(f128_val) },
128 => writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(gpa, 16)),
},
},
.ptr => try lowerPtr(bin_file, pt, src_loc, val.toIntern(), code, reloc_parent, 0),
.slice => |slice| {
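
The `f128` case above re-dispatches by continuing the labeled switch with a new operand. A tiny standalone sketch of that construct:

fn parity(x: u32) []const u8 {
    sw: switch (x) {
        0 => return "even",
        1 => return "odd",
        else => continue :sw x % 2, // re-enter the switch with a reduced value
    }
}

test parity {
    try @import("std").testing.expectEqualStrings("even", parity(10));
}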
@ -1218,3 +1239,17 @@ pub fn errUnionErrorOffset(payload_ty: Type, zcu: *Zcu) u64 {
return 0;
}
}
pub fn fieldOffset(ptr_agg_ty: Type, ptr_field_ty: Type, field_index: u32, zcu: *Zcu) u64 {
const agg_ty = ptr_agg_ty.childType(zcu);
return switch (agg_ty.containerLayout(zcu)) {
.auto, .@"extern" => agg_ty.structFieldOffset(field_index, zcu),
.@"packed" => @divExact(@as(u64, ptr_agg_ty.ptrInfo(zcu).packed_offset.bit_offset) +
(if (zcu.typeToPackedStruct(agg_ty)) |loaded_struct| zcu.structPackedFieldBitOffset(loaded_struct, field_index) else 0) -
ptr_field_ty.ptrInfo(zcu).packed_offset.bit_offset, 8),
};
}
test {
_ = aarch64;
}

src/codegen/aarch64.zig (new file, 194 lines)
View File

@ -0,0 +1,194 @@
pub const abi = @import("aarch64/abi.zig");
pub const Assemble = @import("aarch64/Assemble.zig");
pub const Disassemble = @import("aarch64/Disassemble.zig");
pub const encoding = @import("aarch64/encoding.zig");
pub const Mir = @import("aarch64/Mir.zig");
pub const Select = @import("aarch64/Select.zig");
pub fn legalizeFeatures(_: *const std.Target) ?*Air.Legalize.Features {
return null;
}
pub fn generate(
_: *link.File,
pt: Zcu.PerThread,
_: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
liveness: *const ?Air.Liveness,
) !Mir {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
const func_type = zcu.intern_pool.indexToKey(func.ty).func_type;
assert(liveness.* == null);
const mod = zcu.navFileScope(func.owner_nav).mod.?;
var isel: Select = .{
.pt = pt,
.target = &mod.resolved_target.result,
.air = air.*,
.nav_index = zcu.funcInfo(func_index).owner_nav,
.def_order = .empty,
.blocks = .empty,
.loops = .empty,
.active_loops = .empty,
.loop_live = .{
.set = .empty,
.list = .empty,
},
.dom_start = 0,
.dom_len = 0,
.dom = .empty,
.saved_registers = comptime .initEmpty(),
.instructions = .empty,
.literals = .empty,
.nav_relocs = .empty,
.uav_relocs = .empty,
.global_relocs = .empty,
.literal_relocs = .empty,
.returns = false,
.va_list = undefined,
.stack_size = 0,
.stack_align = .@"16",
.live_registers = comptime .initFill(.free),
.live_values = .empty,
.values = .empty,
};
defer isel.deinit();
const air_main_body = air.getMainBody();
var param_it: Select.CallAbiIterator = .init;
const air_args = for (air_main_body, 0..) |air_inst_index, body_index| {
if (air.instructions.items(.tag)[@intFromEnum(air_inst_index)] != .arg) break air_main_body[0..body_index];
const param_ty = air.instructions.items(.data)[@intFromEnum(air_inst_index)].arg.ty.toType();
const param_vi = try param_it.param(&isel, param_ty);
tracking_log.debug("${d} <- %{d}", .{ @intFromEnum(param_vi.?), @intFromEnum(air_inst_index) });
try isel.live_values.putNoClobber(gpa, air_inst_index, param_vi.?);
} else unreachable;
const saved_gra_start = if (mod.strip) param_it.ngrn else Select.CallAbiIterator.ngrn_start;
const saved_gra_end = if (func_type.is_var_args) Select.CallAbiIterator.ngrn_end else param_it.ngrn;
const saved_gra_len = @intFromEnum(saved_gra_end) - @intFromEnum(saved_gra_start);
const saved_vra_start = if (mod.strip) param_it.nsrn else Select.CallAbiIterator.nsrn_start;
const saved_vra_end = if (func_type.is_var_args) Select.CallAbiIterator.nsrn_end else param_it.nsrn;
const saved_vra_len = @intFromEnum(saved_vra_end) - @intFromEnum(saved_vra_start);
const frame_record = 2;
const named_stack_args: Select.Value.Indirect = .{
.base = .fp,
.offset = 8 * std.mem.alignForward(u7, frame_record + saved_gra_len, 2),
};
isel.va_list = .{
.__stack = named_stack_args.withOffset(param_it.nsaa),
.__gr_top = named_stack_args,
.__vr_top = .{ .base = .fp, .offset = 0 },
};
// translate arg locations from caller-based to callee-based
for (air_args) |air_inst_index| {
assert(air.instructions.items(.tag)[@intFromEnum(air_inst_index)] == .arg);
const arg_vi = isel.live_values.get(air_inst_index).?;
const passed_vi = switch (arg_vi.parent(&isel)) {
.unallocated, .stack_slot => arg_vi,
.value, .constant => unreachable,
.address => |address_vi| address_vi,
};
switch (passed_vi.parent(&isel)) {
.unallocated => if (!mod.strip) {
var part_it = arg_vi.parts(&isel);
const first_passed_part_vi = part_it.next() orelse passed_vi;
const hint_ra = first_passed_part_vi.hint(&isel).?;
passed_vi.setParent(&isel, .{ .stack_slot = if (hint_ra.isVector())
isel.va_list.__vr_top.withOffset(@as(i8, -16) *
(@intFromEnum(saved_vra_end) - @intFromEnum(hint_ra)))
else
isel.va_list.__gr_top.withOffset(@as(i8, -8) *
(@intFromEnum(saved_gra_end) - @intFromEnum(hint_ra))) });
},
.stack_slot => |stack_slot| {
assert(stack_slot.base == .sp);
passed_vi.setParent(&isel, .{
.stack_slot = named_stack_args.withOffset(stack_slot.offset),
});
},
.address, .value, .constant => unreachable,
}
}
ret: {
var ret_it: Select.CallAbiIterator = .init;
const ret_vi = try ret_it.ret(&isel, .fromInterned(func_type.return_type)) orelse break :ret;
tracking_log.debug("${d} <- %main", .{@intFromEnum(ret_vi)});
try isel.live_values.putNoClobber(gpa, Select.Block.main, ret_vi);
}
assert(!(try isel.blocks.getOrPut(gpa, Select.Block.main)).found_existing);
try isel.analyze(air_main_body);
try isel.finishAnalysis();
isel.verify(false);
isel.blocks.values()[0] = .{
.live_registers = isel.live_registers,
.target_label = @intCast(isel.instructions.items.len),
};
try isel.body(air_main_body);
if (isel.live_values.fetchRemove(Select.Block.main)) |ret_vi| {
switch (ret_vi.value.parent(&isel)) {
.unallocated, .stack_slot => {},
.value, .constant => unreachable,
.address => |address_vi| try address_vi.liveIn(
&isel,
address_vi.hint(&isel).?,
comptime &.initFill(.free),
),
}
ret_vi.value.deref(&isel);
}
isel.verify(true);
const prologue = isel.instructions.items.len;
const epilogue = try isel.layout(
param_it,
func_type.is_var_args,
saved_gra_len,
saved_vra_len,
mod,
);
const instructions = try isel.instructions.toOwnedSlice(gpa);
var mir: Mir = .{
.prologue = instructions[prologue..epilogue],
.body = instructions[0..prologue],
.epilogue = instructions[epilogue..],
.literals = &.{},
.nav_relocs = &.{},
.uav_relocs = &.{},
.global_relocs = &.{},
.literal_relocs = &.{},
};
errdefer mir.deinit(gpa);
mir.literals = try isel.literals.toOwnedSlice(gpa);
mir.nav_relocs = try isel.nav_relocs.toOwnedSlice(gpa);
mir.uav_relocs = try isel.uav_relocs.toOwnedSlice(gpa);
mir.global_relocs = try isel.global_relocs.toOwnedSlice(gpa);
mir.literal_relocs = try isel.literal_relocs.toOwnedSlice(gpa);
return mir;
}
test {
_ = Assemble;
}
const Air = @import("../Air.zig");
const assert = std.debug.assert;
const InternPool = @import("../InternPool.zig");
const link = @import("../link.zig");
const std = @import("std");
const tracking_log = std.log.scoped(.tracking);
const Zcu = @import("../Zcu.zig");

File diff suppressed because it is too large

src/codegen/aarch64/Disassemble.zig Normal file

@ -0,0 +1,905 @@
case: Case = .lower,
mnemonic_operands_separator: []const u8 = " ",
operands_separator: []const u8 = ", ",
enable_aliases: bool = true,
pub const Case = enum { lower, upper };
pub fn printInstruction(dis: Disassemble, inst: Instruction, writer: *std.Io.Writer) std.Io.Writer.Error!void {
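// Any encoding that decodes as unallocated breaks out of this labeled switch
// and falls through to the raw `.word` fallback at the bottom.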
unallocated: switch (inst.decode()) {
.unallocated => break :unallocated,
.reserved => |reserved| switch (reserved.decode()) {
.unallocated => break :unallocated,
.udf => |udf| return writer.print("{f}{s}#0x{x}", .{
fmtCase(.udf, dis.case),
dis.mnemonic_operands_separator,
udf.imm16,
}),
},
.sme => {},
.sve => {},
.data_processing_immediate => |data_processing_immediate| switch (data_processing_immediate.decode()) {
.unallocated => break :unallocated,
.pc_relative_addressing => |pc_relative_addressing| {
const group = pc_relative_addressing.group;
const imm = (@as(i33, group.immhi) << 2 | @as(i33, group.immlo) << 0) + @as(i33, switch (group.op) {
.adr => Instruction.size,
.adrp => 0,
});
return writer.print("{f}{s}{f}{s}.{c}0x{x}", .{
fmtCase(group.op, dis.case),
dis.mnemonic_operands_separator,
group.Rd.decodeInteger(.doubleword, .{}).fmtCase(dis.case),
dis.operands_separator,
@as(u8, if (imm < 0) '-' else '+'),
switch (group.op) {
.adr => @abs(imm),
.adrp => @abs(imm) << 12,
},
});
},
.add_subtract_immediate => |add_subtract_immediate| {
const group = add_subtract_immediate.group;
const op = group.op;
const S = group.S;
const sf = group.sf;
const sh = group.sh;
const imm12 = group.imm12;
const Rn = group.Rn.decodeInteger(sf, .{ .sp = true });
const Rd = group.Rd.decodeInteger(sf, .{ .sp = !S });
const elide_shift = sh == .@"0";
if (dis.enable_aliases and op == .add and S == false and elide_shift and imm12 == 0 and
(Rn.alias == .sp or Rd.alias == .sp)) try writer.print("{f}{s}{f}{s}{f}", .{
fmtCase(.mov, dis.case),
dis.mnemonic_operands_separator,
Rd.fmtCase(dis.case),
dis.operands_separator,
Rn.fmtCase(dis.case),
}) else try writer.print("{f}{s}{s}{f}{s}{f}{s}#0x{x}", .{
fmtCase(op, dis.case),
if (S) "s" else "",
dis.mnemonic_operands_separator,
Rd.fmtCase(dis.case),
dis.operands_separator,
Rn.fmtCase(dis.case),
dis.operands_separator,
imm12,
});
return if (!elide_shift) writer.print("{s}{f} #{s}", .{
dis.operands_separator,
fmtCase(.lsl, dis.case),
@tagName(sh),
});
},
.add_subtract_immediate_with_tags => {},
.logical_immediate => |logical_immediate| {
const decoded = logical_immediate.decode();
if (decoded == .unallocated) break :unallocated;
const group = logical_immediate.group;
const sf = group.sf;
const decoded_imm = group.imm.decodeImmediate(sf);
const imm = switch (sf) {
.word => @as(i32, @bitCast(@as(u32, @intCast(decoded_imm)))),
.doubleword => @as(i64, @bitCast(decoded_imm)),
};
const Rn = group.Rn.decodeInteger(sf, .{});
const Rd = group.Rd.decodeInteger(sf, .{ .sp = decoded != .ands });
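// `orr` from the zero register aliases to `mov` unless a move-wide encoding
// would be preferred; `ands` discarding its result to zr aliases to `tst`.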
return if (dis.enable_aliases and decoded == .orr and Rn.alias == .zr and !group.imm.moveWidePreferred(sf)) writer.print("{f}{s}{f}{s}#{s}0x{x}", .{
fmtCase(.mov, dis.case),
dis.mnemonic_operands_separator,
Rd.fmtCase(dis.case),
dis.operands_separator,
if (imm < 0) "-" else "",
@abs(imm),
}) else if (dis.enable_aliases and decoded == .ands and Rd.alias == .zr) writer.print("{f}{s}{f}{s}#{s}0x{x}", .{
fmtCase(.tst, dis.case),
dis.mnemonic_operands_separator,
Rn.fmtCase(dis.case),
dis.operands_separator,
if (imm < 0) "-" else "",
@abs(imm),
}) else writer.print("{f}{s}{f}{s}{f}{s}#0x{x}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
Rd.fmtCase(dis.case),
dis.operands_separator,
Rn.fmtCase(dis.case),
dis.operands_separator,
decoded_imm,
});
},
.move_wide_immediate => |move_wide_immediate| {
const decoded = move_wide_immediate.decode();
if (decoded == .unallocated) break :unallocated;
const group = move_wide_immediate.group;
const sf = group.sf;
const hw = group.hw;
const imm16 = group.imm16;
const Rd = group.Rd.decodeInteger(sf, .{});
const elide_shift = hw == .@"0";
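// `movz`/`movn` print as the `mov` alias when the immediate round-trips
// unambiguously; `movk` never aliases.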
if (dis.enable_aliases and switch (decoded) {
.unallocated => unreachable,
.movz => elide_shift or group.imm16 != 0,
.movn => (elide_shift or group.imm16 != 0) and switch (sf) {
.word => group.imm16 != std.math.maxInt(u16),
.doubleword => true,
},
.movk => false,
}) {
const decoded_imm = switch (sf) {
.word => @as(i32, @bitCast(@as(u32, group.imm16) << @intCast(hw.int()))),
.doubleword => @as(i64, @bitCast(@as(u64, group.imm16) << hw.int())),
};
const imm = switch (decoded) {
.unallocated => unreachable,
.movz => decoded_imm,
.movn => ~decoded_imm,
.movk => unreachable,
};
return writer.print("{f}{s}{f}{s}#{s}0x{x}", .{
fmtCase(.mov, dis.case),
dis.mnemonic_operands_separator,
Rd.fmtCase(dis.case),
dis.operands_separator,
if (imm < 0) "-" else "",
@abs(imm),
});
}
try writer.print("{f}{s}{f}{s}#0x{x}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
Rd.fmtCase(dis.case),
dis.operands_separator,
imm16,
});
return if (!elide_shift) writer.print("{s}{f} #{s}", .{
dis.operands_separator,
fmtCase(.lsl, dis.case),
@tagName(hw),
});
},
.bitfield => |bitfield| {
const decoded = bitfield.decode();
if (decoded == .unallocated) break :unallocated;
const group = bitfield.group;
const sf = group.sf;
return writer.print("{f}{s}{f}{s}{f}{s}#{d}{s}#{d}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
group.Rd.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rn.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.imm.immr,
dis.operands_separator,
group.imm.imms,
});
},
.extract => |extract| {
const decoded = extract.decode();
if (decoded == .unallocated) break :unallocated;
const group = extract.group;
const sf = group.sf;
return writer.print("{f}{s}{f}{s}{f}{s}{f}{s}#{d}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
group.Rd.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rn.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rm.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.imms,
});
},
},
.branch_exception_generating_system => |branch_exception_generating_system| switch (branch_exception_generating_system.decode()) {
.unallocated => break :unallocated,
.conditional_branch_immediate => |conditional_branch_immediate| {
const decoded = conditional_branch_immediate.decode();
if (decoded == .unallocated) break :unallocated;
const group = conditional_branch_immediate.group;
const imm = @as(i21, group.imm19);
return writer.print("{f}.{f}{s}.{c}0x{x}", .{
fmtCase(decoded, dis.case),
fmtCase(group.cond, dis.case),
dis.mnemonic_operands_separator,
@as(u8, if (imm < 0) '-' else '+'),
@abs(imm) << 2,
});
},
.exception_generating => |exception_generating| {
const decoded = exception_generating.decode();
switch (decoded) {
.unallocated => break :unallocated,
.svc, .hvc, .smc, .brk, .hlt, .tcancel => {},
.dcps1, .dcps2, .dcps3 => switch (exception_generating.group.imm16) {
0 => return writer.print("{f}", .{fmtCase(decoded, dis.case)}),
else => {},
},
}
return switch (exception_generating.group.imm16) {
0 => writer.print("{f}{s}#0", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
}),
else => writer.print("{f}{s}#0x{x}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
exception_generating.group.imm16,
}),
};
},
.system_register_argument => {},
.hints => |hints| switch (hints.decode()) {
.hint => |hint| return writer.print("{f}{s}#0x{x}", .{
fmtCase(.hint, dis.case),
dis.mnemonic_operands_separator,
@as(u7, hint.CRm) << 3 | @as(u7, hint.op2) << 0,
}),
else => |decoded| return writer.print("{f}", .{fmtCase(decoded, dis.case)}),
},
.barriers => {},
.pstate => {},
.system_result => {},
.system => {},
.system_register_move => {},
.unconditional_branch_register => |unconditional_branch_register| {
const decoded = unconditional_branch_register.decode();
if (decoded == .unallocated) break :unallocated;
const group = unconditional_branch_register.group;
const Rn = group.Rn.decodeInteger(.doubleword, .{});
try writer.print("{f}", .{fmtCase(decoded, dis.case)});
return if (decoded != .ret or Rn.alias != .r30) try writer.print("{s}{f}", .{
dis.mnemonic_operands_separator,
Rn.fmtCase(dis.case),
});
},
.unconditional_branch_immediate => |unconditional_branch_immediate| {
const group = unconditional_branch_immediate.group;
const imm = @as(i28, group.imm26);
return writer.print("{f}{s}.{c}0x{x}", .{
fmtCase(group.op, dis.case),
dis.mnemonic_operands_separator,
@as(u8, if (imm < 0) '-' else '+'),
@abs(imm) << 2,
});
},
.compare_branch_immediate => |compare_branch_immediate| {
const group = compare_branch_immediate.group;
const imm = @as(i21, group.imm19);
return writer.print("{f}{s}{f}{s}.{c}0x{x}", .{
fmtCase(group.op, dis.case),
dis.mnemonic_operands_separator,
group.Rt.decodeInteger(group.sf, .{}).fmtCase(dis.case),
dis.operands_separator,
@as(u8, if (imm < 0) '-' else '+'),
@abs(imm) << 2,
});
},
.test_branch_immediate => |test_branch_immediate| {
const group = test_branch_immediate.group;
const imm = @as(i16, group.imm14);
return writer.print("{f}{s}{f}{s}#0x{d}{s}.{c}0x{x}", .{
fmtCase(group.op, dis.case),
dis.mnemonic_operands_separator,
group.Rt.decodeInteger(@enumFromInt(group.b5), .{}).fmtCase(dis.case),
dis.operands_separator,
@as(u6, group.b5) << 5 |
@as(u6, group.b40) << 0,
dis.operands_separator,
@as(u8, if (imm < 0) '-' else '+'),
@abs(imm) << 2,
});
},
},
.load_store => |load_store| switch (load_store.decode()) {
.unallocated => break :unallocated,
.register_literal => {},
.memory => {},
.no_allocate_pair_offset => {},
.register_pair_post_indexed => |register_pair_post_indexed| switch (register_pair_post_indexed.decode()) {
.integer => |integer| {
const decoded = integer.decode();
if (decoded == .unallocated) break :unallocated;
const group = integer.group;
const sf: aarch64.encoding.Register.IntegerSize = @enumFromInt(group.opc >> 1);
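// The imm7 pair offset is scaled by the access size: 4 bytes for word
// pairs, 8 bytes for doubleword pairs.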
return writer.print("{f}{s}{f}{s}{f}{s}[{f}]{s}#{s}0x{x}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rt2.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
dis.operands_separator,
if (group.imm7 < 0) "-" else "",
@as(u10, @abs(group.imm7)) << (@as(u2, 2) + @intFromEnum(sf)),
});
},
.vector => |vector| {
const decoded = vector.decode();
if (decoded == .unallocated) break :unallocated;
const group = vector.group;
const vs = group.opc.decode();
return writer.print("{f}{s}{f}{s}{f}{s}[{f}]{s}#{s}0x{x}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
group.Rt.decodeVector(vs).fmtCase(dis.case),
dis.operands_separator,
group.Rt2.decodeVector(vs).fmtCase(dis.case),
dis.operands_separator,
group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
dis.operands_separator,
if (group.imm7 < 0) "-" else "",
@as(u11, @abs(group.imm7)) << (@as(u3, 2) + @intFromEnum(vs)),
});
},
},
.register_pair_offset => |register_pair_offset| switch (register_pair_offset.decode()) {
.integer => |integer| {
const decoded = integer.decode();
if (decoded == .unallocated) break :unallocated;
const group = integer.group;
const sf: aarch64.encoding.Register.IntegerSize = @enumFromInt(group.opc >> 1);
try writer.print("{f}{s}{f}{s}{f}{s}[{f}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rt2.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
});
if (group.imm7 != 0) try writer.print("{s}#{s}0x{x}", .{
dis.operands_separator,
if (group.imm7 < 0) "-" else "",
@as(u10, @abs(group.imm7)) << (@as(u2, 2) + @intFromEnum(sf)),
});
return writer.writeByte(']');
},
.vector => |vector| {
const decoded = vector.decode();
if (decoded == .unallocated) break :unallocated;
const group = vector.group;
const vs = group.opc.decode();
try writer.print("{f}{s}{f}{s}{f}{s}[{f}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
group.Rt.decodeVector(vs).fmtCase(dis.case),
dis.operands_separator,
group.Rt2.decodeVector(vs).fmtCase(dis.case),
dis.operands_separator,
group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
});
if (group.imm7 != 0) try writer.print("{s}#{s}0x{x}", .{
dis.operands_separator,
if (group.imm7 < 0) "-" else "",
@as(u11, @abs(group.imm7)) << (@as(u3, 2) + @intFromEnum(vs)),
});
return writer.writeByte(']');
},
},
.register_pair_pre_indexed => |register_pair_pre_indexed| switch (register_pair_pre_indexed.decode()) {
.integer => |integer| {
const decoded = integer.decode();
if (decoded == .unallocated) break :unallocated;
const group = integer.group;
const sf: aarch64.encoding.Register.IntegerSize = @enumFromInt(group.opc >> 1);
return writer.print("{f}{s}{f}{s}{f}{s}[{f}{s}#{s}0x{x}]!", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rt2.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
dis.operands_separator,
if (group.imm7 < 0) "-" else "",
@as(u10, @abs(group.imm7)) << (@as(u2, 2) + @intFromEnum(sf)),
});
},
.vector => |vector| {
const decoded = vector.decode();
if (decoded == .unallocated) break :unallocated;
const group = vector.group;
const vs = group.opc.decode();
return writer.print("{f}{s}{f}{s}{f}{s}[{f}{s}#{s}0x{x}]!", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
group.Rt.decodeVector(vs).fmtCase(dis.case),
dis.operands_separator,
group.Rt2.decodeVector(vs).fmtCase(dis.case),
dis.operands_separator,
group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
dis.operands_separator,
if (group.imm7 < 0) "-" else "",
@as(u11, @abs(group.imm7)) << (@as(u3, 2) + @intFromEnum(vs)),
});
},
},
.register_unscaled_immediate => {},
.register_immediate_post_indexed => |register_immediate_post_indexed| switch (register_immediate_post_indexed.decode()) {
.integer => |integer| {
const decoded = integer.decode();
const sf: aarch64.encoding.Register.IntegerSize = switch (decoded) {
.unallocated => break :unallocated,
.strb, .ldrb, .strh, .ldrh => .word,
inline .ldrsb, .ldrsh => |encoded| switch (encoded.opc0) {
0b0 => .doubleword,
0b1 => .word,
},
.ldrsw => .doubleword,
inline .str, .ldr => |encoded| encoded.sf,
};
const group = integer.group;
return writer.print("{f}{s}{f}{s}[{f}]{s}#{s}0x{x}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
dis.operands_separator,
if (group.imm9 < 0) "-" else "",
@abs(group.imm9),
});
},
.vector => {},
},
.register_unprivileged => {},
.register_immediate_pre_indexed => |register_immediate_pre_indexed| switch (register_immediate_pre_indexed.decode()) {
.integer => |integer| {
const decoded = integer.decode();
const sf: aarch64.encoding.Register.IntegerSize = switch (decoded) {
.unallocated => break :unallocated,
inline .ldrsb, .ldrsh => |encoded| switch (encoded.opc0) {
0b0 => .doubleword,
0b1 => .word,
},
.strb, .ldrb, .strh, .ldrh => .word,
.ldrsw => .doubleword,
inline .str, .ldr => |encoded| encoded.sf,
};
const group = integer.group;
return writer.print("{f}{s}{f}{s}[{f}{s}#{s}0x{x}]!", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
dis.operands_separator,
if (group.imm9 < 0) "-" else "",
@abs(group.imm9),
});
},
.vector => |vector| {
const decoded = vector.decode();
if (decoded == .unallocated) break :unallocated;
const group = vector.group;
return writer.print("{f}{s}{f}{s}[{f}{s}#{s}0x{x}]!", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
group.Rt.decodeVector(group.opc1.decode(group.size)).fmtCase(dis.case),
dis.operands_separator,
group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
dis.operands_separator,
if (group.imm9 < 0) "-" else "",
@abs(group.imm9),
});
},
},
.register_register_offset => |register_register_offset| switch (register_register_offset.decode()) {
.integer => |integer| {
const decoded = integer.decode();
const sf: aarch64.encoding.Register.IntegerSize = switch (decoded) {
.unallocated, .prfm => break :unallocated,
.strb, .ldrb, .strh, .ldrh => .word,
inline .ldrsb, .ldrsh => |encoded| switch (encoded.opc0) {
0b0 => .doubleword,
0b1 => .word,
},
.ldrsw => .doubleword,
inline .str, .ldr => |encoded| encoded.sf,
};
const group = integer.group;
try writer.print("{f}{s}{f}{s}[{f}{s}{f}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
dis.operands_separator,
group.Rm.decodeInteger(group.option.sf(), .{}).fmtCase(dis.case),
});
if (group.option != .lsl or group.S) {
try writer.print("{s}{f}", .{
dis.operands_separator,
fmtCase(group.option, dis.case),
});
if (group.S) try writer.print(" #{d}", .{
@intFromEnum(group.size),
});
}
return writer.writeByte(']');
},
.vector => {},
},
.register_unsigned_immediate => |register_unsigned_immediate| switch (register_unsigned_immediate.decode()) {
.integer => |integer| {
const decoded = integer.decode();
const sf: aarch64.encoding.Register.IntegerSize = switch (decoded) {
.unallocated, .prfm => break :unallocated,
.strb, .ldrb, .strh, .ldrh => .word,
inline .ldrsb, .ldrsh => |encoded| switch (encoded.opc0) {
0b0 => .doubleword,
0b1 => .word,
},
.ldrsw => .doubleword,
inline .str, .ldr => |encoded| encoded.sf,
};
const group = integer.group;
try writer.print("{f}{s}{f}{s}[{f}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
});
if (group.imm12 > 0) try writer.print("{s}#0x{x}", .{
dis.operands_separator,
@as(u15, group.imm12) << @intFromEnum(group.size),
});
return writer.writeByte(']');
},
.vector => {},
},
},
.data_processing_register => |data_processing_register| switch (data_processing_register.decode()) {
.unallocated => break :unallocated,
.data_processing_two_source => |data_processing_two_source| {
const decoded = data_processing_two_source.decode();
if (decoded == .unallocated) break :unallocated;
const group = data_processing_two_source.group;
const sf = group.sf;
return writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
group.Rd.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rn.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rm.decodeInteger(sf, .{}).fmtCase(dis.case),
});
},
.data_processing_one_source => |data_processing_one_source| {
const decoded = data_processing_one_source.decode();
if (decoded == .unallocated) break :unallocated;
const group = data_processing_one_source.group;
const sf = group.sf;
return writer.print("{f}{s}{f}{s}{f}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
group.Rd.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rn.decodeInteger(sf, .{}).fmtCase(dis.case),
});
},
.logical_shifted_register => |logical_shifted_register| {
const decoded = logical_shifted_register.decode();
if (decoded == .unallocated) break :unallocated;
const group = logical_shifted_register.group;
const sf = group.sf;
const shift = group.shift;
const Rm = group.Rm.decodeInteger(sf, .{});
const amount = group.imm6;
const Rn = group.Rn.decodeInteger(sf, .{});
const Rd = group.Rd.decodeInteger(sf, .{});
const elide_shift = shift == .lsl and amount == 0;
if (dis.enable_aliases and switch (decoded) {
else => false,
.orr => elide_shift,
.orn => true,
} and Rn.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
fmtCase(@as(enum { mov, mvn }, switch (decoded) {
else => unreachable,
.orr => .mov,
.orn => .mvn,
}), dis.case),
dis.mnemonic_operands_separator,
Rd.fmtCase(dis.case),
dis.operands_separator,
Rm.fmtCase(dis.case),
}) else if (dis.enable_aliases and decoded == .ands and Rd.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
fmtCase(.tst, dis.case),
dis.mnemonic_operands_separator,
Rn.fmtCase(dis.case),
dis.operands_separator,
Rm.fmtCase(dis.case),
}) else try writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
Rd.fmtCase(dis.case),
dis.operands_separator,
Rn.fmtCase(dis.case),
dis.operands_separator,
Rm.fmtCase(dis.case),
});
return if (!elide_shift) writer.print("{s}{f} #{d}", .{
dis.operands_separator,
fmtCase(shift, dis.case),
amount,
});
},
.add_subtract_shifted_register => |add_subtract_shifted_register| {
const decoded = add_subtract_shifted_register.decode();
if (decoded == .unallocated) break :unallocated;
const group = add_subtract_shifted_register.group;
const sf = group.sf;
const shift = group.shift;
const Rm = group.Rm.decodeInteger(sf, .{});
const imm6 = group.imm6;
const Rn = group.Rn.decodeInteger(sf, .{});
const Rd = group.Rd.decodeInteger(sf, .{});
if (dis.enable_aliases and group.S and Rd.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
fmtCase(@as(enum { cmn, cmp }, switch (group.op) {
.add => .cmn,
.sub => .cmp,
}), dis.case),
dis.mnemonic_operands_separator,
Rn.fmtCase(dis.case),
dis.operands_separator,
Rm.fmtCase(dis.case),
}) else if (dis.enable_aliases and group.op == .sub and Rn.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
fmtCase(@as(enum { neg, negs }, switch (group.S) {
false => .neg,
true => .negs,
}), dis.case),
dis.mnemonic_operands_separator,
Rd.fmtCase(dis.case),
dis.operands_separator,
Rm.fmtCase(dis.case),
}) else try writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
Rd.fmtCase(dis.case),
dis.operands_separator,
Rn.fmtCase(dis.case),
dis.operands_separator,
Rm.fmtCase(dis.case),
});
            return if (shift != .lsl or imm6 != 0) writer.print("{s}{f} #{d}", .{
dis.operands_separator,
fmtCase(shift, dis.case),
imm6,
});
},
.add_subtract_extended_register => |add_subtract_extended_register| {
const decoded = add_subtract_extended_register.decode();
if (decoded == .unallocated) break :unallocated;
const group = add_subtract_extended_register.group;
const sf = group.sf;
const Rm = group.Rm.decodeInteger(group.option.sf(), .{});
const Rn = group.Rn.decodeInteger(sf, .{ .sp = true });
const Rd = group.Rd.decodeInteger(sf, .{ .sp = true });
if (dis.enable_aliases and group.S and Rd.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
fmtCase(@as(enum { cmn, cmp }, switch (group.op) {
.add => .cmn,
.sub => .cmp,
}), dis.case),
dis.mnemonic_operands_separator,
Rn.fmtCase(dis.case),
dis.operands_separator,
Rm.fmtCase(dis.case),
}) else try writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
Rd.fmtCase(dis.case),
dis.operands_separator,
Rn.fmtCase(dis.case),
dis.operands_separator,
Rm.fmtCase(dis.case),
});
return if (group.option != @as(Instruction.DataProcessingRegister.AddSubtractExtendedRegister.Option, switch (sf) {
.word => .uxtw,
.doubleword => .uxtx,
}) or group.imm3 != 0) writer.print("{s}{f} #{d}", .{
dis.operands_separator,
fmtCase(group.option, dis.case),
group.imm3,
});
},
.add_subtract_with_carry => |add_subtract_with_carry| {
const decoded = add_subtract_with_carry.decode();
const group = add_subtract_with_carry.group;
const sf = group.sf;
const Rm = group.Rm.decodeInteger(sf, .{});
const Rn = group.Rn.decodeInteger(sf, .{});
const Rd = group.Rd.decodeInteger(sf, .{});
return if (dis.enable_aliases and group.op == .sbc and Rn.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
fmtCase(@as(enum { ngc, ngcs }, switch (group.S) {
false => .ngc,
true => .ngcs,
}), dis.case),
dis.mnemonic_operands_separator,
Rd.fmtCase(dis.case),
dis.operands_separator,
Rm.fmtCase(dis.case),
}) else try writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
Rd.fmtCase(dis.case),
dis.operands_separator,
Rn.fmtCase(dis.case),
dis.operands_separator,
Rm.fmtCase(dis.case),
});
},
.rotate_right_into_flags => {},
.evaluate_into_flags => {},
.conditional_compare_register => {},
.conditional_compare_immediate => {},
.conditional_select => |conditional_select| {
const decoded = conditional_select.decode();
if (decoded == .unallocated) break :unallocated;
const group = conditional_select.group;
const sf = group.sf;
const Rm = group.Rm.decodeInteger(sf, .{});
const cond = group.cond;
const Rn = group.Rn.decodeInteger(sf, .{});
const Rd = group.Rd.decodeInteger(sf, .{});
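// cset/csetm and cinc/cinv/cneg alias csinc/csinv/csneg with the condition
// inverted; they require Rn == Rm (zr for cset/csetm) and a condition other
// than al/nv.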
return if (dis.enable_aliases and group.op != group.op2 and Rm.alias == .zr and cond != .al and cond != .nv and Rn.alias == Rm.alias) writer.print("{f}{s}{f}{s}{f}", .{
fmtCase(@as(enum { cset, csetm }, switch (decoded) {
else => unreachable,
.csinc => .cset,
.csinv => .csetm,
}), dis.case),
dis.mnemonic_operands_separator,
Rd.fmtCase(dis.case),
dis.operands_separator,
fmtCase(cond.invert(), dis.case),
}) else if (dis.enable_aliases and decoded != .csel and cond != .al and cond != .nv and Rn.alias == Rm.alias) writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
fmtCase(@as(enum { cinc, cinv, cneg }, switch (decoded) {
else => unreachable,
.csinc => .cinc,
.csinv => .cinv,
.csneg => .cneg,
}), dis.case),
dis.mnemonic_operands_separator,
Rd.fmtCase(dis.case),
dis.operands_separator,
Rn.fmtCase(dis.case),
dis.operands_separator,
fmtCase(cond.invert(), dis.case),
}) else writer.print("{f}{s}{f}{s}{f}{s}{f}{s}{f}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
Rd.fmtCase(dis.case),
dis.operands_separator,
Rn.fmtCase(dis.case),
dis.operands_separator,
Rm.fmtCase(dis.case),
dis.operands_separator,
fmtCase(cond, dis.case),
});
},
.data_processing_three_source => |data_processing_three_source| {
const decoded = data_processing_three_source.decode();
if (decoded == .unallocated) break :unallocated;
const group = data_processing_three_source.group;
const sf = group.sf;
try writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
fmtCase(decoded, dis.case),
dis.mnemonic_operands_separator,
group.Rd.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rn.decodeInteger(sf, .{}).fmtCase(dis.case),
dis.operands_separator,
group.Rm.decodeInteger(sf, .{}).fmtCase(dis.case),
});
return switch (decoded) {
.unallocated => unreachable,
.madd, .msub, .smaddl, .smsubl, .umaddl, .umsubl => writer.print("{s}{f}", .{
dis.operands_separator,
group.Ra.decodeInteger(sf, .{}).fmtCase(dis.case),
}),
.smulh, .umulh => {},
};
},
},
.data_processing_vector => {},
}
return writer.print(".{f}{s}0x{x:0>8}", .{
fmtCase(.word, dis.case),
dis.mnemonic_operands_separator,
@as(Instruction.Backing, @bitCast(inst)),
});
}
fn fmtCase(tag: anytype, case: Case) struct {
tag: []const u8,
case: Case,
pub fn format(data: @This(), writer: *std.Io.Writer) std.Io.Writer.Error!void {
for (data.tag) |c| try writer.writeByte(switch (data.case) {
.lower => std.ascii.toLower(c),
.upper => std.ascii.toUpper(c),
});
}
} {
return .{ .tag = @tagName(tag), .case = case };
}
pub const RegisterFormatter = struct {
reg: aarch64.encoding.Register,
case: Case,
pub fn format(data: @This(), writer: *std.Io.Writer) std.Io.Writer.Error!void {
switch (data.reg.format) {
.alias => try writer.print("{f}", .{fmtCase(data.reg.alias, data.case)}),
.integer => |size| switch (data.reg.alias) {
.r0,
.r1,
.r2,
.r3,
.r4,
.r5,
.r6,
.r7,
.r8,
.r9,
.r10,
.r11,
.r12,
.r13,
.r14,
.r15,
.r16,
.r17,
.r18,
.r19,
.r20,
.r21,
.r22,
.r23,
.r24,
.r25,
.r26,
.r27,
.r28,
.r29,
.r30,
=> |alias| try writer.print("{c}{d}", .{
size.prefix(),
@intFromEnum(alias.encode(.{})),
}),
.zr => try writer.print("{c}{f}", .{
size.prefix(),
fmtCase(data.reg.alias, data.case),
}),
else => try writer.print("{s}{f}", .{
switch (size) {
.word => "w",
.doubleword => "",
},
fmtCase(data.reg.alias, data.case),
}),
},
.scalar => |size| try writer.print("{c}{d}", .{
size.prefix(),
@intFromEnum(data.reg.alias.encode(.{ .V = true })),
}),
.vector => |arrangement| try writer.print("{f}.{f}", .{
fmtCase(data.reg.alias, data.case),
fmtCase(arrangement, data.case),
}),
.element => |element| try writer.print("{f}.{c}[{d}]", .{
fmtCase(data.reg.alias, data.case),
element.size.prefix(),
element.index,
}),
}
}
};
const aarch64 = @import("../aarch64.zig");
const Disassemble = @This();
const Instruction = aarch64.encoding.Instruction;
const std = @import("std");

src/codegen/aarch64/Mir.zig Normal file

@ -0,0 +1,275 @@
prologue: []const Instruction,
body: []const Instruction,
epilogue: []const Instruction,
literals: []const u32,
nav_relocs: []const Reloc.Nav,
uav_relocs: []const Reloc.Uav,
global_relocs: []const Reloc.Global,
literal_relocs: []const Reloc.Literal,
pub const Reloc = struct {
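    // `label` indexes into `Mir.body`; emit() converts it into a byte offset
    // from the end of the emitted body.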
label: u32,
addend: u64 align(@alignOf(u32)) = 0,
pub const Nav = struct {
nav: InternPool.Nav.Index,
reloc: Reloc,
};
pub const Uav = struct {
uav: InternPool.Key.Ptr.BaseAddr.Uav,
reloc: Reloc,
};
pub const Global = struct {
global: [*:0]const u8,
reloc: Reloc,
};
pub const Literal = struct {
label: u32,
};
};
pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
assert(mir.body.ptr + mir.body.len == mir.prologue.ptr);
assert(mir.prologue.ptr + mir.prologue.len == mir.epilogue.ptr);
gpa.free(mir.body.ptr[0 .. mir.body.len + mir.prologue.len + mir.epilogue.len]);
gpa.free(mir.literals);
gpa.free(mir.nav_relocs);
gpa.free(mir.uav_relocs);
gpa.free(mir.global_relocs);
gpa.free(mir.literal_relocs);
mir.* = undefined;
}
pub fn emit(
mir: Mir,
lf: *link.File,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) !void {
_ = debug_output;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
const nav = ip.getNav(func.owner_nav);
const mod = zcu.navFileScope(func.owner_nav).mod.?;
const target = &mod.resolved_target.result;
mir_log.debug("{f}:", .{nav.fqn.fmt(ip)});
const func_align = switch (nav.status.fully_resolved.alignment) {
.none => switch (mod.optimize_mode) {
.Debug, .ReleaseSafe, .ReleaseFast => target_util.defaultFunctionAlignment(target),
.ReleaseSmall => target_util.minFunctionAlignment(target),
},
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
};
const code_len = mir.prologue.len + mir.body.len + mir.epilogue.len;
const literals_align_gap = -%code_len & (@divExact(
@as(u5, @intCast(func_align.minStrict(.@"16").toByteUnits().?)),
Instruction.size,
) - 1);
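    // Number of zero words appended after the epilogue so the literal pool
    // starts on the function's alignment boundary (capped at 16 bytes).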
try code.ensureUnusedCapacity(gpa, Instruction.size *
(code_len + literals_align_gap + mir.literals.len));
emitInstructionsForward(code, mir.prologue);
emitInstructionsBackward(code, mir.body);
const body_end: u32 = @intCast(code.items.len);
emitInstructionsBackward(code, mir.epilogue);
code.appendNTimesAssumeCapacity(0, Instruction.size * literals_align_gap);
code.appendSliceAssumeCapacity(@ptrCast(mir.literals));
mir_log.debug("", .{});
for (mir.nav_relocs) |nav_reloc| try emitReloc(
lf,
zcu,
func.owner_nav,
switch (try @import("../../codegen.zig").genNavRef(
lf,
pt,
src_loc,
nav_reloc.nav,
&mod.resolved_target.result,
)) {
.sym_index => |sym_index| sym_index,
.fail => |em| return zcu.codegenFailMsg(func.owner_nav, em),
},
mir.body[nav_reloc.reloc.label],
body_end - Instruction.size * (1 + nav_reloc.reloc.label),
nav_reloc.reloc.addend,
);
for (mir.uav_relocs) |uav_reloc| try emitReloc(
lf,
zcu,
func.owner_nav,
switch (try lf.lowerUav(
pt,
uav_reloc.uav.val,
ZigType.fromInterned(uav_reloc.uav.orig_ty).ptrAlignment(zcu),
src_loc,
)) {
.sym_index => |sym_index| sym_index,
.fail => |em| return zcu.codegenFailMsg(func.owner_nav, em),
},
mir.body[uav_reloc.reloc.label],
body_end - Instruction.size * (1 + uav_reloc.reloc.label),
uav_reloc.reloc.addend,
);
for (mir.global_relocs) |global_reloc| try emitReloc(
lf,
zcu,
func.owner_nav,
if (lf.cast(.elf)) |ef|
try ef.getGlobalSymbol(std.mem.span(global_reloc.global), null)
else if (lf.cast(.macho)) |mf|
try mf.getGlobalSymbol(std.mem.span(global_reloc.global), null)
else if (lf.cast(.coff)) |cf|
try cf.getGlobalSymbol(std.mem.span(global_reloc.global), "compiler_rt")
else
return zcu.codegenFail(func.owner_nav, "external symbols unimplemented for {s}", .{@tagName(lf.tag)}),
mir.body[global_reloc.reloc.label],
body_end - Instruction.size * (1 + global_reloc.reloc.label),
global_reloc.reloc.addend,
);
const literal_reloc_offset: i19 = @intCast(mir.epilogue.len + literals_align_gap);
for (mir.literal_relocs) |literal_reloc| {
var instruction = mir.body[literal_reloc.label];
instruction.load_store.register_literal.group.imm19 += literal_reloc_offset;
instruction.write(
code.items[body_end - Instruction.size * (1 + literal_reloc.label) ..][0..Instruction.size],
);
}
}
fn emitInstructionsForward(code: *std.ArrayListUnmanaged(u8), instructions: []const Instruction) void {
for (instructions) |instruction| emitInstruction(code, instruction);
}
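// Restores program order for instruction lists that were built back to front
// during selection (the body and epilogue).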
fn emitInstructionsBackward(code: *std.ArrayListUnmanaged(u8), instructions: []const Instruction) void {
var instruction_index = instructions.len;
while (instruction_index > 0) {
instruction_index -= 1;
emitInstruction(code, instructions[instruction_index]);
}
}
fn emitInstruction(code: *std.ArrayListUnmanaged(u8), instruction: Instruction) void {
mir_log.debug(" {f}", .{instruction});
instruction.write(code.addManyAsArrayAssumeCapacity(Instruction.size));
}
fn emitReloc(
lf: *link.File,
zcu: *Zcu,
owner_nav: InternPool.Nav.Index,
sym_index: u32,
instruction: Instruction,
offset: u32,
addend: u64,
) !void {
const gpa = zcu.gpa;
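    // Derive the relocation type from the instruction being patched, then
    // lower it to the matching ELF or Mach-O relocation record.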
switch (instruction.decode()) {
else => unreachable,
.branch_exception_generating_system => |decoded| if (lf.cast(.elf)) |ef| {
const zo = ef.zigObjectPtr().?;
const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
const r_type: std.elf.R_AARCH64 = switch (decoded.decode().unconditional_branch_immediate.group.op) {
.b => .JUMP26,
.bl => .CALL26,
};
try atom.addReloc(gpa, .{
.r_offset = offset,
.r_info = @as(u64, sym_index) << 32 | @intFromEnum(r_type),
.r_addend = @bitCast(addend),
}, zo);
} else if (lf.cast(.macho)) |mf| {
const zo = mf.getZigObject().?;
const atom = zo.symbols.items[try zo.getOrCreateMetadataForNav(mf, owner_nav)].getAtom(mf).?;
try atom.addReloc(mf, .{
.tag = .@"extern",
.offset = offset,
.target = sym_index,
.addend = @bitCast(addend),
.type = .branch,
.meta = .{
.pcrel = true,
.has_subtractor = false,
.length = 2,
.symbolnum = @intCast(sym_index),
},
});
},
.data_processing_immediate => |decoded| if (lf.cast(.elf)) |ef| {
const zo = ef.zigObjectPtr().?;
const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
const r_type: std.elf.R_AARCH64 = switch (decoded.decode()) {
else => unreachable,
.pc_relative_addressing => |pc_relative_addressing| switch (pc_relative_addressing.group.op) {
.adr => .ADR_PREL_LO21,
.adrp => .ADR_PREL_PG_HI21,
},
.add_subtract_immediate => |add_subtract_immediate| switch (add_subtract_immediate.group.op) {
.add => .ADD_ABS_LO12_NC,
.sub => unreachable,
},
};
try atom.addReloc(gpa, .{
.r_offset = offset,
.r_info = @as(u64, sym_index) << 32 | @intFromEnum(r_type),
.r_addend = @bitCast(addend),
}, zo);
} else if (lf.cast(.macho)) |mf| {
const zo = mf.getZigObject().?;
const atom = zo.symbols.items[try zo.getOrCreateMetadataForNav(mf, owner_nav)].getAtom(mf).?;
switch (decoded.decode()) {
else => unreachable,
.pc_relative_addressing => |pc_relative_addressing| switch (pc_relative_addressing.group.op) {
.adr => unreachable,
.adrp => try atom.addReloc(mf, .{
.tag = .@"extern",
.offset = offset,
.target = sym_index,
.addend = @bitCast(addend),
.type = .page,
.meta = .{
.pcrel = true,
.has_subtractor = false,
.length = 2,
.symbolnum = @intCast(sym_index),
},
}),
},
.add_subtract_immediate => |add_subtract_immediate| switch (add_subtract_immediate.group.op) {
.add => try atom.addReloc(mf, .{
.tag = .@"extern",
.offset = offset,
.target = sym_index,
.addend = @bitCast(addend),
.type = .pageoff,
.meta = .{
.pcrel = false,
.has_subtractor = false,
.length = 2,
.symbolnum = @intCast(sym_index),
},
}),
.sub => unreachable,
},
}
},
}
}
const Air = @import("../../Air.zig");
const assert = std.debug.assert;
const mir_log = std.log.scoped(.mir);
const Instruction = @import("encoding.zig").Instruction;
const InternPool = @import("../../InternPool.zig");
const link = @import("../../link.zig");
const Mir = @This();
const std = @import("std");
const target_util = @import("../../target.zig");
const Zcu = @import("../../Zcu.zig");
const ZigType = @import("../../Type.zig");

src/codegen/aarch64/Select.zig Normal file

File diff suppressed because it is too large

@ -1,7 +1,5 @@
const assert = @import("std").debug.assert;
const std = @import("std");
const builtin = @import("builtin");
const bits = @import("../../arch/aarch64/bits.zig");
const Register = bits.Register;
const Type = @import("../../Type.zig");
const Zcu = @import("../../Zcu.zig");
@ -15,7 +13,7 @@ pub const Class = union(enum) {
/// For `float_array` the second element will be the amount of floats.
pub fn classifyType(ty: Type, zcu: *Zcu) Class {
std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
var maybe_float_bits: ?u16 = null;
switch (ty.zigTypeTag(zcu)) {
@ -47,11 +45,11 @@ pub fn classifyType(ty: Type, zcu: *Zcu) Class {
return .byval;
},
.optional => {
std.debug.assert(ty.isPtrLikeOptional(zcu));
assert(ty.isPtrLikeOptional(zcu));
return .byval;
},
.pointer => {
std.debug.assert(!ty.isSlice(zcu));
assert(!ty.isSlice(zcu));
return .byval;
},
.error_union,
@ -138,13 +136,3 @@ pub fn getFloatArrayType(ty: Type, zcu: *Zcu) ?Type {
else => return null,
}
}
pub const callee_preserved_regs = [_]Register{
.x19, .x20, .x21, .x22, .x23,
.x24, .x25, .x26, .x27, .x28,
};
pub const c_abi_int_param_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };
pub const c_abi_int_return_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };
const allocatable_registers = callee_preserved_regs;

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -449,14 +449,15 @@ pub const Function = struct {
if (gop.found_existing) return gop.value_ptr.*;
const pt = f.object.dg.pt;
const zcu = pt.zcu;
const val = (try f.air.value(ref, pt)).?;
const ty = f.typeOf(ref);
const result: CValue = if (lowersToArray(ty, pt)) result: {
const result: CValue = if (lowersToArray(ty, zcu)) result: {
const ch = &f.object.code_header.writer;
const decl_c_value = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(ty, .complete),
.alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(pt.zcu)),
.alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(zcu)),
});
const gpa = f.object.dg.gpa;
try f.allocs.put(gpa, decl_c_value.new_local, false);
@ -916,7 +917,7 @@ pub const DeclGen = struct {
// Ensure complete type definition is available before accessing fields.
_ = try dg.ctypeFromType(parent_ptr_ty.childType(zcu), .complete);
switch (fieldLocation(parent_ptr_ty, field.result_ptr_ty, field.field_idx, pt)) {
switch (fieldLocation(parent_ptr_ty, field.result_ptr_ty, field.field_idx, zcu)) {
.begin => {
const ptr_ctype = try dg.ctypeFromType(field.result_ptr_ty, .complete);
try w.writeByte('(');
@ -3008,7 +3009,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
liveness: *const Air.Liveness,
liveness: *const ?Air.Liveness,
) @import("../codegen.zig").CodeGenError!Mir {
const zcu = pt.zcu;
const gpa = zcu.gpa;
@ -3021,7 +3022,7 @@ pub fn generate(
var function: Function = .{
.value_map = .init(gpa),
.air = air.*,
.liveness = liveness.*,
.liveness = liveness.*.?,
.func_index = func_index,
.object = .{
.dg = .{
@ -3961,7 +3962,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
else
true;
const is_array = lowersToArray(src_ty, pt);
const is_array = lowersToArray(src_ty, zcu);
const need_memcpy = !is_aligned or is_array;
const w = &f.object.code.writer;
@ -4044,7 +4045,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !void {
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
var deref = is_ptr;
const is_array = lowersToArray(ret_ty, pt);
const is_array = lowersToArray(ret_ty, zcu);
const ret_val = if (is_array) ret_val: {
const array_local = try f.allocAlignedLocal(inst, .{
.ctype = ret_ctype,
@ -4228,7 +4229,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
else
true;
const is_array = lowersToArray(.fromInterned(ptr_info.child), pt);
const is_array = lowersToArray(.fromInterned(ptr_info.child), zcu);
const need_memcpy = !is_aligned or is_array;
const src_val = try f.resolveInst(bin_op.rhs);
@ -4873,7 +4874,7 @@ fn airCall(
}
const result = result: {
if (result_local == .none or !lowersToArray(ret_ty, pt))
if (result_local == .none or !lowersToArray(ret_ty, zcu))
break :result result_local;
const array_local = try f.allocLocal(inst, ret_ty);
@ -5971,13 +5972,12 @@ fn fieldLocation(
container_ptr_ty: Type,
field_ptr_ty: Type,
field_index: u32,
pt: Zcu.PerThread,
zcu: *Zcu,
) union(enum) {
begin: void,
field: CValue,
byte_offset: u64,
} {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const container_ty: Type = .fromInterned(ip.indexToKey(container_ptr_ty.toIntern()).ptr_type.child);
switch (ip.indexToKey(container_ty.toIntern())) {
@ -5994,7 +5994,7 @@ fn fieldLocation(
else
.{ .field = field_index } },
.@"packed" => if (field_ptr_ty.ptrInfo(zcu).packed_offset.host_size == 0)
.{ .byte_offset = @divExact(pt.structPackedFieldBitOffset(loaded_struct, field_index) +
.{ .byte_offset = @divExact(zcu.structPackedFieldBitOffset(loaded_struct, field_index) +
container_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset, 8) }
else
.begin,
@ -6076,7 +6076,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.renderType(w, container_ptr_ty);
try w.writeByte(')');
switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, pt)) {
switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, zcu)) {
.begin => try f.writeCValue(w, field_ptr_val, .Other),
.field => |field| {
const u8_ptr_ty = try pt.adjustPtrTypeChild(field_ptr_ty, .u8);
@ -6131,7 +6131,7 @@ fn fieldPtr(
try f.renderType(w, field_ptr_ty);
try w.writeByte(')');
switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, pt)) {
switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, zcu)) {
.begin => try f.writeCValue(w, container_ptr_val, .Other),
.field => |field| {
try w.writeByte('&');
@ -6189,7 +6189,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
const bit_offset = pt.structPackedFieldBitOffset(loaded_struct, extra.field_index);
const bit_offset = zcu.structPackedFieldBitOffset(loaded_struct, extra.field_index);
const field_int_signedness = if (inst_ty.isAbiInt(zcu))
inst_ty.intInfo(zcu).signedness
@ -8573,8 +8573,7 @@ const Vectorize = struct {
}
};
fn lowersToArray(ty: Type, pt: Zcu.PerThread) bool {
const zcu = pt.zcu;
fn lowersToArray(ty: Type, zcu: *Zcu) bool {
return switch (ty.zigTypeTag(zcu)) {
.array, .vector => return true,
else => return ty.isAbiInt(zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(zcu)))) == null,


@ -20,6 +20,7 @@ const Package = @import("../Package.zig");
const Air = @import("../Air.zig");
const Value = @import("../Value.zig");
const Type = @import("../Type.zig");
const codegen = @import("../codegen.zig");
const x86_64_abi = @import("../arch/x86_64/abi.zig");
const wasm_c_abi = @import("wasm/abi.zig");
const aarch64_c_abi = @import("aarch64/abi.zig");
@ -1131,7 +1132,7 @@ pub const Object = struct {
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: *const Air,
liveness: *const Air.Liveness,
liveness: *const ?Air.Liveness,
) !void {
const zcu = pt.zcu;
const comp = zcu.comp;
@ -1489,7 +1490,7 @@ pub const Object = struct {
var fg: FuncGen = .{
.gpa = gpa,
.air = air.*,
.liveness = liveness.*,
.liveness = liveness.*.?,
.ng = &ng,
.wip = wip,
.is_naked = fn_info.cc == .naked,
@ -4210,7 +4211,7 @@ pub const Object = struct {
.eu_payload => |eu_ptr| try o.lowerPtr(
pt,
eu_ptr,
offset + @import("../codegen.zig").errUnionPayloadOffset(
offset + codegen.errUnionPayloadOffset(
Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu),
zcu,
),
@ -6969,7 +6970,7 @@ pub const FuncGen = struct {
.@"struct" => switch (struct_ty.containerLayout(zcu)) {
.@"packed" => {
const struct_type = zcu.typeToStruct(struct_ty).?;
const bit_offset = pt.structPackedFieldBitOffset(struct_type, field_index);
const bit_offset = zcu.structPackedFieldBitOffset(struct_type, field_index);
const containing_int = struct_llvm_val;
const shift_amt =
try o.builder.intValue(containing_int.typeOfWip(&self.wip), bit_offset);
@ -11364,7 +11365,7 @@ pub const FuncGen = struct {
// We have a pointer to a packed struct field that happens to be byte-aligned.
// Offset our operand pointer by the correct number of bytes.
const byte_offset = @divExact(pt.structPackedFieldBitOffset(struct_type, field_index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
const byte_offset = @divExact(zcu.structPackedFieldBitOffset(struct_type, field_index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
if (byte_offset == 0) return struct_ptr;
const usize_ty = try o.lowerType(pt, Type.usize);
const llvm_index = try o.builder.intValue(usize_ty, byte_offset);


@ -251,11 +251,11 @@ pub const Object = struct {
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: *const Air,
liveness: *const Air.Liveness,
liveness: *const ?Air.Liveness,
) !void {
const nav = pt.zcu.funcInfo(func_index).owner_nav;
// TODO: Separate types for generating decls and functions?
try self.genNav(pt, nav, air.*, liveness.*, true);
try self.genNav(pt, nav, air.*, liveness.*.?, true);
}
pub fn updateNav(
@ -5134,7 +5134,7 @@ const NavGen = struct {
.@"struct" => switch (object_ty.containerLayout(zcu)) {
.@"packed" => {
const struct_ty = zcu.typeToPackedStruct(object_ty).?;
const bit_offset = pt.structPackedFieldBitOffset(struct_ty, field_index);
const bit_offset = zcu.structPackedFieldBitOffset(struct_ty, field_index);
const bit_offset_id = try self.constInt(.u16, bit_offset);
const signedness = if (field_ty.isInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned;
const field_bit_size: u16 = @intCast(field_ty.bitSize(zcu));


@ -25,13 +25,13 @@ pub const Env = enum {
/// - `zig build-* -fno-emit-bin`
sema,
/// - sema
/// - `zig build-* -fincremental -fno-llvm -fno-lld -target aarch64-linux --listen=-`
@"aarch64-linux",
/// - `zig build-* -ofmt=c`
cbe,
/// - sema
/// - `zig build-* -fincremental -fno-llvm -fno-lld -target x86_64-linux --listen=-`
@"x86_64-linux",
/// - sema
/// - `zig build-* -fincremental -fno-llvm -fno-lld -target powerpc(64)(le)-linux --listen=-`
@"powerpc-linux",
@ -48,6 +48,10 @@ pub const Env = enum {
/// - `zig build-* -fno-llvm -fno-lld -target wasm32-* --listen=-`
wasm,
/// - sema
/// - `zig build-* -fincremental -fno-llvm -fno-lld -target x86_64-linux --listen=-`
@"x86_64-linux",
pub inline fn supports(comptime dev_env: Env, comptime feature: Feature) bool {
return switch (dev_env) {
.full => true,
@ -153,6 +157,15 @@ pub const Env = enum {
=> true,
else => Env.ast_gen.supports(feature),
},
.@"aarch64-linux" => switch (feature) {
.build_command,
.stdio_listen,
.incremental,
.aarch64_backend,
.elf_linker,
=> true,
else => Env.sema.supports(feature),
},
.cbe => switch (feature) {
.legalize,
.c_backend,
@ -160,16 +173,6 @@ pub const Env = enum {
=> true,
else => Env.sema.supports(feature),
},
.@"x86_64-linux" => switch (feature) {
.build_command,
.stdio_listen,
.incremental,
.legalize,
.x86_64_backend,
.elf_linker,
=> true,
else => Env.sema.supports(feature),
},
.@"powerpc-linux" => switch (feature) {
.build_command,
.stdio_listen,
@ -199,6 +202,16 @@ pub const Env = enum {
=> true,
else => Env.sema.supports(feature),
},
.@"x86_64-linux" => switch (feature) {
.build_command,
.stdio_listen,
.incremental,
.legalize,
.x86_64_backend,
.elf_linker,
=> true,
else => Env.sema.supports(feature),
},
};
}


@ -23,6 +23,7 @@ const dev = @import("dev.zig");
const target_util = @import("target.zig");
const codegen = @import("codegen.zig");
pub const aarch64 = @import("link/aarch64.zig");
pub const LdScript = @import("link/LdScript.zig");
pub const Queue = @import("link/Queue.zig");


@ -1335,9 +1335,13 @@ fn updateNavCode(
log.debug("updateNavCode {f} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const required_alignment = switch (pt.navAlignment(nav_index)) {
.none => target_util.defaultFunctionAlignment(target),
const mod = zcu.navFileScope(nav_index).mod.?;
const target = &mod.resolved_target.result;
const required_alignment = switch (nav.status.fully_resolved.alignment) {
.none => switch (mod.optimize_mode) {
.Debug, .ReleaseSafe, .ReleaseFast => target_util.defaultFunctionAlignment(target),
.ReleaseSmall => target_util.minFunctionAlignment(target),
},
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
};
@ -2832,58 +2836,33 @@ pub const Relocation = struct {
};
fn resolveAarch64(reloc: Relocation, ctx: Context) void {
const Instruction = aarch64_util.encoding.Instruction;
var buffer = ctx.code[reloc.offset..];
switch (reloc.type) {
.got_page, .import_page, .page => {
const source_page = @as(i32, @intCast(ctx.source_vaddr >> 12));
const target_page = @as(i32, @intCast(ctx.target_vaddr >> 12));
const pages = @as(u21, @bitCast(@as(i21, @intCast(target_page - source_page))));
var inst = aarch64_util.Instruction{
.pc_relative_address = mem.bytesToValue(@FieldType(
aarch64_util.Instruction,
@tagName(aarch64_util.Instruction.pc_relative_address),
), buffer[0..4]),
};
inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
mem.writeInt(u32, buffer[0..4], inst.toU32(), .little);
const pages: i21 = @intCast(target_page - source_page);
var inst: Instruction = .read(buffer[0..Instruction.size]);
inst.data_processing_immediate.pc_relative_addressing.group.immhi = @intCast(pages >> 2);
inst.data_processing_immediate.pc_relative_addressing.group.immlo = @truncate(@as(u21, @bitCast(pages)));
inst.write(buffer[0..Instruction.size]);
},
.got_pageoff, .import_pageoff, .pageoff => {
assert(!reloc.pcrel);
const narrowed = @as(u12, @truncate(@as(u64, @intCast(ctx.target_vaddr))));
if (isArithmeticOp(buffer[0..4])) {
var inst = aarch64_util.Instruction{
.add_subtract_immediate = mem.bytesToValue(@FieldType(
aarch64_util.Instruction,
@tagName(aarch64_util.Instruction.add_subtract_immediate),
), buffer[0..4]),
};
inst.add_subtract_immediate.imm12 = narrowed;
mem.writeInt(u32, buffer[0..4], inst.toU32(), .little);
} else {
var inst = aarch64_util.Instruction{
.load_store_register = mem.bytesToValue(@FieldType(
aarch64_util.Instruction,
@tagName(aarch64_util.Instruction.load_store_register),
), buffer[0..4]),
};
const offset: u12 = blk: {
if (inst.load_store_register.size == 0) {
if (inst.load_store_register.v == 1) {
// 128-bit SIMD is scaled by 16.
break :blk @divExact(narrowed, 16);
}
// Otherwise, 8-bit SIMD or ldrb.
break :blk narrowed;
} else {
const denom: u4 = math.powi(u4, 2, inst.load_store_register.size) catch unreachable;
break :blk @divExact(narrowed, denom);
}
};
inst.load_store_register.offset = offset;
mem.writeInt(u32, buffer[0..4], inst.toU32(), .little);
const narrowed: u12 = @truncate(@as(u64, @intCast(ctx.target_vaddr)));
var inst: Instruction = .read(buffer[0..Instruction.size]);
switch (inst.decode()) {
else => unreachable,
.data_processing_immediate => inst.data_processing_immediate.add_subtract_immediate.group.imm12 = narrowed,
.load_store => |load_store| inst.load_store.register_unsigned_immediate.group.imm12 =
switch (load_store.register_unsigned_immediate.decode()) {
.integer => |integer| @shrExact(narrowed, @intFromEnum(integer.group.size)),
.vector => |vector| @shrExact(narrowed, @intFromEnum(vector.group.opc1.decode(vector.group.size))),
},
}
inst.write(buffer[0..Instruction.size]);
},
.direct => {
assert(!reloc.pcrel);
@ -2934,11 +2913,6 @@ pub const Relocation = struct {
},
}
}
fn isArithmeticOp(inst: *const [4]u8) bool {
const group_decode = @as(u5, @truncate(inst[3]));
return ((group_decode >> 2) == 4);
}
};
pub fn addRelocation(coff: *Coff, atom_index: Atom.Index, reloc: Relocation) !void {
@ -3112,7 +3086,7 @@ const Path = std.Build.Cache.Path;
const Directory = std.Build.Cache.Directory;
const Cache = std.Build.Cache;
const aarch64_util = @import("../arch/aarch64/bits.zig");
const aarch64_util = link.aarch64;
const allocPrint = std.fmt.allocPrint;
const codegen = @import("../codegen.zig");
const link = @import("../link.zig");


@ -2487,7 +2487,13 @@ fn initWipNavInner(
try wip_nav.strp(nav.fqn.toSlice(ip));
const ty: Type = nav_val.typeOf(zcu);
const addr: Loc = .{ .addr_reloc = sym_index };
const loc: Loc = if (decl.is_threadlocal) .{ .form_tls_address = &addr } else addr;
const loc: Loc = if (decl.is_threadlocal) loc: {
const target = zcu.comp.root_mod.resolved_target.result;
break :loc switch (target.cpu.arch) {
.x86_64 => .{ .form_tls_address = &addr },
else => .empty,
};
} else addr;
switch (decl.kind) {
.unnamed_test, .@"test", .decltest, .@"comptime" => unreachable,
.@"const" => {


@ -1627,7 +1627,7 @@ const aarch64 = struct {
const S_ = th.targetAddress(target_index, elf_file);
break :blk math.cast(i28, S_ + A - P) orelse return error.Overflow;
};
aarch64_util.writeBranchImm(disp, code);
util.writeBranchImm(disp, code);
},
.PREL32 => {
@ -1640,15 +1640,18 @@ const aarch64 = struct {
mem.writeInt(u64, code_buffer[r_offset..][0..8], @bitCast(value), .little);
},
.ADR_PREL_LO21 => {
const value = math.cast(i21, S + A - P) orelse return error.Overflow;
util.writeAdrInst(value, code);
},
.ADR_PREL_PG_HI21 => {
// TODO: check for relaxation of ADRP+ADD
const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(P, S + A)));
aarch64_util.writeAdrpInst(pages, code);
util.writeAdrInst(try util.calcNumberOfPages(P, S + A), code);
},
.ADR_GOT_PAGE => if (target.flags.has_got) {
const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(P, G + GOT + A)));
aarch64_util.writeAdrpInst(pages, code);
util.writeAdrInst(try util.calcNumberOfPages(P, G + GOT + A), code);
} else {
// TODO: relax
var err = try diags.addErrorWithNotes(1);
@ -1663,12 +1666,12 @@ const aarch64 = struct {
.LD64_GOT_LO12_NC => {
assert(target.flags.has_got);
const taddr = @as(u64, @intCast(G + GOT + A));
aarch64_util.writeLoadStoreRegInst(@divExact(@as(u12, @truncate(taddr)), 8), code);
util.writeLoadStoreRegInst(@divExact(@as(u12, @truncate(taddr)), 8), code);
},
.ADD_ABS_LO12_NC => {
const taddr = @as(u64, @intCast(S + A));
aarch64_util.writeAddImmInst(@truncate(taddr), code);
util.writeAddImmInst(@truncate(taddr), code);
},
.LDST8_ABS_LO12_NC,
@ -1687,57 +1690,54 @@ const aarch64 = struct {
.LDST128_ABS_LO12_NC => @divExact(@as(u12, @truncate(taddr)), 16),
else => unreachable,
};
aarch64_util.writeLoadStoreRegInst(off, code);
util.writeLoadStoreRegInst(off, code);
},
.TLSLE_ADD_TPREL_HI12 => {
const value = math.cast(i12, (S + A - TP) >> 12) orelse
return error.Overflow;
aarch64_util.writeAddImmInst(@bitCast(value), code);
util.writeAddImmInst(@bitCast(value), code);
},
.TLSLE_ADD_TPREL_LO12_NC => {
const value: i12 = @truncate(S + A - TP);
aarch64_util.writeAddImmInst(@bitCast(value), code);
util.writeAddImmInst(@bitCast(value), code);
},
.TLSIE_ADR_GOTTPREL_PAGE21 => {
const S_ = target.gotTpAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
aarch64_util.writeAdrpInst(pages, code);
util.writeAdrInst(try util.calcNumberOfPages(P, S_ + A), code);
},
.TLSIE_LD64_GOTTPREL_LO12_NC => {
const S_ = target.gotTpAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const off: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
aarch64_util.writeLoadStoreRegInst(off, code);
util.writeLoadStoreRegInst(off, code);
},
.TLSGD_ADR_PAGE21 => {
const S_ = target.tlsGdAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
aarch64_util.writeAdrpInst(pages, code);
util.writeAdrInst(try util.calcNumberOfPages(P, S_ + A), code);
},
.TLSGD_ADD_LO12_NC => {
const S_ = target.tlsGdAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const off: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
aarch64_util.writeAddImmInst(off, code);
util.writeAddImmInst(off, code);
},
.TLSDESC_ADR_PAGE21 => {
if (target.flags.has_tlsdesc) {
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
aarch64_util.writeAdrpInst(pages, code);
util.writeAdrInst(try util.calcNumberOfPages(P, S_ + A), code);
} else {
relocs_log.debug(" relaxing adrp => nop", .{});
mem.writeInt(u32, code, Instruction.nop().toU32(), .little);
util.encoding.Instruction.nop().write(code);
}
},
@ -1746,10 +1746,10 @@ const aarch64 = struct {
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const off: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
aarch64_util.writeLoadStoreRegInst(off, code);
util.writeLoadStoreRegInst(off, code);
} else {
relocs_log.debug(" relaxing ldr => nop", .{});
mem.writeInt(u32, code, Instruction.nop().toU32(), .little);
util.encoding.Instruction.nop().write(code);
}
},
@ -1758,32 +1758,18 @@ const aarch64 = struct {
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const off: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
aarch64_util.writeAddImmInst(off, code);
util.writeAddImmInst(off, code);
} else {
const old_inst: Instruction = .{
.add_subtract_immediate = mem.bytesToValue(@FieldType(
Instruction,
@tagName(Instruction.add_subtract_immediate),
), code),
};
const rd: Register = @enumFromInt(old_inst.add_subtract_immediate.rd);
relocs_log.debug(" relaxing add({s}) => movz(x0, {x})", .{ @tagName(rd), S + A - TP });
relocs_log.debug(" relaxing add => movz(x0, {x})", .{S + A - TP});
const value: u16 = @bitCast(math.cast(i16, (S + A - TP) >> 16) orelse return error.Overflow);
mem.writeInt(u32, code, Instruction.movz(.x0, value, 16).toU32(), .little);
util.encoding.Instruction.movz(.x0, value, .{ .lsl = .@"16" }).write(code);
}
},
.TLSDESC_CALL => if (!target.flags.has_tlsdesc) {
const old_inst: Instruction = .{
.unconditional_branch_register = mem.bytesToValue(@FieldType(
Instruction,
@tagName(Instruction.unconditional_branch_register),
), code),
};
const rn: Register = @enumFromInt(old_inst.unconditional_branch_register.rn);
relocs_log.debug(" relaxing br({s}) => movk(x0, {x})", .{ @tagName(rn), S + A - TP });
relocs_log.debug(" relaxing br => movk(x0, {x})", .{S + A - TP});
const value: u16 = @bitCast(@as(i16, @truncate(S + A - TP)));
mem.writeInt(u32, code, Instruction.movk(.x0, value, 0).toU32(), .little);
util.encoding.Instruction.movk(.x0, value, .{}).write(code);
},
else => try atom.reportUnhandledRelocError(rel, elf_file),
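
The two relaxations above rewrite the TLSDESC sequence into a direct TP-relative constant: the add becomes movz x0 with the high 16 bits shifted left by 16, and the br becomes movk x0 patching in the low 16 bits. A standalone sketch of the split (offset value hypothetical):

const std = @import("std");

test "tpoff split across movz/movk" {
    const tpoff: i64 = 0x12345; // hypothetical TP-relative offset
    // movz x0, #hi, lsl #16 materializes the top half...
    const hi: u16 = @bitCast(std.math.cast(i16, tpoff >> 16) orelse return error.Overflow);
    // ...and movk x0, #lo patches in the bottom half.
    const lo: u16 = @bitCast(@as(i16, @truncate(tpoff)));
    try std.testing.expectEqual(@as(u16, 0x1), hi);
    try std.testing.expectEqual(@as(u16, 0x2345), lo);
}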
@ -1819,9 +1805,7 @@ const aarch64 = struct {
}
}
const aarch64_util = @import("../aarch64.zig");
const Instruction = aarch64_util.Instruction;
const Register = aarch64_util.Register;
const util = @import("../aarch64.zig");
};
const riscv = struct {

View File

@ -95,18 +95,21 @@ const aarch64 = struct {
const sym = elf_file.symbol(ref).?;
const saddr = thunk.address(elf_file) + @as(i64, @intCast(i * trampoline_size));
const taddr = sym.address(.{}, elf_file);
const pages = try util.calcNumberOfPages(saddr, taddr);
try writer.writeInt(u32, Instruction.adrp(.x16, pages).toU32(), .little);
const off: u12 = @truncate(@as(u64, @bitCast(taddr)));
try writer.writeInt(u32, Instruction.add(.x16, .x16, off, false).toU32(), .little);
try writer.writeInt(u32, Instruction.br(.x16).toU32(), .little);
try writer.writeInt(u32, @bitCast(
util.encoding.Instruction.adrp(.x16, try util.calcNumberOfPages(saddr, taddr) << 12),
), .little);
try writer.writeInt(u32, @bitCast(util.encoding.Instruction.add(
.x16,
.x16,
.{ .immediate = @truncate(@as(u64, @bitCast(taddr))) },
)), .little);
try writer.writeInt(u32, @bitCast(util.encoding.Instruction.br(.x16)), .little);
}
}
const trampoline_size = 3 * @sizeOf(u32);
const util = @import("../aarch64.zig");
const Instruction = util.Instruction;
};
const assert = std.debug.assert;
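
The trampoline above exists because a direct b/bl encodes a signed 26-bit word offset, while the adrp+add pair reaches any page within a signed 21-bit page delta. A quick standalone check of the two ranges:

const std = @import("std");

test "bl reach vs thunk reach" {
    const bl_reach: i64 = 1 << 27; // signed 26-bit word offset => +-128 MiB
    const adrp_reach: i64 = 1 << 32; // signed 21-bit page delta => +-4 GiB
    try std.testing.expect(adrp_reach > bl_reach);
}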

View File

@ -1270,9 +1270,13 @@ fn updateNavCode(
log.debug("updateNavCode {f}({d})", .{ nav.fqn.fmt(ip), nav_index });
const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const required_alignment = switch (pt.navAlignment(nav_index)) {
.none => target_util.defaultFunctionAlignment(target),
const mod = zcu.navFileScope(nav_index).mod.?;
const target = &mod.resolved_target.result;
const required_alignment = switch (nav.status.fully_resolved.alignment) {
.none => switch (mod.optimize_mode) {
.Debug, .ReleaseSafe, .ReleaseFast => target_util.defaultFunctionAlignment(target),
.ReleaseSmall => target_util.minFunctionAlignment(target),
},
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
};
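
In words: with no explicit align(N), ReleaseSmall drops to the target's minimum function alignment instead of the larger optimized default, and an explicit alignment is still raised to at least the minimum via maxStrict. A sketch with plain integers standing in for InternPool.Alignment (the 4/16 values are assumptions, not real target numbers):

const std = @import("std");

test "required function alignment (sketch)" {
    const min_align: u32 = 4; // assumed minFunctionAlignment
    const default_align: u32 = 16; // assumed defaultFunctionAlignment
    const explicit: ?u32 = 2; // hypothetical align(2) on the function
    const required = if (explicit) |a| @max(a, min_align) else default_align;
    try std.testing.expectEqual(@as(u32, 4), required);
}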

View File

@ -94,14 +94,18 @@ pub fn encode(comptime kind: Kind, cpu_arch: std.Target.Cpu.Arch) u32 {
pub const dwarf = struct {
pub fn crossSectionRelocType(format: DW.Format, cpu_arch: std.Target.Cpu.Arch) u32 {
return switch (cpu_arch) {
.x86_64 => @intFromEnum(switch (format) {
.@"32" => elf.R_X86_64.@"32",
.x86_64 => @intFromEnum(@as(elf.R_X86_64, switch (format) {
.@"32" => .@"32",
.@"64" => .@"64",
}),
.riscv64 => @intFromEnum(switch (format) {
.@"32" => elf.R_RISCV.@"32",
})),
.aarch64 => @intFromEnum(@as(elf.R_AARCH64, switch (format) {
.@"32" => .ABS32,
.@"64" => .ABS64,
})),
.riscv64 => @intFromEnum(@as(elf.R_RISCV, switch (format) {
.@"32" => .@"32",
.@"64" => .@"64",
}),
})),
else => @panic("TODO unhandled cpu arch"),
};
}
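
The new aarch64 arm mirrors the x86_64/riscv64 pattern: an absolute relocation chosen by the DWARF format width. A reduced sketch of the same mapping using std.elf:

const std = @import("std");

test "aarch64 DWARF cross-section reloc (sketch)" {
    const Format = enum { @"32", @"64" };
    const format: Format = .@"64";
    const r: std.elf.R_AARCH64 = switch (format) {
        .@"32" => .ABS32,
        .@"64" => .ABS64,
    };
    try std.testing.expectEqual(std.elf.R_AARCH64.ABS64, r);
}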
@ -121,6 +125,14 @@ pub const dwarf = struct {
},
.debug_frame => .PC32,
})),
.aarch64 => @intFromEnum(@as(elf.R_AARCH64, switch (source_section) {
else => switch (address_size) {
.@"32" => .ABS32,
.@"64" => .ABS64,
else => unreachable,
},
.debug_frame => .PREL32,
})),
.riscv64 => @intFromEnum(@as(elf.R_RISCV, switch (source_section) {
else => switch (address_size) {
.@"32" => .@"32",

View File

@ -810,54 +810,43 @@ pub const PltSection = struct {
const got_plt_addr: i64 = @intCast(shdrs[elf_file.section_indexes.got_plt.?].sh_addr);
// TODO: relax if possible
// .got.plt[2]
const pages = try aarch64_util.calcNumberOfPages(plt_addr + 4, got_plt_addr + 16);
const ldr_off = try math.divExact(u12, @truncate(@as(u64, @bitCast(got_plt_addr + 16))), 8);
const pages = try util.calcNumberOfPages(plt_addr + 4, got_plt_addr + 16);
const ldr_off: u12 = @truncate(@as(u64, @bitCast(got_plt_addr + 16)));
const add_off: u12 = @truncate(@as(u64, @bitCast(got_plt_addr + 16)));
const preamble = &[_]Instruction{
Instruction.stp(
.x16,
.x30,
Register.sp,
Instruction.LoadStorePairOffset.pre_index(-16),
),
Instruction.adrp(.x16, pages),
Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(ldr_off)),
Instruction.add(.x16, .x16, add_off, false),
Instruction.br(.x17),
Instruction.nop(),
Instruction.nop(),
Instruction.nop(),
const preamble = [_]util.encoding.Instruction{
.stp(.x16, .x30, .{ .pre_index = .{ .base = .sp, .index = -16 } }),
.adrp(.x16, pages << 12),
.ldr(.x17, .{ .unsigned_offset = .{ .base = .x16, .offset = ldr_off } }),
.add(.x16, .x16, .{ .immediate = add_off }),
.br(.x17),
.nop(),
.nop(),
.nop(),
};
comptime assert(preamble.len == 8);
for (preamble) |inst| {
try writer.writeInt(u32, inst.toU32(), .little);
}
for (preamble) |inst| try writer.writeInt(util.encoding.Instruction.Backing, @bitCast(inst), .little);
}
for (plt.symbols.items) |ref| {
const sym = elf_file.symbol(ref).?;
const target_addr = sym.gotPltAddress(elf_file);
const source_addr = sym.pltAddress(elf_file);
const pages = try aarch64_util.calcNumberOfPages(source_addr, target_addr);
const ldr_off = try math.divExact(u12, @truncate(@as(u64, @bitCast(target_addr))), 8);
const pages = try util.calcNumberOfPages(source_addr, target_addr);
const ldr_off: u12 = @truncate(@as(u64, @bitCast(target_addr)));
const add_off: u12 = @truncate(@as(u64, @bitCast(target_addr)));
const insts = &[_]Instruction{
Instruction.adrp(.x16, pages),
Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(ldr_off)),
Instruction.add(.x16, .x16, add_off, false),
Instruction.br(.x17),
const insts = [_]util.encoding.Instruction{
.adrp(.x16, pages << 12),
.ldr(.x17, .{ .unsigned_offset = .{ .base = .x16, .offset = ldr_off } }),
.add(.x16, .x16, .{ .immediate = add_off }),
.br(.x17),
};
comptime assert(insts.len == 4);
for (insts) |inst| {
try writer.writeInt(u32, inst.toU32(), .little);
}
for (insts) |inst| try writer.writeInt(util.encoding.Instruction.Backing, @bitCast(inst), .little);
}
}
const aarch64_util = @import("../aarch64.zig");
const Instruction = aarch64_util.Instruction;
const Register = aarch64_util.Register;
const util = @import("../aarch64.zig");
};
};
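
With the new encoding.Instruction decl-literal API, load/store offsets are passed as plain low-12-bit byte offsets and the encoder applies the size scaling the old code did by hand, which is why ldr_off above loses its math.divExact. A sketch of one PLT entry assuming the in-tree src/codegen/aarch64/encoding.zig module (import path illustrative):

// sketch only; assumes the commit's src/codegen/aarch64/encoding.zig
const Instruction = @import("codegen/aarch64/encoding.zig").Instruction;

fn pltEntry(page_delta: i33, lo12: u12) [4]Instruction {
    return .{
        .adrp(.x16, page_delta << 12), // adrp x16, slot@PAGE
        .ldr(.x17, .{ .unsigned_offset = .{ .base = .x16, .offset = lo12 } }), // ldr x17, [x16, :lo12:]
        .add(.x16, .x16, .{ .immediate = lo12 }), // add x16, x16, :lo12:
        .br(.x17), // br x17
    };
}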
@ -979,24 +968,20 @@ pub const PltGotSection = struct {
const sym = elf_file.symbol(ref).?;
const target_addr = sym.gotAddress(elf_file);
const source_addr = sym.pltGotAddress(elf_file);
const pages = try aarch64_util.calcNumberOfPages(source_addr, target_addr);
const off = try math.divExact(u12, @truncate(@as(u64, @bitCast(target_addr))), 8);
const insts = &[_]Instruction{
Instruction.adrp(.x16, pages),
Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(off)),
Instruction.br(.x17),
Instruction.nop(),
const pages = try util.calcNumberOfPages(source_addr, target_addr);
const off: u12 = @truncate(@as(u64, @bitCast(target_addr)));
const insts = [_]util.encoding.Instruction{
.adrp(.x16, pages << 12),
.ldr(.x17, .{ .unsigned_offset = .{ .base = .x16, .offset = off } }),
.br(.x17),
.nop(),
};
comptime assert(insts.len == 4);
for (insts) |inst| {
try writer.writeInt(u32, inst.toU32(), .little);
}
for (insts) |inst| try writer.writeInt(util.encoding.Instruction.Backing, @bitCast(inst), .little);
}
}
const aarch64_util = @import("../aarch64.zig");
const Instruction = aarch64_util.Instruction;
const Register = aarch64_util.Register;
const util = @import("../aarch64.zig");
};
};

View File

@ -328,6 +328,7 @@ pub fn deinit(self: *MachO) void {
self.unwind_info.deinit(gpa);
self.data_in_code.deinit(gpa);
for (self.thunks.items) |*thunk| thunk.deinit(gpa);
self.thunks.deinit(gpa);
}
@ -5373,7 +5374,7 @@ const mem = std.mem;
const meta = std.meta;
const Writer = std.io.Writer;
const aarch64 = @import("../arch/aarch64/bits.zig");
const aarch64 = codegen.aarch64.encoding;
const bind = @import("MachO/dyld_info/bind.zig");
const calcUuid = @import("MachO/uuid.zig").calcUuid;
const codegen = @import("../codegen.zig");

View File

@ -780,8 +780,7 @@ fn resolveRelocInner(
};
break :target math.cast(u64, target) orelse return error.Overflow;
};
const pages = @as(u21, @bitCast(try aarch64.calcNumberOfPages(@intCast(source), @intCast(target))));
aarch64.writeAdrpInst(pages, code[rel_offset..][0..4]);
aarch64.writeAdrInst(try aarch64.calcNumberOfPages(@intCast(source), @intCast(target)), code[rel_offset..][0..aarch64.encoding.Instruction.size]);
},
.pageoff => {
@ -789,26 +788,18 @@ fn resolveRelocInner(
assert(rel.meta.length == 2);
assert(!rel.meta.pcrel);
const target = math.cast(u64, S + A) orelse return error.Overflow;
const inst_code = code[rel_offset..][0..4];
if (aarch64.isArithmeticOp(inst_code)) {
aarch64.writeAddImmInst(@truncate(target), inst_code);
} else {
var inst = aarch64.Instruction{
.load_store_register = mem.bytesToValue(@FieldType(
aarch64.Instruction,
@tagName(aarch64.Instruction.load_store_register),
), inst_code),
};
inst.load_store_register.offset = switch (inst.load_store_register.size) {
0 => if (inst.load_store_register.v == 1)
try divExact(self, rel, @truncate(target), 16, macho_file)
else
@truncate(target),
1 => try divExact(self, rel, @truncate(target), 2, macho_file),
2 => try divExact(self, rel, @truncate(target), 4, macho_file),
3 => try divExact(self, rel, @truncate(target), 8, macho_file),
};
try writer.writeInt(u32, inst.toU32(), .little);
const inst_code = code[rel_offset..][0..aarch64.encoding.Instruction.size];
var inst: aarch64.encoding.Instruction = .read(inst_code);
switch (inst.decode()) {
else => unreachable,
.data_processing_immediate => aarch64.writeAddImmInst(@truncate(target), inst_code),
.load_store => |load_store| {
inst.load_store.register_unsigned_immediate.group.imm12 = switch (load_store.register_unsigned_immediate.decode()) {
.integer => |integer| try divExact(self, rel, @truncate(target), @as(u4, 1) << @intFromEnum(integer.group.size), macho_file),
.vector => |vector| try divExact(self, rel, @truncate(target), @as(u5, 1) << @intFromEnum(vector.group.opc1.decode(vector.group.size)), macho_file),
};
try writer.writeInt(u32, @bitCast(inst), .little);
},
}
},
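
The decoded path above fills imm12 with the low twelve bits of the target divided by the access size (1, 2, 4, 8, or 16 bytes), because unsigned-offset loads and stores encode a scaled immediate. Worked standalone:

const std = @import("std");

test "scaled imm12 for an unsigned-offset ldr" {
    const lo12: u12 = 0x9a8; // low 12 bits of the target address
    const access_size: u12 = 8; // 64-bit integer ldr
    const imm12 = try std.math.divExact(u12, lo12, access_size);
    try std.testing.expectEqual(@as(u12, 0x135), imm12);
}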
@ -834,59 +825,26 @@ fn resolveRelocInner(
break :target math.cast(u64, target) orelse return error.Overflow;
};
const RegInfo = struct {
rd: u5,
rn: u5,
size: u2,
};
const inst_code = code[rel_offset..][0..4];
const reg_info: RegInfo = blk: {
if (aarch64.isArithmeticOp(inst_code)) {
const inst = mem.bytesToValue(@FieldType(
aarch64.Instruction,
@tagName(aarch64.Instruction.add_subtract_immediate),
), inst_code);
break :blk .{
.rd = inst.rd,
.rn = inst.rn,
.size = inst.sf,
};
} else {
const inst = mem.bytesToValue(@FieldType(
aarch64.Instruction,
@tagName(aarch64.Instruction.load_store_register),
), inst_code);
break :blk .{
.rd = inst.rt,
.rn = inst.rn,
.size = inst.size,
};
}
const rd, const rn = switch (aarch64.encoding.Instruction.read(inst_code).decode()) {
else => unreachable,
.data_processing_immediate => |decoded| .{
decoded.add_subtract_immediate.group.Rd.decodeInteger(.doubleword, .{ .sp = true }),
decoded.add_subtract_immediate.group.Rn.decodeInteger(.doubleword, .{ .sp = true }),
},
.load_store => |decoded| .{
decoded.register_unsigned_immediate.integer.group.Rt.decodeInteger(.doubleword, .{}),
decoded.register_unsigned_immediate.group.Rn.decodeInteger(.doubleword, .{ .sp = true }),
},
};
var inst = if (sym.getSectionFlags().tlv_ptr) aarch64.Instruction{
.load_store_register = .{
.rt = reg_info.rd,
.rn = reg_info.rn,
.offset = try divExact(self, rel, @truncate(target), 8, macho_file),
.opc = 0b01,
.op1 = 0b01,
.v = 0,
.size = reg_info.size,
},
} else aarch64.Instruction{
.add_subtract_immediate = .{
.rd = reg_info.rd,
.rn = reg_info.rn,
.imm12 = @truncate(target),
.sh = 0,
.s = 0,
.op = 0,
.sf = @as(u1, @truncate(reg_info.size)),
},
};
try writer.writeInt(u32, inst.toU32(), .little);
try writer.writeInt(u32, @bitCast(@as(
aarch64.encoding.Instruction,
if (sym.getSectionFlags().tlv_ptr) .ldr(rd, .{ .unsigned_offset = .{
.base = rn,
.offset = try divExact(self, rel, @truncate(target), 8, macho_file) * 8,
} }) else .add(rd, rn, .{ .immediate = @truncate(target) }),
)), .little);
},
}
}

View File

@ -21,15 +21,17 @@ pub fn getTargetAddress(thunk: Thunk, ref: MachO.Ref, macho_file: *MachO) u64 {
}
pub fn write(thunk: Thunk, macho_file: *MachO, writer: anytype) !void {
const Instruction = aarch64.encoding.Instruction;
for (thunk.symbols.keys(), 0..) |ref, i| {
const sym = ref.getSymbol(macho_file).?;
const saddr = thunk.getAddress(macho_file) + i * trampoline_size;
const taddr = sym.getAddress(.{}, macho_file);
const pages = try aarch64.calcNumberOfPages(@intCast(saddr), @intCast(taddr));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off: u12 = @truncate(taddr);
try writer.writeInt(u32, aarch64.Instruction.add(.x16, .x16, off, false).toU32(), .little);
try writer.writeInt(u32, aarch64.Instruction.br(.x16).toU32(), .little);
try writer.writeInt(u32, @bitCast(Instruction.adrp(.x16, pages << 12)), .little);
try writer.writeInt(u32, @bitCast(
Instruction.add(.x16, .x16, .{ .immediate = @truncate(taddr) }),
), .little);
try writer.writeInt(u32, @bitCast(Instruction.br(.x16)), .little);
}
}

View File

@ -945,9 +945,13 @@ fn updateNavCode(
log.debug("updateNavCode {f} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const required_alignment = switch (pt.navAlignment(nav_index)) {
.none => target_util.defaultFunctionAlignment(target),
const mod = zcu.navFileScope(nav_index).mod.?;
const target = &mod.resolved_target.result;
const required_alignment = switch (nav.status.fully_resolved.alignment) {
.none => switch (mod.optimize_mode) {
.Debug, .ReleaseSafe, .ReleaseFast => target_util.defaultFunctionAlignment(target),
.ReleaseSmall => target_util.minFunctionAlignment(target),
},
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
};

View File

@ -105,16 +105,15 @@ pub const StubsSection = struct {
try writer.writeInt(i32, @intCast(target - source - 2 - 4), .little);
},
.aarch64 => {
const Instruction = aarch64.encoding.Instruction;
// TODO relax if possible
const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try math.divExact(u12, @truncate(target), 8);
try writer.writeInt(
u32,
aarch64.Instruction.ldr(.x16, .x16, aarch64.Instruction.LoadStoreOffset.imm(off)).toU32(),
.little,
);
try writer.writeInt(u32, aarch64.Instruction.br(.x16).toU32(), .little);
try writer.writeInt(u32, @bitCast(Instruction.adrp(.x16, pages << 12)), .little);
try writer.writeInt(u32, @bitCast(Instruction.ldr(
.x16,
.{ .unsigned_offset = .{ .base = .x16, .offset = @as(u12, @truncate(target)) } },
)), .little);
try writer.writeInt(u32, @bitCast(Instruction.br(.x16)), .little);
},
else => unreachable,
}
@ -201,18 +200,16 @@ pub const StubsHelperSection = struct {
try writer.writeInt(i32, @intCast(target - source - 6 - 4), .little);
},
.aarch64 => {
const literal = blk: {
const div_res = try std.math.divExact(u64, entry_size - @sizeOf(u32), 4);
break :blk std.math.cast(u18, div_res) orelse return error.Overflow;
};
try writer.writeInt(u32, aarch64.Instruction.ldrLiteral(
.w16,
literal,
).toU32(), .little);
const Instruction = aarch64.encoding.Instruction;
if (entry_size % Instruction.size != 0) return error.UnexpectedRemainder;
try writer.writeInt(u32, @bitCast(
Instruction.ldr(.w16, .{ .literal = std.math.cast(i21, entry_size - Instruction.size) orelse
return error.Overflow }),
), .little);
const disp = math.cast(i28, @as(i64, @intCast(target)) - @as(i64, @intCast(source + 4))) orelse
return error.Overflow;
try writer.writeInt(u32, aarch64.Instruction.b(disp).toU32(), .little);
try writer.writeAll(&.{ 0x0, 0x0, 0x0, 0x0 });
try writer.writeInt(u32, @bitCast(Instruction.b(disp)), .little);
try writer.writeInt(u32, @bitCast(Instruction.udf(0x0)), .little);
},
else => unreachable,
}
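
Each helper entry above is three words: an ldr (literal) whose PC-relative literal slot sits entry_size - 4 bytes ahead, a b back to the helper head, and, instead of four raw zero bytes, an explicit udf #0, which encodes to the same four zero bytes but makes it clear the slot is data that traps if executed. The displacement arithmetic, standalone (12-byte entry size assumed):

const std = @import("std");

test "stubs-helper literal displacement" {
    const entry_size: u64 = 12; // assumed: ldr + b + one data word
    const inst_size: u64 = 4;
    // ldr (literal) takes a signed 21-bit byte offset, word aligned
    const disp = std.math.cast(i21, entry_size - inst_size) orelse return error.Overflow;
    try std.testing.expectEqual(@as(i21, 8), disp);
}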
@ -242,31 +239,28 @@ pub const StubsHelperSection = struct {
try writer.writeByte(0x90);
},
.aarch64 => {
const Instruction = aarch64.encoding.Instruction;
{
// TODO relax if possible
const pages = try aarch64.calcNumberOfPages(@intCast(sect.addr), @intCast(dyld_private_addr));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x17, pages).toU32(), .little);
const off: u12 = @truncate(dyld_private_addr);
try writer.writeInt(u32, aarch64.Instruction.add(.x17, .x17, off, false).toU32(), .little);
try writer.writeInt(Instruction.Backing, @bitCast(Instruction.adrp(.x17, pages << 12)), .little);
try writer.writeInt(Instruction.Backing, @bitCast(
Instruction.add(.x17, .x17, .{ .immediate = @as(u12, @truncate(dyld_private_addr)) }),
), .little);
}
try writer.writeInt(u32, aarch64.Instruction.stp(
.x16,
.x17,
aarch64.Register.sp,
aarch64.Instruction.LoadStorePairOffset.pre_index(-16),
).toU32(), .little);
try writer.writeInt(Instruction.Backing, @bitCast(
Instruction.stp(.x16, .x17, .{ .pre_index = .{ .base = .sp, .index = -16 } }),
), .little);
{
// TODO relax if possible
const pages = try aarch64.calcNumberOfPages(@intCast(sect.addr + 12), @intCast(dyld_stub_binder_addr));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try math.divExact(u12, @truncate(dyld_stub_binder_addr), 8);
try writer.writeInt(u32, aarch64.Instruction.ldr(
try writer.writeInt(Instruction.Backing, @bitCast(Instruction.adrp(.x16, pages << 12)), .little);
try writer.writeInt(Instruction.Backing, @bitCast(Instruction.ldr(
.x16,
.x16,
aarch64.Instruction.LoadStoreOffset.imm(off),
).toU32(), .little);
.{ .unsigned_offset = .{ .base = .x16, .offset = @as(u12, @truncate(dyld_stub_binder_addr)) } },
)), .little);
}
try writer.writeInt(u32, aarch64.Instruction.br(.x16).toU32(), .little);
try writer.writeInt(Instruction.Backing, @bitCast(Instruction.br(.x16)), .little);
},
else => unreachable,
}
@ -426,35 +420,32 @@ pub const ObjcStubsSection = struct {
}
},
.aarch64 => {
const Instruction = aarch64.encoding.Instruction;
{
const target = sym.getObjcSelrefsAddress(macho_file);
const source = addr;
const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x1, pages).toU32(), .little);
const off = try math.divExact(u12, @truncate(target), 8);
try writer.writeInt(
u32,
aarch64.Instruction.ldr(.x1, .x1, aarch64.Instruction.LoadStoreOffset.imm(off)).toU32(),
.little,
);
try writer.writeInt(u32, @bitCast(Instruction.adrp(.x1, pages << 12)), .little);
try writer.writeInt(u32, @bitCast(Instruction.ldr(
.x1,
.{ .unsigned_offset = .{ .base = .x1, .offset = @as(u12, @truncate(target)) } },
)), .little);
}
{
const target_sym = obj.getObjcMsgSendRef(macho_file).?.getSymbol(macho_file).?;
const target = target_sym.getGotAddress(macho_file);
const source = addr + 2 * @sizeOf(u32);
const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try math.divExact(u12, @truncate(target), 8);
try writer.writeInt(
u32,
aarch64.Instruction.ldr(.x16, .x16, aarch64.Instruction.LoadStoreOffset.imm(off)).toU32(),
.little,
);
try writer.writeInt(u32, @bitCast(Instruction.adrp(.x16, pages << 12)), .little);
try writer.writeInt(u32, @bitCast(Instruction.ldr(
.x16,
.{ .unsigned_offset = .{ .base = .x16, .offset = @as(u12, @truncate(target)) } },
)), .little);
}
try writer.writeInt(u32, aarch64.Instruction.br(.x16).toU32(), .little);
try writer.writeInt(u32, aarch64.Instruction.brk(1).toU32(), .little);
try writer.writeInt(u32, aarch64.Instruction.brk(1).toU32(), .little);
try writer.writeInt(u32, aarch64.Instruction.brk(1).toU32(), .little);
try writer.writeInt(u32, @bitCast(Instruction.br(.x16)), .little);
try writer.writeInt(u32, @bitCast(Instruction.brk(0x1)), .little);
try writer.writeInt(u32, @bitCast(Instruction.brk(0x1)), .little);
try writer.writeInt(u32, @bitCast(Instruction.brk(0x1)), .little);
},
else => unreachable,
}

View File

@ -1,66 +1,36 @@
pub inline fn isArithmeticOp(inst: *const [4]u8) bool {
const group_decode = @as(u5, @truncate(inst[3]));
return ((group_decode >> 2) == 4);
}
pub const encoding = @import("../codegen.zig").aarch64.encoding;
pub fn writeAddImmInst(value: u12, code: *[4]u8) void {
var inst = Instruction{
.add_subtract_immediate = mem.bytesToValue(@FieldType(
Instruction,
@tagName(Instruction.add_subtract_immediate),
), code),
};
inst.add_subtract_immediate.imm12 = value;
mem.writeInt(u32, code, inst.toU32(), .little);
var inst: encoding.Instruction = .read(code);
inst.data_processing_immediate.add_subtract_immediate.group.imm12 = value;
inst.write(code);
}
pub fn writeLoadStoreRegInst(value: u12, code: *[4]u8) void {
var inst: Instruction = .{
.load_store_register = mem.bytesToValue(@FieldType(
Instruction,
@tagName(Instruction.load_store_register),
), code),
};
inst.load_store_register.offset = value;
mem.writeInt(u32, code, inst.toU32(), .little);
var inst: encoding.Instruction = .read(code);
inst.load_store.register_unsigned_immediate.group.imm12 = value;
inst.write(code);
}
pub fn calcNumberOfPages(saddr: i64, taddr: i64) error{Overflow}!i21 {
const spage = math.cast(i32, saddr >> 12) orelse return error.Overflow;
const tpage = math.cast(i32, taddr >> 12) orelse return error.Overflow;
const pages = math.cast(i21, tpage - spage) orelse return error.Overflow;
return pages;
pub fn calcNumberOfPages(saddr: i64, taddr: i64) error{Overflow}!i33 {
return math.cast(i21, (taddr >> 12) - (saddr >> 12)) orelse error.Overflow;
}
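
The simplified calcNumberOfPages keeps the same semantics: the delta between the 4 KiB page of the target and the page of the instruction, range-checked to the 21 bits adrp can encode (the widened i33 return type just matches what writeAdrInst accepts). Worked standalone with hypothetical addresses:

const std = @import("std");

test "adrp page delta" {
    const saddr: i64 = 0x1000_4010; // address of the adrp
    const taddr: i64 = 0x1000_8f24; // target address
    const pages = std.math.cast(i21, (taddr >> 12) - (saddr >> 12)) orelse return error.Overflow;
    try std.testing.expectEqual(@as(i21, 4), pages);
    // the low 12 bits (0xf24) are applied by the following add/ldr
}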
pub fn writeAdrpInst(pages: u21, code: *[4]u8) void {
var inst = Instruction{
.pc_relative_address = mem.bytesToValue(@FieldType(
Instruction,
@tagName(Instruction.pc_relative_address),
), code),
};
inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
mem.writeInt(u32, code, inst.toU32(), .little);
pub fn writeAdrInst(imm: i33, code: *[4]u8) void {
var inst: encoding.Instruction = .read(code);
inst.data_processing_immediate.pc_relative_addressing.group.immhi = @intCast(imm >> 2);
inst.data_processing_immediate.pc_relative_addressing.group.immlo = @bitCast(@as(i2, @truncate(imm)));
inst.write(code);
}
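
writeAdrInst splits the immediate across the two ADR/ADRP fields: immlo holds the low two bits and immhi the remaining nineteen. A standalone check, including sign handling:

const std = @import("std");

test "adr immediate split into immhi/immlo" {
    const imm: i21 = -3; // hypothetical page delta
    const immlo: u2 = @bitCast(@as(i2, @truncate(imm)));
    const immhi: i19 = @intCast(imm >> 2);
    try std.testing.expectEqual(@as(u2, 1), immlo);
    try std.testing.expectEqual(@as(i19, -1), immhi);
    // reassemble: immhi * 4 + immlo recovers the original immediate
    try std.testing.expectEqual(@as(i21, -3), @as(i21, immhi) * 4 + immlo);
}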
pub fn writeBranchImm(disp: i28, code: *[4]u8) void {
var inst = Instruction{
.unconditional_branch_immediate = mem.bytesToValue(@FieldType(
Instruction,
@tagName(Instruction.unconditional_branch_immediate),
), code),
};
inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(disp >> 2))));
mem.writeInt(u32, code, inst.toU32(), .little);
var inst: encoding.Instruction = .read(code);
inst.branch_exception_generating_system.unconditional_branch_immediate.group.imm26 = @intCast(@shrExact(disp, 2));
inst.write(code);
}
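
The @shrExact here is a behavioral tightening over the old truncating shift: branch displacements must be word aligned, so a misaligned disp now trips a safety check instead of being silently rounded. Standalone:

const std = @import("std");

test "imm26 from a branch displacement" {
    const disp: i28 = -0x20; // branch targets are always 4-byte aligned
    const imm26: i26 = @intCast(@shrExact(disp, 2));
    try std.testing.expectEqual(@as(i26, -8), imm26);
}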
const assert = std.debug.assert;
const bits = @import("../arch/aarch64/bits.zig");
const builtin = @import("builtin");
const math = std.math;
const mem = std.mem;
const std = @import("std");
pub const Instruction = bits.Instruction;
pub const Register = bits.Register;

View File

@ -37,6 +37,7 @@ const dev = @import("dev.zig");
test {
_ = Package;
_ = @import("codegen.zig");
}
const thread_stack_size = 60 << 20;

View File

@ -347,7 +347,7 @@ pub fn defaultCompilerRtOptimizeMode(target: *const std.Target) std.builtin.Opti
}
}
pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, have_llvm: bool) bool {
pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, comptime have_llvm: bool) bool {
switch (target.os.tag) {
.plan9 => return false,
else => {},
@ -359,6 +359,7 @@ pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, have_llv
else => {},
}
return switch (zigBackend(target, use_llvm)) {
.stage2_aarch64 => true,
.stage2_llvm => true,
.stage2_x86_64 => switch (target.ofmt) {
.elf, .macho => true,
@ -368,13 +369,21 @@ pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, have_llv
};
}
pub fn canBuildLibUbsanRt(target: *const std.Target) bool {
pub fn canBuildLibUbsanRt(target: *const std.Target, use_llvm: bool, comptime have_llvm: bool) bool {
switch (target.cpu.arch) {
.spirv32, .spirv64 => return false,
// Remove this once https://github.com/ziglang/zig/issues/23715 is fixed
.nvptx, .nvptx64 => return false,
else => return true,
else => {},
}
return switch (zigBackend(target, use_llvm)) {
.stage2_llvm => true,
.stage2_x86_64 => switch (target.ofmt) {
.elf, .macho => true,
else => have_llvm,
},
else => have_llvm,
};
}
pub fn hasRedZone(target: *const std.Target) bool {
@ -767,6 +776,7 @@ pub fn supportsTailCall(target: *const std.Target, backend: std.builtin.Compiler
pub fn supportsThreads(target: *const std.Target, backend: std.builtin.CompilerBackend) bool {
return switch (backend) {
.stage2_aarch64 => false,
.stage2_powerpc => true,
.stage2_x86_64 => target.ofmt == .macho or target.ofmt == .elf,
else => true,
@ -864,7 +874,7 @@ pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, compt
else => false,
},
.field_reordering => switch (backend) {
.stage2_c, .stage2_llvm, .stage2_x86_64 => true,
.stage2_aarch64, .stage2_c, .stage2_llvm, .stage2_x86_64 => true,
else => false,
},
.separate_thread => switch (backend) {

View File

@ -123,7 +123,6 @@ test {
}
if (builtin.zig_backend != .stage2_arm and
builtin.zig_backend != .stage2_aarch64 and
builtin.zig_backend != .stage2_spirv)
{
_ = @import("behavior/export_keyword.zig");
@ -141,7 +140,8 @@ test {
}
// This bug only repros in the root file
test "deference @embedFile() of a file full of zero bytes" {
test "dereference @embedFile() of a file full of zero bytes" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const contents = @embedFile("behavior/zero.bin").*;

View File

@ -3,7 +3,6 @@ const std = @import("std");
const expect = std.testing.expect;
test "@abs integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -50,7 +49,6 @@ fn testAbsIntegers() !void {
}
test "@abs unsigned integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -90,7 +88,6 @@ fn testAbsUnsignedIntegers() !void {
}
test "@abs big int <= 128 bits" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@ -153,7 +150,6 @@ fn testAbsUnsignedBigInt() !void {
}
test "@abs floats" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -207,9 +203,9 @@ fn testAbsFloats(comptime T: type) !void {
}
test "@abs int vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -275,8 +271,8 @@ fn testAbsIntVectors(comptime len: comptime_int) !void {
}
test "@abs unsigned int vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -334,8 +330,8 @@ fn testAbsUnsignedIntVectors(comptime len: comptime_int) !void {
}
test "@abs float vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

View File

@ -16,7 +16,6 @@ test "global variable alignment" {
}
test "large alignment of local constant" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // flaky
@ -25,7 +24,6 @@ test "large alignment of local constant" {
}
test "slicing array of length 1 can not assume runtime index is always zero" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // flaky
@ -74,7 +72,6 @@ test "alignment of struct with pointer has same alignment as usize" {
test "alignment and size of structs with 128-bit fields" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const A = struct {
@ -160,7 +157,6 @@ test "alignment and size of structs with 128-bit fields" {
}
test "implicitly decreasing slice alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -173,7 +169,6 @@ fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 {
}
test "specifying alignment allows pointer cast" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -186,7 +181,6 @@ fn testBytesAlign(b: u8) !void {
}
test "@alignCast slices" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -205,7 +199,6 @@ fn sliceExpects4(slice: []align(4) u32) void {
test "return error union with 128-bit integer" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -218,7 +211,6 @@ fn give() anyerror!u128 {
test "page aligned array on stack" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -238,7 +230,6 @@ test "page aligned array on stack" {
}
test "function alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -268,7 +259,6 @@ test "function alignment" {
}
test "implicitly decreasing fn alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -292,7 +282,6 @@ fn alignedBig() align(16) i32 {
}
test "@alignCast functions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -376,7 +365,6 @@ const DefaultAligned = struct {
test "read 128-bit field from default aligned struct in stack memory" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -396,7 +384,6 @@ var default_aligned_global = DefaultAligned{
test "read 128-bit field from default aligned struct in global memory" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -405,8 +392,8 @@ test "read 128-bit field from default aligned struct in global memory" {
}
test "struct field explicit alignment" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // flaky
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@ -426,7 +413,6 @@ test "struct field explicit alignment" {
}
test "align(N) on functions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -455,7 +441,6 @@ test "comptime alloc alignment" {
// TODO: it's impossible to test this in Zig today, since comptime vars do not have runtime addresses.
if (true) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // flaky
@ -468,7 +453,6 @@ test "comptime alloc alignment" {
}
test "@alignCast null" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -484,7 +468,6 @@ test "alignment of slice element" {
}
test "sub-aligned pointer field access" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -538,7 +521,6 @@ test "alignment of zero-bit types is respected" {
test "zero-bit fields in extern struct pad fields appropriately" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

View File

@ -19,7 +19,6 @@ test "array to slice" {
}
test "arrays" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -47,7 +46,6 @@ fn getArrayLen(a: []const u32) usize {
}
test "array concat with undefined" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -73,7 +71,6 @@ test "array concat with undefined" {
test "array concat with tuple" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const array: [2]u8 = .{ 1, 2 };
@ -89,7 +86,6 @@ test "array concat with tuple" {
test "array init with concat" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const a = 'a';
var i: [4]u8 = [2]u8{ a, 'b' } ++ [2]u8{ 'c', 'd' };
@ -98,7 +94,6 @@ test "array init with concat" {
test "array init with mult" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const a = 'a';
@ -110,7 +105,6 @@ test "array init with mult" {
}
test "array literal with explicit type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const hex_mult: [4]u16 = .{ 4096, 256, 16, 1 };
@ -138,7 +132,6 @@ const ArrayDotLenConstExpr = struct {
const some_array = [_]u8{ 0, 1, 2, 3 };
test "array literal with specified size" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -162,7 +155,6 @@ test "array len field" {
test "array with sentinels" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -200,7 +192,6 @@ test "void arrays" {
test "nested arrays of strings" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -215,7 +206,6 @@ test "nested arrays of strings" {
}
test "nested arrays of integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const array_of_numbers = [_][2]u8{
@ -230,7 +220,6 @@ test "nested arrays of integers" {
}
test "implicit comptime in array type size" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var arr: [plusOne(10)]bool = undefined;
@ -243,7 +232,6 @@ fn plusOne(x: u32) u32 {
}
test "single-item pointer to array indexing and slicing" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -285,7 +273,6 @@ test "implicit cast zero sized array ptr to slice" {
}
test "anonymous list literal syntax" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -308,7 +295,6 @@ const Sub = struct { b: u8 };
const Str = struct { a: []Sub };
test "set global var array via slice embedded in struct" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var s = Str{ .a = s_array[0..] };
@ -323,7 +309,6 @@ test "set global var array via slice embedded in struct" {
}
test "read/write through global variable array of struct fields initialized via array mult" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -345,7 +330,6 @@ test "read/write through global variable array of struct fields initialized via
test "implicit cast single-item pointer" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testImplicitCastSingleItemPtr();
@ -364,7 +348,6 @@ fn testArrayByValAtComptime(b: [2]u8) u8 {
}
test "comptime evaluating function that takes array by value" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const arr = [_]u8{ 1, 2 };
@ -376,7 +359,6 @@ test "comptime evaluating function that takes array by value" {
test "runtime initialize array elem and then implicit cast to slice" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var two: i32 = 2;
@ -387,7 +369,6 @@ test "runtime initialize array elem and then implicit cast to slice" {
test "array literal as argument to function" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -414,8 +395,8 @@ test "array literal as argument to function" {
}
test "double nested array to const slice cast in array literal" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -476,7 +457,6 @@ test "double nested array to const slice cast in array literal" {
}
test "anonymous literal in array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -502,7 +482,6 @@ test "anonymous literal in array" {
}
test "access the null element of a null terminated array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -520,7 +499,6 @@ test "access the null element of a null terminated array" {
}
test "type deduction for array subscript expression" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -540,7 +518,6 @@ test "type deduction for array subscript expression" {
test "sentinel element count towards the ABI size calculation" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -564,7 +541,7 @@ test "sentinel element count towards the ABI size calculation" {
}
test "zero-sized array with recursive type definition" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -587,8 +564,8 @@ test "zero-sized array with recursive type definition" {
}
test "type coercion of anon struct literal to array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -628,7 +605,6 @@ test "array with comptime-only element type" {
}
test "tuple to array handles sentinel" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -641,7 +617,6 @@ test "tuple to array handles sentinel" {
test "array init of container level array variable" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -675,8 +650,8 @@ test "runtime initialized sentinel-terminated array literal" {
}
test "array of array agregate init" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var a = [1]u32{11} ** 10;
@ -725,7 +700,6 @@ test "array init with no result location has result type" {
}
test "slicing array of zero-sized values" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -890,7 +864,6 @@ test "tuple initialized through reference to anonymous array init provides resul
test "copied array element doesn't alias source" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: [10][10]u32 = undefined;
@ -945,7 +918,6 @@ test "array initialized with array with sentinel" {
}
test "store array of array of structs at comptime" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -970,7 +942,6 @@ test "store array of array of structs at comptime" {
}
test "accessing multidimensional global array at comptime" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -986,8 +957,8 @@ test "accessing multidimensional global array at comptime" {
}
test "union that needs padding bytes inside an array" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1023,7 +994,6 @@ test "runtime index of array of zero-bit values" {
}
test "@splat array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1046,7 +1016,6 @@ test "@splat array" {
test "@splat array with sentinel" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1070,7 +1039,6 @@ test "@splat array with sentinel" {
test "@splat zero-length array" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View File

@ -7,7 +7,6 @@ const is_x86_64_linux = builtin.cpu.arch == .x86_64 and builtin.os.tag == .linux
comptime {
if (builtin.zig_backend != .stage2_arm and
builtin.zig_backend != .stage2_aarch64 and
!(builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) and // MSVC doesn't support inline assembly
is_x86_64_linux)
{
@ -30,7 +29,6 @@ test "module level assembly" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly
@ -41,9 +39,9 @@ test "module level assembly" {
}
test "output constraint modifiers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -63,9 +61,9 @@ test "output constraint modifiers" {
}
test "alternative constraints" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -83,7 +81,6 @@ test "alternative constraints" {
test "sized integer/float in asm input" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -127,7 +124,6 @@ test "sized integer/float in asm input" {
test "struct/array/union types as input values" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -167,7 +163,6 @@ test "rw constraint (x86_64)" {
test "asm modifiers (AArch64)" {
if (!builtin.target.cpu.arch.isAARCH64()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly

View File

@ -12,7 +12,7 @@ const supports_128_bit_atomics = switch (builtin.cpu.arch) {
};
test "cmpxchg" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -39,7 +39,7 @@ fn testCmpxchg() !void {
}
test "atomicrmw and atomicload" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -68,7 +68,7 @@ fn testAtomicLoad(ptr: *u8) !void {
}
test "cmpxchg with ptr" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -94,7 +94,7 @@ test "cmpxchg with ptr" {
}
test "cmpxchg with ignored result" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -110,8 +110,8 @@ test "128-bit cmpxchg" {
// TODO: this must appear first
if (!supports_128_bit_atomics) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -139,7 +139,7 @@ fn test_u128_cmpxchg() !void {
var a_global_variable = @as(u32, 1234);
test "cmpxchg on a global variable" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -149,7 +149,7 @@ test "cmpxchg on a global variable" {
}
test "atomic load and rmw with enum" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -167,7 +167,7 @@ test "atomic load and rmw with enum" {
}
test "atomic store" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -185,7 +185,7 @@ fn testAtomicStore() !void {
}
test "atomicrmw with floats" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -211,7 +211,7 @@ fn testAtomicRmwFloat() !void {
}
test "atomicrmw with ints" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -290,7 +290,7 @@ test "atomicrmw with 128-bit ints" {
// TODO: this must appear first
if (!supports_128_bit_atomics) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try testAtomicRmwInt128(.signed);
try testAtomicRmwInt128(.unsigned);
@ -359,7 +359,7 @@ fn testAtomicRmwInt128(comptime signedness: std.builtin.Signedness) !void {
}
test "atomics with different types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -409,7 +409,6 @@ fn testAtomicsWithPackedStruct(comptime T: type, a: T, b: T) !void {
}
test "return @atomicStore, using it as a void value" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
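For context: this file exercises Zig's atomic builtins. A minimal sketch of the operations under test, independent of this commit:

const std = @import("std");

test "atomic builtins sketch" {
    var value: u32 = 1;
    // @atomicRmw applies an op atomically and returns the previous value.
    const prev = @atomicRmw(u32, &value, .Add, 2, .seq_cst);
    try std.testing.expect(prev == 1);
    try std.testing.expect(@atomicLoad(u32, &value, .seq_cst) == 3);
    // @cmpxchgStrong returns null on success, or the observed value on failure.
    try std.testing.expect(@cmpxchgStrong(u32, &value, 3, 7, .seq_cst, .seq_cst) == null);
    @atomicStore(u32, &value, 9, .seq_cst);
    try std.testing.expect(value == 9);
}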

View File

@ -39,7 +39,6 @@ test "truncate to non-power-of-two integers" {
}
test "truncate to non-power-of-two integers from 128-bit" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -422,7 +421,6 @@ fn copy(src: *const u64, dst: *u64) void {
}
test "call result of if else expression" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -448,7 +446,6 @@ fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA {
}
test "take address of parameter" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -536,7 +533,6 @@ fn nine() u8 {
}
test "struct inside function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testStructInFn();
@ -588,7 +584,6 @@ test "global variable assignment with optional unwrapping with var initialized t
var global_foo: *i32 = undefined;
test "peer result location with typed parent, runtime condition, comptime prongs" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -719,7 +714,6 @@ test "global constant is loaded with a runtime-known index" {
}
test "multiline string literal is null terminated" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const s1 =
@ -732,7 +726,6 @@ test "multiline string literal is null terminated" {
}
test "string escapes" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -764,7 +757,6 @@ fn ptrEql(a: *const []const u8, b: *const []const u8) bool {
}
test "string concatenation" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -787,7 +779,6 @@ test "string concatenation" {
}
test "result location is optional inside error union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -803,7 +794,6 @@ fn maybe(x: bool) anyerror!?u32 {
}
test "auto created variables have correct alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -821,7 +811,6 @@ test "auto created variables have correct alignment" {
test "extern variable with non-pointer opaque type" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@ -866,7 +855,6 @@ test "if expression type coercion" {
}
test "discarding the result of various expressions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -908,7 +896,6 @@ test "labeled block implicitly ends in a break" {
}
test "catch in block has correct result location" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -964,7 +951,6 @@ test "vector initialized with array init syntax has proper type" {
}
test "weird array and tuple initializations" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1010,7 +996,6 @@ test "generic function uses return type of other generic function" {
// https://github.com/ziglang/zig/issues/12208
return error.SkipZigTest;
}
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const S = struct {
fn call(
@ -1128,7 +1113,6 @@ test "returning an opaque type from a function" {
}
test "orelse coercion as function argument" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const Loc = struct { start: i32 = -1 };
@ -1378,7 +1362,6 @@ test "copy array of self-referential struct" {
test "break out of block based on comptime known values" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -1412,8 +1395,8 @@ test "break out of block based on comptime known values" {
}
test "allocation and looping over 3-byte integer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

View File

@ -112,7 +112,7 @@ test "comptime shift safety check" {
}
test "Saturating Shift Left where lhs is of a computed type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -161,6 +161,7 @@ comptime {
}
test "Saturating Shift Left" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
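For context: `<<|` is Zig's saturating left shift, which clamps at the type's bounds instead of tripping overflow safety checks. A minimal sketch, independent of this commit:

const std = @import("std");

test "saturating shift left sketch" {
    // 3 << 2 fits in u8, so the result is exact.
    try std.testing.expect(@as(u8, 3) <<| 2 == 12);
    // 200 << 1 would be 400; u8 saturates at 255.
    try std.testing.expect(@as(u8, 200) <<| 1 == 255);
}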

View File

@ -20,7 +20,6 @@ test "@bitCast iX -> uX (32, 64)" {
}
test "@bitCast iX -> uX (8, 16, 128)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -35,8 +34,8 @@ test "@bitCast iX -> uX (8, 16, 128)" {
}
test "@bitCast iX -> uX exotic integers" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -80,8 +79,8 @@ fn conv_uN(comptime N: usize, x: std.meta.Int(.unsigned, N)) std.meta.Int(.signe
}
test "bitcast uX to bytes" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -296,9 +295,9 @@ test "triple level result location with bitcast sandwich passed as tuple element
}
test "@bitCast packed struct of floats" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -334,9 +333,9 @@ test "@bitCast packed struct of floats" {
}
test "comptime @bitCast packed struct to int and back" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -379,7 +378,6 @@ test "comptime bitcast with fields following f80" {
}
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -393,7 +391,7 @@ test "comptime bitcast with fields following f80" {
}
test "bitcast vector to integer and back" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@ -420,7 +418,6 @@ fn bitCastWrapper128(x: f128) u128 {
return @as(u128, @bitCast(x));
}
test "bitcast nan float does not modify signaling bit" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -473,7 +470,7 @@ test "bitcast nan float does not modify signaling bit" {
}
test "@bitCast of packed struct of bools all true" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@ -494,7 +491,7 @@ test "@bitCast of packed struct of bools all true" {
}
test "@bitCast of packed struct of bools all false" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@ -514,7 +511,7 @@ test "@bitCast of packed struct of bools all false" {
}
test "@bitCast of packed struct containing pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@ -544,7 +541,7 @@ test "@bitCast of packed struct containing pointer" {
}
test "@bitCast of extern struct containing pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
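For context: `@bitCast` reinterprets a value's bits as another type of the same bit width. A minimal sketch, independent of this commit:

const std = @import("std");

test "bitCast sketch" {
    // IEEE-754 f32 1.0 has the bit pattern 0x3f800000.
    const one: f32 = 1.0;
    const bits: u32 = @bitCast(one);
    try std.testing.expect(bits == 0x3f800000);
    // Packed structs bit-cast to their backing integer; field `a` is the least significant bit.
    const P = packed struct(u4) { a: bool, b: bool, c: bool, d: bool };
    const p: P = .{ .a = true, .b = false, .c = true, .d = false };
    try std.testing.expect(@as(u4, @bitCast(p)) == 0b0101);
}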

View File

@ -8,8 +8,8 @@ test "@bitReverse large exotic integer" {
}
test "@bitReverse" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -121,9 +121,9 @@ fn vector8() !void {
}
test "bitReverse vectors u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -141,9 +141,9 @@ fn vector16() !void {
}
test "bitReverse vectors u16" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -161,9 +161,9 @@ fn vector24() !void {
}
test "bitReverse vectors u24" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
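For context: `@bitReverse` reverses the order of an integer's bits and, unlike `@byteSwap`, works on any bit width. A minimal sketch, independent of this commit:

const std = @import("std");

test "bitReverse sketch" {
    try std.testing.expect(@bitReverse(@as(u8, 0b0000_0010)) == 0b0100_0000);
    // Exotic widths are allowed: reversing the 3 bits of 0b110 yields 0b011.
    try std.testing.expect(@bitReverse(@as(u3, 0b110)) == 0b011);
}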

View File

@ -7,7 +7,6 @@ var x: u8 = 1;
// This excludes builtin functions that return void or noreturn that cannot be tested.
test {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

View File

@ -3,40 +3,8 @@ const builtin = @import("builtin");
const expect = std.testing.expect;
test "@byteSwap integers" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) {
// TODO: Remove when self-hosted wasm supports more types for byteswap
const ByteSwapIntTest = struct {
fn run() !void {
try t(u8, 0x12, 0x12);
try t(u16, 0x1234, 0x3412);
try t(u24, 0x123456, 0x563412);
try t(i24, @as(i24, @bitCast(@as(u24, 0xf23456))), 0x5634f2);
try t(i24, 0x1234f6, @as(i24, @bitCast(@as(u24, 0xf63412))));
try t(u32, 0x12345678, 0x78563412);
try t(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), 0x785634f2);
try t(i32, 0x123456f8, @as(i32, @bitCast(@as(u32, 0xf8563412))));
try t(u64, 0x123456789abcdef1, 0xf1debc9a78563412);
try t(u0, @as(u0, 0), 0);
try t(i8, @as(i8, -50), -50);
try t(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x3412))));
try t(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x563412))));
try t(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x78563412))));
try t(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0xf1debc9a78563412))));
}
fn t(comptime I: type, input: I, expected_output: I) !void {
try std.testing.expect(expected_output == @byteSwap(input));
}
};
try comptime ByteSwapIntTest.run();
try ByteSwapIntTest.run();
return;
}
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -51,23 +19,44 @@ test "@byteSwap integers" {
try t(u32, 0x12345678, 0x78563412);
try t(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), 0x785634f2);
try t(i32, 0x123456f8, @as(i32, @bitCast(@as(u32, 0xf8563412))));
try t(u40, 0x123456789a, 0x9a78563412);
try t(i48, 0x123456789abc, @as(i48, @bitCast(@as(u48, 0xbc9a78563412))));
try t(u56, 0x123456789abcde, 0xdebc9a78563412);
try t(u64, 0x123456789abcdef1, 0xf1debc9a78563412);
try t(u88, 0x123456789abcdef1112131, 0x312111f1debc9a78563412);
try t(u96, 0x123456789abcdef111213141, 0x41312111f1debc9a78563412);
try t(u128, 0x123456789abcdef11121314151617181, 0x8171615141312111f1debc9a78563412);
try t(u0, @as(u0, 0), 0);
try t(i8, @as(i8, -50), -50);
try t(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x3412))));
try t(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x563412))));
try t(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x78563412))));
try t(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0xf1debc9a78563412))));
}
fn t(comptime I: type, input: I, expected_output: I) !void {
try std.testing.expect(expected_output == @byteSwap(input));
}
};
try comptime ByteSwapIntTest.run();
try ByteSwapIntTest.run();
}
test "@byteSwap exotic integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const ByteSwapIntTest = struct {
fn run() !void {
try t(u0, 0, 0);
try t(u40, 0x123456789a, 0x9a78563412);
try t(i48, 0x123456789abc, @as(i48, @bitCast(@as(u48, 0xbc9a78563412))));
try t(u56, 0x123456789abcde, 0xdebc9a78563412);
try t(u88, 0x123456789abcdef1112131, 0x312111f1debc9a78563412);
try t(u96, 0x123456789abcdef111213141, 0x41312111f1debc9a78563412);
try t(u128, 0x123456789abcdef11121314151617181, 0x8171615141312111f1debc9a78563412);
try t(i40, @as(i40, @bitCast(@as(u40, 0x123456789a))), @as(i40, @bitCast(@as(u40, 0x9a78563412))));
try t(i48, @as(i48, @bitCast(@as(u48, 0x123456789abc))), @as(i48, @bitCast(@as(u48, 0xbc9a78563412))));
try t(i56, @as(i56, @bitCast(@as(u56, 0x123456789abcde))), @as(i56, @bitCast(@as(u56, 0xdebc9a78563412))));
try t(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0xf1debc9a78563412))));
try t(i88, @as(i88, @bitCast(@as(u88, 0x123456789abcdef1112131))), @as(i88, @bitCast(@as(u88, 0x312111f1debc9a78563412))));
try t(i96, @as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141))), @as(i96, @bitCast(@as(u96, 0x41312111f1debc9a78563412))));
try t(
@ -93,9 +82,9 @@ fn vector8() !void {
}
test "@byteSwap vectors u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -113,9 +102,9 @@ fn vector16() !void {
}
test "@byteSwap vectors u16" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -133,9 +122,9 @@ fn vector24() !void {
}
test "@byteSwap vectors u24" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
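For context: `@byteSwap` reverses byte order and requires a bit width that is a multiple of 8, hence the separate "exotic integers" coverage above. A minimal sketch, independent of this commit:

const std = @import("std");

test "byteSwap sketch" {
    try std.testing.expect(@byteSwap(@as(u32, 0x12345678)) == 0x78563412);
    // u24 is 3 whole bytes, so it qualifies.
    try std.testing.expect(@byteSwap(@as(u24, 0x123456)) == 0x563412);
}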

View File

@ -20,8 +20,8 @@ test "super basic invocations" {
}
test "basic invocations" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -60,7 +60,6 @@ test "basic invocations" {
}
test "tuple parameters" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -95,7 +94,6 @@ test "tuple parameters" {
test "result location of function call argument through runtime condition and struct init" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const E = enum { a, b };
@ -115,6 +113,7 @@ test "result location of function call argument through runtime condition and struct init" {
}
test "function call with 40 arguments" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -270,7 +269,7 @@ test "arguments to comptime parameters generated in comptime blocks" {
}
test "forced tail call" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -305,7 +304,7 @@ test "forced tail call" {
}
test "inline call preserves tail call" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -342,7 +341,6 @@ test "inline call preserves tail call" {
}
test "inline call doesn't re-evaluate non generic struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -409,7 +407,6 @@ test "recursive inline call with comptime known argument" {
}
test "inline while with @call" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const S = struct {
@ -439,7 +436,6 @@ test "method call as parameter type" {
}
test "non-anytype generic parameters provide result type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -468,7 +464,6 @@ test "non-anytype generic parameters provide result type" {
}
test "argument to generic function has correct result type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -521,7 +516,6 @@ test "call function in comptime field" {
test "call function pointer in comptime field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -573,7 +567,6 @@ test "value returned from comptime function is comptime known" {
}
test "registers get overwritten when ignoring return" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.cpu.arch != .x86_64 or builtin.os.tag != .linux) return error.SkipZigTest;
@ -619,7 +612,6 @@ test "call with union with zero sized field is not memorized incorrectly" {
}
test "function call with cast to anyopaque pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -637,6 +629,7 @@ test "function call with cast to anyopaque pointer" {
}
test "arguments pointed to on stack into tailcall" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -708,7 +701,7 @@ test "arguments pointed to on stack into tailcall" {
}
test "tail call function pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
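For context: the tail-call tests above use `@call` with a modifier. `.always_tail` requires the callee's return to become the caller's return, so recursion cannot grow the stack. A minimal sketch, independent of this commit:

const std = @import("std");

// Guaranteed tail call: lowers to a jump (or is a compile error), never a deeper frame.
fn countDown(n: u64) u64 {
    if (n == 0) return 0;
    return @call(.always_tail, countDown, .{n - 1});
}

test "forced tail call sketch" {
    try std.testing.expect(countDown(1_000_000) == 0);
}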

View File

@ -21,7 +21,6 @@ test "integer literal to pointer cast" {
}
test "peer type resolution: ?T and T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -100,7 +99,6 @@ test "comptime_int @floatFromInt" {
}
test "@floatFromInt" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -121,7 +119,6 @@ test "@floatFromInt" {
}
test "@floatFromInt(f80)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -157,7 +154,6 @@ test "@floatFromInt(f80)" {
}
test "@intFromFloat" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -181,7 +177,6 @@ fn expectIntFromFloat(comptime F: type, f: F, comptime I: type, i: I) !void {
}
test "implicitly cast indirect pointer to maybe-indirect pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -241,7 +236,6 @@ test "@floatCast comptime_int and comptime_float" {
}
test "coerce undefined to optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -262,7 +256,6 @@ fn MakeType(comptime T: type) type {
}
test "implicit cast from *[N]T to [*c]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -299,7 +292,6 @@ test "@intCast to u0 and use the result" {
}
test "peer result null and comptime_int" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -324,7 +316,6 @@ test "peer result null and comptime_int" {
}
test "*const ?[*]const T to [*c]const [*c]const T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -338,7 +329,6 @@ test "*const ?[*]const T to [*c]const [*c]const T" {
}
test "array coercion to undefined at runtime" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -368,7 +358,6 @@ fn implicitIntLitToOptional() void {
}
test "return u8 coercing into ?u32 return type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -390,7 +379,6 @@ test "cast from ?[*]T to ??[*]T" {
}
test "peer type unsigned int to signed" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var w: u31 = 5;
@ -403,7 +391,6 @@ test "peer type unsigned int to signed" {
}
test "expected [*c]const u8, found [*:0]const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -415,7 +402,6 @@ test "expected [*c]const u8, found [*:0]const u8" {
}
test "explicit cast from integer to error type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -431,7 +417,6 @@ fn testCastIntToErr(err: anyerror) !void {
}
test "peer resolve array and const slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -447,7 +432,6 @@ fn testPeerResolveArrayConstSlice(b: bool) !void {
}
test "implicitly cast from T to anyerror!?T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -473,7 +457,6 @@ fn castToOptionalTypeError(z: i32) !void {
}
test "implicitly cast from [0]T to anyerror![]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testCastZeroArrayToErrSliceMut();
@ -489,7 +472,6 @@ fn gimmeErrOrSlice() anyerror![]u8 {
}
test "peer type resolution: [0]u8, []const u8, and anyerror![]u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -522,7 +504,6 @@ fn peerTypeEmptyArrayAndSliceAndError(a: bool, slice: []u8) anyerror![]u8 {
}
test "implicit cast from *const [N]T to []const T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -548,7 +529,6 @@ fn testCastConstArrayRefToConstSlice() !void {
}
test "peer type resolution: error and [N]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -573,7 +553,6 @@ fn testPeerErrorAndArray2(x: u8) anyerror![]const u8 {
}
test "single-item pointer of array to slice to unknown length pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -603,7 +582,6 @@ fn testCastPtrOfArrayToSliceAndPtr() !void {
}
test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -613,8 +591,8 @@ test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
}
test "@intCast on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -651,7 +629,6 @@ test "@intCast on vector" {
}
test "@floatCast cast down" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -670,7 +647,6 @@ test "@floatCast cast down" {
}
test "peer type resolution: unreachable, error set, unreachable" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Error = error{
@ -704,7 +680,6 @@ test "peer cast: error set any anyerror" {
}
test "peer type resolution: error set supersets" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -735,7 +710,6 @@ test "peer type resolution: error set supersets" {
test "peer type resolution: disjoint error sets" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -765,7 +739,6 @@ test "peer type resolution: disjoint error sets" {
test "peer type resolution: error union and error set" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -799,7 +772,6 @@ test "peer type resolution: error union and error set" {
test "peer type resolution: error union after non-error" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -833,7 +805,6 @@ test "peer type resolution: error union after non-error" {
test "peer cast *[0]T to E![]const T" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -849,7 +820,6 @@ test "peer cast *[0]T to E![]const T" {
test "peer cast *[0]T to []const T" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -872,7 +842,6 @@ test "peer cast *[N]T to [*]T" {
}
test "peer resolution of string literals" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -895,7 +864,6 @@ test "peer resolution of string literals" {
}
test "peer cast [:x]T to []T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -912,7 +880,6 @@ test "peer cast [:x]T to []T" {
}
test "peer cast [N:x]T to [N]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -929,7 +896,6 @@ test "peer cast [N:x]T to [N]T" {
}
test "peer cast *[N:x]T to *[N]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -945,7 +911,6 @@ test "peer cast *[N:x]T to *[N]T" {
}
test "peer cast [*:x]T to [*]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -966,7 +931,6 @@ test "peer cast [*:x]T to [*]T" {
}
test "peer cast [:x]T to [*:x]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -988,7 +952,6 @@ test "peer cast [:x]T to [*:x]T" {
}
test "peer type resolution implicit cast to return type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1009,7 +972,6 @@ test "peer type resolution implicit cast to return type" {
}
test "peer type resolution implicit cast to variable type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1035,7 +997,6 @@ test "variable initialization uses result locations properly with regards to the
}
test "cast between C pointer with different but compatible types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1053,7 +1014,6 @@ test "cast between C pointer with different but compatible types" {
}
test "peer type resolve string lit with sentinel-terminated mutable slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1104,7 +1064,6 @@ test "comptime float casts" {
}
test "pointer reinterpret const float to int" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// The hex representation is 0x3fe3333333333303.
@ -1119,7 +1078,6 @@ test "pointer reinterpret const float to int" {
}
test "implicit cast from [*]T to ?*anyopaque" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1184,7 +1142,6 @@ test "cast function with an opaque parameter" {
}
test "implicit ptr to *anyopaque" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1199,7 +1156,6 @@ test "implicit ptr to *anyopaque" {
}
test "return null from fn () anyerror!?&T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1216,7 +1172,6 @@ fn returnNullLitFromOptionalTypeErrorRef() anyerror!?*A {
}
test "peer type resolution: [0]u8 and []const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1237,7 +1192,6 @@ fn peerTypeEmptyArrayAndSlice(a: bool, slice: []const u8) []const u8 {
}
test "implicitly cast from [N]T to ?[]const T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1251,7 +1205,6 @@ fn castToOptionalSlice() ?[]const u8 {
}
test "cast u128 to f128 and back" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1274,7 +1227,6 @@ fn cast128Float(x: u128) f128 {
}
test "implicit cast from *[N]T to ?[*]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -1291,7 +1243,6 @@ test "implicit cast from *[N]T to ?[*]T" {
}
test "implicit cast from *T to ?*anyopaque" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1306,7 +1257,6 @@ fn incrementVoidPtrValue(value: ?*anyopaque) void {
}
test "implicit cast *[0]T to E![]const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x = @as(anyerror![]const u8, &[0]u8{});
@ -1330,7 +1280,6 @@ test "cast from array reference to fn: runtime fn ptr" {
}
test "*const [N]null u8 to ?[]const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1367,7 +1316,6 @@ test "cast between [*c]T and ?[*:0]T on fn parameter" {
var global_struct: struct { f0: usize } = undefined;
test "assignment to optional pointer result loc" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1386,7 +1334,6 @@ test "cast between *[N]void and []void" {
}
test "peer resolve arrays of different size to const slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1400,7 +1347,6 @@ fn boolToStr(b: bool) []const u8 {
}
test "cast f16 to wider types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1421,7 +1367,6 @@ test "cast f16 to wider types" {
}
test "cast f128 to narrower types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1441,7 +1386,6 @@ test "cast f128 to narrower types" {
}
test "peer type resolution: unreachable, null, slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1460,7 +1404,6 @@ test "peer type resolution: unreachable, null, slice" {
}
test "cast i8 fn call peers to i32 result" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -1482,7 +1425,6 @@ test "cast i8 fn call peers to i32 result" {
}
test "cast compatible optional types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1494,7 +1436,6 @@ test "cast compatible optional types" {
}
test "coerce undefined single-item pointer of array to error union of slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const a = @as([*]u8, undefined)[0..0];
@ -1513,7 +1454,6 @@ test "pointer to empty struct literal to mutable slice" {
}
test "coerce between pointers of compatible differently-named floats" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows and !builtin.link_libc) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1548,7 +1488,6 @@ test "peer type resolution of const and non-const pointer to array" {
}
test "intFromFloat to zero-bit int" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1573,8 +1512,6 @@ test "cast typed undefined to int" {
}
// test "implicit cast from [:0]T to [*c]T" {
// if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
// var a: [:0]const u8 = "foo";
// _ = &a;
// const b: [*c]const u8 = a;
@ -1584,7 +1521,6 @@ test "cast typed undefined to int" {
// }
test "bitcast packed struct with u0" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = packed struct(u2) { a: u0, b: u2 };
@ -1691,7 +1627,6 @@ test "coercion from single-item pointer to @as to slice" {
}
test "peer type resolution: const sentinel slice and mutable non-sentinel slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1721,7 +1656,6 @@ test "peer type resolution: const sentinel slice and mutable non-sentinel slice"
}
test "peer type resolution: float and comptime-known fixed-width integer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1743,7 +1677,7 @@ test "peer type resolution: float and comptime-known fixed-width integer" {
}
test "peer type resolution: same array type with sentinel" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1766,7 +1700,6 @@ test "peer type resolution: same array type with sentinel" {
}
test "peer type resolution: array with sentinel and array without sentinel" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1789,7 +1722,7 @@ test "peer type resolution: array with sentinel and array without sentinel" {
}
test "peer type resolution: array and vector with same child type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1813,7 +1746,7 @@ test "peer type resolution: array and vector with same child type" {
}
test "peer type resolution: array with smaller child type and vector with larger child type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1837,7 +1770,7 @@ test "peer type resolution: array with smaller child type and vector with larger
}
test "peer type resolution: error union and optional of same type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1861,7 +1794,6 @@ test "peer type resolution: error union and optional of same type" {
}
test "peer type resolution: C pointer and @TypeOf(null)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1884,7 +1816,7 @@ test "peer type resolution: C pointer and @TypeOf(null)" {
}
test "peer type resolution: three-way resolution combines error set and optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1927,7 +1859,7 @@ test "peer type resolution: three-way resolution combines error set and optional
}
test "peer type resolution: vector and optional vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1952,7 +1884,6 @@ test "peer type resolution: vector and optional vector" {
}
test "peer type resolution: optional fixed-width int and comptime_int" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1974,7 +1905,7 @@ test "peer type resolution: optional fixed-width int and comptime_int" {
}
test "peer type resolution: array and tuple" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1998,7 +1929,7 @@ test "peer type resolution: array and tuple" {
}
test "peer type resolution: vector and tuple" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -2022,7 +1953,7 @@ test "peer type resolution: vector and tuple" {
}
test "peer type resolution: vector and array and tuple" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -2066,7 +1997,6 @@ test "peer type resolution: vector and array and tuple" {
}
test "peer type resolution: empty tuple pointer and slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -2088,7 +2018,6 @@ test "peer type resolution: empty tuple pointer and slice" {
}
test "peer type resolution: tuple pointer and slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -2110,7 +2039,6 @@ test "peer type resolution: tuple pointer and slice" {
}
test "peer type resolution: tuple pointer and optional slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// Miscompilation on Intel's OpenCL CPU runtime.
@ -2133,7 +2061,6 @@ test "peer type resolution: tuple pointer and optional slice" {
}
test "peer type resolution: many compatible pointers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -2200,7 +2127,6 @@ test "peer type resolution: many compatible pointers" {
}
test "peer type resolution: tuples with comptime fields" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@ -2232,7 +2158,6 @@ test "peer type resolution: tuples with comptime fields" {
}
test "peer type resolution: C pointer and many pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -2256,7 +2181,6 @@ test "peer type resolution: C pointer and many pointer" {
}
test "peer type resolution: pointer attributes are combined correctly" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -2338,7 +2262,7 @@ test "peer type resolution: pointer attributes are combined correctly" {
}
test "peer type resolution: arrays of compatible types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -2356,7 +2280,6 @@ test "peer type resolution: arrays of compatible types" {
}
test "cast builtins can wrap result in optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -2394,7 +2317,6 @@ test "cast builtins can wrap result in optional" {
}
test "cast builtins can wrap result in error union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -2432,7 +2354,6 @@ test "cast builtins can wrap result in error union" {
}
test "cast builtins can wrap result in error union and optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -2471,8 +2392,8 @@ test "cast builtins can wrap result in error union and optional" {
}
test "@floatCast on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -2512,8 +2433,8 @@ test "@floatCast on vector" {
}
test "@ptrFromInt on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -2537,8 +2458,8 @@ test "@ptrFromInt on vector" {
}
test "@intFromPtr on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -2562,8 +2483,8 @@ test "@intFromPtr on vector" {
}
test "@floatFromInt on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -2582,8 +2503,8 @@ test "@floatFromInt on vector" {
}
test "@intFromFloat on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -2602,8 +2523,8 @@ test "@intFromFloat on vector" {
}
test "@intFromBool on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -2639,7 +2560,6 @@ test "15-bit int to float" {
}
test "@as does not corrupt values with incompatible representations" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -2654,7 +2574,6 @@ test "@as does not corrupt values with incompatible representations" {
}
test "result information is preserved through many nested structures" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -2679,7 +2598,7 @@ test "result information is preserved through many nested structures" {
}
test "@intCast vector of signed integer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -2703,7 +2622,6 @@ test "result type is preserved into comptime block" {
}
test "bitcast vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const u8x32 = @Vector(32, u8);
@ -2766,6 +2684,7 @@ test "@intFromFloat boundary cases" {
}
test "@intFromFloat vector boundary cases" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

View File

@ -5,7 +5,6 @@ const expectEqual = std.testing.expectEqual;
const maxInt = std.math.maxInt;
test "@intCast i32 to u7" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -19,7 +18,6 @@ test "@intCast i32 to u7" {
}
test "coerce i8 to i32 and @intCast back" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -36,6 +34,7 @@ test "coerce i8 to i32 and @intCast back" {
test "coerce non byte-sized integers accross 32bits boundary" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
{
var v: u21 = 6417;
_ = &v;
@ -164,8 +163,9 @@ const Piece = packed struct {
}
};
// Originally reported at https://github.com/ziglang/zig/issues/14200
test "load non byte-sized optional value" {
// Originally reported at https://github.com/ziglang/zig/issues/14200
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@ -181,6 +181,7 @@ test "load non byte-sized optional value" {
}
test "load non byte-sized value in struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.cpu.arch.endian() != .little) return error.SkipZigTest; // packed struct TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO

View File

@ -66,7 +66,6 @@ fn bigToNativeEndian(comptime T: type, v: T) T {
return if (endian == .big) v else @byteSwap(v);
}
test "type pun endianness" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
comptime {
@ -360,7 +359,6 @@ test "offset field ptr by enclosing array element size" {
}
test "accessing reinterpreted memory of parent object" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = extern struct {

View File

@ -7,7 +7,6 @@ const expect = testing.expect;
var argv: [*]const [*]const u8 = undefined;
test "const slice child" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

View File

@ -33,9 +33,9 @@ test "decl literal with pointer" {
}
test "call decl literal with optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
@ -74,6 +74,7 @@ test "call decl literal" {
}
test "call decl literal with error union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
const S = struct {

View File

@ -32,7 +32,6 @@ test "defer and labeled break" {
}
test "errdefer does not apply to fn inside fn" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (testNestedFnErrDefer()) |_| @panic("expected error") else |e| try expect(e == error.Bad);
@ -51,7 +50,6 @@ fn testNestedFnErrDefer() anyerror!void {
test "return variable while defer expression in scope to modify it" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -91,7 +89,6 @@ fn runSomeErrorDefers(x: bool) !bool {
}
test "mixing normal and error defers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -110,7 +107,7 @@ test "mixing normal and error defers" {
}
test "errdefer with payload" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -132,8 +129,8 @@ test "errdefer with payload" {
}
test "reference to errdefer payload" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -157,7 +154,6 @@ test "reference to errdefer payload" {
}
test "simple else prong doesn't emit an error for unreachable else prong" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

View File

@ -25,7 +25,6 @@ fn testEnumFromIntEval(x: i32) !void {
const EnumFromIntNumber = enum { Zero, One, Two, Three, Four };
test "int to enum" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testEnumFromIntEval(3);
@ -608,7 +607,6 @@ fn testEnumWithSpecifiedTagValues(x: MultipleChoice) !void {
}
test "enum with specified tag values" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testEnumWithSpecifiedTagValues(MultipleChoice.C);
@ -616,7 +614,6 @@ test "enum with specified tag values" {
}
test "non-exhaustive enum" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -680,7 +677,6 @@ test "empty non-exhaustive enum" {
}
test "single field non-exhaustive enum" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -744,7 +740,6 @@ test "cast integer literal to enum" {
}
test "enum with specified and unspecified tag values" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2.D);
@ -904,8 +899,8 @@ test "enum value allocation" {
}
test "enum literal casting to tagged union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Arch = union(enum) {
@ -941,8 +936,8 @@ test "enum literal casting to error union with payload enum" {
}
test "constant enum initialization with differing sizes" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -985,8 +980,8 @@ fn test3_2(f: Test3Foo) !void {
}
test "@tagName" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -1002,8 +997,8 @@ fn testEnumTagNameBare(n: anytype) []const u8 {
const BareNumber = enum { One, Two, Three };
test "@tagName non-exhaustive enum" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -1014,8 +1009,8 @@ test "@tagName non-exhaustive enum" {
const NonExhaustive = enum(u8) { A, B, _ };
test "@tagName is null-terminated" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -1030,8 +1025,8 @@ test "@tagName is null-terminated" {
}
test "tag name with assigned enum values" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -1046,7 +1041,6 @@ test "tag name with assigned enum values" {
}
test "@tagName on enum literals" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1055,8 +1049,8 @@ test "@tagName on enum literals" {
}
test "tag name with signed enum values" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -1073,8 +1067,8 @@ test "tag name with signed enum values" {
}
test "@tagName in callconv(.c) function" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -1091,7 +1085,6 @@ fn testEnumTagNameCallconvC() callconv(.c) [*:0]const u8 {
test "enum literal casting to optional" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var bar: ?Bar = undefined;
@ -1117,8 +1110,8 @@ const bit_field_1 = BitFieldOfEnums{
};
test "bit field access with enum fields" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@ -1158,8 +1151,8 @@ test "enum literal in array literal" {
}
test "tag name functions are unique" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -1179,7 +1172,6 @@ test "tag name functions are unique" {
}
test "size of enum with only one tag which has explicit integer tag type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const E = enum(u8) { nope = 10 };

View File

@ -145,11 +145,14 @@ test "implicit cast to optional to error union to return result loc" {
}
test "fn returning empty error set can be passed as fn returning any error" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
entry();
comptime entry();
}
test "fn returning empty error set can be passed as fn returning any error - pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
entryPtr();
@ -401,8 +404,8 @@ fn intLiteral(str: []const u8) !?i64 {
}
test "nested error union function call in optional unwrap" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -448,7 +451,6 @@ test "nested error union function call in optional unwrap" {
}
test "return function call to error set from error union function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -465,7 +467,6 @@ test "return function call to error set from error union function" {
}
test "optional error set is the same size as error set" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -481,7 +482,7 @@ test "optional error set is the same size as error set" {
}
test "nested catch" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -530,7 +531,7 @@ test "function pointer with return type that is error union with payload which i
}
test "return result loc as peer result loc in inferred error set function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -562,7 +563,6 @@ test "return result loc as peer result loc in inferred error set function" {
test "error payload type is correctly resolved" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const MyIntWrapper = struct {
@ -590,8 +590,8 @@ test "error union comptime caching" {
}
test "@errorName" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -605,8 +605,8 @@ fn gimmeItBroke() anyerror {
}
test "@errorName sentinel length matches slice length" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -700,8 +700,8 @@ test "coerce error set to the current inferred error set" {
}
test "error union payload is properly aligned" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -719,7 +719,6 @@ test "error union payload is properly aligned" {
}
test "ret_ptr doesn't cause own inferred error set to be resolved" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -760,7 +759,7 @@ test "simple else prong allowed even when all errors handled" {
}
test "pointer to error union payload" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -794,7 +793,6 @@ const NoReturn = struct {
};
test "error union of noreturn used with if" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -809,7 +807,6 @@ test "error union of noreturn used with if" {
}
test "error union of noreturn used with try" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -821,7 +818,6 @@ test "error union of noreturn used with try" {
}
test "error union of noreturn used with catch" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -833,7 +829,6 @@ test "error union of noreturn used with catch" {
}
test "alignment of wrapping an error union payload" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -852,6 +847,7 @@ test "alignment of wrapping an error union payload" {
}
test "compare error union and error set" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var a: anyerror = error.Foo;
@ -887,7 +883,7 @@ test "catch within a function that calls no errorable functions" {
}
test "error from comptime string" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -913,7 +909,6 @@ test "field access of anyerror results in smaller error set" {
}
test "optional error union return type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const S = struct {
@ -928,7 +923,6 @@ test "optional error union return type" {
test "optional error set return type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const E = error{ A, B };
const S = struct {
@ -952,8 +946,8 @@ test "optional error set function parameter" {
}
test "returning an error union containing a type with no runtime bits" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const ZeroByteType = struct {
@ -969,7 +963,7 @@ test "returning an error union containing a type with no runtime bits" {
}
test "try used in recursive function with inferred error set" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1010,7 +1004,6 @@ test "generic inline function returns inferred error set" {
}
test "function called at runtime is properly analyzed for inferred error set" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1045,6 +1038,8 @@ test "errorCast to adhoc inferred error set" {
}
test "@errorCast from error set to error union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
fn doTheTest(set: error{ A, B }) error{A}!i32 {
return @errorCast(set);
@ -1055,6 +1050,8 @@ test "@errorCast from error set to error union" {
}
test "@errorCast from error union to error union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
fn doTheTest(set: error{ A, B }!i32) error{A}!i32 {
return @errorCast(set);
@ -1065,8 +1062,8 @@ test "@errorCast from error union to error union" {
}
test "result location initialization of error union with OPV payload" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO

View File

@ -18,7 +18,6 @@ fn unwrapAndAddOne(blah: ?i32) i32 {
}
const should_be_1235 = unwrapAndAddOne(1234);
test "static add one" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -71,7 +70,6 @@ fn constExprEvalOnSingleExprBlocksFn(x: i32, b: bool) i32 {
}
test "constant expressions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var array: [array_size]u8 = undefined;
@ -93,7 +91,6 @@ fn letsTryToCompareBools(a: bool, b: bool) bool {
return max(bool, a, b);
}
test "inlined block and runtime block phi" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(letsTryToCompareBools(true, true));
@ -140,7 +137,6 @@ test "pointer to type" {
}
test "a type constructed in a global expression" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -236,7 +232,6 @@ const vertices = [_]Vertex{
};
test "statically initialized list" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(static_point_list[0].x == 1);
@ -342,7 +337,6 @@ fn doesAlotT(comptime T: type, value: usize) T {
}
test "@setEvalBranchQuota at same scope as generic function call" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(doesAlotT(u32, 2) == 2);
@ -394,7 +388,6 @@ test "return 0 from function that has u0 return type" {
}
test "statically initialized struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
st_init_str_foo.x += 1;
@ -444,7 +437,6 @@ fn copyWithPartialInline(s: []u32, b: []u8) void {
test "binary math operator in partially inlined function" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -462,7 +454,6 @@ test "binary math operator in partially inlined function" {
}
test "comptime shl" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -491,6 +482,7 @@ test "comptime bitwise operators" {
}
test "comptime shlWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -503,7 +495,6 @@ test "comptime shlWithOverflow" {
}
test "const ptr to variable data changes at runtime" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -521,7 +512,6 @@ const foo_ref = &foo_contents;
test "runtime 128 bit integer division" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -536,7 +526,6 @@ test "runtime 128 bit integer division" {
}
test "@tagName of @typeInfo" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -545,7 +534,6 @@ test "@tagName of @typeInfo" {
}
test "static eval list init" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -578,7 +566,6 @@ test "inlined loop has array literal with elided runtime scope on first iteratio
}
test "ptr to local array argument at comptime" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
comptime {
@ -741,7 +728,6 @@ test "*align(1) u16 is the same as *align(1:0:2) u16" {
test "array concatenation of function calls" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -751,7 +737,6 @@ test "array concatenation of function calls" {
test "array multiplication of function calls" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -769,7 +754,6 @@ fn scalar(x: u32) u32 {
test "array concatenation peer resolves element types - value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var a = [2]u3{ 1, 7 };
@ -786,7 +770,6 @@ test "array concatenation peer resolves element types - value" {
test "array concatenation peer resolves element types - pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -803,7 +786,6 @@ test "array concatenation peer resolves element types - pointer" {
test "array concatenation sets the sentinel - value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -823,7 +805,6 @@ test "array concatenation sets the sentinel - value" {
}
test "array concatenation sets the sentinel - pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -843,7 +824,6 @@ test "array concatenation sets the sentinel - pointer" {
test "array multiplication sets the sentinel - value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -862,7 +842,6 @@ test "array multiplication sets the sentinel - value" {
test "array multiplication sets the sentinel - pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -889,7 +868,6 @@ test "comptime assign int to optional int" {
test "two comptime calls with array default initialized to undefined" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -976,7 +954,6 @@ test "const local with comptime init through array init" {
}
test "closure capture type of runtime-known parameter" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -992,7 +969,6 @@ test "closure capture type of runtime-known parameter" {
}
test "closure capture type of runtime-known var" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: u32 = 1234;
@ -1035,7 +1011,6 @@ test "comptime break passing through runtime condition converted to runtime brea
}
test "comptime break to outer loop passing through runtime condition converted to runtime break" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1088,7 +1063,6 @@ test "comptime break operand passing through runtime condition converted to runt
}
test "comptime break operand passing through runtime switch converted to runtime break" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -1108,7 +1082,6 @@ test "comptime break operand passing through runtime switch converted to runtime
}
test "no dependency loop for alignment of self struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1147,7 +1120,6 @@ test "no dependency loop for alignment of self struct" {
}
test "no dependency loop for alignment of self bare union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1186,7 +1158,6 @@ test "no dependency loop for alignment of self bare union" {
}
test "no dependency loop for alignment of self tagged union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1230,7 +1201,6 @@ test "equality of pointers to comptime const" {
}
test "storing an array of type in a field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1260,7 +1230,6 @@ test "storing an array of type in a field" {
}
test "pass pointer to field of comptime-only type as a runtime parameter" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1387,7 +1356,6 @@ test "lazy sizeof union tag size in compare" {
}
test "lazy value is resolved as slice operand" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -1568,7 +1536,7 @@ test "x or true is comptime-known true" {
}
test "non-optional and optional array elements concatenated" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View File

@ -4,7 +4,6 @@ const expect = std.testing.expect;
test "exporting enum value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.cpu.arch.isWasm()) {
// https://github.com/ziglang/zig/issues/4866
@ -23,7 +22,6 @@ test "exporting enum value" {
test "exporting with internal linkage" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
fn foo() callconv(.c) void {}
@ -36,7 +34,6 @@ test "exporting with internal linkage" {
test "exporting using namespace access" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.cpu.arch.isWasm()) {
// https://github.com/ziglang/zig/issues/4866
@ -57,7 +54,6 @@ test "exporting using namespace access" {
test "exporting comptime-known value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

View File

@ -2,6 +2,7 @@ const expect = @import("std").testing.expect;
const builtin = @import("builtin");
test "@fieldParentPtr struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -586,6 +587,7 @@ test "@fieldParentPtr extern struct last zero-bit field" {
}
test "@fieldParentPtr unaligned packed struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -724,6 +726,7 @@ test "@fieldParentPtr unaligned packed struct" {
}
test "@fieldParentPtr aligned packed struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1339,6 +1342,7 @@ test "@fieldParentPtr packed struct last zero-bit field" {
}
test "@fieldParentPtr tagged union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -1475,6 +1479,7 @@ test "@fieldParentPtr tagged union" {
}
test "@fieldParentPtr untagged union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

Some files were not shown because too many files have changed in this diff.