diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index d4e5c25f36..f2a78100ad 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -1141,7 +1141,7 @@ test "setName, getName" { thread.join(); } -test "std.Thread" { +test { // Doesn't use testing.refAllDecls() since that would pull in the compileError spinLoopHint. _ = Futex; _ = ResetEvent; diff --git a/lib/std/atomic.zig b/lib/std/atomic.zig index d10c122482..8038d383a5 100644 --- a/lib/std/atomic.zig +++ b/lib/std/atomic.zig @@ -7,7 +7,7 @@ pub const Stack = @import("atomic/stack.zig").Stack; pub const Queue = @import("atomic/queue.zig").Queue; pub const Atomic = @import("atomic/Atomic.zig").Atomic; -test "std.atomic" { +test { _ = @import("atomic/stack.zig"); _ = @import("atomic/queue.zig"); _ = @import("atomic/Atomic.zig"); diff --git a/lib/std/bit_set.zig b/lib/std/bit_set.zig index a027022497..d8f9b7f90e 100644 --- a/lib/std/bit_set.zig +++ b/lib/std/bit_set.zig @@ -1635,6 +1635,8 @@ fn testStaticBitSet(comptime Set: type) !void { } test "IntegerBitSet" { + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; + try testStaticBitSet(IntegerBitSet(0)); try testStaticBitSet(IntegerBitSet(1)); try testStaticBitSet(IntegerBitSet(2)); diff --git a/lib/std/crypto/aes.zig b/lib/std/crypto/aes.zig index f5d96a5fe8..70246e6630 100644 --- a/lib/std/crypto/aes.zig +++ b/lib/std/crypto/aes.zig @@ -5,9 +5,10 @@ const testing = std.testing; const has_aesni = std.Target.x86.featureSetHas(builtin.cpu.features, .aes); const has_avx = std.Target.x86.featureSetHas(builtin.cpu.features, .avx); const has_armaes = std.Target.aarch64.featureSetHas(builtin.cpu.features, .aes); -const impl = if (builtin.cpu.arch == .x86_64 and has_aesni and has_avx) impl: { +// C backend doesn't currently support passing vectors to inline asm. 
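The comment above refers to the hardware paths in aes/aesni.zig and aes/armcrypto.zig, which pass `@Vector` values directly as inline assembly operands; the self-hosted C backend cannot express that yet, so these guards send stage2_c builds to the generic software implementation instead. A rough sketch of the kind of construct involved, assuming GNU C vector extensions and a compiler invoked with -maes -mavx (the function name and constraints are illustrative, not taken from this patch):

```c
/* Sketch only: a 128-bit vector handed straight to inline asm, which is what
 * the aesni path relies on and what generated C cannot express today. */
typedef unsigned char v16u8 __attribute__((vector_size(16)));

static v16u8 aes_round_sketch(v16u8 block, v16u8 round_key) {
    __asm__("vaesenc %2, %1, %0"
            : "=x"(block)                 /* vector result in an XMM register */
            : "x"(block), "x"(round_key));
    return block;
}
```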
+const impl = if (builtin.cpu.arch == .x86_64 and builtin.zig_backend != .stage2_c and has_aesni and has_avx) impl: { break :impl @import("aes/aesni.zig"); -} else if (builtin.cpu.arch == .aarch64 and has_armaes) +} else if (builtin.cpu.arch == .aarch64 and builtin.zig_backend != .stage2_c and has_armaes) impl: { break :impl @import("aes/armcrypto.zig"); } else impl: { diff --git a/lib/std/crypto/aes_ocb.zig b/lib/std/crypto/aes_ocb.zig index a5d1001ed5..83e33e5fca 100644 --- a/lib/std/crypto/aes_ocb.zig +++ b/lib/std/crypto/aes_ocb.zig @@ -257,6 +257,8 @@ inline fn xorWith(x: *Block, y: Block) void { const hexToBytes = std.fmt.hexToBytes; test "AesOcb test vector 1" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var k: [Aes128Ocb.key_length]u8 = undefined; var nonce: [Aes128Ocb.nonce_length]u8 = undefined; var tag: [Aes128Ocb.tag_length]u8 = undefined; @@ -274,6 +276,8 @@ test "AesOcb test vector 1" { } test "AesOcb test vector 2" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var k: [Aes128Ocb.key_length]u8 = undefined; var nonce: [Aes128Ocb.nonce_length]u8 = undefined; var tag: [Aes128Ocb.tag_length]u8 = undefined; @@ -293,6 +297,8 @@ test "AesOcb test vector 2" { } test "AesOcb test vector 3" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var k: [Aes128Ocb.key_length]u8 = undefined; var nonce: [Aes128Ocb.nonce_length]u8 = undefined; var tag: [Aes128Ocb.tag_length]u8 = undefined; @@ -315,6 +321,8 @@ test "AesOcb test vector 3" { } test "AesOcb test vector 4" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var k: [Aes128Ocb.key_length]u8 = undefined; var nonce: [Aes128Ocb.nonce_length]u8 = undefined; var tag: [Aes128Ocb.tag_length]u8 = undefined; diff --git a/lib/std/crypto/ecdsa.zig b/lib/std/crypto/ecdsa.zig index 919578c02e..37ae57a7e6 100644 --- a/lib/std/crypto/ecdsa.zig +++ b/lib/std/crypto/ecdsa.zig @@ -1,3 +1,4 @@ +const builtin = @import("builtin"); const std = @import("std"); const crypto = std.crypto; const fmt = std.fmt; @@ -373,6 +374,8 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type { } test "ECDSA - Basic operations over EcdsaP384Sha384" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + const Scheme = EcdsaP384Sha384; const kp = try Scheme.KeyPair.create(null); const msg = "test"; @@ -387,6 +390,8 @@ test "ECDSA - Basic operations over EcdsaP384Sha384" { } test "ECDSA - Basic operations over Secp256k1" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + const Scheme = EcdsaSecp256k1Sha256oSha256; const kp = try Scheme.KeyPair.create(null); const msg = "test"; @@ -401,6 +406,8 @@ test "ECDSA - Basic operations over Secp256k1" { } test "ECDSA - Basic operations over EcdsaP384Sha256" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + const Scheme = Ecdsa(crypto.ecc.P384, crypto.hash.sha2.Sha256); const kp = try Scheme.KeyPair.create(null); const msg = "test"; @@ -415,6 +422,8 @@ test "ECDSA - Basic operations over EcdsaP384Sha256" { } test "ECDSA - Verifying a existing signature with EcdsaP384Sha256" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + const Scheme = Ecdsa(crypto.ecc.P384, crypto.hash.sha2.Sha256); // zig fmt: off const sk_bytes = [_]u8{ @@ -457,6 +466,8 @@ const TestVector = struct { }; test "ECDSA - Test vectors from Project Wycheproof" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + const vectors = [_]TestVector{ .{ .key = 
"042927b10512bae3eddcfe467828128bad2903269919f7086069c8c4df6c732838c7787964eaac00e5921fb1498a60f4606766b3d9685001558d1a974e7341513e", .msg = "313233343030", .sig = "304402202ba3a8be6b94d5ec80a6d9d1190a436effe50d85a1eee859b8cc6af9bd5c2e1802204cd60b855d442f5b3c7b11eb6c4e0ae7525fe710fab9aa7c77a67f79e6fadd76", .result = .valid }, .{ .key = "042927b10512bae3eddcfe467828128bad2903269919f7086069c8c4df6c732838c7787964eaac00e5921fb1498a60f4606766b3d9685001558d1a974e7341513e", .msg = "313233343030", .sig = "304402202ba3a8be6b94d5ec80a6d9d1190a436effe50d85a1eee859b8cc6af9bd5c2e180220b329f479a2bbd0a5c384ee1493b1f5186a87139cac5df4087c134b49156847db", .result = .acceptable }, @@ -869,6 +880,8 @@ fn tvTry(vector: TestVector) !void { } test "ECDSA - Sec1 encoding/decoding" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + const Scheme = EcdsaP384Sha384; const kp = try Scheme.KeyPair.create(null); const pk = kp.public_key; diff --git a/lib/std/crypto/ghash_polyval.zig b/lib/std/crypto/ghash_polyval.zig index 908bace73e..2bb118c791 100644 --- a/lib/std/crypto/ghash_polyval.zig +++ b/lib/std/crypto/ghash_polyval.zig @@ -248,9 +248,10 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { const has_pclmul = std.Target.x86.featureSetHas(builtin.cpu.features, .pclmul); const has_avx = std.Target.x86.featureSetHas(builtin.cpu.features, .avx); const has_armaes = std.Target.aarch64.featureSetHas(builtin.cpu.features, .aes); - const clmul = if (builtin.cpu.arch == .x86_64 and has_pclmul and has_avx) impl: { + // C backend doesn't currently support passing vectors to inline asm. + const clmul = if (builtin.cpu.arch == .x86_64 and builtin.zig_backend != .stage2_c and has_pclmul and has_avx) impl: { break :impl clmulPclmul; - } else if (builtin.cpu.arch == .aarch64 and has_armaes) impl: { + } else if (builtin.cpu.arch == .aarch64 and builtin.zig_backend != .stage2_c and has_armaes) impl: { break :impl clmulPmull; } else impl: { break :impl clmulSoft; diff --git a/lib/std/crypto/pcurves/p256.zig b/lib/std/crypto/pcurves/p256.zig index d060abd12b..a160d08016 100644 --- a/lib/std/crypto/pcurves/p256.zig +++ b/lib/std/crypto/pcurves/p256.zig @@ -473,6 +473,6 @@ pub const AffineCoordinates = struct { } }; -test "p256" { +test { _ = @import("tests/p256.zig"); } diff --git a/lib/std/crypto/pcurves/p384.zig b/lib/std/crypto/pcurves/p384.zig index 8ea787a417..3aaf9e341f 100644 --- a/lib/std/crypto/pcurves/p384.zig +++ b/lib/std/crypto/pcurves/p384.zig @@ -473,6 +473,8 @@ pub const AffineCoordinates = struct { } }; -test "p384" { +test { + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; + _ = @import("tests/p384.zig"); } diff --git a/lib/std/crypto/pcurves/secp256k1.zig b/lib/std/crypto/pcurves/secp256k1.zig index 9dd8f55051..6998b0db82 100644 --- a/lib/std/crypto/pcurves/secp256k1.zig +++ b/lib/std/crypto/pcurves/secp256k1.zig @@ -551,6 +551,8 @@ pub const AffineCoordinates = struct { } }; -test "secp256k1" { +test { + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; + _ = @import("tests/secp256k1.zig"); } diff --git a/lib/std/crypto/sha2.zig b/lib/std/crypto/sha2.zig index 24c22ecc9f..07bda89585 100644 --- a/lib/std/crypto/sha2.zig +++ b/lib/std/crypto/sha2.zig @@ -205,7 +205,7 @@ fn Sha2x32(comptime params: Sha2Params32) type { if (!isComptime()) { switch (builtin.cpu.arch) { - .aarch64 => if (comptime std.Target.aarch64.featureSetHas(builtin.cpu.features, .sha2)) { + .aarch64 => if (builtin.zig_backend != .stage2_c and 
comptime std.Target.aarch64.featureSetHas(builtin.cpu.features, .sha2)) { var x: v4u32 = d.s[0..4].*; var y: v4u32 = d.s[4..8].*; const s_v = @ptrCast(*[16]v4u32, &s); @@ -242,7 +242,8 @@ fn Sha2x32(comptime params: Sha2Params32) type { d.s[4..8].* = y +% @as(v4u32, d.s[4..8].*); return; }, - .x86_64 => if (comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sha)) { + // C backend doesn't currently support passing vectors to inline asm. + .x86_64 => if (builtin.zig_backend != .stage2_c and comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sha)) { var x: v4u32 = [_]u32{ d.s[5], d.s[4], d.s[1], d.s[0] }; var y: v4u32 = [_]u32{ d.s[7], d.s[6], d.s[3], d.s[2] }; const s_v = @ptrCast(*[16]v4u32, &s); diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 663540c182..e093fa5dc8 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -2189,6 +2189,8 @@ pub fn dumpStackPointerAddr(prefix: []const u8) void { } test "manage resources correctly" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // error.UnsupportedBackend + if (builtin.os.tag == .wasi) return error.SkipZigTest; if (builtin.os.tag == .windows and builtin.cpu.arch == .x86_64) { diff --git a/lib/std/hash.zig b/lib/std/hash.zig index 8e92b4c9de..5c85b38d55 100644 --- a/lib/std/hash.zig +++ b/lib/std/hash.zig @@ -36,7 +36,7 @@ const xxhash = @import("hash/xxhash.zig"); pub const XxHash64 = xxhash.XxHash64; pub const XxHash32 = xxhash.XxHash32; -test "hash" { +test { _ = adler; _ = auto_hash; _ = crc; diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig index d5e35a787c..9eed0c3e3d 100644 --- a/lib/std/math/big/int_test.zig +++ b/lib/std/math/big/int_test.zig @@ -915,6 +915,8 @@ test "big.int mul multi-single" { } test "big.int mul multi-multi" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var op1: u256 = 0x998888efefefefefefefef; var op2: u256 = 0x333000abababababababab; var a = try Managed.initSet(testing.allocator, op1); @@ -1034,6 +1036,8 @@ test "big.int mulWrap single-single signed" { } test "big.int mulWrap multi-multi unsigned" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var op1: u256 = 0x998888efefefefefefefef; var op2: u256 = 0x333000abababababababab; var a = try Managed.initSet(testing.allocator, op1); @@ -1049,6 +1053,8 @@ test "big.int mulWrap multi-multi unsigned" { } test "big.int mulWrap multi-multi signed" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var a = try Managed.initSet(testing.allocator, maxInt(SignedDoubleLimb) - 1); defer a.deinit(); var b = try Managed.initSet(testing.allocator, maxInt(SignedDoubleLimb)); @@ -1252,6 +1258,8 @@ test "big.int div q=0 alias" { } test "big.int div multi-multi q < r" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + const op1 = 0x1ffffffff0078f432; const op2 = 0x1ffffffff01000000; var a = try Managed.initSet(testing.allocator, op1); @@ -1608,6 +1616,8 @@ test "big.int div floor positive close to zero" { } test "big.int div multi-multi with rem" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var a = try Managed.initSet(testing.allocator, 0x8888999911110000ffffeeeeddddccccbbbbaaaa9999); defer a.deinit(); var b = try Managed.initSet(testing.allocator, 0x99990000111122223333); @@ -1624,6 +1634,8 @@ test "big.int div multi-multi with rem" { } test "big.int div multi-multi no rem" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var a = try Managed.initSet(testing.allocator, 
0x8888999911110000ffffeeeedb4fec200ee3a4286361); defer a.deinit(); var b = try Managed.initSet(testing.allocator, 0x99990000111122223333); @@ -1640,6 +1652,8 @@ test "big.int div multi-multi no rem" { } test "big.int div multi-multi (2 branch)" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var a = try Managed.initSet(testing.allocator, 0x866666665555555588888887777777761111111111111111); defer a.deinit(); var b = try Managed.initSet(testing.allocator, 0x86666666555555554444444433333333); @@ -1656,6 +1670,8 @@ test "big.int div multi-multi (2 branch)" { } test "big.int div multi-multi (3.1/3.3 branch)" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var a = try Managed.initSet(testing.allocator, 0x11111111111111111111111111111111111111111111111111111111111111); defer a.deinit(); var b = try Managed.initSet(testing.allocator, 0x1111111111111111111111111111111111111111171); @@ -1672,6 +1688,8 @@ test "big.int div multi-multi (3.1/3.3 branch)" { } test "big.int div multi-single zero-limb trailing" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var a = try Managed.initSet(testing.allocator, 0x60000000000000000000000000000000000000000000000000000000000000000); defer a.deinit(); var b = try Managed.initSet(testing.allocator, 0x10000000000000000); @@ -1690,6 +1708,8 @@ test "big.int div multi-single zero-limb trailing" { } test "big.int div multi-multi zero-limb trailing (with rem)" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var a = try Managed.initSet(testing.allocator, 0x86666666555555558888888777777776111111111111111100000000000000000000000000000000); defer a.deinit(); var b = try Managed.initSet(testing.allocator, 0x8666666655555555444444443333333300000000000000000000000000000000); @@ -1709,6 +1729,8 @@ test "big.int div multi-multi zero-limb trailing (with rem)" { } test "big.int div multi-multi zero-limb trailing (with rem) and dividend zero-limb count > divisor zero-limb count" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var a = try Managed.initSet(testing.allocator, 0x8666666655555555888888877777777611111111111111110000000000000000); defer a.deinit(); var b = try Managed.initSet(testing.allocator, 0x8666666655555555444444443333333300000000000000000000000000000000); @@ -1728,6 +1750,8 @@ test "big.int div multi-multi zero-limb trailing (with rem) and dividend zero-li } test "big.int div multi-multi zero-limb trailing (with rem) and dividend zero-limb count < divisor zero-limb count" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var a = try Managed.initSet(testing.allocator, 0x86666666555555558888888777777776111111111111111100000000000000000000000000000000); defer a.deinit(); var b = try Managed.initSet(testing.allocator, 0x866666665555555544444444333333330000000000000000); @@ -2486,6 +2510,8 @@ test "big.int gcd non-one large" { } test "big.int gcd large multi-limb result" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var a = try Managed.initSet(testing.allocator, 0x12345678123456781234567812345678123456781234567812345678); defer a.deinit(); var b = try Managed.initSet(testing.allocator, 0x12345671234567123456712345671234567123456712345671234567); diff --git a/lib/std/math/complex.zig b/lib/std/math/complex.zig index b4a87492ce..8b9e8befab 100644 --- a/lib/std/math/complex.zig +++ b/lib/std/math/complex.zig @@ -189,7 +189,7 @@ test "complex.magnitude" { try testing.expect(math.approxEqAbs(f32, c, 5.83095, epsilon)); } -test 
"complex.cmath" { +test { _ = @import("complex/abs.zig"); _ = @import("complex/acosh.zig"); _ = @import("complex/acos.zig"); diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 6b48b7ebde..940882e930 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -1504,6 +1504,8 @@ test "comptime read/write int" { } test "readIntBig and readIntLittle" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + try testing.expect(readIntSliceBig(u0, &[_]u8{}) == 0x0); try testing.expect(readIntSliceLittle(u0, &[_]u8{}) == 0x0); @@ -1795,6 +1797,8 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value } test "writeIntBig and writeIntLittle" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + var buf0: [0]u8 = undefined; var buf1: [1]u8 = undefined; var buf2: [2]u8 = undefined; @@ -4011,6 +4015,8 @@ pub fn alignInSlice(slice: anytype, comptime new_alignment: usize) ?AlignedSlice } test "read/write(Var)PackedInt" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + switch (builtin.cpu.arch) { // This test generates too much code to execute on WASI. // LLVM backend fails with "too many locals: locals exceed maximum" diff --git a/lib/std/meta.zig b/lib/std/meta.zig index 3aca23b267..97c2ff4fb0 100644 --- a/lib/std/meta.zig +++ b/lib/std/meta.zig @@ -10,7 +10,7 @@ pub const TrailerFlags = @import("meta/trailer_flags.zig").TrailerFlags; const Type = std.builtin.Type; -test "std.meta.TrailerFlags" { +test { _ = TrailerFlags; } diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig index df7a7a2881..5174090712 100644 --- a/lib/std/os/test.zig +++ b/lib/std/os/test.zig @@ -502,8 +502,7 @@ fn iter_fn(info: *dl_phdr_info, size: usize, counter: *usize) IterFnError!void { } test "dl_iterate_phdr" { - if (native_os == .windows or native_os == .wasi or native_os == .macos) - return error.SkipZigTest; + if (builtin.object_format != .elf) return error.SkipZigTest; var counter: usize = 0; try os.dl_iterate_phdr(&counter, IterFnError, iter_fn); @@ -797,6 +796,11 @@ test "sigaction" { if (native_os == .linux and builtin.target.cpu.arch == .x86) return error.SkipZigTest; + // https://github.com/ziglang/zig/issues/15381 + if (native_os == .macos and builtin.target.cpu.arch == .x86_64) { + return error.SkipZigTest; + } + const S = struct { var handler_called_count: u32 = 0; diff --git a/lib/std/rand/Xoshiro256.zig b/lib/std/rand/Xoshiro256.zig index 42ad43c445..35af701ea1 100644 --- a/lib/std/rand/Xoshiro256.zig +++ b/lib/std/rand/Xoshiro256.zig @@ -90,6 +90,8 @@ pub fn fill(self: *Xoshiro256, buf: []u8) void { } test "xoroshiro sequence" { + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; + var r = Xoshiro256.init(0); const seq1 = [_]u64{ diff --git a/lib/zig.h b/lib/zig.h index 36f3318650..f73dfb72ef 100644 --- a/lib/zig.h +++ b/lib/zig.h @@ -253,97 +253,6 @@ typedef char bool; #define zig_concat(lhs, rhs) lhs##rhs #define zig_expand_concat(lhs, rhs) zig_concat(lhs, rhs) -#if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) -#include -#define zig_atomic(type) _Atomic(type) -#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) atomic_compare_exchange_strong_explicit(obj, &(expected), desired, succ, fail) -#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) atomic_compare_exchange_weak_explicit (obj, &(expected), desired, succ, fail) -#define zig_atomicrmw_xchg(obj, arg, order, type) atomic_exchange_explicit (obj, arg, order) -#define zig_atomicrmw_add(obj, arg, order, type) 
atomic_fetch_add_explicit (obj, arg, order) -#define zig_atomicrmw_sub(obj, arg, order, type) atomic_fetch_sub_explicit (obj, arg, order) -#define zig_atomicrmw_or(obj, arg, order, type) atomic_fetch_or_explicit (obj, arg, order) -#define zig_atomicrmw_xor(obj, arg, order, type) atomic_fetch_xor_explicit (obj, arg, order) -#define zig_atomicrmw_and(obj, arg, order, type) atomic_fetch_and_explicit (obj, arg, order) -#define zig_atomicrmw_nand(obj, arg, order, type) __atomic_fetch_nand (obj, arg, order) -#define zig_atomicrmw_min(obj, arg, order, type) __atomic_fetch_min (obj, arg, order) -#define zig_atomicrmw_max(obj, arg, order, type) __atomic_fetch_max (obj, arg, order) -#define zig_atomic_store(obj, arg, order, type) atomic_store_explicit (obj, arg, order) -#define zig_atomic_load(obj, order, type) atomic_load_explicit (obj, order) -#define zig_fence(order) atomic_thread_fence(order) -#elif defined(__GNUC__) -#define memory_order_relaxed __ATOMIC_RELAXED -#define memory_order_consume __ATOMIC_CONSUME -#define memory_order_acquire __ATOMIC_ACQUIRE -#define memory_order_release __ATOMIC_RELEASE -#define memory_order_acq_rel __ATOMIC_ACQ_REL -#define memory_order_seq_cst __ATOMIC_SEQ_CST -#define zig_atomic(type) type -#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) __atomic_compare_exchange_n(obj, &(expected), desired, false, succ, fail) -#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) __atomic_compare_exchange_n(obj, &(expected), desired, true , succ, fail) -#define zig_atomicrmw_xchg(obj, arg, order, type) __atomic_exchange_n(obj, arg, order) -#define zig_atomicrmw_add(obj, arg, order, type) __atomic_fetch_add (obj, arg, order) -#define zig_atomicrmw_sub(obj, arg, order, type) __atomic_fetch_sub (obj, arg, order) -#define zig_atomicrmw_or(obj, arg, order, type) __atomic_fetch_or (obj, arg, order) -#define zig_atomicrmw_xor(obj, arg, order, type) __atomic_fetch_xor (obj, arg, order) -#define zig_atomicrmw_and(obj, arg, order, type) __atomic_fetch_and (obj, arg, order) -#define zig_atomicrmw_nand(obj, arg, order, type) __atomic_fetch_nand(obj, arg, order) -#define zig_atomicrmw_min(obj, arg, order, type) __atomic_fetch_min (obj, arg, order) -#define zig_atomicrmw_max(obj, arg, order, type) __atomic_fetch_max (obj, arg, order) -#define zig_atomic_store(obj, arg, order, type) __atomic_store_n (obj, arg, order) -#define zig_atomic_load(obj, order, type) __atomic_load_n (obj, order) -#define zig_fence(order) __atomic_thread_fence(order) -#elif _MSC_VER && (_M_IX86 || _M_X64) -#define memory_order_relaxed 0 -#define memory_order_consume 1 -#define memory_order_acquire 2 -#define memory_order_release 3 -#define memory_order_acq_rel 4 -#define memory_order_seq_cst 5 -#define zig_atomic(type) type -#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) zig_expand_concat(zig_msvc_cmpxchg_, type)(obj, &(expected), desired) -#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) -#define zig_atomicrmw_xchg(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_xchg_, type)(obj, arg) -#define zig_atomicrmw_add(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_add_, type)(obj, arg) -#define zig_atomicrmw_sub(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_sub_, type)(obj, arg) -#define zig_atomicrmw_or(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_or_, type)(obj, arg) -#define zig_atomicrmw_xor(obj, arg, order, type) 
zig_expand_concat(zig_msvc_atomicrmw_xor_, type)(obj, arg) -#define zig_atomicrmw_and(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_and_, type)(obj, arg) -#define zig_atomicrmw_nand(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_nand_, type)(obj, arg) -#define zig_atomicrmw_min(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_min_, type)(obj, arg) -#define zig_atomicrmw_max(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_max_, type)(obj, arg) -#define zig_atomic_store(obj, arg, order, type) zig_expand_concat(zig_msvc_atomic_store_, type)(obj, arg) -#define zig_atomic_load(obj, order, type) zig_expand_concat(zig_msvc_atomic_load_, type)(obj) -#if _M_X64 -#define zig_fence(order) __faststorefence() -#else -#define zig_fence(order) zig_msvc_atomic_barrier() -#endif - -// TODO: _MSC_VER && (_M_ARM || _M_ARM64) -#else -#define memory_order_relaxed 0 -#define memory_order_consume 1 -#define memory_order_acquire 2 -#define memory_order_release 3 -#define memory_order_acq_rel 4 -#define memory_order_seq_cst 5 -#define zig_atomic(type) type -#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) zig_unimplemented() -#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) zig_unimplemented() -#define zig_atomicrmw_xchg(obj, arg, order, type) zig_unimplemented() -#define zig_atomicrmw_add(obj, arg, order, type) zig_unimplemented() -#define zig_atomicrmw_sub(obj, arg, order, type) zig_unimplemented() -#define zig_atomicrmw_or(obj, arg, order, type) zig_unimplemented() -#define zig_atomicrmw_xor(obj, arg, order, type) zig_unimplemented() -#define zig_atomicrmw_and(obj, arg, order, type) zig_unimplemented() -#define zig_atomicrmw_nand(obj, arg, order, type) zig_unimplemented() -#define zig_atomicrmw_min(obj, arg, order, type) zig_unimplemented() -#define zig_atomicrmw_max(obj, arg, order, type) zig_unimplemented() -#define zig_atomic_store(obj, arg, order, type) zig_unimplemented() -#define zig_atomic_load(obj, order, type) zig_unimplemented() -#define zig_fence(order) zig_unimplemented() -#endif - #if __STDC_VERSION__ >= 201112L #define zig_noreturn _Noreturn #elif zig_has_attribute(noreturn) || defined(zig_gnuc) @@ -502,15 +411,6 @@ typedef ptrdiff_t intptr_t; #endif -#define zig_make_small_i8(val) INT8_C(val) -#define zig_make_small_u8(val) UINT8_C(val) -#define zig_make_small_i16(val) INT16_C(val) -#define zig_make_small_u16(val) UINT16_C(val) -#define zig_make_small_i32(val) INT32_C(val) -#define zig_make_small_u32(val) UINT32_C(val) -#define zig_make_small_i64(val) INT64_C(val) -#define zig_make_small_u64(val) UINT64_C(val) - #define zig_minInt_i8 INT8_MIN #define zig_maxInt_i8 INT8_MAX #define zig_minInt_u8 UINT8_C(0) @@ -534,24 +434,24 @@ typedef ptrdiff_t intptr_t; #define zig_minInt_u(w, bits) zig_intLimit(u, w, min, bits) #define zig_maxInt_u(w, bits) zig_intLimit(u, w, max, bits) -#define zig_int_operator(Type, RhsType, operation, operator) \ +#define zig_operator(Type, RhsType, operation, operator) \ static inline Type zig_##operation(Type lhs, RhsType rhs) { \ return lhs operator rhs; \ } -#define zig_int_basic_operator(Type, operation, operator) \ - zig_int_operator(Type, Type, operation, operator) -#define zig_int_shift_operator(Type, operation, operator) \ - zig_int_operator(Type, uint8_t, operation, operator) +#define zig_basic_operator(Type, operation, operator) \ + zig_operator(Type, Type, operation, operator) +#define zig_shift_operator(Type, operation, operator) \ + zig_operator(Type, uint8_t, operation, 
operator) #define zig_int_helpers(w) \ - zig_int_basic_operator(uint##w##_t, and_u##w, &) \ - zig_int_basic_operator( int##w##_t, and_i##w, &) \ - zig_int_basic_operator(uint##w##_t, or_u##w, |) \ - zig_int_basic_operator( int##w##_t, or_i##w, |) \ - zig_int_basic_operator(uint##w##_t, xor_u##w, ^) \ - zig_int_basic_operator( int##w##_t, xor_i##w, ^) \ - zig_int_shift_operator(uint##w##_t, shl_u##w, <<) \ - zig_int_shift_operator( int##w##_t, shl_i##w, <<) \ - zig_int_shift_operator(uint##w##_t, shr_u##w, >>) \ + zig_basic_operator(uint##w##_t, and_u##w, &) \ + zig_basic_operator( int##w##_t, and_i##w, &) \ + zig_basic_operator(uint##w##_t, or_u##w, |) \ + zig_basic_operator( int##w##_t, or_i##w, |) \ + zig_basic_operator(uint##w##_t, xor_u##w, ^) \ + zig_basic_operator( int##w##_t, xor_i##w, ^) \ + zig_shift_operator(uint##w##_t, shl_u##w, <<) \ + zig_shift_operator( int##w##_t, shl_i##w, <<) \ + zig_shift_operator(uint##w##_t, shr_u##w, >>) \ \ static inline int##w##_t zig_shr_i##w(int##w##_t lhs, uint8_t rhs) { \ int##w##_t sign_mask = lhs < INT##w##_C(0) ? -INT##w##_C(1) : INT##w##_C(0); \ @@ -576,13 +476,13 @@ typedef ptrdiff_t intptr_t; ? val | zig_minInt_i(w, bits) : val & zig_maxInt_i(w, bits); \ } \ \ - zig_int_basic_operator(uint##w##_t, div_floor_u##w, /) \ + zig_basic_operator(uint##w##_t, div_floor_u##w, /) \ \ static inline int##w##_t zig_div_floor_i##w(int##w##_t lhs, int##w##_t rhs) { \ return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < INT##w##_C(0)); \ } \ \ - zig_int_basic_operator(uint##w##_t, mod_u##w, %) \ + zig_basic_operator(uint##w##_t, mod_u##w, %) \ \ static inline int##w##_t zig_mod_i##w(int##w##_t lhs, int##w##_t rhs) { \ int##w##_t rem = lhs % rhs; \ @@ -1253,8 +1153,8 @@ typedef signed __int128 zig_i128; #define zig_lo_u128(val) ((uint64_t)((val) >> 0)) #define zig_hi_i128(val) (( int64_t)((val) >> 64)) #define zig_lo_i128(val) ((uint64_t)((val) >> 0)) -#define zig_bitcast_u128(val) ((zig_u128)(val)) -#define zig_bitcast_i128(val) ((zig_i128)(val)) +#define zig_bitCast_u128(val) ((zig_u128)(val)) +#define zig_bitCast_i128(val) ((zig_i128)(val)) #define zig_cmp_int128(Type) \ static inline int32_t zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \ return (lhs > rhs) - (lhs < rhs); \ @@ -1288,8 +1188,8 @@ typedef struct { zig_align(16) int64_t hi; uint64_t lo; } zig_i128; #define zig_lo_u128(val) ((val).lo) #define zig_hi_i128(val) ((val).hi) #define zig_lo_i128(val) ((val).lo) -#define zig_bitcast_u128(val) zig_make_u128((uint64_t)(val).hi, (val).lo) -#define zig_bitcast_i128(val) zig_make_i128(( int64_t)(val).hi, (val).lo) +#define zig_bitCast_u128(val) zig_make_u128((uint64_t)(val).hi, (val).lo) +#define zig_bitCast_i128(val) zig_make_i128(( int64_t)(val).hi, (val).lo) #define zig_cmp_int128(Type) \ static inline int32_t zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \ return (lhs.hi == rhs.hi) \ @@ -1303,9 +1203,6 @@ typedef struct { zig_align(16) int64_t hi; uint64_t lo; } zig_i128; #endif /* zig_has_int128 */ -#define zig_make_small_u128(val) zig_make_u128(0, val) -#define zig_make_small_i128(val) zig_make_i128((val) < 0 ? 
-INT64_C(1) : INT64_C(0), val) - #define zig_minInt_u128 zig_make_u128(zig_minInt_u64, zig_minInt_u64) #define zig_maxInt_u128 zig_make_u128(zig_maxInt_u64, zig_maxInt_u64) #define zig_minInt_i128 zig_make_i128(zig_minInt_i64, zig_minInt_u64) @@ -1466,18 +1363,18 @@ static zig_i128 zig_mul_i128(zig_i128 lhs, zig_i128 rhs) { } static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) { - return zig_bitcast_u128(zig_mul_i128(zig_bitcast_i128(lhs), zig_bitcast_i128(rhs))); + return zig_bitCast_u128(zig_mul_i128(zig_bitCast_i128(lhs), zig_bitCast_i128(rhs))); } zig_extern zig_u128 __udivti3(zig_u128 lhs, zig_u128 rhs); static zig_u128 zig_div_trunc_u128(zig_u128 lhs, zig_u128 rhs) { return __udivti3(lhs, rhs); -}; +} zig_extern zig_i128 __divti3(zig_i128 lhs, zig_i128 rhs); static zig_i128 zig_div_trunc_i128(zig_i128 lhs, zig_i128 rhs) { return __divti3(lhs, rhs); -}; +} zig_extern zig_u128 __umodti3(zig_u128 lhs, zig_u128 rhs); static zig_u128 zig_rem_u128(zig_u128 lhs, zig_u128 rhs) { @@ -1503,10 +1400,6 @@ static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) { #define zig_div_floor_u128 zig_div_trunc_u128 #define zig_mod_u128 zig_rem_u128 -static inline zig_u128 zig_nand_u128(zig_u128 lhs, zig_u128 rhs) { - return zig_not_u128(zig_and_u128(lhs, rhs), 128); -} - static inline zig_u128 zig_min_u128(zig_u128 lhs, zig_u128 rhs) { return zig_cmp_u128(lhs, rhs) < INT32_C(0) ? lhs : rhs; } @@ -1538,7 +1431,7 @@ static inline zig_u128 zig_shlw_u128(zig_u128 lhs, uint8_t rhs, uint8_t bits) { } static inline zig_i128 zig_shlw_i128(zig_i128 lhs, uint8_t rhs, uint8_t bits) { - return zig_wrap_i128(zig_bitcast_i128(zig_shl_u128(zig_bitcast_u128(lhs), rhs)), bits); + return zig_wrap_i128(zig_bitCast_i128(zig_shl_u128(zig_bitCast_u128(lhs), rhs)), bits); } static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { @@ -1546,7 +1439,7 @@ static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { } static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { - return zig_wrap_i128(zig_bitcast_i128(zig_add_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); + return zig_wrap_i128(zig_bitCast_i128(zig_add_u128(zig_bitCast_u128(lhs), zig_bitCast_u128(rhs))), bits); } static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { @@ -1554,7 +1447,7 @@ static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { } static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { - return zig_wrap_i128(zig_bitcast_i128(zig_sub_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); + return zig_wrap_i128(zig_bitCast_i128(zig_sub_u128(zig_bitCast_u128(lhs), zig_bitCast_u128(rhs))), bits); } static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { @@ -1562,7 +1455,7 @@ static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { } static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { - return zig_wrap_i128(zig_bitcast_i128(zig_mul_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); + return zig_wrap_i128(zig_bitCast_i128(zig_mul_u128(zig_bitCast_u128(lhs), zig_bitCast_u128(rhs))), bits); } #if zig_has_int128 @@ -1697,7 +1590,7 @@ static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, uint8_t rhs, uint8 static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, uint8_t rhs, uint8_t bits) { *res = zig_shlw_i128(lhs, rhs, bits); - zig_i128 mask = 
zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - UINT8_C(1))); + zig_i128 mask = zig_bitCast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - UINT8_C(1))); return zig_cmp_i128(zig_and_i128(lhs, mask), zig_make_i128(0, 0)) != INT32_C(0) && zig_cmp_i128(zig_and_i128(lhs, mask), mask) != INT32_C(0); } @@ -1711,7 +1604,7 @@ static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { zig_i128 res; - if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_i128(&res, lhs, (uint8_t)zig_lo_i128(rhs), bits)) return res; + if (zig_cmp_u128(zig_bitCast_u128(rhs), zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_i128(&res, lhs, (uint8_t)zig_lo_i128(rhs), bits)) return res; return zig_cmp_i128(lhs, zig_make_i128(0, 0)) < INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits); } @@ -1755,7 +1648,7 @@ static inline uint8_t zig_clz_u128(zig_u128 val, uint8_t bits) { } static inline uint8_t zig_clz_i128(zig_i128 val, uint8_t bits) { - return zig_clz_u128(zig_bitcast_u128(val), bits); + return zig_clz_u128(zig_bitCast_u128(val), bits); } static inline uint8_t zig_ctz_u128(zig_u128 val, uint8_t bits) { @@ -1764,7 +1657,7 @@ static inline uint8_t zig_ctz_u128(zig_u128 val, uint8_t bits) { } static inline uint8_t zig_ctz_i128(zig_i128 val, uint8_t bits) { - return zig_ctz_u128(zig_bitcast_u128(val), bits); + return zig_ctz_u128(zig_bitCast_u128(val), bits); } static inline uint8_t zig_popcount_u128(zig_u128 val, uint8_t bits) { @@ -1773,7 +1666,7 @@ static inline uint8_t zig_popcount_u128(zig_u128 val, uint8_t bits) { } static inline uint8_t zig_popcount_i128(zig_i128 val, uint8_t bits) { - return zig_popcount_u128(zig_bitcast_u128(val), bits); + return zig_popcount_u128(zig_bitCast_u128(val), bits); } static inline zig_u128 zig_byte_swap_u128(zig_u128 val, uint8_t bits) { @@ -1788,7 +1681,7 @@ static inline zig_u128 zig_byte_swap_u128(zig_u128 val, uint8_t bits) { } static inline zig_i128 zig_byte_swap_i128(zig_i128 val, uint8_t bits) { - return zig_bitcast_i128(zig_byte_swap_u128(zig_bitcast_u128(val), bits)); + return zig_bitCast_i128(zig_byte_swap_u128(zig_bitCast_u128(val), bits)); } static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, uint8_t bits) { @@ -1798,7 +1691,7 @@ static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, uint8_t bits) { } static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) { - return zig_bitcast_i128(zig_bit_reverse_u128(zig_bitcast_u128(val), bits)); + return zig_bitCast_i128(zig_bit_reverse_u128(zig_bitCast_u128(val), bits)); } /* ========================== Big Integer Support =========================== */ @@ -1972,6 +1865,243 @@ static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_sign return 0; } +static inline void zig_and_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { + uint8_t *res_bytes = res; + const uint8_t *lhs_bytes = lhs; + const uint8_t *rhs_bytes = rhs; + uint16_t byte_offset = 0; + uint16_t remaining_bytes = zig_int_bytes(bits); + (void)is_signed; + + while (remaining_bytes >= 128 / CHAR_BIT) { + zig_u128 res_limb; + zig_u128 lhs_limb; + zig_u128 rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_and_u128(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 128 / CHAR_BIT; 
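The new zig_and_big, zig_or_big, and zig_xor_big helpers all follow the same shape: walk the two operand buffers in progressively narrower limbs (128, 64, 32, 16, then 8 bits), copy each limb in and out with memcpy so the accesses stay alignment- and aliasing-safe, and apply the corresponding fixed-width operator; the is_signed parameter is accepted only for signature consistency and is ignored. A minimal usage sketch, assuming zig.h is in scope and that zig_int_bytes(256) evaluates to 32:

```c
/* Usage sketch only: AND two 256-bit operands stored as byte buffers. */
#include <stdint.h>
#include <string.h>

int main(void) {
    uint8_t lhs[32], rhs[32], res[32];
    memset(lhs, 0xff, sizeof(lhs));
    memset(rhs, 0x0f, sizeof(rhs));
    zig_and_big(res, lhs, rhs, /*is_signed=*/0, /*bits=*/256);
    return res[0] == 0x0f ? 0 : 1; /* every result byte should be 0x0f */
}
```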
+ byte_offset += 128 / CHAR_BIT; + } + + while (remaining_bytes >= 64 / CHAR_BIT) { + uint64_t res_limb; + uint64_t lhs_limb; + uint64_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_and_u64(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 64 / CHAR_BIT; + byte_offset += 64 / CHAR_BIT; + } + + while (remaining_bytes >= 32 / CHAR_BIT) { + uint32_t res_limb; + uint32_t lhs_limb; + uint32_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_and_u32(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 32 / CHAR_BIT; + byte_offset += 32 / CHAR_BIT; + } + + while (remaining_bytes >= 16 / CHAR_BIT) { + uint16_t res_limb; + uint16_t lhs_limb; + uint16_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_and_u16(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 16 / CHAR_BIT; + byte_offset += 16 / CHAR_BIT; + } + + while (remaining_bytes >= 8 / CHAR_BIT) { + uint8_t res_limb; + uint8_t lhs_limb; + uint8_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_and_u8(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 8 / CHAR_BIT; + byte_offset += 8 / CHAR_BIT; + } +} + +static inline void zig_or_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { + uint8_t *res_bytes = res; + const uint8_t *lhs_bytes = lhs; + const uint8_t *rhs_bytes = rhs; + uint16_t byte_offset = 0; + uint16_t remaining_bytes = zig_int_bytes(bits); + (void)is_signed; + + while (remaining_bytes >= 128 / CHAR_BIT) { + zig_u128 res_limb; + zig_u128 lhs_limb; + zig_u128 rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_or_u128(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 128 / CHAR_BIT; + byte_offset += 128 / CHAR_BIT; + } + + while (remaining_bytes >= 64 / CHAR_BIT) { + uint64_t res_limb; + uint64_t lhs_limb; + uint64_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_or_u64(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 64 / CHAR_BIT; + byte_offset += 64 / CHAR_BIT; + } + + while (remaining_bytes >= 32 / CHAR_BIT) { + uint32_t res_limb; + uint32_t lhs_limb; + uint32_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_or_u32(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 32 / CHAR_BIT; + byte_offset += 32 / CHAR_BIT; + } + + while (remaining_bytes >= 16 / CHAR_BIT) { + uint16_t res_limb; + uint16_t lhs_limb; + uint16_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + 
res_limb = zig_or_u16(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 16 / CHAR_BIT; + byte_offset += 16 / CHAR_BIT; + } + + while (remaining_bytes >= 8 / CHAR_BIT) { + uint8_t res_limb; + uint8_t lhs_limb; + uint8_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_or_u8(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 8 / CHAR_BIT; + byte_offset += 8 / CHAR_BIT; + } +} + +static inline void zig_xor_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { + uint8_t *res_bytes = res; + const uint8_t *lhs_bytes = lhs; + const uint8_t *rhs_bytes = rhs; + uint16_t byte_offset = 0; + uint16_t remaining_bytes = zig_int_bytes(bits); + (void)is_signed; + + while (remaining_bytes >= 128 / CHAR_BIT) { + zig_u128 res_limb; + zig_u128 lhs_limb; + zig_u128 rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_xor_u128(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 128 / CHAR_BIT; + byte_offset += 128 / CHAR_BIT; + } + + while (remaining_bytes >= 64 / CHAR_BIT) { + uint64_t res_limb; + uint64_t lhs_limb; + uint64_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_xor_u64(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 64 / CHAR_BIT; + byte_offset += 64 / CHAR_BIT; + } + + while (remaining_bytes >= 32 / CHAR_BIT) { + uint32_t res_limb; + uint32_t lhs_limb; + uint32_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_xor_u32(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 32 / CHAR_BIT; + byte_offset += 32 / CHAR_BIT; + } + + while (remaining_bytes >= 16 / CHAR_BIT) { + uint16_t res_limb; + uint16_t lhs_limb; + uint16_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_xor_u16(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 16 / CHAR_BIT; + byte_offset += 16 / CHAR_BIT; + } + + while (remaining_bytes >= 8 / CHAR_BIT) { + uint8_t res_limb; + uint8_t lhs_limb; + uint8_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_xor_u8(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 8 / CHAR_BIT; + byte_offset += 8 / CHAR_BIT; + } +} + static inline bool zig_addo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { uint8_t *res_bytes = res; const uint8_t *lhs_bytes = lhs; @@ -2827,24 +2957,20 @@ long double __cdecl nanl(char const* input); #endif #if (zig_has_builtin(nan) && zig_has_builtin(nans) && zig_has_builtin(inf)) || defined(zig_gnuc) -#define zig_has_float_builtins 1 -#define zig_make_special_f16(sign, name, arg, repr) sign zig_make_f16(__builtin_##name, )(arg) -#define zig_make_special_f32(sign, name, arg, repr) 
sign zig_make_f32(__builtin_##name, )(arg) -#define zig_make_special_f64(sign, name, arg, repr) sign zig_make_f64(__builtin_##name, )(arg) -#define zig_make_special_f80(sign, name, arg, repr) sign zig_make_f80(__builtin_##name, )(arg) +#define zig_make_special_f16(sign, name, arg, repr) sign zig_make_f16 (__builtin_##name, )(arg) +#define zig_make_special_f32(sign, name, arg, repr) sign zig_make_f32 (__builtin_##name, )(arg) +#define zig_make_special_f64(sign, name, arg, repr) sign zig_make_f64 (__builtin_##name, )(arg) +#define zig_make_special_f80(sign, name, arg, repr) sign zig_make_f80 (__builtin_##name, )(arg) #define zig_make_special_f128(sign, name, arg, repr) sign zig_make_f128(__builtin_##name, )(arg) #else -#define zig_has_float_builtins 0 -#define zig_make_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr) -#define zig_make_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr) -#define zig_make_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr) -#define zig_make_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr) -#define zig_make_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr) +#define zig_make_special_f16(sign, name, arg, repr) zig_bitCast_f16 (repr) +#define zig_make_special_f32(sign, name, arg, repr) zig_bitCast_f32 (repr) +#define zig_make_special_f64(sign, name, arg, repr) zig_bitCast_f64 (repr) +#define zig_make_special_f80(sign, name, arg, repr) zig_bitCast_f80 (repr) +#define zig_make_special_f128(sign, name, arg, repr) zig_bitCast_f128(repr) #endif #define zig_has_f16 1 -#define zig_bitSizeOf_f16 16 -typedef uint16_t zig_repr_f16; #define zig_libc_name_f16(name) __##name##h #define zig_init_special_f16(sign, name, arg, repr) zig_make_special_f16(sign, name, arg, repr) #if FLT_MANT_DIG == 11 @@ -2854,10 +2980,6 @@ typedef float zig_f16; typedef double zig_f16; #define zig_make_f16(fp, repr) fp #elif LDBL_MANT_DIG == 11 -#define zig_bitSizeOf_c_longdouble 16 -#ifndef ZIG_TARGET_ABI_MSVC -typedef zig_repr_f16 zig_repr_c_longdouble; -#endif typedef long double zig_f16; #define zig_make_f16(fp, repr) fp##l #elif FLT16_MANT_DIG == 11 && (zig_has_builtin(inff16) || defined(zig_gnuc)) @@ -2869,8 +2991,8 @@ typedef __fp16 zig_f16; #else #undef zig_has_f16 #define zig_has_f16 0 -#define zig_bitSizeOf_repr_f16 16 -typedef zig_repr_f16 zig_f16; +#define zig_repr_f16 u16 +typedef uint16_t zig_f16; #define zig_make_f16(fp, repr) repr #undef zig_make_special_f16 #define zig_make_special_f16(sign, name, arg, repr) repr @@ -2878,15 +3000,12 @@ typedef zig_repr_f16 zig_f16; #define zig_init_special_f16(sign, name, arg, repr) repr #endif #if __APPLE__ && (defined(__i386__) || defined(__x86_64__)) -typedef zig_repr_f16 zig_compiler_rt_f16; +typedef uint16_t zig_compiler_rt_f16; #else typedef zig_f16 zig_compiler_rt_f16; #endif -#define zig_compiler_rt_abbrev_zig_compiler_rt_f16 zig_compiler_rt_abbrev_zig_f16 #define zig_has_f32 1 -#define zig_bitSizeOf_f32 32 -typedef uint32_t zig_repr_f32; #define zig_libc_name_f32(name) name##f #if _MSC_VER #define zig_init_special_f32(sign, name, arg, repr) sign zig_make_f32(zig_msvc_flt_##name, ) @@ -2900,10 +3019,6 @@ typedef float zig_f32; typedef double zig_f32; #define zig_make_f32(fp, repr) fp #elif LDBL_MANT_DIG == 24 -#define zig_bitSizeOf_c_longdouble 32 -#ifndef ZIG_TARGET_ABI_MSVC -typedef zig_repr_f32 zig_repr_c_longdouble; -#endif typedef long double zig_f32; #define zig_make_f32(fp, repr) fp##l #elif FLT32_MANT_DIG == 24 @@ -2912,8 +3027,8 @@ typedef _Float32 zig_f32; 
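These hunks also retire the zig_repr_fNN typedefs and the zig_bitSizeOf_* macros: zig_repr_fNN now names a representation suffix (u16, u32, u64, u128, or v2u64 for the f128 fallback on Apple and aarch64 targets), and the old zig_float_from_repr_fNN helpers become zig_bitCast_fNN, generated from a memcpy-based macro further down. A simplified sketch of that bit-cast pattern, with names made up for illustration:

```c
/* Sketch of the memcpy-based bit cast the zig_bitCast_float macro expands to
 * (simplified names; the real helpers are generated per float type). */
#include <stdint.h>
#include <string.h>

static inline double bitcast_f64_sketch(uint64_t repr) {
    double result;
    memcpy(&result, &repr, sizeof(result)); /* reinterpret bits, no conversion */
    return result;
}
```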
#else #undef zig_has_f32 #define zig_has_f32 0 -#define zig_bitSizeOf_repr_f32 32 -typedef zig_repr_f32 zig_f32; +#define zig_repr_f32 u32 +ypedef uint32_t zig_f32; #define zig_make_f32(fp, repr) repr #undef zig_make_special_f32 #define zig_make_special_f32(sign, name, arg, repr) repr @@ -2922,20 +3037,12 @@ typedef zig_repr_f32 zig_f32; #endif #define zig_has_f64 1 -#define zig_bitSizeOf_f64 64 -typedef uint64_t zig_repr_f64; #define zig_libc_name_f64(name) name #if _MSC_VER -#ifdef ZIG_TARGET_ABI_MSVC -#define zig_bitSizeOf_c_longdouble 64 -#ifndef ZIG_TARGET_ABI_MSVC -typedef zig_repr_f64 zig_repr_c_longdouble; -#endif -#endif #define zig_init_special_f64(sign, name, arg, repr) sign zig_make_f64(zig_msvc_flt_##name, ) -#else /* _MSC_VER */ +#else #define zig_init_special_f64(sign, name, arg, repr) zig_make_special_f64(sign, name, arg, repr) -#endif /* _MSC_VER */ +#endif #if FLT_MANT_DIG == 53 typedef float zig_f64; #define zig_make_f64(fp, repr) fp##f @@ -2943,10 +3050,6 @@ typedef float zig_f64; typedef double zig_f64; #define zig_make_f64(fp, repr) fp #elif LDBL_MANT_DIG == 53 -#define zig_bitSizeOf_c_longdouble 64 -#ifndef ZIG_TARGET_ABI_MSVC -typedef zig_repr_f64 zig_repr_c_longdouble; -#endif typedef long double zig_f64; #define zig_make_f64(fp, repr) fp##l #elif FLT64_MANT_DIG == 53 @@ -2958,8 +3061,8 @@ typedef _Float32x zig_f64; #else #undef zig_has_f64 #define zig_has_f64 0 -#define zig_bitSizeOf_repr_f64 64 -typedef zig_repr_f64 zig_f64; +#define zig_repr_f64 u64 +typedef uint64_t zig_f64; #define zig_make_f64(fp, repr) repr #undef zig_make_special_f64 #define zig_make_special_f64(sign, name, arg, repr) repr @@ -2968,8 +3071,6 @@ typedef zig_repr_f64 zig_f64; #endif #define zig_has_f80 1 -#define zig_bitSizeOf_f80 80 -typedef zig_u128 zig_repr_f80; #define zig_libc_name_f80(name) __##name##x #define zig_init_special_f80(sign, name, arg, repr) zig_make_special_f80(sign, name, arg, repr) #if FLT_MANT_DIG == 64 @@ -2979,10 +3080,6 @@ typedef float zig_f80; typedef double zig_f80; #define zig_make_f80(fp, repr) fp #elif LDBL_MANT_DIG == 64 -#define zig_bitSizeOf_c_longdouble 80 -#ifndef ZIG_TARGET_ABI_MSVC -typedef zig_repr_f80 zig_repr_c_longdouble; -#endif typedef long double zig_f80; #define zig_make_f80(fp, repr) fp##l #elif FLT80_MANT_DIG == 64 @@ -2997,8 +3094,8 @@ typedef __float80 zig_f80; #else #undef zig_has_f80 #define zig_has_f80 0 -#define zig_bitSizeOf_repr_f80 128 -typedef zig_repr_f80 zig_f80; +#define zig_repr_f80 u128 +typedef zig_u128 zig_f80; #define zig_make_f80(fp, repr) repr #undef zig_make_special_f80 #define zig_make_special_f80(sign, name, arg, repr) repr @@ -3007,8 +3104,6 @@ typedef zig_repr_f80 zig_f80; #endif #define zig_has_f128 1 -#define zig_bitSizeOf_f128 128 -typedef zig_u128 zig_repr_f128; #define zig_libc_name_f128(name) name##q #define zig_init_special_f128(sign, name, arg, repr) zig_make_special_f128(sign, name, arg, repr) #if FLT_MANT_DIG == 113 @@ -3018,10 +3113,6 @@ typedef float zig_f128; typedef double zig_f128; #define zig_make_f128(fp, repr) fp #elif LDBL_MANT_DIG == 113 -#define zig_bitSizeOf_c_longdouble 128 -#ifndef ZIG_TARGET_ABI_MSVC -typedef zig_repr_f128 zig_repr_c_longdouble; -#endif typedef long double zig_f128; #define zig_make_f128(fp, repr) fp##l #elif FLT128_MANT_DIG == 113 @@ -3038,50 +3129,49 @@ typedef __float128 zig_f128; #else #undef zig_has_f128 #define zig_has_f128 0 -#define zig_bitSizeOf_repr_f128 128 -typedef zig_repr_f128 zig_f128; -#define zig_make_f128(fp, repr) repr #undef zig_make_special_f128 -#define 
zig_make_special_f128(sign, name, arg, repr) repr #undef zig_init_special_f128 +#if __APPLE__ || defined(__aarch64__) +typedef __attribute__((__vector_size__(2 * sizeof(uint64_t)))) uint64_t zig_v2u64; +zig_basic_operator(zig_v2u64, xor_v2u64, ^) +#define zig_repr_f128 v2u64 +typedef zig_v2u64 zig_f128; +#define zig_make_f128_zig_make_u128(hi, lo) (zig_f128){ lo, hi } +#define zig_make_f128_zig_init_u128 zig_make_f128_zig_make_u128 +#define zig_make_f128(fp, repr) zig_make_f128_##repr +#define zig_make_special_f128(sign, name, arg, repr) zig_make_f128_##repr +#define zig_init_special_f128(sign, name, arg, repr) zig_make_f128_##repr +#else +#define zig_repr_f128 u128 +typedef zig_u128 zig_f128; +#define zig_make_f128(fp, repr) repr +#define zig_make_special_f128(sign, name, arg, repr) repr #define zig_init_special_f128(sign, name, arg, repr) repr #endif +#endif -#ifdef zig_bitSizeOf_c_longdouble - -#define zig_has_c_longdouble 1 -#ifdef ZIG_TARGET_ABI_MSVC -#undef zig_bitSizeOf_c_longdouble -#define zig_bitSizeOf_c_longdouble 64 +#if !_MSC_VER && defined(ZIG_TARGET_ABI_MSVC) +/* Emulate msvc abi on a gnu compiler */ typedef zig_f64 zig_c_longdouble; -typedef zig_repr_f64 zig_repr_c_longdouble; +#elif _MSC_VER && !defined(ZIG_TARGET_ABI_MSVC) +/* Emulate gnu abi on an msvc compiler */ +typedef zig_f128 zig_c_longdouble; #else +/* Target and compiler abi match */ typedef long double zig_c_longdouble; #endif -#else /* zig_bitSizeOf_c_longdouble */ - -#define zig_has_c_longdouble 0 -#define zig_bitSizeOf_repr_c_longdouble 128 -typedef zig_f128 zig_c_longdouble; -typedef zig_repr_f128 zig_repr_c_longdouble; - -#endif /* zig_bitSizeOf_c_longdouble */ - -#if !zig_has_float_builtins -#define zig_float_from_repr(Type) \ - static inline zig_##Type zig_float_from_repr_##Type(zig_repr_##Type repr) { \ +#define zig_bitCast_float(Type, ReprType) \ + static inline zig_##Type zig_bitCast_##Type(ReprType repr) { \ zig_##Type result; \ memcpy(&result, &repr, sizeof(result)); \ return result; \ } - -zig_float_from_repr(f16) -zig_float_from_repr(f32) -zig_float_from_repr(f64) -zig_float_from_repr(f80) -zig_float_from_repr(f128) -#endif +zig_bitCast_float(f16, uint16_t) +zig_bitCast_float(f32, uint32_t) +zig_bitCast_float(f64, uint64_t) +zig_bitCast_float(f80, zig_u128) +zig_bitCast_float(f128, zig_u128) #define zig_cast_f16 (zig_f16) #define zig_cast_f32 (zig_f32) @@ -3095,44 +3185,53 @@ zig_float_from_repr(f128) #define zig_cast_f128 (zig_f128) #endif -#define zig_convert_builtin(ResType, operation, ArgType, version) \ - zig_extern ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \ - zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(ArgType); -zig_convert_builtin(zig_compiler_rt_f16, trunc, zig_f32, 2) -zig_convert_builtin(zig_compiler_rt_f16, trunc, zig_f64, 2) -zig_convert_builtin(zig_f16, trunc, zig_f80, 2) -zig_convert_builtin(zig_f16, trunc, zig_f128, 2) -zig_convert_builtin(zig_f32, extend, zig_compiler_rt_f16, 2) -zig_convert_builtin(zig_f32, trunc, zig_f64, 2) -zig_convert_builtin(zig_f32, trunc, zig_f80, 2) -zig_convert_builtin(zig_f32, trunc, zig_f128, 2) -zig_convert_builtin(zig_f64, extend, zig_compiler_rt_f16, 2) -zig_convert_builtin(zig_f64, extend, zig_f32, 2) -zig_convert_builtin(zig_f64, trunc, zig_f80, 2) -zig_convert_builtin(zig_f64, trunc, zig_f128, 2) -zig_convert_builtin(zig_f80, extend, zig_f16, 2) -zig_convert_builtin(zig_f80, extend, zig_f32, 2) -zig_convert_builtin(zig_f80, extend, zig_f64, 2) -zig_convert_builtin(zig_f80, 
trunc, zig_f128, 2) -zig_convert_builtin(zig_f128, extend, zig_f16, 2) -zig_convert_builtin(zig_f128, extend, zig_f32, 2) -zig_convert_builtin(zig_f128, extend, zig_f64, 2) -zig_convert_builtin(zig_f128, extend, zig_f80, 2) +#define zig_convert_builtin(ExternResType, ResType, operation, ExternArgType, ArgType, version) \ + zig_extern ExternResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \ + zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(ExternArgType); \ + static inline ResType zig_expand_concat(zig_expand_concat(zig_##operation, \ + zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType)(ArgType arg) { \ + ResType res; \ + ExternResType extern_res; \ + ExternArgType extern_arg; \ + memcpy(&extern_arg, &arg, sizeof(extern_arg)); \ + extern_res = zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \ + zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(extern_arg); \ + memcpy(&res, &extern_res, sizeof(res)); \ + return extern_res; \ + } +zig_convert_builtin(zig_compiler_rt_f16, zig_f16, trunc, zig_f32, zig_f32, 2) +zig_convert_builtin(zig_compiler_rt_f16, zig_f16, trunc, zig_f64, zig_f64, 2) +zig_convert_builtin(zig_f16, zig_f16, trunc, zig_f80, zig_f80, 2) +zig_convert_builtin(zig_f16, zig_f16, trunc, zig_f128, zig_f128, 2) +zig_convert_builtin(zig_f32, zig_f32, extend, zig_compiler_rt_f16, zig_f16, 2) +zig_convert_builtin(zig_f32, zig_f32, trunc, zig_f64, zig_f64, 2) +zig_convert_builtin(zig_f32, zig_f32, trunc, zig_f80, zig_f80, 2) +zig_convert_builtin(zig_f32, zig_f32, trunc, zig_f128, zig_f128, 2) +zig_convert_builtin(zig_f64, zig_f64, extend, zig_compiler_rt_f16, zig_f16, 2) +zig_convert_builtin(zig_f64, zig_f64, extend, zig_f32, zig_f32, 2) +zig_convert_builtin(zig_f64, zig_f64, trunc, zig_f80, zig_f80, 2) +zig_convert_builtin(zig_f64, zig_f64, trunc, zig_f128, zig_f128, 2) +zig_convert_builtin(zig_f80, zig_f80, extend, zig_f16, zig_f16, 2) +zig_convert_builtin(zig_f80, zig_f80, extend, zig_f32, zig_f32, 2) +zig_convert_builtin(zig_f80, zig_f80, extend, zig_f64, zig_f64, 2) +zig_convert_builtin(zig_f80, zig_f80, trunc, zig_f128, zig_f128, 2) +zig_convert_builtin(zig_f128, zig_f128, extend, zig_f16, zig_f16, 2) +zig_convert_builtin(zig_f128, zig_f128, extend, zig_f32, zig_f32, 2) +zig_convert_builtin(zig_f128, zig_f128, extend, zig_f64, zig_f64, 2) +zig_convert_builtin(zig_f128, zig_f128, extend, zig_f80, zig_f80, 2) -#define zig_float_negate_builtin_0(w) \ +#define zig_float_negate_builtin_0(w, c, sb) \ + zig_expand_concat(zig_xor_, zig_repr_f##w)(arg, zig_make_f##w(-0x0.0p0, c sb)) +#define zig_float_negate_builtin_1(w, c, sb) -arg +#define zig_float_negate_builtin(w, c, sb) \ static inline zig_f##w zig_neg_f##w(zig_f##w arg) { \ - return zig_expand_concat(zig_xor_u, zig_bitSizeOf_repr_f##w)( \ - arg, \ - zig_expand_concat(zig_shl_u, zig_bitSizeOf_repr_f##w)( \ - zig_expand_concat(zig_make_small_u, zig_bitSizeOf_repr_f##w)(1), \ - UINT8_C(w - 1) \ - ) \ - ); \ - } -#define zig_float_negate_builtin_1(w) \ - static inline zig_f##w zig_neg_f##w(zig_f##w arg) { \ - return -arg; \ + return zig_expand_concat(zig_float_negate_builtin_, zig_has_f##w)(w, c, sb); \ } +zig_float_negate_builtin(16, , UINT16_C(1) << 15 ) +zig_float_negate_builtin(32, , UINT32_C(1) << 31 ) +zig_float_negate_builtin(64, , UINT64_C(1) << 63 ) +zig_float_negate_builtin(80, zig_make_u128, (UINT64_C(1) << 15, UINT64_C(0))) +zig_float_negate_builtin(128, zig_make_u128, (UINT64_C(1) << 63, UINT64_C(0))) 
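Float negation is reworked along the same lines: when zig_has_fNN is 1 the helper is simply -arg, and when the type has no native C representation the value is carried in its integer (or vector) representation, so zig_neg_fNN XORs in the sign-bit constant built by zig_make_f##w(-0x0.0p0, ...). A sketch of the soft case for a 64-bit float, with an illustrative name:

```c
/* Sketch of the soft-float branch (zig_has_f64 == 0): the value lives in its
 * uint64_t representation, so negation just flips the sign bit. */
#include <stdint.h>

static inline uint64_t neg_f64_repr_sketch(uint64_t arg) {
    return arg ^ (UINT64_C(1) << 63);
}
```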
#define zig_float_less_builtin_0(Type, operation) \ zig_extern int32_t zig_expand_concat(zig_expand_concat(__##operation, \ @@ -3164,19 +3263,18 @@ zig_convert_builtin(zig_f128, extend, zig_f80, 2) } #define zig_float_builtins(w) \ - zig_convert_builtin( int32_t, fix, zig_f##w, ) \ - zig_convert_builtin(uint32_t, fixuns, zig_f##w, ) \ - zig_convert_builtin( int64_t, fix, zig_f##w, ) \ - zig_convert_builtin(uint64_t, fixuns, zig_f##w, ) \ - zig_convert_builtin(zig_i128, fix, zig_f##w, ) \ - zig_convert_builtin(zig_u128, fixuns, zig_f##w, ) \ - zig_convert_builtin(zig_f##w, float, int32_t, ) \ - zig_convert_builtin(zig_f##w, floatun, uint32_t, ) \ - zig_convert_builtin(zig_f##w, float, int64_t, ) \ - zig_convert_builtin(zig_f##w, floatun, uint64_t, ) \ - zig_convert_builtin(zig_f##w, float, zig_i128, ) \ - zig_convert_builtin(zig_f##w, floatun, zig_u128, ) \ - zig_expand_concat(zig_float_negate_builtin_, zig_has_f##w)(w) \ + zig_convert_builtin( int32_t, int32_t, fix, zig_f##w, zig_f##w, ) \ + zig_convert_builtin(uint32_t, uint32_t, fixuns, zig_f##w, zig_f##w, ) \ + zig_convert_builtin( int64_t, int64_t, fix, zig_f##w, zig_f##w, ) \ + zig_convert_builtin(uint64_t, uint64_t, fixuns, zig_f##w, zig_f##w, ) \ + zig_convert_builtin(zig_i128, zig_i128, fix, zig_f##w, zig_f##w, ) \ + zig_convert_builtin(zig_u128, zig_u128, fixuns, zig_f##w, zig_f##w, ) \ + zig_convert_builtin(zig_f##w, zig_f##w, float, int32_t, int32_t, ) \ + zig_convert_builtin(zig_f##w, zig_f##w, floatun, uint32_t, uint32_t, ) \ + zig_convert_builtin(zig_f##w, zig_f##w, float, int64_t, int64_t, ) \ + zig_convert_builtin(zig_f##w, zig_f##w, floatun, uint64_t, uint64_t, ) \ + zig_convert_builtin(zig_f##w, zig_f##w, float, zig_i128, zig_i128, ) \ + zig_convert_builtin(zig_f##w, zig_f##w, floatun, zig_u128, zig_u128, ) \ zig_expand_concat(zig_float_less_builtin_, zig_has_f##w)(f##w, cmp) \ zig_expand_concat(zig_float_less_builtin_, zig_has_f##w)(f##w, ne) \ zig_expand_concat(zig_float_less_builtin_, zig_has_f##w)(f##w, eq) \ @@ -3224,9 +3322,238 @@ zig_float_builtins(64) zig_float_builtins(80) zig_float_builtins(128) +/* ============================ Atomics Support ============================= */ + +/* Note that atomics should be implemented as macros because most + compilers silently discard runtime atomic order information. */ + +/* Define fallback implementations first that can later be undef'd on compilers with builtin support. */ +/* Note that zig_atomicrmw_expected is needed to handle aliasing between res and arg. 
*/ +#define zig_atomicrmw_xchg_float(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_atomic_load(zig_atomicrmw_expected, obj, memory_order_relaxed, Type, ReprType); \ + while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, arg, order, memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_add_float(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_add_##Type(zig_atomicrmw_expected, arg); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_sub_float(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_sub_##Type(zig_atomicrmw_expected, arg); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_min_float(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_libc_name_##Type(fmin)(zig_atomicrmw_expected, arg); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_max_float(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_libc_name_##Type(fmax)(zig_atomicrmw_expected, arg); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) + +#define zig_atomicrmw_xchg_int128(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_atomic_load(zig_atomicrmw_expected, obj, memory_order_relaxed, Type, ReprType); \ + while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, arg, order, memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_add_int128(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_add_##Type(zig_atomicrmw_expected, arg); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_sub_int128(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = 
zig_sub_##Type(zig_atomicrmw_expected, arg); \
+ } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, memory_order_relaxed, Type, ReprType)); \
+ res = zig_atomicrmw_expected; \
+} while (0)
+#define zig_atomicrmw_and_int128(res, obj, arg, order, Type, ReprType) do { \
+ zig_##Type zig_atomicrmw_expected; \
+ zig_##Type zig_atomicrmw_desired; \
+ zig_atomic_load(zig_atomicrmw_expected, obj, memory_order_relaxed, Type, ReprType); \
+ do { \
+ zig_atomicrmw_desired = zig_and_##Type(zig_atomicrmw_expected, arg); \
+ } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, memory_order_relaxed, Type, ReprType)); \
+ res = zig_atomicrmw_expected; \
+} while (0)
+#define zig_atomicrmw_nand_int128(res, obj, arg, order, Type, ReprType) do { \
+ zig_##Type zig_atomicrmw_expected; \
+ zig_##Type zig_atomicrmw_desired; \
+ zig_atomic_load(zig_atomicrmw_expected, obj, memory_order_relaxed, Type, ReprType); \
+ do { \
+ zig_atomicrmw_desired = zig_not_##Type(zig_and_##Type(zig_atomicrmw_expected, arg), 128); \
+ } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, memory_order_relaxed, Type, ReprType)); \
+ res = zig_atomicrmw_expected; \
+} while (0)
+#define zig_atomicrmw_or_int128(res, obj, arg, order, Type, ReprType) do { \
+ zig_##Type zig_atomicrmw_expected; \
+ zig_##Type zig_atomicrmw_desired; \
+ zig_atomic_load(zig_atomicrmw_expected, obj, memory_order_relaxed, Type, ReprType); \
+ do { \
+ zig_atomicrmw_desired = zig_or_##Type(zig_atomicrmw_expected, arg); \
+ } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, memory_order_relaxed, Type, ReprType)); \
+ res = zig_atomicrmw_expected; \
+} while (0)
+#define zig_atomicrmw_xor_int128(res, obj, arg, order, Type, ReprType) do { \
+ zig_##Type zig_atomicrmw_expected; \
+ zig_##Type zig_atomicrmw_desired; \
+ zig_atomic_load(zig_atomicrmw_expected, obj, memory_order_relaxed, Type, ReprType); \
+ do { \
+ zig_atomicrmw_desired = zig_xor_##Type(zig_atomicrmw_expected, arg); \
+ } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, memory_order_relaxed, Type, ReprType)); \
+ res = zig_atomicrmw_expected; \
+} while (0)
+#define zig_atomicrmw_min_int128(res, obj, arg, order, Type, ReprType) do { \
+ zig_##Type zig_atomicrmw_expected; \
+ zig_##Type zig_atomicrmw_desired; \
+ zig_atomic_load(zig_atomicrmw_expected, obj, memory_order_relaxed, Type, ReprType); \
+ do { \
+ zig_atomicrmw_desired = zig_min_##Type(zig_atomicrmw_expected, arg); \
+ } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, memory_order_relaxed, Type, ReprType)); \
+ res = zig_atomicrmw_expected; \
+} while (0)
+#define zig_atomicrmw_max_int128(res, obj, arg, order, Type, ReprType) do { \
+ zig_##Type zig_atomicrmw_expected; \
+ zig_##Type zig_atomicrmw_desired; \
+ zig_atomic_load(zig_atomicrmw_expected, obj, memory_order_relaxed, Type, ReprType); \
+ do { \
+ zig_atomicrmw_desired = zig_max_##Type(zig_atomicrmw_expected, arg); \
+ } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, memory_order_relaxed, Type, ReprType)); \
+ res = zig_atomicrmw_expected; \
+} while (0)
+
+#if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
+#include <stdatomic.h>
+typedef enum memory_order zig_memory_order;
+#define zig_atomic(Type) _Atomic(Type)
+#define zig_cmpxchg_strong( obj, expected, desired, succ, fail, Type, ReprType) atomic_compare_exchange_strong_explicit(obj,
&(expected), desired, succ, fail) +#define zig_cmpxchg_weak( obj, expected, desired, succ, fail, Type, ReprType) atomic_compare_exchange_weak_explicit (obj, &(expected), desired, succ, fail) +#define zig_atomicrmw_xchg(res, obj, arg, order, Type, ReprType) res = atomic_exchange_explicit (obj, arg, order) +#define zig_atomicrmw_add(res, obj, arg, order, Type, ReprType) res = atomic_fetch_add_explicit (obj, arg, order) +#define zig_atomicrmw_sub(res, obj, arg, order, Type, ReprType) res = atomic_fetch_sub_explicit (obj, arg, order) +#define zig_atomicrmw_or(res, obj, arg, order, Type, ReprType) res = atomic_fetch_or_explicit (obj, arg, order) +#define zig_atomicrmw_xor(res, obj, arg, order, Type, ReprType) res = atomic_fetch_xor_explicit (obj, arg, order) +#define zig_atomicrmw_and(res, obj, arg, order, Type, ReprType) res = atomic_fetch_and_explicit (obj, arg, order) +#define zig_atomicrmw_nand(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_nand(obj, arg, order) +#define zig_atomicrmw_min(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_min (obj, arg, order) +#define zig_atomicrmw_max(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_max (obj, arg, order) +#define zig_atomic_store( obj, arg, order, Type, ReprType) atomic_store_explicit (obj, arg, order) +#define zig_atomic_load(res, obj, order, Type, ReprType) res = atomic_load_explicit (obj, order) +#undef zig_atomicrmw_xchg_float +#define zig_atomicrmw_xchg_float zig_atomicrmw_xchg +#undef zig_atomicrmw_add_float +#define zig_atomicrmw_add_float zig_atomicrmw_add +#undef zig_atomicrmw_sub_float +#define zig_atomicrmw_sub_float zig_atomicrmw_sub +#define zig_fence(order) atomic_thread_fence(order) +#elif defined(__GNUC__) +typedef int zig_memory_order; +#define memory_order_relaxed __ATOMIC_RELAXED +#define memory_order_consume __ATOMIC_CONSUME +#define memory_order_acquire __ATOMIC_ACQUIRE +#define memory_order_release __ATOMIC_RELEASE +#define memory_order_acq_rel __ATOMIC_ACQ_REL +#define memory_order_seq_cst __ATOMIC_SEQ_CST +#define zig_atomic(Type) Type +#define zig_cmpxchg_strong( obj, expected, desired, succ, fail, Type, ReprType) __atomic_compare_exchange(obj, &(expected), &(desired), false, succ, fail) +#define zig_cmpxchg_weak( obj, expected, desired, succ, fail, Type, ReprType) __atomic_compare_exchange(obj, &(expected), &(desired), true, succ, fail) +#define zig_atomicrmw_xchg(res, obj, arg, order, Type, ReprType) __atomic_exchange(obj, &(arg), &(res), order) +#define zig_atomicrmw_add(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_add (obj, arg, order) +#define zig_atomicrmw_sub(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_sub (obj, arg, order) +#define zig_atomicrmw_or(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_or (obj, arg, order) +#define zig_atomicrmw_xor(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_xor (obj, arg, order) +#define zig_atomicrmw_and(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_and (obj, arg, order) +#define zig_atomicrmw_nand(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_nand(obj, arg, order) +#define zig_atomicrmw_min(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_min (obj, arg, order) +#define zig_atomicrmw_max(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_max (obj, arg, order) +#define zig_atomic_store( obj, arg, order, Type, ReprType) __atomic_store (obj, &(arg), order) +#define zig_atomic_load(res, obj, order, Type, ReprType) __atomic_load (obj, &(res), order) 
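(Editorial note, not part of the patch.) The `zig_atomicrmw_*_float` and `zig_atomicrmw_*_int128` fallback macros defined above all follow the same pattern: load the current value, compute the desired result, then retry with a weak compare-exchange until no other thread has intervened, finally yielding the previous value as the result. A minimal sketch of that retry loop in plain C11 atomics, shown for a float fetch-add; the helper name `atomic_fetch_add_float` and the `_Atomic uint32_t` storage cell are assumptions of this sketch, not definitions from zig.h:

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Emulate atomic fetch-add on a float stored in an _Atomic uint32_t using a
   load / compute / compare-exchange-weak retry loop, as the fallback macros do. */
static float atomic_fetch_add_float(_Atomic uint32_t *obj, float arg) {
    uint32_t expected = atomic_load_explicit(obj, memory_order_relaxed);
    for (;;) {
        float old, desired_f;
        uint32_t desired;
        memcpy(&old, &expected, sizeof(old));
        desired_f = old + arg;
        memcpy(&desired, &desired_f, sizeof(desired));
        if (atomic_compare_exchange_weak_explicit(obj, &expected, desired,
                                                  memory_order_seq_cst,
                                                  memory_order_relaxed))
            return old; /* like fetch-add, return the previous value */
        /* on failure, `expected` now holds the current value; retry */
    }
}

int main(void) {
    _Atomic uint32_t cell;
    float init = 1.0f;
    uint32_t bits;
    memcpy(&bits, &init, sizeof(bits));
    atomic_init(&cell, bits);
    printf("prev=%f\n", atomic_fetch_add_float(&cell, 0.5f)); /* prev=1.000000 */
    return 0;
}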
+#undef zig_atomicrmw_xchg_float +#define zig_atomicrmw_xchg_float zig_atomicrmw_xchg +#define zig_fence(order) __atomic_thread_fence(order) +#elif _MSC_VER && (_M_IX86 || _M_X64) +#define memory_order_relaxed 0 +#define memory_order_consume 1 +#define memory_order_acquire 2 +#define memory_order_release 3 +#define memory_order_acq_rel 4 +#define memory_order_seq_cst 5 +#define zig_atomic(Type) Type +#define zig_cmpxchg_strong( obj, expected, desired, succ, fail, Type, ReprType) zig_msvc_cmpxchg_##Type(obj, &(expected), desired) +#define zig_cmpxchg_weak( obj, expected, desired, succ, fail, Type, ReprType) zig_cmpxchg_strong(obj, expected, desired, succ, fail, Type, ReprType) +#define zig_atomicrmw_xchg(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_xchg_##Type(obj, arg) +#define zig_atomicrmw_add(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_add_ ##Type(obj, arg) +#define zig_atomicrmw_sub(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_sub_ ##Type(obj, arg) +#define zig_atomicrmw_or(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_or_ ##Type(obj, arg) +#define zig_atomicrmw_xor(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_xor_ ##Type(obj, arg) +#define zig_atomicrmw_and(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_and_ ##Type(obj, arg) +#define zig_atomicrmw_nand(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_nand_##Type(obj, arg) +#define zig_atomicrmw_min(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_min_ ##Type(obj, arg) +#define zig_atomicrmw_max(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_max_ ##Type(obj, arg) +#define zig_atomic_store( obj, arg, order, Type, ReprType) zig_msvc_atomic_store_ ##Type(obj, arg) +#define zig_atomic_load(res, obj, order, Type, ReprType) res = zig_msvc_atomic_load_ ##Type(obj) +#if _M_X64 +#define zig_fence(order) __faststorefence() +#else +#define zig_fence(order) zig_msvc_atomic_barrier() +#endif +/* TODO: _MSC_VER && (_M_ARM || _M_ARM64) */ +#else +#define memory_order_relaxed 0 +#define memory_order_consume 1 +#define memory_order_acquire 2 +#define memory_order_release 3 +#define memory_order_acq_rel 4 +#define memory_order_seq_cst 5 +#define zig_atomic(Type) Type +#define zig_cmpxchg_strong( obj, expected, desired, succ, fail, Type, ReprType) zig_atomics_unavailable +#define zig_cmpxchg_weak( obj, expected, desired, succ, fail, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_xchg(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_add(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_sub(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_or(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_xor(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_and(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_nand(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_min(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_max(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomic_store( obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomic_load(res, obj, order, Type, ReprType) zig_atomics_unavailable +#define zig_fence(order) zig_fence_unavailable +#endif + #if _MSC_VER && (_M_IX86 || 
_M_X64) -// TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64 +/* TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64 */ #define zig_msvc_atomics(ZigType, Type, SigType, suffix) \ static inline bool zig_msvc_cmpxchg_##ZigType(Type volatile* obj, Type* expected, Type desired) { \ @@ -3316,51 +3643,30 @@ zig_msvc_atomics(u64, uint64_t, __int64, 64) zig_msvc_atomics(i64, int64_t, __int64, 64) #endif -#define zig_msvc_flt_atomics(Type, ReprType, suffix) \ +#define zig_msvc_flt_atomics(Type, SigType, suffix) \ static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \ - ReprType exchange; \ - ReprType comparand; \ - ReprType initial; \ + SigType exchange; \ + SigType comparand; \ + SigType initial; \ bool success; \ memcpy(&comparand, expected, sizeof(comparand)); \ memcpy(&exchange, &desired, sizeof(exchange)); \ - initial = _InterlockedCompareExchange##suffix((ReprType volatile*)obj, exchange, comparand); \ + initial = _InterlockedCompareExchange##suffix((SigType volatile*)obj, exchange, comparand); \ success = initial == comparand; \ if (!success) memcpy(expected, &initial, sizeof(*expected)); \ return success; \ } \ - static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \ - ReprType repr; \ - ReprType initial; \ + static inline void zig_msvc_atomic_store_##Type(zig_##Type volatile* obj, zig_##Type arg) { \ + SigType value; \ + memcpy(&value, &arg, sizeof(value)); \ + (void)_InterlockedExchange##suffix((SigType volatile*)obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomic_load_##Type(zig_##Type volatile* obj) { \ zig_##Type result; \ - memcpy(&repr, &value, sizeof(repr)); \ - initial = _InterlockedExchange##suffix((ReprType volatile*)obj, repr); \ + SigType initial = _InterlockedExchangeAdd##suffix((SigType volatile*)obj, (SigType)0); \ memcpy(&result, &initial, sizeof(result)); \ return result; \ - } \ - static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \ - ReprType repr; \ - zig_##Type expected; \ - zig_##Type desired; \ - repr = *(ReprType volatile*)obj; \ - memcpy(&expected, &repr, sizeof(expected)); \ - do { \ - desired = expected + value; \ - } while (!zig_msvc_cmpxchg_##Type(obj, &expected, desired)); \ - return expected; \ - } \ - static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \ - ReprType repr; \ - zig_##Type expected; \ - zig_##Type desired; \ - repr = *(ReprType volatile*)obj; \ - memcpy(&expected, &repr, sizeof(expected)); \ - do { \ - desired = expected - value; \ - } while (!zig_msvc_cmpxchg_##Type(obj, &expected, desired)); \ - return expected; \ } - zig_msvc_flt_atomics(f32, long, ) #if _M_X64 zig_msvc_flt_atomics(f64, int64_t, 64) @@ -3421,42 +3727,6 @@ static inline bool zig_msvc_cmpxchg_u128(zig_u128 volatile* obj, zig_u128* expec static inline bool zig_msvc_cmpxchg_i128(zig_i128 volatile* obj, zig_i128* expected, zig_i128 desired) { return _InterlockedCompareExchange128((__int64 volatile*)obj, (__int64)zig_hi_i128(desired), (__int64)zig_lo_i128(desired), (__int64*)expected); } - -#define zig_msvc_atomics_128xchg(Type) \ - static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \ - bool success = false; \ - zig_##Type prev; \ - while (!success) { \ - prev = *obj; \ - success = 
zig_msvc_cmpxchg_##Type(obj, &prev, value); \ - } \ - return prev; \ - } - -zig_msvc_atomics_128xchg(u128) -zig_msvc_atomics_128xchg(i128) - -#define zig_msvc_atomics_128op(Type, operation) \ - static inline zig_##Type zig_msvc_atomicrmw_##operation##_##Type(zig_##Type volatile* obj, zig_##Type value) { \ - bool success = false; \ - zig_##Type new; \ - zig_##Type prev; \ - while (!success) { \ - prev = *obj; \ - new = zig_##operation##_##Type(prev, value); \ - success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ - } \ - return prev; \ - } - -zig_msvc_atomics_128op(u128, add) -zig_msvc_atomics_128op(u128, sub) -zig_msvc_atomics_128op(u128, or) -zig_msvc_atomics_128op(u128, xor) -zig_msvc_atomics_128op(u128, and) -zig_msvc_atomics_128op(u128, nand) -zig_msvc_atomics_128op(u128, min) -zig_msvc_atomics_128op(u128, max) #endif /* _M_IX86 */ #endif /* _MSC_VER && (_M_IX86 || _M_X64) */ diff --git a/src/Sema.zig b/src/Sema.zig index 95aa667950..5c19d37431 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -21319,8 +21319,8 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A return sema.fail(block, op_src, "@atomicRmw with bool only allowed with .Xchg", .{}); }, .Float => switch (op) { - .Xchg, .Add, .Sub => {}, - else => return sema.fail(block, op_src, "@atomicRmw with float only allowed with .Xchg, .Add, and .Sub", .{}), + .Xchg, .Add, .Sub, .Max, .Min => {}, + else => return sema.fail(block, op_src, "@atomicRmw with float only allowed with .Xchg, .Add, .Sub, .Max, and .Min", .{}), }, else => {}, } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index d2d594c901..903d2449fd 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -45,9 +45,6 @@ pub const CValue = union(enum) { identifier: []const u8, /// Render the slice as an payload.identifier (using fmtIdent) payload_identifier: []const u8, - /// Render these bytes literally. 
- /// TODO make this a [*:0]const u8 to save memory - bytes: []const u8, }; const BlockData = struct { @@ -434,7 +431,7 @@ pub const Function = struct { return f.object.dg.renderCType(w, t); } - fn renderIntCast(f: *Function, w: anytype, dest_ty: Type, src: CValue, v: Vectorizer, src_ty: Type, location: ValueRenderLocation) !void { + fn renderIntCast(f: *Function, w: anytype, dest_ty: Type, src: CValue, v: Vectorize, src_ty: Type, location: ValueRenderLocation) !void { return f.object.dg.renderIntCast(w, dest_ty, .{ .c_value = .{ .f = f, .value = src, .v = v } }, src_ty, location); } @@ -811,11 +808,13 @@ pub const DeclGen = struct { try writer.writeByte('{'); var empty = true; - for (ty.structFields().values()) |field| { - if (!field.ty.hasRuntimeBits()) continue; + for (0..ty.structFieldCount()) |field_i| { + if (ty.structFieldIsComptime(field_i)) continue; + const field_ty = ty.structFieldType(field_i); + if (!field_ty.hasRuntimeBits()) continue; if (!empty) try writer.writeByte(','); - try dg.renderValue(writer, field.ty, val, initializer_type); + try dg.renderValue(writer, field_ty, val, initializer_type); empty = false; } @@ -837,19 +836,27 @@ pub const DeclGen = struct { if (layout.tag_size != 0) { try writer.writeAll(" .tag = "); try dg.renderValue(writer, tag_ty, val, initializer_type); - try writer.writeByte(','); } + if (ty.unionHasAllZeroBitFieldTypes()) return try writer.writeByte('}'); + if (layout.tag_size != 0) try writer.writeByte(','); try writer.writeAll(" .payload = {"); } for (ty.unionFields().values()) |field| { if (!field.ty.hasRuntimeBits()) continue; try dg.renderValue(writer, field.ty, val, initializer_type); break; - } else try writer.print("{x}", .{try dg.fmtIntLiteral(Type.u8, Value.undef, .Other)}); + } if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}'); return writer.writeByte('}'); }, .ErrorUnion => { + const payload_ty = ty.errorUnionPayload(); + const error_ty = ty.errorUnionSet(); + + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + return dg.renderValue(writer, error_ty, val, location); + } + if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderType(writer, ty); @@ -857,18 +864,12 @@ pub const DeclGen = struct { } try writer.writeAll("{ .payload = "); - try dg.renderValue(writer, ty.errorUnionPayload(), val, initializer_type); - return writer.print(", .error = {x} }}", .{ - try dg.fmtIntLiteral(ty.errorUnionSet(), val, .Other), - }); + try dg.renderValue(writer, payload_ty, val, initializer_type); + try writer.writeAll(", .error = "); + try dg.renderValue(writer, error_ty, val, initializer_type); + return writer.writeAll(" }"); }, .Array, .Vector => { - if (!location.isInitializer()) { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); - } - const ai = ty.arrayInfo(); if (ai.elem_type.eql(Type.u8, dg.module)) { var literal = stringLiteral(writer); @@ -879,6 +880,12 @@ pub const DeclGen = struct { try literal.writeChar(0xaa); return literal.end(); } else { + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + try writer.writeByte('{'); const c_len = ty.arrayLenIncludingSentinel(); var index: u64 = 0; @@ -1199,23 +1206,20 @@ pub const DeclGen = struct { try writer.writeAll(" }"); }, .ErrorSet => { - const error_name = if (val.castTag(.@"error")) |error_pl| - error_pl.data.name - else - dg.module.error_name_list.items[0]; - // Error values are already defined by genErrDecls. 
- try writer.print("zig_error_{}", .{fmtIdent(error_name)}); + if (val.castTag(.@"error")) |error_pl| { + // Error values are already defined by genErrDecls. + try writer.print("zig_error_{}", .{fmtIdent(error_pl.data.name)}); + } else { + try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, .Other)}); + } }, .ErrorUnion => { - const error_ty = ty.errorUnionSet(); const payload_ty = ty.errorUnionPayload(); + const error_ty = ty.errorUnionSet(); + const error_val = if (val.errorUnionIsPayload()) Value.zero else val; - if (!payload_ty.hasRuntimeBits()) { - // We use the error type directly as the type. - if (val.errorUnionIsPayload()) { - return try writer.writeByte('0'); - } - return dg.renderValue(writer, error_ty, val, location); + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + return dg.renderValue(writer, error_ty, error_val, location); } if (!location.isInitializer()) { @@ -1225,8 +1229,6 @@ pub const DeclGen = struct { } const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef; - const error_val = if (val.errorUnionIsPayload()) Value.zero else val; - try writer.writeAll("{ .payload = "); try dg.renderValue(writer, payload_ty, payload_val, initializer_type); try writer.writeAll(", .error = "); @@ -1290,9 +1292,10 @@ pub const DeclGen = struct { try writer.writeByte('{'); var empty = true; - for (field_vals, 0..) |field_val, field_index| { - const field_ty = ty.structFieldType(field_index); - if (!field_ty.hasRuntimeBits()) continue; + for (field_vals, 0..) |field_val, field_i| { + if (ty.structFieldIsComptime(field_i)) continue; + const field_ty = ty.structFieldType(field_i); + if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; if (!empty) try writer.writeByte(','); try dg.renderValue(writer, field_ty, field_val, initializer_type); @@ -1315,8 +1318,9 @@ pub const DeclGen = struct { const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); var eff_num_fields: usize = 0; - for (0..field_vals.len) |index| { - const field_ty = ty.structFieldType(index); + for (0..field_vals.len) |field_i| { + if (ty.structFieldIsComptime(field_i)) continue; + const field_ty = ty.structFieldType(field_i); if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; eff_num_fields += 1; @@ -1337,8 +1341,9 @@ pub const DeclGen = struct { var eff_index: usize = 0; var needs_closing_paren = false; - for (field_vals, 0..) |field_val, index| { - const field_ty = ty.structFieldType(index); + for (field_vals, 0..) |field_val, field_i| { + if (ty.structFieldIsComptime(field_i)) continue; + const field_ty = ty.structFieldType(field_i); if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; const cast_context = IntCastContext{ .value = .{ .value = field_val } }; @@ -1365,8 +1370,9 @@ pub const DeclGen = struct { try writer.writeByte('('); // a << a_off | b << b_off | c << c_off var empty = true; - for (field_vals, 0..) |field_val, index| { - const field_ty = ty.structFieldType(index); + for (field_vals, 0..) 
|field_val, field_i| { + if (ty.structFieldIsComptime(field_i)) continue; + const field_ty = ty.structFieldType(field_i); if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; if (!empty) try writer.writeAll(" | "); @@ -1398,9 +1404,9 @@ pub const DeclGen = struct { try writer.writeByte(')'); } - const index = ty.unionTagFieldIndex(union_obj.tag, dg.module).?; - const field_ty = ty.unionFields().values()[index].ty; - const field_name = ty.unionFields().keys()[index]; + const field_i = ty.unionTagFieldIndex(union_obj.tag, dg.module).?; + const field_ty = ty.unionFields().values()[field_i].ty; + const field_name = ty.unionFields().keys()[field_i]; if (ty.containerLayout() == .Packed) { if (field_ty.hasRuntimeBits()) { if (field_ty.isPtrAtRuntime()) { @@ -1419,32 +1425,27 @@ pub const DeclGen = struct { return; } - var has_payload_init = false; try writer.writeByte('{'); if (ty.unionTagTypeSafety()) |tag_ty| { const layout = ty.unionGetLayout(target); if (layout.tag_size != 0) { - try writer.writeAll(".tag = "); + try writer.writeAll(" .tag = "); try dg.renderValue(writer, tag_ty, union_obj.tag, initializer_type); - try writer.writeAll(", "); - } - if (!ty.unionHasAllZeroBitFieldTypes()) { - try writer.writeAll(".payload = {"); - has_payload_init = true; } + if (ty.unionHasAllZeroBitFieldTypes()) return try writer.writeByte('}'); + if (layout.tag_size != 0) try writer.writeByte(','); + try writer.writeAll(" .payload = {"); } - - var it = ty.unionFields().iterator(); if (field_ty.hasRuntimeBits()) { - try writer.print(".{ } = ", .{fmtIdent(field_name)}); + try writer.print(" .{ } = ", .{fmtIdent(field_name)}); try dg.renderValue(writer, field_ty, union_obj.val, initializer_type); - } else while (it.next()) |field| { - if (!field.value_ptr.ty.hasRuntimeBits()) continue; - try writer.print(".{ } = ", .{fmtIdent(field.key_ptr.*)}); - try dg.renderValue(writer, field.value_ptr.ty, Value.undef, initializer_type); + try writer.writeByte(' '); + } else for (ty.unionFields().values()) |field| { + if (!field.ty.hasRuntimeBits()) continue; + try dg.renderValue(writer, field.ty, Value.undef, initializer_type); break; } - if (has_payload_init) try writer.writeByte('}'); + if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}'); try writer.writeByte('}'); }, @@ -1585,7 +1586,7 @@ pub const DeclGen = struct { c_value: struct { f: *Function, value: CValue, - v: Vectorizer, + v: Vectorize, }, value: struct { value: Value, @@ -1766,7 +1767,6 @@ pub const DeclGen = struct { fmtIdent("payload"), fmtIdent(ident), }), - .bytes => |bytes| return w.writeAll(bytes), } } @@ -1795,11 +1795,6 @@ pub const DeclGen = struct { fmtIdent("payload"), fmtIdent(ident), }), - .bytes => |bytes| { - try w.writeAll("(*"); - try w.writeAll(bytes); - return w.writeByte(')'); - }, } } @@ -1812,7 +1807,7 @@ pub const DeclGen = struct { fn writeCValueDerefMember(dg: *DeclGen, writer: anytype, c_value: CValue, member: CValue) !void { switch (c_value) { .none, .constant, .field, .undef => unreachable, - .new_local, .local, .arg, .arg_array, .decl, .identifier, .payload_identifier, .bytes => { + .new_local, .local, .arg, .arg_array, .decl, .identifier, .payload_identifier => { try dg.writeCValue(writer, c_value); try writer.writeAll("->"); }, @@ -3073,15 +3068,17 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [ const inst_ty = f.air.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); + const writer = f.object.writer(); const local = try 
f.allocLocal(inst, inst_ty); + const a = try Assignment.start(f, writer, inst_ty); try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = "); + try a.assign(f, writer); if (is_ptr) { try writer.writeByte('&'); try f.writeCValueDerefMember(writer, operand, .{ .identifier = field_name }); } else try f.writeCValueMember(writer, operand, .{ .identifier = field_name }); - try writer.writeAll(";\n"); + try a.end(f, writer); return local; } @@ -3097,29 +3094,16 @@ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { const index = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const target = f.object.dg.module.getTarget(); - const is_array = lowersToArray(inst_ty, target); - - const local = try f.allocLocal(inst, inst_ty); const writer = f.object.writer(); - if (is_array) { - try writer.writeAll("memcpy("); - try f.writeCValue(writer, local, .FunctionArgument); - try writer.writeAll(", "); - } else { - try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = "); - } + const local = try f.allocLocal(inst, inst_ty); + const a = try Assignment.start(f, writer, inst_ty); + try f.writeCValue(writer, local, .Other); + try a.assign(f, writer); try f.writeCValue(writer, ptr, .Other); try writer.writeByte('['); try f.writeCValue(writer, index, .Other); try writer.writeByte(']'); - if (is_array) { - try writer.writeAll(", sizeof("); - try f.renderType(writer, inst_ty); - try writer.writeAll("))"); - } - try writer.writeAll(";\n"); + try a.end(f, writer); return local; } @@ -3129,35 +3113,32 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.air.typeOfIndex(inst); const ptr_ty = f.air.typeOf(bin_op.lhs); - const child_ty = ptr_ty.childType(); + const elem_ty = ptr_ty.childType(); + const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(); const ptr = try f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const writer = f.object.writer(); - const local = try f.allocLocal(inst, f.air.typeOfIndex(inst)); + const local = try f.allocLocal(inst, inst_ty); + const a = try Assignment.start(f, writer, inst_ty); try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = "); - - if (!child_ty.hasRuntimeBitsIgnoreComptime()) { - try f.writeCValue(writer, ptr, .Initializer); - try writer.writeAll(";\n"); - return local; - } - + try a.assign(f, writer); try writer.writeByte('('); try f.renderType(writer, inst_ty); - try writer.writeAll(")&("); - if (ptr_ty.ptrSize() == .One) { + try writer.writeByte(')'); + if (elem_has_bits) try writer.writeByte('&'); + if (elem_has_bits and ptr_ty.ptrSize() == .One) { // It's a pointer to an array, so we need to de-reference. 
try f.writeCValueDeref(writer, ptr); - } else { - try f.writeCValue(writer, ptr, .Other); + } else try f.writeCValue(writer, ptr, .Other); + if (elem_has_bits) { + try writer.writeByte('['); + try f.writeCValue(writer, index, .Other); + try writer.writeByte(']'); } - try writer.writeAll(")["); - try f.writeCValue(writer, index, .Other); - try writer.writeAll("];\n"); + try a.end(f, writer); return local; } @@ -3173,29 +3154,16 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { const index = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const target = f.object.dg.module.getTarget(); - const is_array = lowersToArray(inst_ty, target); - - const local = try f.allocLocal(inst, inst_ty); const writer = f.object.writer(); - if (is_array) { - try writer.writeAll("memcpy("); - try f.writeCValue(writer, local, .FunctionArgument); - try writer.writeAll(", "); - } else { - try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = "); - } - try f.writeCValue(writer, slice, .Other); - try writer.writeAll(".ptr["); + const local = try f.allocLocal(inst, inst_ty); + const a = try Assignment.start(f, writer, inst_ty); + try f.writeCValue(writer, local, .Other); + try a.assign(f, writer); + try f.writeCValueMember(writer, slice, .{ .identifier = "ptr" }); + try writer.writeByte('['); try f.writeCValue(writer, index, .Other); try writer.writeByte(']'); - if (is_array) { - try writer.writeAll(", sizeof("); - try f.renderType(writer, inst_ty); - try writer.writeAll("))"); - } - try writer.writeAll(";\n"); + try a.end(f, writer); return local; } @@ -3203,25 +3171,28 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; + const inst_ty = f.air.typeOfIndex(inst); const slice_ty = f.air.typeOf(bin_op.lhs); - const child_ty = slice_ty.elemType2(); + const elem_ty = slice_ty.elemType2(); + const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(); + const slice = try f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const writer = f.object.writer(); - const local = try f.allocLocal(inst, f.air.typeOfIndex(inst)); + const local = try f.allocLocal(inst, inst_ty); + const a = try Assignment.start(f, writer, inst_ty); try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = "); - if (child_ty.hasRuntimeBitsIgnoreComptime()) try writer.writeByte('&'); - try f.writeCValue(writer, slice, .Other); - try writer.writeAll(".ptr"); - if (child_ty.hasRuntimeBitsIgnoreComptime()) { + try a.assign(f, writer); + if (elem_has_bits) try writer.writeByte('&'); + try f.writeCValueMember(writer, slice, .{ .identifier = "ptr" }); + if (elem_has_bits) { try writer.writeByte('['); try f.writeCValue(writer, index, .Other); try writer.writeByte(']'); } - try writer.writeAll(";\n"); + try a.end(f, writer); return local; } @@ -3237,29 +3208,16 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { const index = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const target = f.object.dg.module.getTarget(); - const is_array = lowersToArray(inst_ty, target); - - const local = try f.allocLocal(inst, inst_ty); const writer = f.object.writer(); - if (is_array) { - try writer.writeAll("memcpy("); - try f.writeCValue(writer, local, .FunctionArgument); - try writer.writeAll(", "); - } else { - try f.writeCValue(writer, local, 
.Other); - try writer.writeAll(" = "); - } + const local = try f.allocLocal(inst, inst_ty); + const a = try Assignment.start(f, writer, inst_ty); + try f.writeCValue(writer, local, .Other); + try a.assign(f, writer); try f.writeCValue(writer, array, .Other); try writer.writeByte('['); try f.writeCValue(writer, index, .Other); try writer.writeByte(']'); - if (is_array) { - try writer.writeAll(", sizeof("); - try f.renderType(writer, inst_ty); - try writer.writeAll("))"); - } - try writer.writeAll(";\n"); + try a.end(f, writer); return local; } @@ -3343,7 +3301,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const local = try f.allocLocal(inst, src_ty); - const v = try Vectorizer.start(f, inst, writer, ptr_ty); + const v = try Vectorize.start(f, inst, writer, ptr_ty); if (need_memcpy) { try writer.writeAll("memcpy("); @@ -3484,12 +3442,13 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const v = try Vectorizer.start(f, inst, writer, operand_ty); + const v = try Vectorize.start(f, inst, writer, operand_ty); + const a = try Assignment.start(f, writer, scalar_ty); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); - try writer.writeAll(" = "); + try a.assign(f, writer); try f.renderIntCast(writer, inst_scalar_ty, operand, v, scalar_ty, .Other); - try writer.writeAll(";\n"); + try a.end(f, writer); try v.end(f, inst, writer); return local; @@ -3513,7 +3472,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const v = try Vectorizer.start(f, inst, writer, operand_ty); + const v = try Vectorize.start(f, inst, writer, operand_ty); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); @@ -3564,13 +3523,13 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("zig_shr_"); try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty); if (c_bits == 128) { - try writer.print("(zig_bitcast_i{d}(", .{c_bits}); + try writer.print("(zig_bitCast_i{d}(", .{c_bits}); } else { try writer.print("((int{d}_t)", .{c_bits}); } try writer.print("zig_shl_u{d}(", .{c_bits}); if (c_bits == 128) { - try writer.print("zig_bitcast_u{d}(", .{c_bits}); + try writer.print("zig_bitCast_u{d}(", .{c_bits}); } else { try writer.print("(uint{d}_t)", .{c_bits}); } @@ -3597,10 +3556,11 @@ fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const inst_ty = f.air.typeOfIndex(inst); const local = try f.allocLocal(inst, inst_ty); + const a = try Assignment.start(f, writer, inst_ty); try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = "); + try a.assign(f, writer); try f.writeCValue(writer, operand, .Other); - try writer.writeAll(";\n"); + try a.end(f, writer); return local; } @@ -3632,8 +3592,13 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue { const src_val_is_undefined = if (f.air.value(bin_op.rhs)) |v| v.isUndefDeep() else false; if (src_val_is_undefined) { - try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - return try storeUndefined(f, ptr_info.pointee_type, ptr_val); + if (ptr_info.host_size == 0) { + try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); + return try storeUndefined(f, ptr_info.pointee_type, ptr_val); + } else if (!f.wantSafety()) { + try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); + return .none; + } } const target = f.object.dg.module.getTarget(); @@ -3646,7 
+3611,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const writer = f.object.writer(); - const v = try Vectorizer.start(f, inst, writer, ptr_ty); + const v = try Vectorize.start(f, inst, writer, ptr_ty); if (need_memcpy) { // For this memcpy to safely work we need the rhs to have the same @@ -3775,7 +3740,7 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: const w = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const v = try Vectorizer.start(f, inst, w, operand_ty); + const v = try Vectorize.start(f, inst, w, operand_ty); try f.writeCValueMember(w, local, .{ .field = 1 }); try v.elem(f, w); try w.writeAll(" = zig_"); @@ -3811,7 +3776,7 @@ fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const v = try Vectorizer.start(f, inst, writer, operand_ty); + const v = try Vectorize.start(f, inst, writer, operand_ty); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); try writer.writeAll(" = "); @@ -3846,7 +3811,7 @@ fn airBinOp( const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const v = try Vectorizer.start(f, inst, writer, operand_ty); + const v = try Vectorize.start(f, inst, writer, operand_ty); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); try writer.writeAll(" = "); @@ -3893,7 +3858,7 @@ fn airCmpOp( const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const v = try Vectorizer.start(f, inst, writer, operand_ty); + const v = try Vectorize.start(f, inst, writer, operand_ty); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); try writer.writeAll(" = "); @@ -3942,7 +3907,7 @@ fn airEquality( try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - if (operand_ty.zigTypeTag() == .Optional and !operand_ty.isPtrLikeOptional()) { + if (operand_ty.zigTypeTag() == .Optional and !operand_ty.optionalReprIsPayload()) { // (A && B) || (C && (A == B)) // A = lhs.is_null ; B = rhs.is_null ; C = rhs.payload == lhs.payload @@ -4008,7 +3973,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { const local = try f.allocLocal(inst, inst_ty); const writer = f.object.writer(); - const v = try Vectorizer.start(f, inst, writer, inst_ty); + const v = try Vectorize.start(f, inst, writer, inst_ty); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); try writer.writeAll(" = "); @@ -4059,7 +4024,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const v = try Vectorizer.start(f, inst, writer, inst_ty); + const v = try Vectorize.start(f, inst, writer, inst_ty); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); // (lhs <> rhs) ? 
lhs : rhs @@ -4091,21 +4056,29 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { const len = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const writer = f.object.writer(); const inst_ty = f.air.typeOfIndex(inst); - const local = try f.allocLocal(inst, inst_ty); - try f.writeCValue(writer, local, .Other); - try writer.writeAll(".ptr = ("); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - try f.renderType(writer, inst_ty.slicePtrFieldType(&buf)); - try writer.writeByte(')'); - try f.writeCValue(writer, ptr, .Other); - try writer.writeAll("; "); - try f.writeCValue(writer, local, .Other); - try writer.writeAll(".len = "); - try f.writeCValue(writer, len, .Initializer); - try writer.writeAll(";\n"); + const ptr_ty = inst_ty.slicePtrFieldType(&buf); + const writer = f.object.writer(); + const local = try f.allocLocal(inst, inst_ty); + { + const a = try Assignment.start(f, writer, ptr_ty); + try f.writeCValueMember(writer, local, .{ .identifier = "ptr" }); + try a.assign(f, writer); + try writer.writeByte('('); + try f.renderType(writer, ptr_ty); + try writer.writeByte(')'); + try f.writeCValue(writer, ptr, .Other); + try a.end(f, writer); + } + { + const a = try Assignment.start(f, writer, Type.usize); + try f.writeCValueMember(writer, local, .{ .identifier = "len" }); + try a.assign(f, writer); + try f.writeCValue(writer, len, .Other); + try a.end(f, writer); + } return local; } @@ -4346,10 +4319,10 @@ fn lowerTry( operand: Air.Inst.Ref, body: []const Air.Inst.Index, err_union_ty: Type, - operand_is_ptr: bool, + is_ptr: bool, ) !CValue { const err_union = try f.resolveInst(operand); - const result_ty = f.air.typeOfIndex(inst); + const inst_ty = f.air.typeOfIndex(inst); const liveness_condbr = f.liveness.getCondBr(inst); const writer = f.object.writer(); const payload_ty = err_union_ty.errorUnionPayload(); @@ -4358,7 +4331,7 @@ fn lowerTry( if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) { try writer.writeAll("if ("); if (!payload_has_bits) { - if (operand_is_ptr) + if (is_ptr) try f.writeCValueDeref(writer, err_union) else try f.writeCValue(writer, err_union, .Other); @@ -4367,7 +4340,7 @@ fn lowerTry( // Remember we must avoid calling reap() twice for the same operand // in this function. 
try reap(f, inst, &.{operand}); - if (operand_is_ptr or isByRef(err_union_ty)) + if (is_ptr) try f.writeCValueDerefMember(writer, err_union, .{ .identifier = "error" }) else try f.writeCValueMember(writer, err_union, .{ .identifier = "error" }); @@ -4384,7 +4357,7 @@ fn lowerTry( } if (!payload_has_bits) { - if (!operand_is_ptr) { + if (!is_ptr) { return .none; } else { return err_union; @@ -4397,26 +4370,15 @@ fn lowerTry( return .none; } - const target = f.object.dg.module.getTarget(); - const is_array = lowersToArray(payload_ty, target); - const local = try f.allocLocal(inst, result_ty); - if (is_array) { - try writer.writeAll("memcpy("); - try f.writeCValue(writer, local, .FunctionArgument); - try writer.writeAll(", "); - try f.writeCValueMember(writer, err_union, .{ .identifier = "payload" }); - try writer.writeAll(", sizeof("); - try f.renderType(writer, payload_ty); - try writer.writeAll("));\n"); - } else { - try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = "); - if (operand_is_ptr or isByRef(payload_ty)) { - try writer.writeByte('&'); - try f.writeCValueDerefMember(writer, err_union, .{ .identifier = "payload" }); - } else try f.writeCValueMember(writer, err_union, .{ .identifier = "payload" }); - try writer.writeAll(";\n"); - } + const local = try f.allocLocal(inst, inst_ty); + const a = try Assignment.start(f, writer, inst_ty); + try f.writeCValue(writer, local, .Other); + try a.assign(f, writer); + if (is_ptr) { + try writer.writeByte('&'); + try f.writeCValueDerefMember(writer, err_union, .{ .identifier = "payload" }); + } else try f.writeCValueMember(writer, err_union, .{ .identifier = "payload" }); + try a.end(f, writer); return local; } @@ -4428,25 +4390,15 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !CValue { // If result is .none then the value of the block is unused. 
if (result != .none) { + const operand_ty = f.air.typeOf(branch.operand); const operand = try f.resolveInst(branch.operand); try reap(f, inst, &.{branch.operand}); - const operand_ty = f.air.typeOf(branch.operand); - const target = f.object.dg.module.getTarget(); - if (lowersToArray(operand_ty, target)) { - try writer.writeAll("memcpy("); - try f.writeCValue(writer, result, .FunctionArgument); - try writer.writeAll(", "); - try f.writeCValue(writer, operand, .FunctionArgument); - try writer.writeAll(", sizeof("); - try f.renderType(writer, operand_ty); - try writer.writeAll("))"); - } else { - try f.writeCValue(writer, result, .Other); - try writer.writeAll(" = "); - try f.writeCValue(writer, operand, .Other); - } - try writer.writeAll(";\n"); + const a = try Assignment.start(f, writer, operand_ty); + try f.writeCValue(writer, result, .Other); + try a.assign(f, writer); + try f.writeCValue(writer, operand, .Other); + try a.end(f, writer); } try writer.print("goto zig_block_{d};\n", .{block.block_id}); @@ -4532,7 +4484,7 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { } try writer.writeAll(" = "); if (need_bitcasts) { - try writer.writeAll("zig_bitcast_"); + try writer.writeAll("zig_bitCast_"); try f.object.dg.renderCTypeForBuiltinFnName(writer, wrap_cty.?.toUnsigned()); try writer.writeByte('('); } @@ -4544,7 +4496,7 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.dg.renderTypeForBuiltinFnName(writer, info_ty); try writer.writeByte('('); if (need_bitcasts) { - try writer.writeAll("zig_bitcast_"); + try writer.writeAll("zig_bitCast_"); try f.object.dg.renderCTypeForBuiltinFnName(writer, wrap_cty.?); try writer.writeByte('('); } @@ -4771,7 +4723,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { if (f.wantSafety()) { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - try f.writeCValue(writer, .{ .undef = inst_ty }, .Initializer); + try f.writeCValue(writer, .{ .undef = inst_ty }, .Other); try writer.writeAll(";\n"); } break :local local; @@ -4806,7 +4758,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("\")"); if (f.wantSafety()) { try writer.writeAll(" = "); - try f.writeCValue(writer, .{ .undef = output_ty }, .Initializer); + try f.writeCValue(writer, .{ .undef = output_ty }, .Other); } try writer.writeAll(";\n"); } @@ -4840,7 +4792,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("\")"); } try writer.writeAll(" = "); - try f.writeCValue(writer, input_val, .Initializer); + try f.writeCValue(writer, input_val, .Other); try writer.writeAll(";\n"); } } @@ -5072,8 +5024,8 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { } const inst_ty = f.air.typeOfIndex(inst); - const local = try f.allocLocal(inst, inst_ty); const writer = f.object.writer(); + const local = try f.allocLocal(inst, inst_ty); if (opt_ty.optionalReprIsPayload()) { try f.writeCValue(writer, local, .Other); @@ -5083,24 +5035,11 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { return local; } - const target = f.object.dg.module.getTarget(); - const is_array = lowersToArray(inst_ty, target); - - if (is_array) { - try writer.writeAll("memcpy("); - try f.writeCValue(writer, local, .FunctionArgument); - try writer.writeAll(", "); - } else { - try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = "); - } + const a = try Assignment.start(f, writer, inst_ty); + try f.writeCValue(writer, local, .Other); + try a.assign(f, writer); try 
f.writeCValueMember(writer, operand, .{ .identifier = "payload" }); - if (is_array) { - try writer.writeAll(", sizeof("); - try f.renderType(writer, inst_ty); - try writer.writeAll("))"); - } - try writer.writeAll(";\n"); + try a.end(f, writer); return local; } @@ -5193,6 +5132,7 @@ fn fieldLocation( if (container_ty.structFieldIsComptime(next_field_index)) continue; const field_ty = container_ty.structFieldType(next_field_index); if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + break .{ .field = if (container_ty.isSimpleTuple()) .{ .field = next_field_index } else @@ -5437,13 +5377,17 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); try writer.writeByte('('); } - try writer.writeAll("zig_shr_"); - try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); - try writer.writeByte('('); + if (bit_offset_val_pl.data > 0) { + try writer.writeAll("zig_shr_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); + try writer.writeByte('('); + } try f.writeCValue(writer, struct_byval, .Other); - try writer.writeAll(", "); - try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); - try writer.writeByte(')'); + if (bit_offset_val_pl.data > 0) { + try writer.writeAll(", "); + try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + try writer.writeByte(')'); + } if (cant_cast) try writer.writeByte(')'); try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits); try writer.writeAll(");\n"); @@ -5473,9 +5417,9 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const local = try f.allocLocal(inst, inst_ty); try writer.writeAll("memcpy(&"); - try f.writeCValue(writer, local, .FunctionArgument); + try f.writeCValue(writer, local, .Other); try writer.writeAll(", &"); - try f.writeCValue(writer, operand_lval, .FunctionArgument); + try f.writeCValue(writer, operand_lval, .Other); try writer.writeAll(", sizeof("); try f.renderType(writer, inst_ty); try writer.writeAll("));\n"); @@ -5496,20 +5440,11 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { }; const local = try f.allocLocal(inst, inst_ty); - if (lowersToArray(inst_ty, target)) { - try writer.writeAll("memcpy("); - try f.writeCValue(writer, local, .FunctionArgument); - try writer.writeAll(", "); - try f.writeCValueMember(writer, struct_byval, field_name); - try writer.writeAll(", sizeof("); - try f.renderType(writer, inst_ty); - try writer.writeAll("))"); - } else { - try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = "); - try f.writeCValueMember(writer, struct_byval, field_name); - } - try writer.writeAll(";\n"); + const a = try Assignment.start(f, writer, inst_ty); + try f.writeCValue(writer, local, .Other); + try a.assign(f, writer); + try f.writeCValueMember(writer, struct_byval, field_name); + try a.end(f, writer); return local; } @@ -5554,33 +5489,31 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const operand_ty = f.air.typeOf(ty_op.operand); - const operand_is_ptr = operand_ty.zigTypeTag() == .Pointer; - const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const error_union_ty = if (is_ptr) operand_ty.childType() else operand_ty; + const writer = f.object.writer(); if (!error_union_ty.errorUnionPayload().hasRuntimeBits()) { if (!is_ptr) return .none; - const w 
= f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - try f.writeCValue(w, local, .Other); - try w.writeAll(" = ("); - try f.renderType(w, inst_ty); - try w.writeByte(')'); - try f.writeCValue(w, operand, .Initializer); - try w.writeAll(";\n"); + try f.writeCValue(writer, local, .Other); + try writer.writeAll(" = ("); + try f.renderType(writer, inst_ty); + try writer.writeByte(')'); + try f.writeCValue(writer, operand, .Initializer); + try writer.writeAll(";\n"); return local; } - const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); + const a = try Assignment.start(f, writer, inst_ty); try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = "); - if (is_ptr) try writer.writeByte('&'); - if (operand_is_ptr) - try f.writeCValueDerefMember(writer, operand, .{ .identifier = "payload" }) - else - try f.writeCValueMember(writer, operand, .{ .identifier = "payload" }); - try writer.writeAll(";\n"); + try a.assign(f, writer); + if (is_ptr) { + try writer.writeByte('&'); + try f.writeCValueDerefMember(writer, operand, .{ .identifier = "payload" }); + } else try f.writeCValueMember(writer, operand, .{ .identifier = "payload" }); + try a.end(f, writer); return local; } @@ -5588,40 +5521,29 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.air.typeOfIndex(inst); + const repr_is_payload = inst_ty.optionalReprIsPayload(); + const payload_ty = f.air.typeOf(ty_op.operand); const payload = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); + const writer = f.object.writer(); - - if (inst_ty.optionalReprIsPayload()) { - const local = try f.allocLocal(inst, inst_ty); - try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = "); - try f.writeCValue(writer, payload, .Other); - try writer.writeAll(";\n"); - return local; - } - - const payload_ty = f.air.typeOf(ty_op.operand); - const target = f.object.dg.module.getTarget(); - const is_array = lowersToArray(payload_ty, target); - const local = try f.allocLocal(inst, inst_ty); - if (!is_array) { - try f.writeCValue(writer, local, .Other); - try writer.writeAll(".payload = "); + { + const a = try Assignment.start(f, writer, payload_ty); + if (repr_is_payload) + try f.writeCValue(writer, local, .Other) + else + try f.writeCValueMember(writer, local, .{ .identifier = "payload" }); + try a.assign(f, writer); try f.writeCValue(writer, payload, .Other); - try writer.writeAll("; "); + try a.end(f, writer); } - try f.writeCValue(writer, local, .Other); - try writer.writeAll(".is_null = false;\n"); - if (is_array) { - try writer.writeAll("memcpy("); - try f.writeCValueMember(writer, local, .{ .identifier = "payload" }); - try writer.writeAll(", "); - try f.writeCValue(writer, payload, .FunctionArgument); - try writer.writeAll(", sizeof("); - try f.renderType(writer, payload_ty); - try writer.writeAll("));\n"); + if (!repr_is_payload) { + const a = try Assignment.start(f, writer, Type.bool); + try f.writeCValueMember(writer, local, .{ .identifier = "is_null" }); + try a.assign(f, writer); + try f.object.dg.renderValue(writer, Type.bool, Value.false, .Other); + try a.end(f, writer); } return local; } @@ -5629,29 +5551,32 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const writer = f.object.writer(); - const operand = try 
f.resolveInst(ty_op.operand); + const inst_ty = f.air.typeOfIndex(inst); + const payload_ty = inst_ty.errorUnionPayload(); + const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(); + const err_ty = inst_ty.errorUnionSet(); + const err = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const error_union_ty = f.air.typeOfIndex(inst); - const payload_ty = error_union_ty.errorUnionPayload(); - const local = try f.allocLocal(inst, error_union_ty); - if (!payload_ty.hasRuntimeBits()) { - try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = "); - try f.writeCValue(writer, operand, .Other); - try writer.writeAll(";\n"); - return local; + const writer = f.object.writer(); + const local = try f.allocLocal(inst, inst_ty); + if (!repr_is_err) { + const a = try Assignment.start(f, writer, payload_ty); + try f.writeCValueMember(writer, local, .{ .identifier = "payload" }); + try a.assign(f, writer); + try f.object.dg.renderValue(writer, payload_ty, Value.undef, .Other); + try a.end(f, writer); } - { - // TODO: set the payload to undefined - //try f.writeCValue(writer, local, .Other); + const a = try Assignment.start(f, writer, err_ty); + if (repr_is_err) + try f.writeCValue(writer, local, .Other) + else + try f.writeCValueMember(writer, local, .{ .identifier = "error" }); + try a.assign(f, writer); + try f.writeCValue(writer, err, .Other); + try a.end(f, writer); } - try f.writeCValue(writer, local, .Other); - try writer.writeAll(".error = "); - try f.writeCValue(writer, operand, .Other); - try writer.writeAll(";\n"); return local; } @@ -5711,29 +5636,28 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.air.typeOfIndex(inst); const payload_ty = inst_ty.errorUnionPayload(); const payload = try f.resolveInst(ty_op.operand); + const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(); + const err_ty = inst_ty.errorUnionSet(); try reap(f, inst, &.{ty_op.operand}); - const target = f.object.dg.module.getTarget(); - const is_array = lowersToArray(payload_ty, target); - const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - if (!is_array) { - try f.writeCValue(writer, local, .Other); - try writer.writeAll(".payload = "); - try f.writeCValue(writer, payload, .Other); - try writer.writeAll("; "); - } - try f.writeCValue(writer, local, .Other); - try writer.writeAll(".error = 0;\n"); - if (is_array) { - try writer.writeAll("memcpy("); + if (!repr_is_err) { + const a = try Assignment.start(f, writer, payload_ty); try f.writeCValueMember(writer, local, .{ .identifier = "payload" }); - try writer.writeAll(", "); - try f.writeCValue(writer, payload, .FunctionArgument); - try writer.writeAll(", sizeof("); - try f.renderType(writer, payload_ty); - try writer.writeAll("));\n"); + try a.assign(f, writer); + try f.writeCValue(writer, payload, .Other); + try a.end(f, writer); + } + { + const a = try Assignment.start(f, writer, err_ty); + if (repr_is_err) + try f.writeCValue(writer, local, .Other) + else + try f.writeCValueMember(writer, local, .{ .identifier = "error" }); + try a.assign(f, writer); + try f.object.dg.renderValue(writer, err_ty, Value.zero, .Other); + try a.end(f, writer); } return local; } @@ -5831,11 +5755,10 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty); try writer.writeByte('('); } - try writer.writeAll("__"); + try writer.writeAll("zig_"); try writer.writeAll(operation); try 
writer.writeAll(compilerRtAbbrev(operand_ty, target)); try writer.writeAll(compilerRtAbbrev(inst_ty, target)); - if (inst_ty.isRuntimeFloat() and operand_ty.isRuntimeFloat()) try writer.writeByte('2'); try writer.writeByte('('); try f.writeCValue(writer, operand, .FunctionArgument); try writer.writeByte(')'); @@ -5885,7 +5808,7 @@ fn airUnBuiltinCall( const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const v = try Vectorizer.start(f, inst, writer, operand_ty); + const v = try Vectorize.start(f, inst, writer, operand_ty); if (!ref_ret) { try f.writeCValue(writer, local, .Other); try v.elem(f, writer); @@ -5934,7 +5857,7 @@ fn airBinBuiltinCall( const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); if (is_big) try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const v = try Vectorizer.start(f, inst, writer, operand_ty); + const v = try Vectorize.start(f, inst, writer, operand_ty); if (!ref_ret) { try f.writeCValue(writer, local, .Other); try v.elem(f, writer); @@ -5982,7 +5905,7 @@ fn airCmpBuiltinCall( const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const v = try Vectorizer.start(f, inst, writer, operand_ty); + const v = try Vectorize.start(f, inst, writer, operand_ty); if (!ref_ret) { try f.writeCValue(writer, local, .Other); try v.elem(f, writer); @@ -6023,18 +5946,33 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue const ptr = try f.resolveInst(extra.ptr); const expected_value = try f.resolveInst(extra.expected_value); const new_value = try f.resolveInst(extra.new_value); - try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value }); - const writer = f.object.writer(); const ptr_ty = f.air.typeOf(extra.ptr); + const ty = ptr_ty.childType(); + + const writer = f.object.writer(); + const new_value_mat = try Materialize.start(f, inst, writer, ty, new_value); + try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value }); + + const target = f.object.dg.module.getTarget(); + var repr_pl = Type.Payload.Bits{ + .base = .{ .tag = .int_unsigned }, + .data = @intCast(u16, ty.abiSize(target) * 8), + }; + const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty; + const local = try f.allocLocal(inst, inst_ty); if (inst_ty.isPtrLikeOptional()) { - try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = "); - try f.writeCValue(writer, expected_value, .Initializer); - try writer.writeAll(";\n"); + { + const a = try Assignment.start(f, writer, ty); + try f.writeCValue(writer, local, .Other); + try a.assign(f, writer); + try f.writeCValue(writer, expected_value, .Other); + try a.end(f, writer); + } + try writer.writeAll("if ("); try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor}); - try f.renderType(writer, ptr_ty.childType()); + try f.renderType(writer, ty); try writer.writeByte(')'); if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); @@ -6042,45 +5980,62 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try writer.writeAll(", "); try f.writeCValue(writer, local, .FunctionArgument); try writer.writeAll(", "); - try f.writeCValue(writer, new_value, .FunctionArgument); + try new_value_mat.mat(f, writer); try writer.writeAll(", "); try writeMemoryOrder(writer, extra.successOrder()); try writer.writeAll(", "); try writeMemoryOrder(writer, extra.failureOrder()); try writer.writeAll(", "); - try 
f.object.dg.renderTypeForBuiltinFnName(writer, ptr_ty.childType()); + try f.object.dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeAll(", "); + try f.object.dg.renderType(writer, repr_ty); try writer.writeByte(')'); try writer.writeAll(") {\n"); f.object.indent_writer.pushIndent(); - try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = NULL;\n"); + { + const a = try Assignment.start(f, writer, ty); + try f.writeCValue(writer, local, .Other); + try a.assign(f, writer); + try writer.writeAll("NULL"); + try a.end(f, writer); + } f.object.indent_writer.popIndent(); try writer.writeAll("}\n"); } else { - try f.writeCValue(writer, local, .Other); - try writer.writeAll(".payload = "); - try f.writeCValue(writer, expected_value, .Other); - try writer.writeAll(";\n"); - try f.writeCValue(writer, local, .Other); - try writer.print(".is_null = zig_cmpxchg_{s}((zig_atomic(", .{flavor}); - try f.renderType(writer, ptr_ty.childType()); - try writer.writeByte(')'); - if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); - try writer.writeAll(" *)"); - try f.writeCValue(writer, ptr, .Other); - try writer.writeAll(", "); - try f.writeCValueMember(writer, local, .{ .identifier = "payload" }); - try writer.writeAll(", "); - try f.writeCValue(writer, new_value, .FunctionArgument); - try writer.writeAll(", "); - try writeMemoryOrder(writer, extra.successOrder()); - try writer.writeAll(", "); - try writeMemoryOrder(writer, extra.failureOrder()); - try writer.writeAll(", "); - try f.object.dg.renderTypeForBuiltinFnName(writer, ptr_ty.childType()); - try writer.writeByte(')'); - try writer.writeAll(";\n"); + { + const a = try Assignment.start(f, writer, ty); + try f.writeCValueMember(writer, local, .{ .identifier = "payload" }); + try a.assign(f, writer); + try f.writeCValue(writer, expected_value, .Other); + try a.end(f, writer); + } + { + const a = try Assignment.start(f, writer, Type.bool); + try f.writeCValueMember(writer, local, .{ .identifier = "is_null" }); + try a.assign(f, writer); + try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor}); + try f.renderType(writer, ty); + try writer.writeByte(')'); + if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); + try writer.writeAll(" *)"); + try f.writeCValue(writer, ptr, .Other); + try writer.writeAll(", "); + try f.writeCValueMember(writer, local, .{ .identifier = "payload" }); + try writer.writeAll(", "); + try new_value_mat.mat(f, writer); + try writer.writeAll(", "); + try writeMemoryOrder(writer, extra.successOrder()); + try writer.writeAll(", "); + try writeMemoryOrder(writer, extra.failureOrder()); + try writer.writeAll(", "); + try f.object.dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeAll(", "); + try f.object.dg.renderType(writer, repr_ty); + try writer.writeByte(')'); + try a.end(f, writer); + } } + try new_value_mat.end(f, inst); if (f.liveness.isUnused(inst)) { try freeLocal(f, inst, local.new_local, 0); @@ -6095,35 +6050,50 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data; const inst_ty = f.air.typeOfIndex(inst); const ptr_ty = f.air.typeOf(pl_op.operand); + const ty = ptr_ty.childType(); const ptr = try f.resolveInst(pl_op.operand); const operand = try f.resolveInst(extra.operand); - try reap(f, inst, &.{ pl_op.operand, extra.operand }); - const writer = f.object.writer(); - const local = try f.allocLocal(inst, inst_ty); + const writer = f.object.writer(); + const operand_mat = try Materialize.start(f, 
inst, writer, ty, operand); + try reap(f, inst, &.{ pl_op.operand, extra.operand }); + + const target = f.object.dg.module.getTarget(); + var repr_pl = Type.Payload.Bits{ + .base = .{ .tag = .int_unsigned }, + .data = @intCast(u16, ty.abiSize(target) * 8), + }; + const is_float = ty.isRuntimeFloat(); + const is_128 = repr_pl.data == 128; + const repr_ty = if (is_float) Type.initPayload(&repr_pl.base) else ty; + + const local = try f.allocLocal(inst, inst_ty); + try writer.print("zig_atomicrmw_{s}", .{toAtomicRmwSuffix(extra.op())}); + if (is_float) try writer.writeAll("_float") else if (is_128) try writer.writeAll("_int128"); + try writer.writeByte('('); try f.writeCValue(writer, local, .Other); - try writer.print(" = zig_atomicrmw_{s}((", .{toAtomicRmwSuffix(extra.op())}); - switch (extra.op()) { - else => { - try writer.writeAll("zig_atomic("); - try f.renderType(writer, ptr_ty.elemType()); - try writer.writeByte(')'); - }, - .Nand, .Min, .Max => { - // These are missing from stdatomic.h, so no atomic types for now. - try f.renderType(writer, ptr_ty.elemType()); - }, - } + try writer.writeAll(", ("); + const use_atomic = switch (extra.op()) { + else => true, + // These are missing from stdatomic.h, so no atomic types unless a fallback is used. + .Nand, .Min, .Max => is_float or is_128, + }; + if (use_atomic) try writer.writeAll("zig_atomic("); + try f.renderType(writer, ty); + if (use_atomic) try writer.writeByte(')'); if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); - try f.writeCValue(writer, operand, .FunctionArgument); + try operand_mat.mat(f, writer); try writer.writeAll(", "); try writeMemoryOrder(writer, extra.ordering()); try writer.writeAll(", "); - try f.object.dg.renderTypeForBuiltinFnName(writer, ptr_ty.childType()); + try f.object.dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeAll(", "); + try f.object.dg.renderType(writer, repr_ty); try writer.writeAll(");\n"); + try operand_mat.end(f, inst); if (f.liveness.isUnused(inst)) { try freeLocal(f, inst, local.new_local, 0); @@ -6138,14 +6108,23 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { const ptr = try f.resolveInst(atomic_load.ptr); try reap(f, inst, &.{atomic_load.ptr}); const ptr_ty = f.air.typeOf(atomic_load.ptr); + const ty = ptr_ty.childType(); + + const target = f.object.dg.module.getTarget(); + var repr_pl = Type.Payload.Bits{ + .base = .{ .tag = .int_unsigned }, + .data = @intCast(u16, ty.abiSize(target) * 8), + }; + const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty; const inst_ty = f.air.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = zig_atomic_load((zig_atomic("); - try f.renderType(writer, ptr_ty.elemType()); + try writer.writeAll("zig_atomic_load("); + try f.writeCValue(writer, local, .Other); + try writer.writeAll(", (zig_atomic("); + try f.renderType(writer, ty); try writer.writeByte(')'); if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); @@ -6153,7 +6132,9 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(", "); try writeMemoryOrder(writer, atomic_load.order); try writer.writeAll(", "); - try f.object.dg.renderTypeForBuiltinFnName(writer, ptr_ty.childType()); + try f.object.dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeAll(", "); + 
try f.object.dg.renderType(writer, repr_ty); try writer.writeAll(");\n"); return local; @@ -6162,22 +6143,35 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue { const bin_op = f.air.instructions.items(.data)[inst].bin_op; const ptr_ty = f.air.typeOf(bin_op.lhs); + const ty = ptr_ty.childType(); const ptr = try f.resolveInst(bin_op.lhs); const element = try f.resolveInst(bin_op.rhs); - try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); + const writer = f.object.writer(); + const element_mat = try Materialize.start(f, inst, writer, ty, element); + try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); + + const target = f.object.dg.module.getTarget(); + var repr_pl = Type.Payload.Bits{ + .base = .{ .tag = .int_unsigned }, + .data = @intCast(u16, ty.abiSize(target) * 8), + }; + const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty; try writer.writeAll("zig_atomic_store((zig_atomic("); - try f.renderType(writer, ptr_ty.elemType()); + try f.renderType(writer, ty); try writer.writeByte(')'); if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); - try f.writeCValue(writer, element, .FunctionArgument); + try element_mat.mat(f, writer); try writer.print(", {s}, ", .{order}); - try f.object.dg.renderTypeForBuiltinFnName(writer, ptr_ty.childType()); + try f.object.dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeAll(", "); + try f.object.dg.renderType(writer, repr_ty); try writer.writeAll(");\n"); + try element_mat.end(f, inst); return .none; } @@ -6262,19 +6256,19 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { const union_ptr = try f.resolveInst(bin_op.lhs); const new_tag = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const writer = f.object.writer(); - const union_ty = f.air.typeOf(bin_op.lhs).childType(); const target = f.object.dg.module.getTarget(); + const union_ty = f.air.typeOf(bin_op.lhs).childType(); const layout = union_ty.unionGetLayout(target); if (layout.tag_size == 0) return .none; + const tag_ty = union_ty.unionTagTypeSafety().?; - try writer.writeByte('('); - try f.writeCValue(writer, union_ptr, .Other); - try writer.writeAll(")->tag = "); + const writer = f.object.writer(); + const a = try Assignment.start(f, writer, tag_ty); + try f.writeCValueDerefMember(writer, union_ptr, .{ .identifier = "tag" }); + try a.assign(f, writer); try f.writeCValue(writer, new_tag, .Other); - try writer.writeAll(";\n"); - + try a.end(f, writer); return .none; } @@ -6284,20 +6278,19 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const un_ty = f.air.typeOf(ty_op.operand); - + const union_ty = f.air.typeOf(ty_op.operand); const target = f.object.dg.module.getTarget(); - const layout = un_ty.unionGetLayout(target); + const layout = union_ty.unionGetLayout(target); if (layout.tag_size == 0) return .none; const inst_ty = f.air.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); + const a = try Assignment.start(f, writer, inst_ty); try f.writeCValue(writer, local, .Other); - - try writer.writeAll(" = "); - try f.writeCValue(writer, operand, .Other); - try writer.writeAll(".tag;\n"); + try a.assign(f, writer); + try f.writeCValueMember(writer, operand, .{ .identifier = 
"tag" }); + try a.end(f, writer); return local; } @@ -6350,7 +6343,7 @@ fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const v = try Vectorizer.start(f, inst, writer, inst_ty); + const v = try Vectorize.start(f, inst, writer, inst_ty); if (need_memcpy) try writer.writeAll("memcpy(&"); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); @@ -6380,7 +6373,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const v = try Vectorizer.start(f, inst, writer, inst_ty); + const v = try Vectorize.start(f, inst, writer, inst_ty); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); try writer.writeAll(" = "); @@ -6547,7 +6540,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { }, .Initializer); try writer.writeAll(";\n"); - const v = try Vectorizer.start(f, inst, writer, operand_ty); + const v = try Vectorize.start(f, inst, writer, operand_ty); try f.writeCValue(writer, accum, .Other); switch (op) { .float_op => |func| { @@ -6621,87 +6614,38 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { switch (inst_ty.zigTypeTag()) { .Array, .Vector => { const elem_ty = inst_ty.childType(); - - const is_array = lowersToArray(elem_ty, target); - const need_memcpy = is_array; - if (need_memcpy) { - for (resolved_elements, 0..) |element, i| { - try writer.writeAll("memcpy("); - try f.writeCValue(writer, local, .Other); - try writer.print("[{d}]", .{i}); - try writer.writeAll(", "); - try f.writeCValue(writer, element, .Other); - try writer.writeAll(", sizeof("); - try f.renderType(writer, elem_ty); - try writer.writeAll("))"); - try writer.writeAll(";\n"); - } - assert(inst_ty.sentinel() == null); - } else { - for (resolved_elements, 0..) |element, i| { - try f.writeCValue(writer, local, .Other); - try writer.print("[{d}] = ", .{i}); - try f.writeCValue(writer, element, .Other); - try writer.writeAll(";\n"); - } - if (inst_ty.sentinel()) |sentinel| { - try f.writeCValue(writer, local, .Other); - try writer.print("[{d}] = ", .{resolved_elements.len}); - try f.object.dg.renderValue(writer, elem_ty, sentinel, .Other); - try writer.writeAll(";\n"); - } + const a = try Assignment.init(f, elem_ty); + for (resolved_elements, 0..) |element, i| { + try a.restart(f, writer); + try f.writeCValue(writer, local, .Other); + try writer.print("[{d}]", .{i}); + try a.assign(f, writer); + try f.writeCValue(writer, element, .Other); + try a.end(f, writer); + } + if (inst_ty.sentinel()) |sentinel| { + try a.restart(f, writer); + try f.writeCValue(writer, local, .Other); + try writer.print("[{d}]", .{resolved_elements.len}); + try a.assign(f, writer); + try f.object.dg.renderValue(writer, elem_ty, sentinel, .Other); + try a.end(f, writer); } }, .Struct => switch (inst_ty.containerLayout()) { - .Auto, .Extern => { - try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = ("); - try f.renderType(writer, inst_ty); - try writer.writeAll(")"); - try writer.writeByte('{'); - var empty = true; - for (elements, resolved_elements, 0..) |element, resolved_element, field_i| { - if (inst_ty.structFieldValueComptime(field_i)) |_| continue; + .Auto, .Extern => for (resolved_elements, 0..) 
|element, field_i| { + if (inst_ty.structFieldIsComptime(field_i)) continue; + const field_ty = inst_ty.structFieldType(field_i); + if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; - if (!empty) try writer.writeAll(", "); - - const field_name: CValue = if (inst_ty.isSimpleTuple()) - .{ .field = field_i } - else - .{ .identifier = inst_ty.structFieldName(field_i) }; - try writer.writeByte('.'); - try f.object.dg.writeCValue(writer, field_name); - try writer.writeAll(" = "); - - const element_ty = f.air.typeOf(element); - try f.writeCValue(writer, switch (element_ty.zigTypeTag()) { - .Array => .{ .undef = element_ty }, - else => resolved_element, - }, .Initializer); - empty = false; - } - try writer.writeAll("};\n"); - - for (elements, resolved_elements, 0..) |element, resolved_element, field_i| { - if (inst_ty.structFieldValueComptime(field_i)) |_| continue; - - const element_ty = f.air.typeOf(element); - if (element_ty.zigTypeTag() != .Array) continue; - - const field_name: CValue = if (inst_ty.isSimpleTuple()) - .{ .field = field_i } - else - .{ .identifier = inst_ty.structFieldName(field_i) }; - - try writer.writeAll(";\n"); - try writer.writeAll("memcpy("); - try f.writeCValueMember(writer, local, field_name); - try writer.writeAll(", "); - try f.writeCValue(writer, resolved_element, .FunctionArgument); - try writer.writeAll(", sizeof("); - try f.renderType(writer, element_ty); - try writer.writeAll("));\n"); - } + const a = try Assignment.start(f, writer, field_ty); + try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple()) + .{ .field = field_i } + else + .{ .identifier = inst_ty.structFieldName(field_i) }); + try a.assign(f, writer); + try f.writeCValue(writer, element, .Other); + try a.end(f, writer); }, .Packed => { try f.writeCValue(writer, local, .Other); @@ -6718,8 +6662,9 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); var empty = true; - for (0..elements.len) |index| { - const field_ty = inst_ty.structFieldType(index); + for (0..elements.len) |field_i| { + if (inst_ty.structFieldIsComptime(field_i)) continue; + const field_ty = inst_ty.structFieldType(field_i); if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; if (!empty) { @@ -6730,8 +6675,9 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { empty = false; } empty = true; - for (resolved_elements, 0..) |element, index| { - const field_ty = inst_ty.structFieldType(index); + for (resolved_elements, 0..) 
|element, field_i| { + if (inst_ty.structFieldIsComptime(field_i)) continue; + const field_ty = inst_ty.structFieldType(field_i); if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; if (!empty) try writer.writeAll(", "); @@ -6784,6 +6730,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { const target = f.object.dg.module.getTarget(); const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field_name = union_obj.fields.keys()[extra.field_index]; + const payload_ty = f.air.typeOf(extra.init); const payload = try f.resolveInst(extra.init); try reap(f, inst, &.{extra.init}); @@ -6811,16 +6758,20 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { var int_pl: Value.Payload.U64 = undefined; const int_val = tag_val.enumToInt(tag_ty, &int_pl); - try f.writeCValue(writer, local, .Other); - try writer.print(".tag = {}; ", .{try f.fmtIntLiteral(tag_ty, int_val)}); + const a = try Assignment.start(f, writer, tag_ty); + try f.writeCValueMember(writer, local, .{ .identifier = "tag" }); + try a.assign(f, writer); + try writer.print("{}", .{try f.fmtIntLiteral(tag_ty, int_val)}); + try a.end(f, writer); } break :field .{ .payload_identifier = field_name }; } else .{ .identifier = field_name }; + const a = try Assignment.start(f, writer, payload_ty); try f.writeCValueMember(writer, local, field); - try writer.writeAll(" = "); + try a.assign(f, writer); try f.writeCValue(writer, payload, .Other); - try writer.writeAll(";\n"); + try a.end(f, writer); return local; } @@ -6887,7 +6838,7 @@ fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const local = try f.allocLocal(inst, operand_ty); - const v = try Vectorizer.start(f, inst, writer, operand_ty); + const v = try Vectorize.start(f, inst, writer, operand_ty); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); try writer.writeAll(" = zig_neg_"); @@ -6912,7 +6863,7 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVal const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const v = try Vectorizer.start(f, inst, writer, inst_ty); + const v = try Vectorize.start(f, inst, writer, inst_ty); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); try writer.writeAll(" = zig_libc_name_"); @@ -6940,7 +6891,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const v = try Vectorizer.start(f, inst, writer, inst_ty); + const v = try Vectorize.start(f, inst, writer, inst_ty); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); try writer.writeAll(" = zig_libc_name_"); @@ -6973,7 +6924,7 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const v = try Vectorizer.start(f, inst, writer, inst_ty); + const v = try Vectorize.start(f, inst, writer, inst_ty); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); try writer.writeAll(" = zig_libc_name_"); @@ -7480,10 +7431,96 @@ fn formatIntLiteral( try data.cty.renderLiteralSuffix(writer); } -const Vectorizer = struct { +const Materialize = struct { + local: CValue, + + pub fn start( + f: *Function, + inst: Air.Inst.Index, + writer: anytype, + ty: Type, + value: CValue, + ) !Materialize { + switch (value) { + .local_ref, .constant, .decl_ref, .undef => { + const local = try f.allocLocal(inst, ty); + + const a = try Assignment.start(f, 
writer, ty); + try f.writeCValue(writer, local, .Other); + try a.assign(f, writer); + try f.writeCValue(writer, value, .Other); + try a.end(f, writer); + + return .{ .local = local }; + }, + .new_local => |local| return .{ .local = .{ .local = local } }, + else => return .{ .local = value }, + } + } + + pub fn mat(self: Materialize, f: *Function, writer: anytype) !void { + try f.writeCValue(writer, self.local, .Other); + } + + pub fn end(self: Materialize, f: *Function, inst: Air.Inst.Index) !void { + switch (self.local) { + .new_local => |local| try freeLocal(f, inst, local, 0), + else => {}, + } + } +}; + +const Assignment = struct { + cty: CType.Index, + + pub fn init(f: *Function, ty: Type) !Assignment { + return .{ .cty = try f.typeToIndex(ty, .complete) }; + } + + pub fn start(f: *Function, writer: anytype, ty: Type) !Assignment { + const self = try init(f, ty); + try self.restart(f, writer); + return self; + } + + pub fn restart(self: Assignment, f: *Function, writer: anytype) !void { + switch (self.strategy(f)) { + .assign => {}, + .memcpy => try writer.writeAll("memcpy("), + } + } + + pub fn assign(self: Assignment, f: *Function, writer: anytype) !void { + switch (self.strategy(f)) { + .assign => try writer.writeAll(" = "), + .memcpy => try writer.writeAll(", "), + } + } + + pub fn end(self: Assignment, f: *Function, writer: anytype) !void { + switch (self.strategy(f)) { + .assign => {}, + .memcpy => { + try writer.writeAll(", sizeof("); + try f.renderCType(writer, self.cty); + try writer.writeAll("))"); + }, + } + try writer.writeAll(";\n"); + } + + fn strategy(self: Assignment, f: *Function) enum { assign, memcpy } { + return switch (f.indexToCType(self.cty).tag()) { + else => .assign, + .array, .vector => .memcpy, + }; + } +}; + +const Vectorize = struct { index: CValue = .none, - pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorizer { + pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize { return if (ty.zigTypeTag() == .Vector) index: { var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen() }; @@ -7504,7 +7541,7 @@ const Vectorizer = struct { } else .{}; } - pub fn elem(self: Vectorizer, f: *Function, writer: anytype) !void { + pub fn elem(self: Vectorize, f: *Function, writer: anytype) !void { if (self.index != .none) { try writer.writeByte('['); try f.writeCValue(writer, self.index, .Other); @@ -7512,7 +7549,7 @@ const Vectorizer = struct { } } - pub fn end(self: Vectorizer, f: *Function, inst: Air.Inst.Index, writer: anytype) !void { + pub fn end(self: Vectorize, f: *Function, inst: Air.Inst.Index, writer: anytype) !void { if (self.index != .none) { f.object.indent_writer.popIndent(); try writer.writeAll("}\n"); @@ -7521,11 +7558,6 @@ const Vectorizer = struct { } }; -fn isByRef(ty: Type) bool { - _ = ty; - return false; -} - const LowerFnRetTyBuffer = struct { names: [1][]const u8, types: [1]Type, @@ -7557,29 +7589,6 @@ fn lowersToArray(ty: Type, target: std.Target) bool { }; } -fn loweredArrayInfo(ty: Type, target: std.Target) ?Type.ArrayInfo { - if (!lowersToArray(ty, target)) return null; - - switch (ty.zigTypeTag()) { - .Array, .Vector => return ty.arrayInfo(), - else => { - const abi_size = ty.abiSize(target); - const abi_align = ty.abiAlignment(target); - return Type.ArrayInfo{ - .elem_type = switch (abi_align) { - 1 => Type.u8, - 2 => Type.u16, - 4 => Type.u32, - 8 => Type.u64, - 16 => Type.initTag(.u128), - else => unreachable, - }, - .len = @divExact(abi_size, 
abi_align), - }; - }, - } -} - fn reap(f: *Function, inst: Air.Inst.Index, operands: []const Air.Inst.Ref) !void { assert(operands.len <= Liveness.bpi - 1); var tomb_bits = f.liveness.getTombBits(inst); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 897a582952..76dabd3e9b 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -10135,14 +10135,14 @@ fn toLlvmAtomicRmwBinOp( ) llvm.AtomicRMWBinOp { return switch (op) { .Xchg => .Xchg, - .Add => if (is_float) llvm.AtomicRMWBinOp.FAdd else return .Add, - .Sub => if (is_float) llvm.AtomicRMWBinOp.FSub else return .Sub, + .Add => if (is_float) .FAdd else return .Add, + .Sub => if (is_float) .FSub else return .Sub, .And => .And, .Nand => .Nand, .Or => .Or, .Xor => .Xor, - .Max => if (is_signed) llvm.AtomicRMWBinOp.Max else return .UMax, - .Min => if (is_signed) llvm.AtomicRMWBinOp.Min else return .UMin, + .Max => if (is_float) .FMax else if (is_signed) .Max else return .UMax, + .Min => if (is_float) .FMin else if (is_signed) .Min else return .UMin, }; } diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index 55f0ba8963..0e5a2d0a7d 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -1436,6 +1436,8 @@ pub const AtomicRMWBinOp = enum(c_int) { UMin, FAdd, FSub, + FMax, + FMin, }; pub const TypeKind = enum(c_int) { diff --git a/src/target.zig b/src/target.zig index 76186db269..5e66c8f417 100644 --- a/src/target.zig +++ b/src/target.zig @@ -575,9 +575,6 @@ pub fn atomicPtrAlignment( .xtensa, => 32, - .aarch64, - .aarch64_be, - .aarch64_32, .amdgcn, .bpfel, .bpfeb, @@ -600,7 +597,12 @@ pub fn atomicPtrAlignment( .loongarch64, => 64, - .x86_64 => 128, + .aarch64, + .aarch64_be, + .aarch64_32, + => 128, + + .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64, }; var buffer: Type.Payload.Bits = undefined; diff --git a/test/behavior/align.zig b/test/behavior/align.zig index 1887f0a180..d72a3e6689 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -286,20 +286,11 @@ test "page aligned array on stack" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_llvm and - builtin.cpu.arch == .aarch64 and builtin.os.tag == .windows) - { + if (builtin.cpu.arch == .aarch64 and builtin.os.tag == .windows) { // https://github.com/ziglang/zig/issues/13679 return error.SkipZigTest; } - if (builtin.zig_backend == .stage2_c and - builtin.os.tag == .windows and builtin.cpu.arch == .aarch64) - { - // https://github.com/ziglang/zig/issues/13876 - return error.SkipZigTest; - } - // Large alignment value to make it hard to accidentally pass. var array align(0x1000) = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 }; var number1: u8 align(16) = 42; diff --git a/test/behavior/atomics.zig b/test/behavior/atomics.zig index 6b6e7c8430..656eee78f9 100644 --- a/test/behavior/atomics.zig +++ b/test/behavior/atomics.zig @@ -3,6 +3,13 @@ const builtin = @import("builtin"); const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; +const supports_128_bit_atomics = switch (builtin.cpu.arch) { + // TODO: Ideally this could be sync'd with the logic in Sema. 
+ .aarch64, .aarch64_be, .aarch64_32 => true, + .x86_64 => std.Target.x86.featureSetHas(builtin.cpu.features, .cx16), + else => false, +}; + test "cmpxchg" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO @@ -107,15 +114,14 @@ test "cmpxchg with ignored result" { } test "128-bit cmpxchg" { + if (!supports_128_bit_atomics) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.cpu.arch != .x86_64) return error.SkipZigTest; - if (comptime !std.Target.x86.featureSetHas(builtin.cpu.features, .cx16)) return error.SkipZigTest; - try test_u128_cmpxchg(); comptime try test_u128_cmpxchg(); } @@ -209,15 +215,7 @@ test "atomicrmw with floats" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) { - // TODO: test.c:34929:7: error: address argument to atomic operation must be a pointer to integer or pointer ('zig_f32 *' (aka 'float *') invalid - // when compiling with -std=c99 -pedantic - return error.SkipZigTest; - } - - if ((builtin.zig_backend == .stage2_llvm or builtin.zig_backend == .stage2_c) and - builtin.cpu.arch == .aarch64) - { + if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) { // https://github.com/ziglang/zig/issues/10627 return error.SkipZigTest; } @@ -234,6 +232,10 @@ fn testAtomicRmwFloat() !void { try expect(x == 6); _ = @atomicRmw(f32, &x, .Sub, 2, .SeqCst); try expect(x == 4); + _ = @atomicRmw(f32, &x, .Max, 13, .SeqCst); + try expect(x == 13); + _ = @atomicRmw(f32, &x, .Min, 42, .SeqCst); + try expect(x == 13); } test "atomicrmw with ints" { @@ -242,10 +244,6 @@ test "atomicrmw with ints" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c and builtin.cpu.arch == .aarch64) { - return error.SkipZigTest; - } - try testAtomicRmwInts(); comptime try testAtomicRmwInts(); } @@ -311,24 +309,25 @@ fn testAtomicRmwInt(comptime signedness: std.builtin.Signedness, comptime N: usi } test "atomicrmw with 128-bit ints" { - if (builtin.cpu.arch != .x86_64) { - // TODO: Ideally this could use target.atomicPtrAlignment and check for IntTooBig - return error.SkipZigTest; - } + if (!supports_128_bit_atomics) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO // TODO "ld.lld: undefined symbol: __sync_lock_test_and_set_16" on -mcpu x86_64 - if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; + if (builtin.cpu.arch == .x86_64 and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; + try testAtomicRmwInt128(.signed); try testAtomicRmwInt128(.unsigned); + comptime try testAtomicRmwInt128(.signed); comptime try testAtomicRmwInt128(.unsigned); } fn testAtomicRmwInt128(comptime signedness: std.builtin.Signedness) !void { + const uint = std.meta.Int(.unsigned, 128); const int = 
std.meta.Int(signedness, 128); - const initial: int = 0xaaaaaaaa_bbbbbbbb_cccccccc_dddddddd; + const initial: int = @bitCast(int, @as(uint, 0xaaaaaaaa_bbbbbbbb_cccccccc_dddddddd)); const replacement: int = 0x00000000_00000005_00000000_00000003; var x: int align(16) = initial; @@ -390,10 +389,6 @@ test "atomics with different types" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c and builtin.cpu.arch == .aarch64) { - return error.SkipZigTest; - } - try testAtomicsWithType(bool, true, false); try testAtomicsWithType(u1, 0, 1); diff --git a/test/behavior/builtin_functions_returning_void_or_noreturn.zig b/test/behavior/builtin_functions_returning_void_or_noreturn.zig index 3caf01542c..0d4598bd09 100644 --- a/test/behavior/builtin_functions_returning_void_or_noreturn.zig +++ b/test/behavior/builtin_functions_returning_void_or_noreturn.zig @@ -7,7 +7,6 @@ var x: u8 = 1; // This excludes builtin functions that return void or noreturn that cannot be tested. test { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 642d4cd18e..bdff7c4de4 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -1313,16 +1313,6 @@ test "cast f16 to wider types" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c) { - // TODO: test is failing - return error.SkipZigTest; - } - - if (builtin.os.tag == .windows and builtin.zig_backend == .stage2_c and builtin.cpu.arch == .aarch64) { - // TODO: test is failing - return error.SkipZigTest; - } - const S = struct { fn doTheTest() !void { var x: f16 = 1234.0; @@ -1342,18 +1332,6 @@ test "cast f128 to narrower types" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and - builtin.zig_backend == .stage2_c) - { - // https://github.com/ziglang/zig/issues/13876 - return error.SkipZigTest; - } - - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c) { - // TODO: test is failing - return error.SkipZigTest; - } - const S = struct { fn doTheTest() !void { var x: f128 = 1234.0; @@ -1444,11 +1422,6 @@ test "coerce between pointers of compatible differently-named floats" { return error.SkipZigTest; } - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c and builtin.cpu.arch == .aarch64) { - // TODO: test is failing - return error.SkipZigTest; - } - const F = switch (@typeInfo(c_longdouble).Float.bits) { 16 => f16, 32 => f32, diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index c4f1168efc..3fc2249ccc 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -535,18 +535,6 @@ test "another, possibly redundant, @fabs test" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return 
error.SkipZigTest; // TODO - if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and - builtin.zig_backend == .stage2_c) - { - // https://github.com/ziglang/zig/issues/13876 - return error.SkipZigTest; - } - - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c) { - // TODO: test is failing - return error.SkipZigTest; - } - try testFabsLegacy(f128, 12.0); comptime try testFabsLegacy(f128, 12.0); try testFabsLegacy(f64, 12.0); @@ -584,18 +572,6 @@ test "a third @fabs test, surely there should not be three fabs tests" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and - builtin.zig_backend == .stage2_c) - { - // https://github.com/ziglang/zig/issues/13876 - return error.SkipZigTest; - } - - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c) { - // TODO: test is failing - return error.SkipZigTest; - } - inline for ([_]type{ f16, f32, f64, f80, f128, c_longdouble }) |T| { // normals try expect(@fabs(@as(T, 1.0)) == 1.0); @@ -698,11 +674,6 @@ test "@floor f128" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c) { - // TODO: test is failing - return error.SkipZigTest; - } - try testFloorLegacy(f128, 12.0); comptime try testFloorLegacy(f128, 12.0); } @@ -793,11 +764,6 @@ test "@ceil f128" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c and builtin.cpu.arch == .x86_64) { - // TODO: test is failing - return error.SkipZigTest; - } - try testCeilLegacy(f128, 12.0); comptime try testCeilLegacy(f128, 12.0); } @@ -894,11 +860,6 @@ test "@trunc f128" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c) { - // TODO: test is failing - return error.SkipZigTest; - } - try testTruncLegacy(f128, 12.0); comptime try testTruncLegacy(f128, 12.0); } @@ -1010,18 +971,6 @@ test "negation f128" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and - builtin.zig_backend == .stage2_c) - { - // https://github.com/ziglang/zig/issues/13876 - return error.SkipZigTest; - } - - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c) { - // TODO: test is failing - return error.SkipZigTest; - } - const S = struct { fn doTheTest() !void { var a: f128 = 1; @@ -1065,11 +1014,6 @@ test "comptime fixed-width float zero divided by zero produces NaN" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c and builtin.cpu.arch == .x86_64) { - // TODO: test is failing - return error.SkipZigTest; - } - inline for (.{ f16, f32, f64, f80, f128 }) |F| { try expect(math.isNan(@as(F, 0) / @as(F, 0))); } diff --git a/test/behavior/math.zig b/test/behavior/math.zig 
index 5da24f7747..fddf7ee2e5 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -615,18 +615,6 @@ test "f128" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and - builtin.zig_backend == .stage2_c) - { - // https://github.com/ziglang/zig/issues/13876 - return error.SkipZigTest; - } - - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c) { - // TODO: test is failing - return error.SkipZigTest; - } - try test_f128(); comptime try test_f128(); } @@ -1297,18 +1285,6 @@ test "remainder division" { return error.SkipZigTest; } - if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows and - builtin.cpu.arch == .aarch64) - { - // https://github.com/ziglang/zig/issues/13876 - return error.SkipZigTest; - } - - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c) { - // TODO: test is failing - return error.SkipZigTest; - } - comptime try remdiv(f16); comptime try remdiv(f32); comptime try remdiv(f64); @@ -1340,11 +1316,6 @@ test "float remainder division using @rem" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c and builtin.cpu.arch == .x86_64) { - // TODO: test is failing - return error.SkipZigTest; - } - comptime try frem(f16); comptime try frem(f32); comptime try frem(f64); @@ -1387,11 +1358,6 @@ test "float modulo division using @mod" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c and builtin.cpu.arch == .x86_64) { - // TODO: test is failing - return error.SkipZigTest; - } - comptime try fmod(f16); comptime try fmod(f32); comptime try fmod(f64); @@ -1465,11 +1431,6 @@ test "@round f128" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c) { - // TODO: test is failing - return error.SkipZigTest; - } - try testRound(f128, 12.0); comptime try testRound(f128, 12.0); } @@ -1508,18 +1469,6 @@ test "NaN comparison" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows and - builtin.cpu.arch == .aarch64) - { - // https://github.com/ziglang/zig/issues/13876 - return error.SkipZigTest; - } - - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c) { - // TODO: test is failing - return error.SkipZigTest; - } - try testNanEqNan(f16); try testNanEqNan(f32); try testNanEqNan(f64); @@ -1589,18 +1538,6 @@ test "signed zeros are represented properly" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and - builtin.zig_backend == .stage2_c) - { - // TODO: test is failing - return error.SkipZigTest; - } - - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c and builtin.cpu.arch == .aarch64) { - // TODO: 
test is failing - return error.SkipZigTest; - } - const S = struct { fn doTheTest() !void { try testOne(f16); diff --git a/test/behavior/muladd.zig b/test/behavior/muladd.zig index abc251ecbd..aa36c99784 100644 --- a/test/behavior/muladd.zig +++ b/test/behavior/muladd.zig @@ -68,18 +68,6 @@ test "@mulAdd f128" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and - builtin.zig_backend == .stage2_c) - { - // https://github.com/ziglang/zig/issues/13876 - return error.SkipZigTest; - } - - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c) { - // TODO: test is failing - return error.SkipZigTest; - } - comptime try testMulAdd128(); try testMulAdd128(); } @@ -201,18 +189,6 @@ test "vector f128" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and - builtin.zig_backend == .stage2_c) - { - // https://github.com/ziglang/zig/issues/13876 - return error.SkipZigTest; - } - - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c and builtin.cpu.arch == .aarch64) { - // TODO: test is failing - return error.SkipZigTest; - } - comptime try vector128(); try vector128(); } diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig index 858d4f9c17..ca7d295dc1 100644 --- a/test/behavior/packed-struct.zig +++ b/test/behavior/packed-struct.zig @@ -615,3 +615,15 @@ test "pointer to container level packed struct field" { @ptrCast(*S, &S.arr[0]).other_bits.enable_3 = true; try expect(S.arr[0] == 0x10000000); } + +test "store undefined to packed result location" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + + var x: u4 = 0; + var s = packed struct { x: u4, y: u4 }{ .x = x, .y = if (x > 0) x else undefined }; + try expectEqual(x, s.x); +} diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 8875f60eb7..df115409df 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -96,18 +96,6 @@ test "vector float operators" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and - builtin.zig_backend == .stage2_c) - { - // https://github.com/ziglang/zig/issues/13876 - return error.SkipZigTest; - } - - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c) { - // TODO: test is failing - return error.SkipZigTest; - } - inline for ([_]type{ f16, f32, f64, f80, f128 }) |T| { const S = struct { fn doTheTest() !void { diff --git a/test/behavior/widening.zig b/test/behavior/widening.zig index 5638446cb8..a4c1d1f78e 100644 --- a/test/behavior/widening.zig +++ b/test/behavior/widening.zig @@ -43,18 +43,6 @@ test "float widening" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .windows and builtin.cpu.arch 
== .aarch64 and - builtin.zig_backend == .stage2_c) - { - // https://github.com/ziglang/zig/issues/13876 - return error.SkipZigTest; - } - - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c) { - // TODO: test is failing - return error.SkipZigTest; - } - var a: f16 = 12.34; var b: f32 = a; var c: f64 = b; @@ -75,18 +63,6 @@ test "float widening f16 to f128" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and - builtin.zig_backend == .stage2_c) - { - // https://github.com/ziglang/zig/issues/13876 - return error.SkipZigTest; - } - - if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c) { - // TODO: test is failing - return error.SkipZigTest; - } - var x: f16 = 12.34; var y: f128 = x; try expect(x == y); diff --git a/test/cases/compile_errors/atomicrmw_with_float_op_not_.Xchg_.Add_or_.Sub.zig b/test/cases/compile_errors/atomicrmw_with_float_op_not_.Xchg_.Add_.Sub_.Max_or_.Min.zig similarity index 88% rename from test/cases/compile_errors/atomicrmw_with_float_op_not_.Xchg_.Add_or_.Sub.zig rename to test/cases/compile_errors/atomicrmw_with_float_op_not_.Xchg_.Add_.Sub_.Max_or_.Min.zig index 80720fbca1..5714b323f1 100644 --- a/test/cases/compile_errors/atomicrmw_with_float_op_not_.Xchg_.Add_or_.Sub.zig +++ b/test/cases/compile_errors/atomicrmw_with_float_op_not_.Xchg_.Add_.Sub_.Max_or_.Min.zig @@ -7,4 +7,4 @@ export fn entry() void { // backend=stage2 // target=native // -// :3:30: error: @atomicRmw with float only allowed with .Xchg, .Add, and .Sub +// :3:30: error: @atomicRmw with float only allowed with .Xchg, .Add, .Sub, .Max, and .Min diff --git a/test/tests.zig b/test/tests.zig index 4b16ac50c7..3202d19b3e 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -962,13 +962,6 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step { if (test_target.use_llvm == false and mem.eql(u8, options.name, "std")) continue; - // TODO get std lib tests passing for the C backend - if (test_target.target.ofmt == std.Target.ObjectFormat.c and - mem.eql(u8, options.name, "std")) - { - continue; - } - const want_this_mode = for (options.optimize_modes) |m| { if (m == test_target.optimize_mode) break true; } else false; @@ -1033,6 +1026,8 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step { "-std=c99", "-pedantic", "-Werror", + // TODO stop violating these pedantic errors. spotted everywhere + "-Wno-builtin-requires-header", // TODO stop violating these pedantic errors. 
spotted on linux "-Wno-address-of-packed-member", "-Wno-gnu-folding-constant", @@ -1044,10 +1039,21 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step { }, }); compile_c.addIncludePath("lib"); // for zig.h - if (test_target.link_libc == false and test_target.target.getOsTag() == .windows) { - compile_c.subsystem = .Console; - compile_c.linkSystemLibrary("kernel32"); - compile_c.linkSystemLibrary("ntdll"); + if (test_target.target.getOsTag() == .windows) { + if (test_target.link_libc == false) { + compile_c.subsystem = .Console; + compile_c.linkSystemLibrary("kernel32"); + compile_c.linkSystemLibrary("ntdll"); + } + if (mem.eql(u8, options.name, "std")) { + if (test_target.link_libc == false) { + compile_c.linkSystemLibrary("shell32"); + compile_c.linkSystemLibrary("advapi32"); + } + compile_c.linkSystemLibrary("crypt32"); + compile_c.linkSystemLibrary("ws2_32"); + compile_c.linkSystemLibrary("ole32"); + } } const run = b.addRunArtifact(compile_c);
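// Illustrative sketch, not part of the patch above: with the zig_atomicrmw_*_float
// fallbacks added to the C backend and the FMax/FMin mappings added to the LLVM
// bindings, @atomicRmw now accepts .Max and .Min with float operands, which the
// updated testAtomicRmwFloat exercises. A minimal standalone version of that usage
// (test name and values here are illustrative only):
const std = @import("std");

test "@atomicRmw .Max/.Min on floats (sketch)" {
    var x: f32 = 4;
    // .Max stores max(current, operand): max(4, 13) == 13.
    _ = @atomicRmw(f32, &x, .Max, 13, .SeqCst);
    try std.testing.expect(x == 13);
    // .Min stores min(current, operand): min(13, 42) == 13, so x is unchanged.
    _ = @atomicRmw(f32, &x, .Min, 42, .SeqCst);
    try std.testing.expect(x == 13);
}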