diff --git a/lib/compiler_rt/divti3.zig b/lib/compiler_rt/divti3.zig index cd38d7a93e..31302aab4d 100644 --- a/lib/compiler_rt/divti3.zig +++ b/lib/compiler_rt/divti3.zig @@ -7,21 +7,8 @@ const common = @import("common.zig"); pub const panic = common.panic; comptime { - if (builtin.os.tag == .windows) { - switch (arch) { - .x86 => { - @export(__divti3, .{ .name = "__divti3", .linkage = common.linkage, .visibility = common.visibility }); - }, - .x86_64 => { - // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI - // that LLVM expects compiler-rt to have. - @export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = common.linkage, .visibility = common.visibility }); - }, - else => {}, - } - if (arch.isAARCH64()) { - @export(__divti3, .{ .name = "__divti3", .linkage = common.linkage, .visibility = common.visibility }); - } + if (common.want_windows_v2u64_abi) { + @export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = common.linkage, .visibility = common.visibility }); } else { @export(__divti3, .{ .name = "__divti3", .linkage = common.linkage, .visibility = common.visibility }); } @@ -31,7 +18,7 @@ pub fn __divti3(a: i128, b: i128) callconv(.C) i128 { return div(a, b); } -const v128 = @import("std").meta.Vector(2, u64); +const v128 = @Vector(2, u64); fn __divti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 { return @bitCast(v128, div(@bitCast(i128, a), @bitCast(i128, b))); diff --git a/lib/compiler_rt/fixunshfti.zig b/lib/compiler_rt/fixunshfti.zig index f7dcb3f790..0c67d4998a 100644 --- a/lib/compiler_rt/fixunshfti.zig +++ b/lib/compiler_rt/fixunshfti.zig @@ -16,7 +16,7 @@ pub fn __fixunshfti(a: f16) callconv(.C) u128 { return floatToInt(u128, a); } -const v2u64 = @import("std").meta.Vector(2, u64); +const v2u64 = @Vector(2, u64); fn __fixunshfti_windows_x86_64(a: f16) callconv(.C) v2u64 { return @bitCast(v2u64, floatToInt(u128, a)); diff --git a/lib/compiler_rt/udivmodei4.zig b/lib/compiler_rt/udivmodei4.zig 
index e57b5e4d04..de2427b79f 100644 --- a/lib/compiler_rt/udivmodei4.zig +++ b/lib/compiler_rt/udivmodei4.zig @@ -129,6 +129,8 @@ pub fn __umodei4(r_p: [*c]u32, u_p: [*c]const u32, v_p: [*c]const u32, bits: usi } test "__udivei4/__umodei4" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + const RndGen = std.rand.DefaultPrng; var rnd = RndGen.init(42); var i: usize = 10000; diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index 8410e25864..d222d6913b 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -1677,6 +1677,40 @@ pub const Mutable = struct { y.shiftRight(y.toConst(), norm_shift); } + /// If a is positive, this passes through to truncate. + /// If a is negative, then r is set to positive with the bit pattern ~(a - 1). + /// + /// Asserts `r` has enough storage to store the result. + /// The upper bound is `calcTwosCompLimbCount(a.len)`. + pub fn convertToTwosComplement(r: *Mutable, a: Const, signedness: Signedness, bit_count: usize) void { + if (a.positive) { + r.truncate(a, signedness, bit_count); + return; + } + + const req_limbs = calcTwosCompLimbCount(bit_count); + if (req_limbs == 0 or a.eqZero()) { + r.set(0); + return; + } + + const bit = @truncate(Log2Limb, bit_count - 1); + const signmask = @as(Limb, 1) << bit; + const mask = (signmask << 1) -% 1; + + r.addScalar(a.abs(), -1); + if (req_limbs > r.len) { + mem.set(Limb, r.limbs[r.len..req_limbs], 0); + } + + assert(r.limbs.len >= req_limbs); + r.len = req_limbs; + + llnot(r.limbs[0..r.len]); + r.limbs[r.len - 1] &= mask; + r.normalize(r.len); + } + /// Truncate an integer to a number of bits, following 2s-complement semantics. /// r may alias a. 
/// diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 774db2caa4..42b35281e0 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -3771,7 +3771,7 @@ pub fn doNotOptimizeAway(val: anytype) void { .Bool => doNotOptimizeAway(@boolToInt(val)), .Int => { const bits = t.Int.bits; - if (bits <= max_gp_register_bits) { + if (bits <= max_gp_register_bits and builtin.zig_backend != .stage2_c) { const val2 = @as( std.meta.Int(t.Int.signedness, @max(8, std.math.ceilPowerOfTwoAssert(u16, bits))), val, @@ -3783,18 +3783,24 @@ pub fn doNotOptimizeAway(val: anytype) void { } else doNotOptimizeAway(&val); }, .Float => { - if (t.Float.bits == 32 or t.Float.bits == 64) { + if ((t.Float.bits == 32 or t.Float.bits == 64) and builtin.zig_backend != .stage2_c) { asm volatile ("" : : [val] "rm" (val), ); } else doNotOptimizeAway(&val); }, - .Pointer => asm volatile ("" - : - : [val] "m" (val), - : "memory" - ), + .Pointer => { + if (builtin.zig_backend == .stage2_c) { + doNotOptimizeAwayC(val); + } else { + asm volatile ("" + : + : [val] "m" (val), + : "memory" + ); + } + }, .Array => { if (t.Array.len * @sizeOf(t.Array.child) <= 64) { for (val) |v| doNotOptimizeAway(v); @@ -3804,6 +3810,16 @@ pub fn doNotOptimizeAway(val: anytype) void { } } +/// .stage2_c doesn't support asm blocks yet, so use volatile stores instead +var deopt_target: if (builtin.zig_backend == .stage2_c) u8 else void = undefined; +fn doNotOptimizeAwayC(ptr: anytype) void { + const dest = @ptrCast(*volatile u8, &deopt_target); + for (asBytes(ptr)) |b| { + dest.* = b; + } + dest.* = 0; +} + test "doNotOptimizeAway" { comptime doNotOptimizeAway("test"); diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig index f261b9cae1..2a4d0d9a9b 100644 --- a/lib/std/os/windows.zig +++ b/lib/std/os/windows.zig @@ -1776,16 +1776,26 @@ pub fn UnlockFile( } } +/// This is a workaround for the C backend until zig has the ability to put +/// C code in inline assembly. 
+extern fn zig_x86_64_windows_teb() callconv(.C) *anyopaque; + pub fn teb() *TEB { return switch (native_arch) { .x86 => asm volatile ( \\ movl %%fs:0x18, %[ptr] : [ptr] "=r" (-> *TEB), ), - .x86_64 => asm volatile ( - \\ movq %%gs:0x30, %[ptr] - : [ptr] "=r" (-> *TEB), - ), + .x86_64 => blk: { + if (builtin.zig_backend == .stage2_c) { + break :blk @ptrCast(*TEB, @alignCast(@alignOf(TEB), zig_x86_64_windows_teb())); + } else { + break :blk asm volatile ( + \\ movq %%gs:0x30, %[ptr] + : [ptr] "=r" (-> *TEB), + ); + } + }, .aarch64 => asm volatile ( \\ mov %[ptr], x18 : [ptr] "=r" (-> *TEB), @@ -3455,6 +3465,21 @@ pub const ASSEMBLY_STORAGE_MAP = opaque {}; pub const FLS_CALLBACK_INFO = opaque {}; pub const RTL_BITMAP = opaque {}; pub const KAFFINITY = usize; +pub const KPRIORITY = i32; + +pub const CLIENT_ID = extern struct { + UniqueProcess: HANDLE, + UniqueThread: HANDLE, +}; + +pub const THREAD_BASIC_INFORMATION = extern struct { + ExitStatus: NTSTATUS, + TebBaseAddress: PVOID, + ClientId: CLIENT_ID, + AffinityMask: KAFFINITY, + Priority: KPRIORITY, + BasePriority: KPRIORITY, +}; pub const TEB = extern struct { Reserved1: [12]PVOID, diff --git a/lib/std/zig/system/x86.zig b/lib/std/zig/system/x86.zig index 66468ba6ff..873659e58c 100644 --- a/lib/std/zig/system/x86.zig +++ b/lib/std/zig/system/x86.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const builtin = @import("builtin"); const Target = std.Target; const CrossTarget = std.zig.CrossTarget; @@ -527,25 +528,43 @@ const CpuidLeaf = packed struct { edx: u32, }; +/// This is a workaround for the C backend until zig has the ability to put +/// C code in inline assembly. 
+extern fn zig_x86_cpuid(leaf_id: u32, subid: u32, eax: *u32, ebx: *u32, ecx: *u32, edx: *u32) callconv(.C) void; + fn cpuid(leaf_id: u32, subid: u32) CpuidLeaf { // valid for both x86 and x86_64 var eax: u32 = undefined; var ebx: u32 = undefined; var ecx: u32 = undefined; var edx: u32 = undefined; - asm volatile ("cpuid" - : [_] "={eax}" (eax), - [_] "={ebx}" (ebx), - [_] "={ecx}" (ecx), - [_] "={edx}" (edx), - : [_] "{eax}" (leaf_id), - [_] "{ecx}" (subid), - ); + + if (builtin.zig_backend == .stage2_c) { + zig_x86_cpuid(leaf_id, subid, &eax, &ebx, &ecx, &edx); + } else { + asm volatile ("cpuid" + : [_] "={eax}" (eax), + [_] "={ebx}" (ebx), + [_] "={ecx}" (ecx), + [_] "={edx}" (edx), + : [_] "{eax}" (leaf_id), + [_] "{ecx}" (subid), + ); + } + return .{ .eax = eax, .ebx = ebx, .ecx = ecx, .edx = edx }; } +/// This is a workaround for the C backend until zig has the ability to put +/// C code in inline assembly. +extern fn zig_x86_get_xcr0() callconv(.C) u32; + // Read control register 0 (XCR0). Used to detect features such as AVX. 
fn getXCR0() u32 { + if (builtin.zig_backend == .stage2_c) { + return zig_x86_get_xcr0(); + } + return asm volatile ( \\ xor %%ecx, %%ecx \\ xgetbv diff --git a/lib/zig.h b/lib/zig.h index cea9a0532a..baa220e744 100644 --- a/lib/zig.h +++ b/lib/zig.h @@ -6,6 +6,12 @@ #include #include +#if _MSC_VER +#include +#elif defined(__i386__) || defined(__x86_64__) +#include +#endif + #if !defined(__cplusplus) && __STDC_VERSION__ <= 201710L #if __STDC_VERSION__ >= 199901L #include @@ -38,6 +44,12 @@ typedef char bool; #define zig_threadlocal zig_threadlocal_unavailable #endif +#if _MSC_VER +#define zig_const_arr +#else +#define zig_const_arr static const +#endif + #if zig_has_attribute(naked) || defined(__GNUC__) #define zig_naked __attribute__((naked)) #elif defined(_MSC_VER) @@ -65,7 +77,7 @@ typedef char bool; #elif zig_has_attribute(aligned) #define zig_align(alignment) __attribute__((aligned(alignment))) #elif _MSC_VER -#define zig_align zig_align_unavailable +#define zig_align(alignment) __declspec(align(alignment)) #else #define zig_align zig_align_unavailable #endif @@ -73,7 +85,7 @@ typedef char bool; #if zig_has_attribute(aligned) #define zig_align_fn(alignment) __attribute__((aligned(alignment))) #elif _MSC_VER -#define zig_align_fn zig_align_fn_unavailable +#define zig_align_fn(alignment) #else #define zig_align_fn zig_align_fn_unavailable #endif @@ -92,6 +104,9 @@ typedef char bool; #if zig_has_attribute(alias) #define zig_export(sig, symbol, name) zig_extern sig __attribute__((alias(symbol))) +#elif _MSC_VER +#define zig_export(sig, symbol, name) sig;\ + __pragma(comment(linker, "/alternatename:" name "=" symbol )) #else #define zig_export(sig, symbol, name) __asm(name " = " symbol) #endif @@ -136,22 +151,25 @@ typedef char bool; #define zig_wasm_memory_grow(index, delta) zig_unimplemented() #endif +#define zig_concat(lhs, rhs) lhs##rhs +#define zig_expand_concat(lhs, rhs) zig_concat(lhs, rhs) + #if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) 
#include #define zig_atomic(type) _Atomic(type) -#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) atomic_compare_exchange_strong_explicit(obj, &(expected), desired, succ, fail) -#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) atomic_compare_exchange_weak_explicit (obj, &(expected), desired, succ, fail) -#define zig_atomicrmw_xchg(obj, arg, order) atomic_exchange_explicit (obj, arg, order) -#define zig_atomicrmw_add(obj, arg, order) atomic_fetch_add_explicit (obj, arg, order) -#define zig_atomicrmw_sub(obj, arg, order) atomic_fetch_sub_explicit (obj, arg, order) -#define zig_atomicrmw_or(obj, arg, order) atomic_fetch_or_explicit (obj, arg, order) -#define zig_atomicrmw_xor(obj, arg, order) atomic_fetch_xor_explicit (obj, arg, order) -#define zig_atomicrmw_and(obj, arg, order) atomic_fetch_and_explicit (obj, arg, order) -#define zig_atomicrmw_nand(obj, arg, order) __atomic_fetch_nand (obj, arg, order) -#define zig_atomicrmw_min(obj, arg, order) __atomic_fetch_min (obj, arg, order) -#define zig_atomicrmw_max(obj, arg, order) __atomic_fetch_max (obj, arg, order) -#define zig_atomic_store(obj, arg, order) atomic_store_explicit (obj, arg, order) -#define zig_atomic_load(obj, order) atomic_load_explicit (obj, order) +#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) atomic_compare_exchange_strong_explicit(obj, &(expected), desired, succ, fail) +#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) atomic_compare_exchange_weak_explicit (obj, &(expected), desired, succ, fail) +#define zig_atomicrmw_xchg(obj, arg, order, type) atomic_exchange_explicit (obj, arg, order) +#define zig_atomicrmw_add(obj, arg, order, type) atomic_fetch_add_explicit (obj, arg, order) +#define zig_atomicrmw_sub(obj, arg, order, type) atomic_fetch_sub_explicit (obj, arg, order) +#define zig_atomicrmw_or(obj, arg, order, type) atomic_fetch_or_explicit (obj, arg, order) +#define zig_atomicrmw_xor(obj, arg, order, type) 
atomic_fetch_xor_explicit (obj, arg, order) +#define zig_atomicrmw_and(obj, arg, order, type) atomic_fetch_and_explicit (obj, arg, order) +#define zig_atomicrmw_nand(obj, arg, order, type) __atomic_fetch_nand (obj, arg, order) +#define zig_atomicrmw_min(obj, arg, order, type) __atomic_fetch_min (obj, arg, order) +#define zig_atomicrmw_max(obj, arg, order, type) __atomic_fetch_max (obj, arg, order) +#define zig_atomic_store(obj, arg, order, type) atomic_store_explicit (obj, arg, order) +#define zig_atomic_load(obj, order, type) atomic_load_explicit (obj, order) #define zig_fence(order) atomic_thread_fence(order) #elif defined(__GNUC__) #define memory_order_relaxed __ATOMIC_RELAXED @@ -161,20 +179,43 @@ typedef char bool; #define memory_order_acq_rel __ATOMIC_ACQ_REL #define memory_order_seq_cst __ATOMIC_SEQ_CST #define zig_atomic(type) type -#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, false, succ, fail) -#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, true , succ, fail) -#define zig_atomicrmw_xchg(obj, arg, order) __atomic_exchange_n(obj, arg, order) -#define zig_atomicrmw_add(obj, arg, order) __atomic_fetch_add (obj, arg, order) -#define zig_atomicrmw_sub(obj, arg, order) __atomic_fetch_sub (obj, arg, order) -#define zig_atomicrmw_or(obj, arg, order) __atomic_fetch_or (obj, arg, order) -#define zig_atomicrmw_xor(obj, arg, order) __atomic_fetch_xor (obj, arg, order) -#define zig_atomicrmw_and(obj, arg, order) __atomic_fetch_and (obj, arg, order) -#define zig_atomicrmw_nand(obj, arg, order) __atomic_fetch_nand(obj, arg, order) -#define zig_atomicrmw_min(obj, arg, order) __atomic_fetch_min (obj, arg, order) -#define zig_atomicrmw_max(obj, arg, order) __atomic_fetch_max (obj, arg, order) -#define zig_atomic_store(obj, arg, order) __atomic_store_n (obj, arg, order) -#define zig_atomic_load(obj, order) __atomic_load_n (obj, order) 
+#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) __atomic_compare_exchange_n(obj, &(expected), desired, false, succ, fail) +#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) __atomic_compare_exchange_n(obj, &(expected), desired, true , succ, fail) +#define zig_atomicrmw_xchg(obj, arg, order, type) __atomic_exchange_n(obj, arg, order) +#define zig_atomicrmw_add(obj, arg, order, type) __atomic_fetch_add (obj, arg, order) +#define zig_atomicrmw_sub(obj, arg, order, type) __atomic_fetch_sub (obj, arg, order) +#define zig_atomicrmw_or(obj, arg, order, type) __atomic_fetch_or (obj, arg, order) +#define zig_atomicrmw_xor(obj, arg, order, type) __atomic_fetch_xor (obj, arg, order) +#define zig_atomicrmw_and(obj, arg, order, type) __atomic_fetch_and (obj, arg, order) +#define zig_atomicrmw_nand(obj, arg, order, type) __atomic_fetch_nand(obj, arg, order) +#define zig_atomicrmw_min(obj, arg, order, type) __atomic_fetch_min (obj, arg, order) +#define zig_atomicrmw_max(obj, arg, order, type) __atomic_fetch_max (obj, arg, order) +#define zig_atomic_store(obj, arg, order, type) __atomic_store_n (obj, arg, order) +#define zig_atomic_load(obj, order, type) __atomic_load_n (obj, order) #define zig_fence(order) __atomic_thread_fence(order) +#elif _MSC_VER && (_M_IX86 || _M_X64) +#define memory_order_relaxed 0 +#define memory_order_consume 1 +#define memory_order_acquire 2 +#define memory_order_release 3 +#define memory_order_acq_rel 4 +#define memory_order_seq_cst 5 +#define zig_atomic(type) type +#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) zig_expand_concat(zig_msvc_cmpxchg_, type)(obj, &(expected), desired) +#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) +#define zig_atomicrmw_xchg(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_xchg_, type)(obj, arg) +#define zig_atomicrmw_add(obj, arg, order, type) 
zig_expand_concat(zig_msvc_atomicrmw_add_, type)(obj, arg) +#define zig_atomicrmw_sub(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_sub_, type)(obj, arg) +#define zig_atomicrmw_or(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_or_, type)(obj, arg) +#define zig_atomicrmw_xor(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_xor_, type)(obj, arg) +#define zig_atomicrmw_and(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_and_, type)(obj, arg) +#define zig_atomicrmw_nand(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_nand_, type)(obj, arg) +#define zig_atomicrmw_min(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_min_, type)(obj, arg) +#define zig_atomicrmw_max(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_max_, type)(obj, arg) +#define zig_atomic_store(obj, arg, order, type) zig_expand_concat(zig_msvc_atomic_store_, type)(obj, arg) +#define zig_atomic_load(obj, order, type) zig_expand_concat(zig_msvc_atomic_load_, type)(obj) +#define zig_fence(order) __faststorefence() +// TODO: _MSC_VER && (_M_ARM || _M_ARM64) #else #define memory_order_relaxed 0 #define memory_order_consume 1 @@ -183,19 +224,19 @@ typedef char bool; #define memory_order_acq_rel 4 #define memory_order_seq_cst 5 #define zig_atomic(type) type -#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) zig_unimplemented() -#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) zig_unimplemented() -#define zig_atomicrmw_xchg(obj, arg, order) zig_unimplemented() -#define zig_atomicrmw_add(obj, arg, order) zig_unimplemented() -#define zig_atomicrmw_sub(obj, arg, order) zig_unimplemented() -#define zig_atomicrmw_or(obj, arg, order) zig_unimplemented() -#define zig_atomicrmw_xor(obj, arg, order) zig_unimplemented() -#define zig_atomicrmw_and(obj, arg, order) zig_unimplemented() -#define zig_atomicrmw_nand(obj, arg, order) zig_unimplemented() -#define zig_atomicrmw_min(obj, arg, order) zig_unimplemented() 
-#define zig_atomicrmw_max(obj, arg, order) zig_unimplemented() -#define zig_atomic_store(obj, arg, order) zig_unimplemented() -#define zig_atomic_load(obj, order) zig_unimplemented() +#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) zig_unimplemented() +#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) zig_unimplemented() +#define zig_atomicrmw_xchg(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_add(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_sub(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_or(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_xor(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_and(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_nand(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_min(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_max(obj, arg, order, type) zig_unimplemented() +#define zig_atomic_store(obj, arg, order, type) zig_unimplemented() +#define zig_atomic_load(obj, order, type) zig_unimplemented() #define zig_fence(order) zig_unimplemented() #endif @@ -209,9 +250,6 @@ typedef char bool; #define zig_noreturn void #endif -#define zig_concat(lhs, rhs) lhs##rhs -#define zig_expand_concat(lhs, rhs) zig_concat(lhs, rhs) - #define zig_bitSizeOf(T) (CHAR_BIT * sizeof(T)) typedef uintptr_t zig_usize; @@ -1141,6 +1179,8 @@ typedef signed __int128 zig_i128; #define zig_as_u128(hi, lo) ((zig_u128)(hi)<<64|(lo)) #define zig_as_i128(hi, lo) ((zig_i128)zig_as_u128(hi, lo)) +#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo) +#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo) #define zig_hi_u128(val) ((zig_u64)((val) >> 64)) #define zig_lo_u128(val) ((zig_u64)((val) >> 0)) #define zig_hi_i128(val) ((zig_i64)((val) >> 64)) @@ -1168,6 +1208,8 @@ typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128; #define zig_as_u128(hi, lo) ((zig_u128){ .h##i = 
(hi), .l##o = (lo) }) #define zig_as_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) }) +#define zig_as_constant_u128(hi, lo) { .h##i = (hi), .l##o = (lo) } +#define zig_as_constant_i128(hi, lo) { .h##i = (hi), .l##o = (lo) } #define zig_hi_u128(val) ((val).hi) #define zig_lo_u128(val) ((val).lo) #define zig_hi_i128(val) ((val).hi) @@ -1289,51 +1331,79 @@ static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) { } static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) { - if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.hi << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 }; - return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs }; + if (rhs == zig_as_u8(0)) return lhs; + if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - zig_as_u8(64)) }; + return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (zig_as_u8(64) - rhs) | lhs.lo >> rhs }; } static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) { - if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.hi << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 }; + if (rhs == zig_as_u8(0)) return lhs; + if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.lo << rhs, .lo = zig_minInt_u64 }; return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs }; } static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) { - if (rhs >= zig_as_u8(64)) return (zig_i128){ .hi = lhs.hi << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 }; + if (rhs == zig_as_u8(0)) return lhs; + if (rhs >= zig_as_u8(64)) return (zig_i128){ .hi = lhs.lo << rhs, .lo = zig_minInt_u64 }; return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs }; } static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) { zig_u128 res; - res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, zig_maxInt_u64); + res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, 64); 
return res; } static inline zig_i128 zig_add_i128(zig_i128 lhs, zig_i128 rhs) { zig_i128 res; - res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, zig_maxInt_u64); + res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, 64); return res; } static inline zig_u128 zig_sub_u128(zig_u128 lhs, zig_u128 rhs) { zig_u128 res; - res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, zig_maxInt_u64); + res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, 64); return res; } static inline zig_i128 zig_sub_i128(zig_i128 lhs, zig_i128 rhs) { zig_i128 res; - res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, zig_maxInt_u64); + res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, 64); return res; } -static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) { - return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), (((lhs.hi ^ rhs.hi) & zig_rem_i128(lhs, rhs).hi) < zig_as_i64(0)) ? zig_as_i128(0, 1) : zig_as_i128(0, 0)); +zig_extern zig_i128 __multi3(zig_i128 lhs, zig_i128 rhs); +static zig_i128 zig_mul_i128(zig_i128 lhs, zig_i128 rhs) { + return __multi3(lhs, rhs); +} + +zig_extern zig_u128 __udivti3(zig_u128 lhs, zig_u128 rhs); +static zig_u128 zig_div_trunc_u128(zig_u128 lhs, zig_u128 rhs) { + return __udivti3(lhs, rhs); +}; + +zig_extern zig_i128 __divti3(zig_i128 lhs, zig_i128 rhs); +static zig_i128 zig_div_trunc_i128(zig_i128 lhs, zig_i128 rhs) { + return __divti3(lhs, rhs); +}; + +zig_extern zig_u128 __umodti3(zig_u128 lhs, zig_u128 rhs); +static zig_u128 zig_rem_u128(zig_u128 lhs, zig_u128 rhs) { + return __umodti3(lhs, rhs); +} + +zig_extern zig_i128 __modti3(zig_i128 lhs, zig_i128 rhs); +static zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) { + return __modti3(lhs, rhs); } static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) { zig_i128 rem = zig_rem_i128(lhs, rhs); - return rem + (((lhs.hi ^ rhs.hi) & rem.hi) < zig_as_i64(0) ? 
rhs : zig_as_i128(0, 0)); + return zig_add_i128(rem, (((lhs.hi ^ rhs.hi) & rem.hi) < zig_as_i64(0) ? rhs : zig_as_i128(0, 0))); +} + +static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) { + return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_as_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_as_i128(0, 0)) < zig_as_i32(0))); } #endif /* zig_has_int128 */ @@ -1341,6 +1411,10 @@ static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) { #define zig_div_floor_u128 zig_div_trunc_u128 #define zig_mod_u128 zig_rem_u128 +static inline zig_u128 zig_nand_u128(zig_u128 lhs, zig_u128 rhs) { + return zig_not_u128(zig_and_u128(lhs, rhs), 128); +} + static inline zig_u128 zig_min_u128(zig_u128 lhs, zig_u128 rhs) { return zig_cmp_u128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs; } @@ -1358,7 +1432,7 @@ static inline zig_i128 zig_max_i128(zig_i128 lhs, zig_i128 rhs) { } static inline zig_i128 zig_shr_i128(zig_i128 lhs, zig_u8 rhs) { - zig_i128 sign_mask = zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? -zig_as_i128(0, 1) : zig_as_i128(0, 0); + zig_i128 sign_mask = zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? 
zig_sub_i128(zig_as_i128(0, 0), zig_as_i128(0, 1)) : zig_as_i128(0, 0); return zig_xor_i128(zig_bitcast_i128(zig_shr_u128(zig_bitcast_u128(zig_xor_i128(lhs, sign_mask)), rhs)), sign_mask); } @@ -1375,7 +1449,7 @@ static inline zig_u128 zig_shlw_u128(zig_u128 lhs, zig_u8 rhs, zig_u8 bits) { } static inline zig_i128 zig_shlw_i128(zig_i128 lhs, zig_u8 rhs, zig_u8 bits) { - return zig_wrap_i128(zig_bitcast_i128(zig_shl_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); + return zig_wrap_i128(zig_bitcast_i128(zig_shl_u128(zig_bitcast_u128(lhs), rhs)), bits); } static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { @@ -1394,6 +1468,17 @@ static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { return zig_wrap_i128(zig_bitcast_i128(zig_sub_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); } +#if _MSC_VER +static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) { + zig_u64 lo_carry; + zig_u64 lo = _umul128(lhs.lo, rhs.lo, &lo_carry); + zig_u64 hi = lhs.hi * rhs.lo + lhs.lo * rhs.hi + lo_carry; + return zig_as_u128(hi, lo); +} +#else +static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs); // TODO +#endif + static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { return zig_wrap_u128(zig_mul_u128(lhs, rhs), bits); } @@ -1404,18 +1489,6 @@ static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { #if zig_has_int128 -static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) { - *res = zig_shlw_u128(lhs, rhs, bits); - return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0); -} - -static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) { - *res = zig_shlw_i128(lhs, rhs, bits); - zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - zig_as_u8(1))); - return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != zig_as_i32(0) && - 
zig_cmp_i128(zig_and_i128(lhs, mask), mask) != zig_as_i32(0); -} - static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) zig_u128 full_res; @@ -1496,28 +1569,95 @@ static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_ #else /* zig_has_int128 */ -static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs) { - return zig_addo_u64(&res->hi, lhs.hi, rhs.hi, UINT64_MAX) | - zig_addo_u64(&res->hi, res->hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, UINT64_MAX)); +static inline bool zig_overflow_u128(bool overflow, zig_u128 full_res, zig_u8 bits) { + return overflow || + zig_cmp_u128(full_res, zig_minInt(u128, bits)) < zig_as_i32(0) || + zig_cmp_u128(full_res, zig_maxInt(u128, bits)) > zig_as_i32(0); } -static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs) { - return zig_subo_u64(&res->hi, lhs.hi, rhs.hi, UINT64_MAX) | - zig_subo_u64(&res->hi, res->hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, UINT64_MAX)); +static inline bool zig_overflow_i128(bool overflow, zig_i128 full_res, zig_u8 bits) { + return overflow || + zig_cmp_i128(full_res, zig_minInt(i128, bits)) < zig_as_i32(0) || + zig_cmp_i128(full_res, zig_maxInt(i128, bits)) > zig_as_i32(0); +} + +static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + zig_u128 full_res; + bool overflow = + zig_addo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) | + zig_addo_u64(&full_res.hi, full_res.hi, zig_addo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64); + *res = zig_wrap_u128(full_res, bits); + return zig_overflow_u128(overflow, full_res, bits); +} + +zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); +static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + zig_c_int overflow_int; + zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int); + *res = zig_wrap_i128(full_res, bits); + return 
zig_overflow_i128(overflow_int, full_res, bits); +} + +static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + zig_u128 full_res; + bool overflow = + zig_subo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) | + zig_subo_u64(&full_res.hi, full_res.hi, zig_subo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64); + *res = zig_wrap_u128(full_res, bits); + return zig_overflow_u128(overflow, full_res, bits); +} + +zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); +static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + zig_c_int overflow_int; + zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int); + *res = zig_wrap_i128(full_res, bits); + return zig_overflow_i128(overflow_int, full_res, bits); +} + +static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + *res = zig_mulw_u128(lhs, rhs, bits); + return zig_cmp_u128(*res, zig_as_u128(0, 0)) != zig_as_i32(0) && + zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0); +} + +zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); +static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + zig_c_int overflow_int; + zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int); + *res = zig_wrap_i128(full_res, bits); + return zig_overflow_i128(overflow_int, full_res, bits); } #endif /* zig_has_int128 */ +static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) { + *res = zig_shlw_u128(lhs, rhs, bits); + return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0); +} + +static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) { + *res = zig_shlw_i128(lhs, rhs, bits); + zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - zig_as_u8(1))); + return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != zig_as_i32(0) && + 
zig_cmp_i128(zig_and_i128(lhs, mask), mask) != zig_as_i32(0); +} + static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { zig_u128 res; if (zig_cmp_u128(rhs, zig_as_u128(0, bits)) >= zig_as_i32(0)) return zig_cmp_u128(lhs, zig_as_u128(0, 0)) != zig_as_i32(0) ? zig_maxInt(u128, bits) : lhs; + +#if zig_has_int128 return zig_shlo_u128(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u128, bits) : res; +#else + return zig_shlo_u128(&res, lhs, (zig_u8)rhs.lo, bits) ? zig_maxInt(u128, bits) : res; +#endif } static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { zig_i128 res; - if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_as_u128(0, bits)) < zig_as_i32(0) && !zig_shlo_i128(&res, lhs, rhs, bits)) return res; + if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_as_u128(0, bits)) < zig_as_i32(0) && !zig_shlo_i128(&res, lhs, zig_lo_i128(rhs), bits)) return res; return zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits); } @@ -1555,8 +1695,9 @@ static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { } static inline zig_u8 zig_clz_u128(zig_u128 val, zig_u8 bits) { + if (bits <= zig_as_u8(64)) return zig_clz_u64(zig_lo_u128(val), bits); if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - zig_as_u8(64)); - return zig_clz_u64(zig_lo_u128(val), zig_as_u8(64)) + zig_as_u8(64); + return zig_clz_u64(zig_lo_u128(val), zig_as_u8(64)) + (bits - zig_as_u8(64)); } static inline zig_u8 zig_clz_i128(zig_i128 val, zig_u8 bits) { @@ -1593,7 +1734,7 @@ static inline zig_u128 zig_byte_swap_u128(zig_u128 val, zig_u8 bits) { } static inline zig_i128 zig_byte_swap_i128(zig_i128 val, zig_u8 bits) { - return zig_byte_swap_u128(zig_bitcast_u128(val), bits); + return zig_bitcast_i128(zig_byte_swap_u128(zig_bitcast_u128(val), bits)); } static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, zig_u8 bits) { @@ -1603,15 +1744,47 @@ static inline zig_u128 
zig_bit_reverse_u128(zig_u128 val, zig_u8 bits) { } static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) { - return zig_bit_reverse_u128(zig_bitcast_u128(val), bits); + return zig_bitcast_i128(zig_bit_reverse_u128(zig_bitcast_u128(val), bits)); } /* ========================= Floating Point Support ========================= */ +#if _MSC_VER +#define zig_msvc_flt_inf ((double)(1e+300 * 1e+300)) +#define zig_msvc_flt_inff ((float)(1e+300 * 1e+300)) +#define zig_msvc_flt_infl ((long double)(1e+300 * 1e+300)) +#define zig_msvc_flt_nan ((double)(zig_msvc_flt_inf * 0.f)) +#define zig_msvc_flt_nanf ((float)(zig_msvc_flt_inf * 0.f)) +#define zig_msvc_flt_nanl ((long double)(zig_msvc_flt_inf * 0.f)) +#define __builtin_nan(str) nan(str) +#define __builtin_nanf(str) nanf(str) +#define __builtin_nanl(str) nanl(str) +#define __builtin_inf() zig_msvc_flt_inf +#define __builtin_inff() zig_msvc_flt_inff +#define __builtin_infl() zig_msvc_flt_infl +#endif + +#define zig_has_float_builtins (zig_has_builtin(nan) && zig_has_builtin(nans) && zig_has_builtin(inf)) +#if zig_has_float_builtins +#define zig_as_special_f16(sign, name, arg, repr) sign zig_as_f16(__builtin_##name, )(arg) +#define zig_as_special_f32(sign, name, arg, repr) sign zig_as_f32(__builtin_##name, )(arg) +#define zig_as_special_f64(sign, name, arg, repr) sign zig_as_f64(__builtin_##name, )(arg) +#define zig_as_special_f80(sign, name, arg, repr) sign zig_as_f80(__builtin_##name, )(arg) +#define zig_as_special_f128(sign, name, arg, repr) sign zig_as_f128(__builtin_##name, )(arg) +#define zig_as_special_c_longdouble(sign, name, arg, repr) sign zig_as_c_longdouble(__builtin_##name, )(arg) +#else +#define zig_as_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr) +#define zig_as_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr) +#define zig_as_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr) +#define zig_as_special_f80(sign, name, arg, repr) 
zig_float_from_repr_f80(repr) +#define zig_as_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr) +#define zig_as_special_c_longdouble(sign, name, arg, repr) zig_float_from_repr_c_longdouble(repr) +#endif + #define zig_has_f16 1 #define zig_bitSizeOf_f16 16 #define zig_libc_name_f16(name) __##name##h -#define zig_as_special_f16(sign, name, arg, repr) sign zig_as_f16(__builtin_##name, )(arg) +#define zig_as_special_constant_f16(sign, name, arg, repr) zig_as_special_f16(sign, name, arg, repr) #if FLT_MANT_DIG == 11 typedef float zig_f16; #define zig_as_f16(fp, repr) fp##f @@ -1636,12 +1809,18 @@ typedef zig_i16 zig_f16; #define zig_as_f16(fp, repr) repr #undef zig_as_special_f16 #define zig_as_special_f16(sign, name, arg, repr) repr +#undef zig_as_special_constant_f16 +#define zig_as_special_constant_f16(sign, name, arg, repr) repr #endif #define zig_has_f32 1 #define zig_bitSizeOf_f32 32 #define zig_libc_name_f32(name) name##f -#define zig_as_special_f32(sign, name, arg, repr) sign zig_as_f32(__builtin_##name, )(arg) +#if _MSC_VER +#define zig_as_special_constant_f32(sign, name, arg, repr) sign zig_as_f32(zig_msvc_flt_##name, ) +#else +#define zig_as_special_constant_f32(sign, name, arg, repr) zig_as_special_f32(sign, name, arg, repr) +#endif #if FLT_MANT_DIG == 24 typedef float zig_f32; #define zig_as_f32(fp, repr) fp##f @@ -1663,12 +1842,18 @@ typedef zig_i32 zig_f32; #define zig_as_f32(fp, repr) repr #undef zig_as_special_f32 #define zig_as_special_f32(sign, name, arg, repr) repr +#undef zig_as_special_constant_f32 +#define zig_as_special_constant_f32(sign, name, arg, repr) repr #endif #define zig_has_f64 1 #define zig_bitSizeOf_f64 64 #define zig_libc_name_f64(name) name -#define zig_as_special_f64(sign, name, arg, repr) sign zig_as_f64(__builtin_##name, )(arg) +#if _MSC_VER +#define zig_as_special_constant_f64(sign, name, arg, repr) sign zig_as_f64(zig_msvc_flt_##name, ) +#else +#define zig_as_special_constant_f64(sign, name, arg, repr) 
zig_as_special_f64(sign, name, arg, repr) +#endif #if FLT_MANT_DIG == 53 typedef float zig_f64; #define zig_as_f64(fp, repr) fp##f @@ -1693,12 +1878,14 @@ typedef zig_i64 zig_f64; #define zig_as_f64(fp, repr) repr #undef zig_as_special_f64 #define zig_as_special_f64(sign, name, arg, repr) repr +#undef zig_as_special_constant_f64 +#define zig_as_special_constant_f64(sign, name, arg, repr) repr #endif #define zig_has_f80 1 #define zig_bitSizeOf_f80 80 #define zig_libc_name_f80(name) __##name##x -#define zig_as_special_f80(sign, name, arg, repr) sign zig_as_f80(__builtin_##name, )(arg) +#define zig_as_special_constant_f80(sign, name, arg, repr) zig_as_special_f80(sign, name, arg, repr) #if FLT_MANT_DIG == 64 typedef float zig_f80; #define zig_as_f80(fp, repr) fp##f @@ -1726,12 +1913,14 @@ typedef zig_i128 zig_f80; #define zig_as_f80(fp, repr) repr #undef zig_as_special_f80 #define zig_as_special_f80(sign, name, arg, repr) repr +#undef zig_as_special_constant_f80 +#define zig_as_special_constant_f80(sign, name, arg, repr) repr #endif #define zig_has_f128 1 #define zig_bitSizeOf_f128 128 #define zig_libc_name_f128(name) name##q -#define zig_as_special_f128(sign, name, arg, repr) sign zig_as_f128(__builtin_##name, )(arg) +#define zig_as_special_constant_f128(sign, name, arg, repr) zig_as_special_f128(sign, name, arg, repr) #if FLT_MANT_DIG == 113 typedef float zig_f128; #define zig_as_f128(fp, repr) fp##f @@ -1761,13 +1950,57 @@ typedef zig_i128 zig_f128; #define zig_as_f128(fp, repr) repr #undef zig_as_special_f128 #define zig_as_special_f128(sign, name, arg, repr) repr +#undef zig_as_special_constant_f128 +#define zig_as_special_constant_f128(sign, name, arg, repr) repr #endif #define zig_has_c_longdouble 1 +#define zig_libc_name_c_longdouble(name) name##l +#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) zig_as_special_c_longdouble(sign, name, arg, repr) +#ifdef zig_bitSizeOf_c_longdouble typedef long double zig_c_longdouble; #define 
zig_as_c_longdouble(fp, repr) fp##l -#define zig_libc_name_c_longdouble(name) name##l -#define zig_as_special_c_longdouble(sign, name, arg, repr) sign __builtin_##name##l(arg) +#else +#undef zig_has_c_longdouble +#define zig_bitSizeOf_c_longdouble 80 +#define zig_compiler_rt_abbrev_c_longdouble zig_compiler_rt_abbrev_f80 +#define zig_has_c_longdouble 0 +#define zig_repr_c_longdouble i128 +typedef zig_i128 zig_c_longdouble; +#define zig_as_c_longdouble(fp, repr) repr +#undef zig_as_special_c_longdouble +#define zig_as_special_c_longdouble(sign, name, arg, repr) repr +#undef zig_as_special_constant_c_longdouble +#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) repr +#endif + +#if !zig_has_float_builtins +#define zig_float_from_repr(Type, ReprType) \ + static inline zig_##Type zig_float_from_repr_##Type(zig_##ReprType repr) { \ + return *((zig_##Type*)&repr); \ + } + +zig_float_from_repr(f16, u16) +zig_float_from_repr(f32, u32) +zig_float_from_repr(f64, u64) +zig_float_from_repr(f80, u128) +zig_float_from_repr(f128, u128) +zig_float_from_repr(c_longdouble, u128) +#endif + +#define zig_cast_f16 (zig_f16) +#define zig_cast_f32 (zig_f32) +#define zig_cast_f64 (zig_f64) + +#if _MSC_VER && !zig_has_f128 +#define zig_cast_f80 +#define zig_cast_c_longdouble +#define zig_cast_f128 +#else +#define zig_cast_f80 (zig_f80) +#define zig_cast_c_longdouble (zig_c_longdouble) +#define zig_cast_f128 (zig_f128) +#endif #define zig_convert_builtin(ResType, operation, ArgType, version) \ zig_extern zig_##ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \ @@ -1892,3 +2125,268 @@ zig_float_builtins(f64) zig_float_builtins(f80) zig_float_builtins(f128) zig_float_builtins(c_longdouble) + +#if _MSC_VER && (_M_IX86 || _M_X64) + +// TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64 + +#define zig_msvc_atomics(Type, suffix) \ + static inline bool zig_msvc_cmpxchg_##Type(zig_##Type 
volatile* obj, zig_##Type* expected, zig_##Type desired) { \ + zig_##Type comparand = *expected; \ + zig_##Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \ + bool exchanged = initial == comparand; \ + if (!exchanged) { \ + *expected = initial; \ + } \ + return exchanged; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + return _InterlockedExchange##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + return _InterlockedExchangeAdd##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = prev - value; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + } \ + return prev; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_or_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + return _InterlockedOr##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_xor_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + return _InterlockedXor##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_and_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + return _InterlockedAnd##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_nand_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = ~(prev & value); \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + } \ + return prev; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_min_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = value < prev ? 
value : prev; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + } \ + return prev; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_max_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = value > prev ? value : prev; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + } \ + return prev; \ + } \ + static inline void zig_msvc_atomic_store_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + _InterlockedExchange##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomic_load_##Type(zig_##Type volatile* obj) { \ + return _InterlockedOr##suffix(obj, 0); \ + } + +zig_msvc_atomics(u8, 8) +zig_msvc_atomics(i8, 8) +zig_msvc_atomics(u16, 16) +zig_msvc_atomics(i16, 16) +zig_msvc_atomics(u32, ) +zig_msvc_atomics(i32, ) +zig_msvc_atomics(u64, 64) +zig_msvc_atomics(i64, 64) + +#define zig_msvc_flt_atomics(Type, ReprType, suffix) \ + static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \ + zig_##ReprType comparand = *((zig_##ReprType*)expected); \ + zig_##ReprType initial = _InterlockedCompareExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&desired), comparand); \ + bool exchanged = initial == comparand; \ + if (!exchanged) { \ + *expected = *((zig_##Type*)&initial); \ + } \ + return exchanged; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + zig_##ReprType initial = _InterlockedExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&value)); \ + return *((zig_##Type*)&initial); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##ReprType new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = prev + value; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, 
*((zig_##ReprType*)&new)); \ + } \ + return prev; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##ReprType new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = prev - value; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \ + } \ + return prev; \ + } + +zig_msvc_flt_atomics(f32, u32, ) +zig_msvc_flt_atomics(f64, u64, 64) + +#if _M_IX86 +static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, zig_u32* arg) { + return _InterlockedExchangePointer(obj, arg); +} + +static inline void zig_msvc_atomic_store_p32(void** obj, zig_u32* arg) { + _InterlockedExchangePointer(obj, arg); +} + +static inline void* zig_msvc_atomic_load_p32(void** obj, zig_u32* arg) { + return (void*)_InterlockedOr((void*)obj, 0); +} + +static inline bool zig_msvc_cmpxchg_p32(void** obj, void** expected, void* desired) { + void* comparand = *expected; + void* initial = _InterlockedCompareExchangePointer(obj, desired, comparand); + bool exchanged = initial == comparand; + if (!exchanged) { + *expected = initial; + } + return exchanged; +} +#else +static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, zig_u64* arg) { + return _InterlockedExchangePointer(obj, arg); +} + +static inline void zig_msvc_atomic_store_p64(void** obj, zig_u64* arg) { + _InterlockedExchangePointer(obj, arg); +} + +static inline void* zig_msvc_atomic_load_p64(void** obj) { + return (void*)_InterlockedOr64((void*)obj, 0); +} + +static inline bool zig_msvc_cmpxchg_p64(void** obj, void** expected, void* desired) { + void* comparand = *expected; + void* initial = _InterlockedCompareExchangePointer(obj, desired, comparand); + bool exchanged = initial == comparand; + if (!exchanged) { + *expected = initial; + } + return exchanged; +} +#endif + +static inline bool zig_msvc_cmpxchg_u128(zig_u128 volatile* obj, zig_u128* expected, zig_u128 desired) { + return 
_InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_i64*)expected); +} + +static inline bool zig_msvc_cmpxchg_i128(zig_i128 volatile* obj, zig_i128* expected, zig_i128 desired) { + return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_u64*)expected); +} + +#define zig_msvc_atomics_128xchg(Type) \ + static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, value); \ + } \ + return prev; \ + } + +zig_msvc_atomics_128xchg(u128) +zig_msvc_atomics_128xchg(i128) + +#define zig_msvc_atomics_128op(Type, operation) \ + static inline zig_##Type zig_msvc_atomicrmw_##operation##_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = zig_##operation##_##Type(prev, value); \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + } \ + return prev; \ + } + +zig_msvc_atomics_128op(u128, add) +zig_msvc_atomics_128op(u128, sub) +zig_msvc_atomics_128op(u128, or) +zig_msvc_atomics_128op(u128, xor) +zig_msvc_atomics_128op(u128, and) +zig_msvc_atomics_128op(u128, nand) +zig_msvc_atomics_128op(u128, min) +zig_msvc_atomics_128op(u128, max) + +#endif /* _MSC_VER && (_M_IX86 || _M_X64) */ + +/* ========================= Special Case Intrinsics ========================= */ + +#if (_MSC_VER && _M_X64) || defined(__x86_64__) + +static inline void* zig_x86_64_windows_teb(void) { +#if _MSC_VER + return __readgsqword(0x30); +#else + void* teb; + __asm volatile(" movq %%gs:0x30, %[ptr]": [ptr]"=r"(teb)::); + return teb; +#endif +} + +#endif + +#if (_MSC_VER && (_M_IX86 || _M_X64)) || defined(__i386__) || defined(__x86_64__) + +static inline void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, zig_u32* ebx, zig_u32* ecx, 
zig_u32* edx) { + zig_u32 cpu_info[4]; +#if _MSC_VER + __cpuidex(cpu_info, leaf_id, subid); +#else + __cpuid_count(leaf_id, subid, cpu_info[0], cpu_info[1], cpu_info[2], cpu_info[3]); +#endif + *eax = cpu_info[0]; + *ebx = cpu_info[1]; + *ecx = cpu_info[2]; + *edx = cpu_info[3]; +} + +static inline zig_u32 zig_x86_get_xcr0(void) { +#if _MSC_VER + return (zig_u32)_xgetbv(0); +#else + zig_u32 eax; + zig_u32 edx; + __asm__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0)); + return eax; +#endif +} + +#endif diff --git a/src/codegen/c.zig b/src/codegen/c.zig index fe6e245716..c1adbfe6cf 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -90,7 +90,15 @@ const FormatTypeAsCIdentContext = struct { const ValueRenderLocation = enum { FunctionArgument, Initializer, + StaticInitializer, Other, + + pub fn isInitializer(self: ValueRenderLocation) bool { + return switch (self) { + .Initializer, .StaticInitializer => true, + else => false, + }; + } }; const BuiltinInfo = enum { @@ -312,7 +320,7 @@ pub const Function = struct { try writer.writeAll("static "); try f.object.dg.renderTypeAndName(writer, ty, decl_c_value, .Const, alignment, .Complete); try writer.writeAll(" = "); - try f.object.dg.renderValue(writer, ty, val, .Initializer); + try f.object.dg.renderValue(writer, ty, val, .StaticInitializer); try writer.writeAll(";\n "); break :result decl_c_value; } else CValue{ .constant = inst }; @@ -431,6 +439,10 @@ pub const Function = struct { return f.object.dg.renderTypecast(w, t); } + fn renderIntCast(f: *Function, w: anytype, dest_ty: Type, src: CValue, src_ty: Type, location: ValueRenderLocation) !void { + return f.object.dg.renderIntCast(w, dest_ty, .{ .c_value = .{ .f = f, .value = src } }, src_ty, location); + } + fn fmtIntLiteral(f: *Function, ty: Type, val: Value) !std.fmt.Formatter(formatIntLiteral) { return f.object.dg.fmtIntLiteral(ty, val); } @@ -502,6 +514,7 @@ pub const DeclGen = struct { ty: Type, val: Value, decl_index: Decl.Index, + location: ValueRenderLocation, 
) error{ OutOfMemory, AnalysisFail }!void { const decl = dg.module.declPtr(decl_index); assert(decl.has_tv); @@ -515,12 +528,16 @@ pub const DeclGen = struct { inline for (.{ .function, .extern_fn }) |tag| if (decl.val.castTag(tag)) |func| if (func.data.owner_decl != decl_index) - return dg.renderDeclValue(writer, ty, val, func.data.owner_decl); + return dg.renderDeclValue(writer, ty, val, func.data.owner_decl, location); if (ty.isSlice()) { - try writer.writeByte('('); - try dg.renderTypecast(writer, ty); - try writer.writeAll("){ .ptr = "); + if (location == .StaticInitializer) { + try writer.writeByte('{'); + } else { + try writer.writeByte('('); + try dg.renderTypecast(writer, ty); + try writer.writeAll("){ .ptr = "); + } var buf: Type.SlicePtrFieldTypeBuffer = undefined; try dg.renderValue(writer, ty.slicePtrFieldType(&buf), val.slicePtr(), .Initializer); @@ -530,7 +547,12 @@ pub const DeclGen = struct { .data = val.sliceLen(dg.module), }; const len_val = Value.initPayload(&len_pl.base); - return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val)}); + + if (location == .StaticInitializer) { + return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val)}); + } else { + return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val)}); + } } // We shouldn't cast C function pointers as this is UB (when you call @@ -552,7 +574,7 @@ pub const DeclGen = struct { // that its contents are defined with respect to. 
// // Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr - fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type) error{ OutOfMemory, AnalysisFail }!void { + fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type, location: ValueRenderLocation) error{ OutOfMemory, AnalysisFail }!void { if (!ptr_ty.isSlice()) { try writer.writeByte('('); try dg.renderTypecast(writer, ptr_ty); @@ -567,7 +589,7 @@ pub const DeclGen = struct { .variable => ptr_val.castTag(.variable).?.data.owner_decl, else => unreachable, }; - try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index); + try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index, location); }, .field_ptr => { const ptr_info = ptr_ty.ptrInfo(); @@ -605,7 +627,7 @@ pub const DeclGen = struct { try writer.writeAll("&(("); try dg.renderTypecast(writer, u8_ptr_ty); try writer.writeByte(')'); - try dg.renderParentPtr(writer, field_ptr.container_ptr, container_ptr_ty); + try dg.renderParentPtr(writer, field_ptr.container_ptr, container_ptr_ty, location); return writer.print(")[{}]", .{try dg.fmtIntLiteral(Type.usize, byte_offset_val)}); } else { var host_pl = Type.Payload.Bits{ @@ -617,7 +639,7 @@ pub const DeclGen = struct { try writer.writeByte('('); try dg.renderTypecast(writer, ptr_ty); try writer.writeByte(')'); - return dg.renderParentPtr(writer, field_ptr.container_ptr, host_ty); + return dg.renderParentPtr(writer, field_ptr.container_ptr, host_ty, location); }, }, .Union => switch (container_ty.containerLayout()) { @@ -626,7 +648,7 @@ pub const DeclGen = struct { .ty = container_ty.unionFields().values()[index].ty, }, .Packed => { - return dg.renderParentPtr(writer, field_ptr.container_ptr, ptr_ty); + return dg.renderParentPtr(writer, field_ptr.container_ptr, ptr_ty, location); }, }, .Pointer => field_info: { @@ -645,7 +667,7 @@ pub const DeclGen = struct { try dg.renderType(std.io.null_writer, field_ptr.container_ty, .Complete); try 
writer.writeAll("&("); - try dg.renderParentPtr(writer, field_ptr.container_ptr, container_ptr_ty); + try dg.renderParentPtr(writer, field_ptr.container_ptr, container_ptr_ty, location); try writer.writeAll(")->"); switch (field_ptr.container_ty.tag()) { .union_tagged, .union_safety_tagged => try writer.writeAll("payload."), @@ -653,7 +675,7 @@ pub const DeclGen = struct { } try writer.print("{ }", .{fmtIdent(field_info.name)}); } else { - try dg.renderParentPtr(writer, field_ptr.container_ptr, container_ptr_ty); + try dg.renderParentPtr(writer, field_ptr.container_ptr, container_ptr_ty, location); } }, .elem_ptr => { @@ -665,7 +687,7 @@ pub const DeclGen = struct { const elem_ptr_ty = Type.initPayload(&elem_ptr_ty_pl.base); try writer.writeAll("&("); - try dg.renderParentPtr(writer, elem_ptr.array_ptr, elem_ptr_ty); + try dg.renderParentPtr(writer, elem_ptr.array_ptr, elem_ptr_ty, location); try writer.print(")[{d}]", .{elem_ptr.index}); }, .opt_payload_ptr, .eu_payload_ptr => { @@ -680,7 +702,7 @@ pub const DeclGen = struct { try dg.renderType(std.io.null_writer, payload_ptr.container_ty, .Complete); try writer.writeAll("&("); - try dg.renderParentPtr(writer, payload_ptr.container_ptr, container_ptr_ty); + try dg.renderParentPtr(writer, payload_ptr.container_ptr, container_ptr_ty, location); try writer.writeAll(")->payload"); }, else => unreachable, @@ -699,6 +721,10 @@ pub const DeclGen = struct { val = rt.data; } const target = dg.module.getTarget(); + const initializer_type: ValueRenderLocation = switch (location) { + .StaticInitializer => .StaticInitializer, + else => .Initializer, + }; const safety_on = switch (dg.module.optimizeMode()) { .Debug, .ReleaseSafe => true, @@ -714,15 +740,15 @@ pub const DeclGen = struct { return writer.writeAll("false"); } }, - .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, val)}), + .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteralLoc(ty, val, location)}), .Float => { 
const bits = ty.floatBits(target); var int_pl = Type.Payload.Bits{ .base = .{ .tag = .int_signed }, .data = bits }; const int_ty = Type.initPayload(&int_pl.base); - try writer.writeByte('('); - try dg.renderTypecast(writer, ty); - try writer.writeAll(")zig_as_"); + try writer.writeAll("zig_cast_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeAll(" zig_as_"); try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); switch (bits) { @@ -738,7 +764,7 @@ pub const DeclGen = struct { return writer.writeByte(')'); }, .Pointer => if (ty.isSlice()) { - if (location != .Initializer) { + if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderTypecast(writer, ty); try writer.writeByte(')'); @@ -766,21 +792,21 @@ pub const DeclGen = struct { return dg.renderValue(writer, payload_ty, val, location); } - if (location != .Initializer) { + if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderTypecast(writer, ty); try writer.writeByte(')'); } try writer.writeAll("{ .payload = "); - try dg.renderValue(writer, payload_ty, val, .Initializer); + try dg.renderValue(writer, payload_ty, val, initializer_type); try writer.writeAll(", .is_null = "); - try dg.renderValue(writer, Type.bool, val, .Initializer); + try dg.renderValue(writer, Type.bool, val, initializer_type); return writer.writeAll(" }"); }, .Struct => switch (ty.containerLayout()) { .Auto, .Extern => { - if (location != .Initializer) { + if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderTypecast(writer, ty); try writer.writeByte(')'); @@ -792,7 +818,7 @@ pub const DeclGen = struct { if (!field.ty.hasRuntimeBits()) continue; if (!empty) try writer.writeByte(','); - try dg.renderValue(writer, field.ty, val, .Initializer); + try dg.renderValue(writer, field.ty, val, initializer_type); empty = false; } @@ -802,7 +828,7 @@ pub const DeclGen = struct { .Packed => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, Value.undef)}), }, 
.Union => { - if (location != .Initializer) { + if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderTypecast(writer, ty); try writer.writeByte(')'); @@ -813,34 +839,34 @@ pub const DeclGen = struct { const layout = ty.unionGetLayout(target); if (layout.tag_size != 0) { try writer.writeAll(" .tag = "); - try dg.renderValue(writer, tag_ty, val, .Initializer); + try dg.renderValue(writer, tag_ty, val, initializer_type); try writer.writeByte(','); } try writer.writeAll(" .payload = {"); } for (ty.unionFields().values()) |field| { if (!field.ty.hasRuntimeBits()) continue; - try dg.renderValue(writer, field.ty, val, .Initializer); + try dg.renderValue(writer, field.ty, val, initializer_type); break; } else try writer.print("{x}", .{try dg.fmtIntLiteral(Type.u8, Value.undef)}); if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}'); return writer.writeByte('}'); }, .ErrorUnion => { - if (location != .Initializer) { + if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderTypecast(writer, ty); try writer.writeByte(')'); } try writer.writeAll("{ .payload = "); - try dg.renderValue(writer, ty.errorUnionPayload(), val, .Initializer); + try dg.renderValue(writer, ty.errorUnionPayload(), val, initializer_type); return writer.print(", .error = {x} }}", .{ try dg.fmtIntLiteral(ty.errorUnionSet(), val), }); }, .Array, .Vector => { - if (location != .Initializer) { + if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderTypecast(writer, ty); try writer.writeByte(')'); @@ -848,19 +874,20 @@ pub const DeclGen = struct { const ai = ty.arrayInfo(); if (ai.elem_type.eql(Type.u8, dg.module)) { - try writer.writeByte('"'); + var literal = stringLiteral(writer); + try literal.start(); const c_len = ty.arrayLenIncludingSentinel(); var index: usize = 0; while (index < c_len) : (index += 1) - try writeStringLiteralChar(writer, 0xaa); - return writer.writeByte('"'); + try literal.writeChar(0xaa); + return literal.end(); } else { try 
writer.writeByte('{'); const c_len = ty.arrayLenIncludingSentinel(); var index: usize = 0; while (index < c_len) : (index += 1) { if (index > 0) try writer.writeAll(", "); - try dg.renderValue(writer, ty.childType(), val, .Initializer); + try dg.renderValue(writer, ty.childType(), val, initializer_type); } return writer.writeByte('}'); } @@ -893,8 +920,8 @@ pub const DeclGen = struct { .eu_payload_ptr, .decl_ref_mut, .decl_ref, - => try dg.renderParentPtr(writer, val, ty), - else => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val)}), + => try dg.renderParentPtr(writer, val, ty, location), + else => try writer.print("{}", .{try dg.fmtIntLiteralLoc(ty, val, location)}), }, .Float => { const bits = ty.floatBits(target); @@ -926,9 +953,10 @@ }; const int_val = Value.initPayload(&int_val_pl.base); - try writer.writeByte('('); - try dg.renderTypecast(writer, ty); - try writer.writeByte(')'); + try writer.writeAll("zig_cast_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte(' '); + var empty = true; if (std.math.isFinite(f128_val)) { try writer.writeAll("zig_as_"); try dg.renderTypeForBuiltinFnName(writer, ty); @@ -941,17 +969,32 @@ 128 => try writer.print("{x}", .{f128_val}), else => unreachable, } + try writer.writeAll(", "); + empty = false; } else { - const operation = if (std.math.isSignalNan(f128_val)) - "nans" - else if (std.math.isNan(f128_val)) + // isSignalNan is equivalent to isNan currently, and MSVC doesn't have nans, so prefer nan + const operation = if (std.math.isNan(f128_val)) "nan" + else if (std.math.isSignalNan(f128_val)) + "nans" else if (std.math.isInf(f128_val)) "inf" else unreachable; + if (location == .StaticInitializer) { + if (!std.math.isNan(f128_val) and std.math.isSignalNan(f128_val)) + return dg.fail("TODO: C backend: implement nans rendering in static initializers", .{}); + + // MSVC doesn't have a way to define a custom or signaling NaN value in a
constant expression + + // TODO: Re-enable this check, otherwise we're writing qnan bit patterns on msvc incorrectly + // if (std.math.isNan(f128_val) and f128_val != std.math.qnan_f128) + // return dg.fail("Only quiet nans are supported in global variable initializers", .{}); + } + try writer.writeAll("zig_as_special_"); + if (location == .StaticInitializer) try writer.writeAll("constant_"); try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); if (std.math.signbit(f128_val)) try writer.writeByte('-'); @@ -968,8 +1011,12 @@ pub const DeclGen = struct { 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}), else => unreachable, }; + try writer.writeAll(", "); + empty = false; } - return writer.print(", {x})", .{try dg.fmtIntLiteral(int_ty, int_val)}); + try writer.print("{x}", .{try dg.fmtIntLiteralLoc(int_ty, int_val, location)}); + if (!empty) try writer.writeByte(')'); + return; }, .Pointer => switch (val.tag()) { .null_value, .zero => if (ty.isSlice()) { @@ -987,10 +1034,10 @@ pub const DeclGen = struct { }, .variable => { const decl = val.castTag(.variable).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl); + return dg.renderDeclValue(writer, ty, val, decl, location); }, .slice => { - if (location != .Initializer) { + if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderTypecast(writer, ty); try writer.writeByte(')'); @@ -1000,9 +1047,9 @@ pub const DeclGen = struct { var buf: Type.SlicePtrFieldTypeBuffer = undefined; try writer.writeByte('{'); - try dg.renderValue(writer, ty.slicePtrFieldType(&buf), slice.ptr, .Initializer); + try dg.renderValue(writer, ty.slicePtrFieldType(&buf), slice.ptr, initializer_type); try writer.writeAll(", "); - try dg.renderValue(writer, Type.usize, slice.len, .Initializer); + try dg.renderValue(writer, Type.usize, slice.len, initializer_type); try writer.writeByte('}'); }, .function => { @@ -1024,7 +1071,7 @@ pub const DeclGen = struct { .eu_payload_ptr, 
.decl_ref_mut, .decl_ref, - => try dg.renderParentPtr(writer, val, ty), + => try dg.renderParentPtr(writer, val, ty, location), else => unreachable, }, .Array, .Vector => { @@ -1040,7 +1087,7 @@ pub const DeclGen = struct { try writer.writeByte('{'); const ai = ty.arrayInfo(); if (ai.sentinel) |s| { - try dg.renderValue(writer, ai.elem_type, s, .Initializer); + try dg.renderValue(writer, ai.elem_type, s, initializer_type); } else { try writer.writeByte('0'); } @@ -1060,34 +1107,51 @@ pub const DeclGen = struct { defer arena.deinit(); const arena_allocator = arena.allocator(); + // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal + const max_string_initializer_len = 65535; + const ai = ty.arrayInfo(); if (ai.elem_type.eql(Type.u8, dg.module)) { - try writer.writeByte('"'); - var index: usize = 0; - while (index < ai.len) : (index += 1) { - const elem_val = try val.elemValue(dg.module, arena_allocator, index); - const elem_val_u8 = if (elem_val.isUndef()) - undefPattern(u8) - else - @intCast(u8, elem_val.toUnsignedInt(target)); - try writeStringLiteralChar(writer, elem_val_u8); + if (ai.len <= max_string_initializer_len) { + var literal = stringLiteral(writer); + try literal.start(); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + const elem_val = try val.elemValue(dg.module, arena_allocator, index); + const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target)); + try literal.writeChar(elem_val_u8); + } + if (ai.sentinel) |s| { + const s_u8 = @intCast(u8, s.toUnsignedInt(target)); + try literal.writeChar(s_u8); + } + try literal.end(); + } else { + try writer.writeByte('{'); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + if (index != 0) try writer.writeByte(','); + const elem_val = try val.elemValue(dg.module, arena_allocator, index); + const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, 
elem_val.toUnsignedInt(target)); + try writer.print("'\\x{x}'", .{elem_val_u8}); + } + if (ai.sentinel) |s| { + if (index != 0) try writer.writeByte(','); + try dg.renderValue(writer, ai.elem_type, s, initializer_type); + } + try writer.writeByte('}'); } - if (ai.sentinel) |s| { - const s_u8 = @intCast(u8, s.toUnsignedInt(target)); - try writeStringLiteralChar(writer, s_u8); - } - try writer.writeByte('"'); } else { try writer.writeByte('{'); var index: usize = 0; while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); const elem_val = try val.elemValue(dg.module, arena_allocator, index); - try dg.renderValue(writer, ai.elem_type, elem_val, .Initializer); + try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type); } if (ai.sentinel) |s| { if (index != 0) try writer.writeByte(','); - try dg.renderValue(writer, ai.elem_type, s, .Initializer); + try dg.renderValue(writer, ai.elem_type, s, initializer_type); } try writer.writeByte('}'); } @@ -1114,7 +1178,7 @@ pub const DeclGen = struct { return dg.renderValue(writer, payload_ty, payload_val, location); } - if (location != .Initializer) { + if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderTypecast(writer, ty); try writer.writeByte(')'); @@ -1123,9 +1187,9 @@ pub const DeclGen = struct { const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else Value.undef; try writer.writeAll("{ .payload = "); - try dg.renderValue(writer, payload_ty, payload_val, .Initializer); + try dg.renderValue(writer, payload_ty, payload_val, initializer_type); try writer.writeAll(", .is_null = "); - try dg.renderValue(writer, Type.bool, is_null_val, .Initializer); + try dg.renderValue(writer, Type.bool, is_null_val, initializer_type); try writer.writeAll(" }"); }, .ErrorSet => { @@ -1148,7 +1212,7 @@ pub const DeclGen = struct { return dg.renderValue(writer, error_ty, val, location); } - if (location != .Initializer) { + if (!location.isInitializer()) { try 
writer.writeByte('('); try dg.renderTypecast(writer, ty); try writer.writeByte(')'); @@ -1158,9 +1222,9 @@ pub const DeclGen = struct { const error_val = if (val.errorUnionIsPayload()) Value.zero else val; try writer.writeAll("{ .payload = "); - try dg.renderValue(writer, payload_ty, payload_val, .Initializer); + try dg.renderValue(writer, payload_ty, payload_val, initializer_type); try writer.writeAll(", .error = "); - try dg.renderValue(writer, error_ty, error_val, .Initializer); + try dg.renderValue(writer, error_ty, error_val, initializer_type); try writer.writeAll(" }"); }, .Enum => { @@ -1200,11 +1264,11 @@ pub const DeclGen = struct { .Fn => switch (val.tag()) { .function => { const decl = val.castTag(.function).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl); + return dg.renderDeclValue(writer, ty, val, decl, location); }, .extern_fn => { const decl = val.castTag(.extern_fn).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl); + return dg.renderDeclValue(writer, ty, val, decl, location); }, else => unreachable, }, @@ -1212,7 +1276,7 @@ pub const DeclGen = struct { .Auto, .Extern => { const field_vals = val.castTag(.aggregate).?.data; - if (location != .Initializer) { + if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderTypecast(writer, ty); try writer.writeByte(')'); @@ -1225,7 +1289,7 @@ pub const DeclGen = struct { if (!field_ty.hasRuntimeBits()) continue; if (!empty) try writer.writeByte(','); - try dg.renderValue(writer, field_ty, field_val, .Initializer); + try dg.renderValue(writer, field_ty, field_val, initializer_type); empty = false; } @@ -1245,31 +1309,85 @@ pub const DeclGen = struct { var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 }; const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); - try writer.writeByte('('); - var empty = true; - for (field_vals) |field_val, index| { + var eff_num_fields: usize = 0; + for (field_vals) |_, 
index| { const field_ty = ty.structFieldType(index); if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; - if (!empty) try writer.writeAll(" | "); - try writer.writeByte('('); - try dg.renderTypecast(writer, ty); - try writer.writeByte(')'); - try dg.renderValue(writer, field_ty, field_val, .Other); - try writer.writeAll(" << "); - try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); - - bit_offset_val_pl.data += field_ty.bitSize(target); - empty = false; + eff_num_fields += 1; + } + + if (eff_num_fields == 0) { + try writer.writeByte('('); + try dg.renderValue(writer, ty, Value.undef, initializer_type); + try writer.writeByte(')'); + } else if (ty.bitSize(target) > 64) { + // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) + var num_or = eff_num_fields - 1; + while (num_or > 0) : (num_or -= 1) { + try writer.writeAll("zig_or_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + } + + var eff_index: usize = 0; + var needs_closing_paren = false; + for (field_vals) |field_val, index| { + const field_ty = ty.structFieldType(index); + if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + + const cast_context = IntCastContext{ .value = .{ .value = field_val } }; + if (bit_offset_val_pl.data != 0) { + try writer.writeAll("zig_shl_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); + try writer.writeAll(", "); + try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + try writer.writeByte(')'); + } else { + try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); + } + + if (needs_closing_paren) try writer.writeByte(')'); + if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); + + bit_offset_val_pl.data += field_ty.bitSize(target); + needs_closing_paren = true; + eff_index += 1; + } + } else { + try 
writer.writeByte('('); + // a << a_off | b << b_off | c << c_off + var empty = true; + for (field_vals) |field_val, index| { + const field_ty = ty.structFieldType(index); + if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + + if (!empty) try writer.writeAll(" | "); + try writer.writeByte('('); + try dg.renderTypecast(writer, ty); + try writer.writeByte(')'); + + if (bit_offset_val_pl.data != 0) { + try dg.renderValue(writer, field_ty, field_val, .Other); + try writer.writeAll(" << "); + try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + } else { + try dg.renderValue(writer, field_ty, field_val, .Other); + } + + bit_offset_val_pl.data += field_ty.bitSize(target); + empty = false; + } + try writer.writeByte(')'); } - if (empty) try dg.renderValue(writer, ty, Value.undef, .Initializer); - try writer.writeByte(')'); }, }, .Union => { const union_obj = val.castTag(.@"union").?.data; - if (location != .Initializer) { + if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderTypecast(writer, ty); try writer.writeByte(')'); @@ -1289,7 +1407,7 @@ pub const DeclGen = struct { try dg.renderTypecast(writer, ty); try writer.writeByte(')'); } - try dg.renderValue(writer, field_ty, union_obj.val, .Initializer); + try dg.renderValue(writer, field_ty, union_obj.val, initializer_type); } else { try writer.writeAll("0"); } @@ -1301,7 +1419,7 @@ pub const DeclGen = struct { const layout = ty.unionGetLayout(target); if (layout.tag_size != 0) { try writer.writeAll(".tag = "); - try dg.renderValue(writer, tag_ty, union_obj.tag, .Initializer); + try dg.renderValue(writer, tag_ty, union_obj.tag, initializer_type); try writer.writeAll(", "); } try writer.writeAll(".payload = {"); @@ -1310,11 +1428,11 @@ pub const DeclGen = struct { var it = ty.unionFields().iterator(); if (field_ty.hasRuntimeBits()) { try writer.print(".{ } = ", .{fmtIdent(field_name)}); - try dg.renderValue(writer, field_ty, union_obj.val, .Initializer); + try 
dg.renderValue(writer, field_ty, union_obj.val, initializer_type); } else while (it.next()) |field| { if (!field.value_ptr.ty.hasRuntimeBits()) continue; try writer.print(".{ } = ", .{fmtIdent(field.key_ptr.*)}); - try dg.renderValue(writer, field.value_ptr.ty, Value.undef, .Initializer); + try dg.renderValue(writer, field.value_ptr.ty, Value.undef, initializer_type); break; } else try writer.writeAll(".empty_union = 0"); if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}'); @@ -2085,6 +2203,103 @@ pub const DeclGen = struct { }); } + const IntCastContext = union(enum) { + c_value: struct { + f: *Function, + value: CValue, + }, + value: struct { + value: Value, + }, + + pub fn writeValue(self: *const IntCastContext, dg: *DeclGen, w: anytype, value_ty: Type, location: ValueRenderLocation) !void { + switch (self.*) { + .c_value => |v| { + try v.f.writeCValue(w, v.value, location); + }, + .value => |v| { + try dg.renderValue(w, value_ty, v.value, location); + }, + } + } + }; + + /// Renders a cast to an int type, from either an int or a pointer. + /// + /// Some platforms don't have 128 bit integers, so we need to use + /// the zig_as_ and zig_lo_ macros in those cases. 
+ /// + /// | Dest type bits | Src type | Result + /// |------------------|------------------|---------------------------| + /// | < 64 bit integer | pointer | (zig_)(zig_size)src + /// | < 64 bit integer | < 64 bit integer | (zig_)src + /// | < 64 bit integer | > 64 bit integer | zig_lo(src) + /// | > 64 bit integer | pointer | zig_as_(0, (zig_size)src) + /// | > 64 bit integer | < 64 bit integer | zig_as_(0, src) + /// | > 64 bit integer | > 64 bit integer | zig_as_(zig_hi_(src), zig_lo_(src)) + fn renderIntCast(dg: *DeclGen, w: anytype, dest_ty: Type, context: IntCastContext, src_ty: Type, location: ValueRenderLocation) !void { + const target = dg.module.getTarget(); + const dest_bits = dest_ty.bitSize(target); + const dest_int_info = dest_ty.intInfo(target); + + const src_is_ptr = src_ty.isPtrAtRuntime(); + const src_eff_ty: Type = if (src_is_ptr) switch (dest_int_info.signedness) { + .unsigned => Type.usize, + .signed => Type.isize, + } else src_ty; + + const src_bits = src_eff_ty.bitSize(target); + const src_int_info = if (src_eff_ty.isAbiInt()) src_eff_ty.intInfo(target) else null; + if (dest_bits <= 64 and src_bits <= 64) { + const needs_cast = src_int_info == null or + (toCIntBits(dest_int_info.bits) != toCIntBits(src_int_info.?.bits) or + dest_int_info.signedness != src_int_info.?.signedness); + + if (needs_cast) { + try w.writeByte('('); + try dg.renderTypecast(w, dest_ty); + try w.writeByte(')'); + } + if (src_is_ptr) { + try w.writeByte('('); + try dg.renderTypecast(w, src_eff_ty); + try w.writeByte(')'); + } + try context.writeValue(dg, w, src_ty, location); + } else if (dest_bits <= 64 and src_bits > 64) { + assert(!src_is_ptr); + try w.writeAll("zig_lo_"); + try dg.renderTypeForBuiltinFnName(w, src_eff_ty); + try w.writeByte('('); + try context.writeValue(dg, w, src_ty, .FunctionArgument); + try w.writeByte(')'); + } else if (dest_bits > 64 and src_bits <= 64) { + try w.writeAll("zig_as_"); + try dg.renderTypeForBuiltinFnName(w, dest_ty); + try 
w.writeAll("(0, "); // TODO: Should the 0 go through fmtIntLiteral? + if (src_is_ptr) { + try w.writeByte('('); + try dg.renderTypecast(w, src_eff_ty); + try w.writeByte(')'); + } + try context.writeValue(dg, w, src_ty, .FunctionArgument); + try w.writeByte(')'); + } else { + assert(!src_is_ptr); + try w.writeAll("zig_as_"); + try dg.renderTypeForBuiltinFnName(w, dest_ty); + try w.writeAll("(zig_hi_"); + try dg.renderTypeForBuiltinFnName(w, src_eff_ty); + try w.writeByte('('); + try context.writeValue(dg, w, src_ty, .FunctionArgument); + try w.writeAll("), zig_lo_"); + try dg.renderTypeForBuiltinFnName(w, src_eff_ty); + try w.writeByte('('); + try context.writeValue(dg, w, src_ty, .FunctionArgument); + try w.writeAll("))"); + } + } + /// Renders a type in C typecast format. /// /// This is guaranteed to be valid in a typecast expression, but not @@ -2134,7 +2349,7 @@ pub const DeclGen = struct { const c_len_val = Value.initPayload(&c_len_pl.base); try suffix_writer.writeByte('['); - if (mutability == .ConstArgument and depth == 0) try suffix_writer.writeAll("static const "); + if (mutability == .ConstArgument and depth == 0) try suffix_writer.writeAll("zig_const_arr "); try suffix.writer().print("{}]", .{try dg.fmtIntLiteral(Type.usize, c_len_val)}); render_ty = array_info.elem_type; depth += 1; @@ -2306,6 +2521,9 @@ pub const DeclGen = struct { try dg.writeCValue(writer, member); } + const IdentHasher = std.crypto.auth.siphash.SipHash128(1, 3); + const ident_hasher_init: IdentHasher = IdentHasher.init(&[_]u8{0} ** IdentHasher.key_length); + fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: Decl.Index, export_index: u32) !void { const decl = dg.module.declPtr(decl_index); dg.module.markDeclAlive(decl); @@ -2323,7 +2541,18 @@ pub const DeclGen = struct { const gpa = dg.gpa; const name = try decl.getFullyQualifiedName(dg.module); defer gpa.free(name); - return writer.print("{}", .{fmtIdent(name)}); + + // MSVC has a limit of 4095 character token length 
limit, and fmtIdent can (worst case), expand + // to 3x the length of its input + if (name.len > 1365) { + var hash = ident_hasher_init; + hash.update(name); + const ident_hash = hash.finalInt(); + try writer.writeAll("zig_D_"); + return std.fmt.formatIntValue(ident_hash, "x", .{}, writer); + } else { + return writer.print("{}", .{fmtIdent(name)}); + } } } @@ -2336,6 +2565,10 @@ pub const DeclGen = struct { try writer.print("{c}{d}", .{ signAbbrev(int_info.signedness), c_bits }); } else if (ty.isRuntimeFloat()) { try ty.print(writer, dg.module); + } else if (ty.isPtrAtRuntime()) { + try writer.print("p{d}", .{ty.bitSize(target)}); + } else if (ty.zigTypeTag() == .Bool) { + try writer.print("u8", .{}); } else return dg.fail("TODO: CBE: implement renderTypeForBuiltinFnName for type {}", .{ ty.fmt(dg.module), }); @@ -2388,6 +2621,19 @@ pub const DeclGen = struct { .mod = dg.module, } }; } + + fn fmtIntLiteralLoc( + dg: *DeclGen, + ty: Type, + val: Value, + location: ValueRenderLocation, // TODO: Instead add this as optional arg to fmtIntLiteral + ) !std.fmt.Formatter(formatIntLiteral) { + const int_info = ty.intInfo(dg.module.getTarget()); + const c_bits = toCIntBits(int_info.bits); + if (c_bits == null or c_bits.? 
> 128) + return dg.fail("TODO implement integer constants larger than 128 bits", .{}); + return std.fmt.Formatter(formatIntLiteral){ .data = .{ .ty = ty, .val = val, .mod = dg.module, .location = location } }; + } }; pub fn genGlobalAsm(mod: *Module, code: *std.ArrayList(u8)) !void { @@ -2433,7 +2679,7 @@ pub fn genErrDecls(o: *Object) !void { try writer.writeAll("static "); try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, .Const, 0, .Complete); try writer.writeAll(" = "); - try o.dg.renderValue(writer, name_ty, name_val, .Initializer); + try o.dg.renderValue(writer, name_ty, name_val, .StaticInitializer); try writer.writeAll(";\n"); } @@ -2604,7 +2850,7 @@ pub fn genDecl(o: *Object) !void { if (variable.is_threadlocal) try w.writeAll("zig_threadlocal "); try o.dg.renderTypeAndName(w, o.dg.decl.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete); try w.writeAll(" = "); - try o.dg.renderValue(w, tv.ty, variable.init, .Initializer); + try o.dg.renderValue(w, tv.ty, variable.init, .StaticInitializer); try w.writeByte(';'); try o.indent_writer.insertNewline(); } else { @@ -2621,7 +2867,7 @@ pub fn genDecl(o: *Object) !void { // https://github.com/ziglang/zig/issues/7582 try o.dg.renderTypeAndName(writer, tv.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete); try writer.writeAll(" = "); - try o.dg.renderValue(writer, tv.ty, tv.val, .Initializer); + try o.dg.renderValue(writer, tv.ty, tv.val, .StaticInitializer); try writer.writeAll(";\n"); } } @@ -3244,11 +3490,20 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.dg.renderTypeForBuiltinFnName(writer, field_ty); try writer.writeAll("(("); try f.renderTypecast(writer, field_ty); - try writer.writeAll(")zig_shr_"); + try writer.writeByte(')'); + const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64; + if (cant_cast) { + if (field_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + try 
writer.writeAll("zig_lo_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); + try writer.writeByte('('); + } + try writer.writeAll("zig_shr_"); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeByte('('); try f.writeCValueDeref(writer, operand); try writer.print(", {})", .{try f.fmtIntLiteral(bit_offset_ty, bit_offset_val)}); + if (cant_cast) try writer.writeByte(')'); try f.object.dg.renderBuiltinInfo(writer, field_ty, .Bits); try writer.writeByte(')'); } else { @@ -3322,11 +3577,11 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const inst_ty = f.air.typeOfIndex(inst); const local = try f.allocLocal(inst, inst_ty); + const operand_ty = f.air.typeOf(ty_op.operand); + try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = ("); - try f.renderTypecast(writer, inst_ty); - try writer.writeByte(')'); - try f.writeCValue(writer, operand, .Other); + try writer.writeAll(" = "); + try f.renderIntCast(writer, inst_ty, operand, operand_ty, .Other); try writer.writeAll(";\n"); return local; } @@ -3346,15 +3601,27 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { const target = f.object.dg.module.getTarget(); const dest_int_info = inst_ty.intInfo(target); const dest_bits = dest_int_info.bits; + const dest_c_bits = toCIntBits(dest_int_info.bits) orelse + return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); + const operand_ty = f.air.typeOf(ty_op.operand); + const operand_int_info = operand_ty.intInfo(target); try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = ("); - try f.renderTypecast(writer, inst_ty); - try writer.writeByte(')'); + try writer.writeAll(" = "); + + const needs_lo = operand_int_info.bits > 64 and dest_bits <= 64; + if (needs_lo) { + try writer.writeAll("zig_lo_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty); + try writer.writeByte('('); + } else if (dest_c_bits <= 64) { + try 
writer.writeByte('('); + try f.renderTypecast(writer, inst_ty); + try writer.writeByte(')'); + } if (dest_bits >= 8 and std.math.isPowerOfTwo(dest_bits)) { try f.writeCValue(writer, operand, .Other); - try writer.writeAll(";\n"); } else switch (dest_int_info.signedness) { .unsigned => { var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa); @@ -3365,14 +3632,14 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); const mask_val = try inst_ty.maxInt(stack.get(), target); - + try writer.writeAll("zig_and_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty); try writer.writeByte('('); - try f.writeCValue(writer, operand, .Other); - try writer.print(" & {x});\n", .{try f.fmtIntLiteral(inst_ty, mask_val)}); + try f.writeCValue(writer, operand, .FunctionArgument); + try writer.print(", {x})", .{try f.fmtIntLiteral(operand_ty, mask_val)}); }, .signed => { - const operand_ty = f.air.typeOf(ty_op.operand); - const c_bits = toCIntBits(operand_ty.intInfo(target).bits) orelse + const c_bits = toCIntBits(operand_int_info.bits) orelse return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); var shift_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, @@ -3380,11 +3647,29 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { }; const shift_val = Value.initPayload(&shift_pl.base); - try writer.print("((int{d}_t)((uint{0d}_t)", .{c_bits}); - try f.writeCValue(writer, operand, .Other); - try writer.print(" << {}) >> {0});\n", .{try f.fmtIntLiteral(Type.u8, shift_val)}); + try writer.writeAll("zig_shr_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty); + if (c_bits == 128) { + try writer.print("(zig_bitcast_i{d}(", .{c_bits}); + } else { + try writer.print("((int{d}_t)", .{c_bits}); + } + try writer.print("zig_shl_u{d}(", .{c_bits}); + if (c_bits == 128) { + try writer.print("zig_bitcast_u{d}(", .{c_bits}); + } else { + try 
writer.print("(uint{d}_t)", .{c_bits}); + } + try f.writeCValue(writer, operand, .FunctionArgument); + if (c_bits == 128) try writer.writeByte(')'); + try writer.print(", {})", .{try f.fmtIntLiteral(Type.u8, shift_val)}); + if (c_bits == 128) try writer.writeByte(')'); + try writer.print(", {})", .{try f.fmtIntLiteral(Type.u8, shift_val)}); }, } + + if (needs_lo) try writer.writeByte(')'); + try writer.writeAll(";\n"); return local; } @@ -3521,15 +3806,26 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValueDeref(writer, ptr_val); try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(host_ty, mask_val)}); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); - try writer.writeAll("(("); - try f.renderTypecast(writer, host_ty); - try writer.writeByte(')'); + try writer.writeByte('('); + const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64; + if (cant_cast) { + if (src_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + try writer.writeAll("zig_as_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); + try writer.writeAll("(0, "); + } else { + try writer.writeByte('('); + try f.renderTypecast(writer, host_ty); + try writer.writeByte(')'); + } + if (src_ty.isPtrAtRuntime()) { try writer.writeByte('('); try f.renderTypecast(writer, Type.usize); try writer.writeByte(')'); } try f.writeCValue(writer, src_val, .Other); + if (cant_cast) try writer.writeByte(')'); try writer.print(", {}))", .{try f.fmtIntLiteral(bit_offset_ty, bit_offset_val)}); } else { try f.writeCValueDeref(writer, ptr_val); @@ -3610,12 +3906,12 @@ fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); + try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); try writer.writeByte('!'); try f.writeCValue(writer, op, .Other); try writer.writeAll(";\n"); - return local; } @@ -4382,7 +4678,15 @@ 
fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, cond, .Other); try writer.writeAll(") "); try genBody(f, then_body); - try writer.writeAll(" else "); + + // TODO: If body ends in goto, elide the else block? + const needs_else = then_body.len <= 0 or f.air.instructions.items(.tag)[then_body[then_body.len - 1]] != .br; + if (needs_else) { + try writer.writeAll(" else "); + } else { + try writer.writeByte('\n'); + } + f.value_map.deinit(); f.value_map = cloned_map.move(); const free_locals = f.getFreeLocals(); @@ -4395,7 +4699,12 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue { try noticeBranchFrees(f, pre_locals_len, inst); - try genBody(f, else_body); + if (needs_else) { + try genBody(f, else_body); + } else { + try genBodyInner(f, else_body); + } + try f.object.indent_writer.insertNewline(); return CValue.none; @@ -5218,13 +5527,22 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty); try writer.writeAll("(("); try f.renderTypecast(writer, field_int_ty); - try writer.writeAll(")zig_shr_"); + try writer.writeByte(')'); + const cant_cast = int_info.bits > 64; + if (cant_cast) { + if (field_int_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + try writer.writeAll("zig_lo_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); + try writer.writeByte('('); + } + try writer.writeAll("zig_shr_"); try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); try writer.writeByte('('); try f.writeCValue(writer, struct_byval, .Other); try writer.writeAll(", "); try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); try writer.writeByte(')'); + if (cant_cast) try writer.writeByte(')'); try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .Bits); try writer.writeAll(");\n"); if (inst_ty.eql(field_int_ty, f.object.dg.module)) return 
temp_local; @@ -5812,7 +6130,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try writer.writeAll(";\n"); try writer.writeAll("if ("); try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor}); - try f.renderTypecast(writer, ptr_ty.elemType()); + try f.renderTypecast(writer, ptr_ty.childType()); try writer.writeByte(')'); if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); @@ -5825,6 +6143,8 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try writeMemoryOrder(writer, extra.successOrder()); try writer.writeAll(", "); try writeMemoryOrder(writer, extra.failureOrder()); + try writer.writeAll(", "); + try f.object.dg.renderTypeForBuiltinFnName(writer, ptr_ty.childType()); try writer.writeByte(')'); try writer.writeAll(") {\n"); f.object.indent_writer.pushIndent(); @@ -5839,7 +6159,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try writer.writeAll(";\n"); try f.writeCValue(writer, local, .Other); try writer.print(".is_null = zig_cmpxchg_{s}((zig_atomic(", .{flavor}); - try f.renderTypecast(writer, ptr_ty.elemType()); + try f.renderTypecast(writer, ptr_ty.childType()); try writer.writeByte(')'); if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); @@ -5852,6 +6172,8 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try writeMemoryOrder(writer, extra.successOrder()); try writer.writeAll(", "); try writeMemoryOrder(writer, extra.failureOrder()); + try writer.writeAll(", "); + try f.object.dg.renderTypeForBuiltinFnName(writer, ptr_ty.childType()); try writer.writeByte(')'); try writer.writeAll(";\n"); } @@ -5874,8 +6196,8 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ pl_op.operand, extra.operand }); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - try f.writeCValue(writer, local, 
.Other); + try f.writeCValue(writer, local, .Other); try writer.print(" = zig_atomicrmw_{s}((", .{toAtomicRmwSuffix(extra.op())}); switch (extra.op()) { else => { @@ -5895,6 +6217,8 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, operand, .FunctionArgument); try writer.writeAll(", "); try writeMemoryOrder(writer, extra.ordering()); + try writer.writeAll(", "); + try f.object.dg.renderTypeForBuiltinFnName(writer, ptr_ty.childType()); try writer.writeAll(");\n"); if (f.liveness.isUnused(inst)) { @@ -5927,6 +6251,8 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); try writeMemoryOrder(writer, atomic_load.order); + try writer.writeAll(", "); + try f.object.dg.renderTypeForBuiltinFnName(writer, ptr_ty.childType()); try writer.writeAll(");\n"); return local; @@ -5948,7 +6274,9 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); try f.writeCValue(writer, element, .FunctionArgument); - try writer.print(", {s});\n", .{order}); + try writer.print(", {s}, ", .{order}); + try f.object.dg.renderTypeForBuiltinFnName(writer, ptr_ty.childType()); + try writer.writeAll(");\n"); return CValue.none; } @@ -6405,9 +6733,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { }, .Packed => { try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = ("); - try f.renderTypecast(writer, inst_ty); - try writer.writeAll(")"); + try writer.writeAll(" = "); const int_info = inst_ty.intInfo(target); var bit_offset_ty_pl = Type.Payload.Bits{ @@ -6437,20 +6763,28 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; if (!empty) try writer.writeAll(", "); + // TODO: Skip this entire shift if val is 0? 
try writer.writeAll("zig_shlw_"); try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty); - try writer.writeAll("(("); - try f.renderTypecast(writer, inst_ty); - try writer.writeByte(')'); - if (field_ty.isPtrAtRuntime()) { + try writer.writeByte('('); + + if (inst_ty.isAbiInt() and (field_ty.isAbiInt() or field_ty.isPtrAtRuntime())) { + try f.renderIntCast(writer, inst_ty, element, field_ty, .FunctionArgument); + } else { try writer.writeByte('('); - try f.renderTypecast(writer, switch (int_info.signedness) { - .unsigned => Type.usize, - .signed => Type.isize, - }); + try f.renderTypecast(writer, inst_ty); try writer.writeByte(')'); + if (field_ty.isPtrAtRuntime()) { + try writer.writeByte('('); + try f.renderTypecast(writer, switch (int_info.signedness) { + .unsigned => Type.usize, + .signed => Type.isize, + }); + try writer.writeByte(')'); + } + try f.writeCValue(writer, element, .Other); } - try f.writeCValue(writer, element, .Other); + try writer.writeAll(", "); try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); try f.object.dg.renderBuiltinInfo(writer, inst_ty, .Bits); @@ -6460,7 +6794,14 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { bit_offset_val_pl.data += field_ty.bitSize(target); empty = false; } - if (empty) try f.writeCValue(writer, .{ .undef = inst_ty }, .Initializer); + + if (empty) { + try writer.writeByte('('); + try f.renderTypecast(writer, inst_ty); + try writer.writeByte(')'); + try f.writeCValue(writer, .{ .undef = inst_ty }, .Initializer); + } + try writer.writeAll(";\n"); }, }, @@ -6793,6 +7134,68 @@ fn compilerRtAbbrev(ty: Type, target: std.Target) []const u8 { } else unreachable; } +fn StringLiteral(comptime WriterType: type) type { + // MSVC has a length limit of 16380 per string literal (before concatenation) + const max_char_len = 4; + const max_len = 16380 - max_char_len; + + return struct { + cur_len: u64 = 0, + counting_writer: std.io.CountingWriter(WriterType), + + pub 
const Error = WriterType.Error; + + const Self = @This(); + + pub fn start(self: *Self) Error!void { + const writer = self.counting_writer.writer(); + try writer.writeByte('\"'); + } + + pub fn end(self: *Self) Error!void { + const writer = self.counting_writer.writer(); + try writer.writeByte('\"'); + } + + fn writeStringLiteralChar(writer: anytype, c: u8) !void { + switch (c) { + 7 => try writer.writeAll("\\a"), + 8 => try writer.writeAll("\\b"), + '\t' => try writer.writeAll("\\t"), + '\n' => try writer.writeAll("\\n"), + 11 => try writer.writeAll("\\v"), + 12 => try writer.writeAll("\\f"), + '\r' => try writer.writeAll("\\r"), + '"', '\'', '?', '\\' => try writer.print("\\{c}", .{c}), + else => switch (c) { + ' '...'~' => try writer.writeByte(c), + else => try writer.print("\\{o:0>3}", .{c}), + }, + } + } + + pub fn writeChar(self: *Self, c: u8) Error!void { + const writer = self.counting_writer.writer(); + + if (self.cur_len == 0 and self.counting_writer.bytes_written > 1) + try writer.writeAll("\"\""); + + const len = self.counting_writer.bytes_written; + try writeStringLiteralChar(writer, c); + + const char_length = self.counting_writer.bytes_written - len; + assert(char_length <= max_char_len); + self.cur_len += char_length; + + if (self.cur_len >= max_len) self.cur_len = 0; + } + }; +} + +fn stringLiteral(child_stream: anytype) StringLiteral(@TypeOf(child_stream)) { + return .{ .counting_writer = std.io.countingWriter(child_stream) }; +} + fn formatStringLiteral( str: []const u8, comptime fmt: []const u8, @@ -6800,44 +7203,25 @@ fn formatStringLiteral( writer: anytype, ) @TypeOf(writer).Error!void { if (fmt.len != 1 or fmt[0] != 's') @compileError("Invalid fmt: " ++ fmt); - try writer.writeByte('\"'); + + var literal = stringLiteral(writer); + try literal.start(); for (str) |c| - try writeStringLiteralChar(writer, c); - try writer.writeByte('\"'); + try literal.writeChar(c); + try literal.end(); } fn fmtStringLiteral(str: []const u8) 
std.fmt.Formatter(formatStringLiteral) { return .{ .data = str }; } -fn writeStringLiteralChar(writer: anytype, c: u8) !void { - switch (c) { - 7 => try writer.writeAll("\\a"), - 8 => try writer.writeAll("\\b"), - '\t' => try writer.writeAll("\\t"), - '\n' => try writer.writeAll("\\n"), - 11 => try writer.writeAll("\\v"), - 12 => try writer.writeAll("\\f"), - '\r' => try writer.writeAll("\\r"), - '"', '\'', '?', '\\' => try writer.print("\\{c}", .{c}), - else => switch (c) { - ' '...'~' => try writer.writeByte(c), - else => try writer.print("\\{o:0>3}", .{c}), - }, - } -} - fn undefPattern(comptime IntType: type) IntType { const int_info = @typeInfo(IntType).Int; const UnsignedType = std.meta.Int(.unsigned, int_info.bits); return @bitCast(IntType, @as(UnsignedType, (1 << (int_info.bits | 1)) / 3)); } -const FormatIntLiteralContext = struct { - ty: Type, - val: Value, - mod: *Module, -}; +const FormatIntLiteralContext = struct { ty: Type, val: Value, mod: *Module, location: ?ValueRenderLocation = null }; fn formatIntLiteral( data: FormatIntLiteralContext, comptime fmt: []const u8, @@ -6905,10 +7289,31 @@ fn formatIntLiteral( return writer.print("{s}_{s}", .{ abbrev, if (int.positive) "MAX" else "MIN" }); } - if (!int.positive) try writer.writeByte('-'); + var use_twos_comp = false; + if (!int.positive) { + if (c_bits > 64) { + // TODO: Can this be done for decimal literals as well? + if (fmt.len == 1 and fmt[0] != 'd') { + use_twos_comp = true; + } else { + // TODO: Use fmtIntLiteral for 0? 
+ try writer.print("zig_sub_{c}{d}(zig_as_{c}{d}(0, 0), ", .{ signAbbrev(int_info.signedness), c_bits, signAbbrev(int_info.signedness), c_bits }); + } + } else { + try writer.writeByte('-'); + } + } + switch (data.ty.tag()) { .c_short, .c_ushort, .c_int, .c_uint, .c_long, .c_ulong, .c_longlong, .c_ulonglong => {}, - else => try writer.print("zig_as_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits }), + else => { + if (int_info.bits > 64 and data.location != null and data.location.? == .StaticInitializer) { + // MSVC treats casting the struct initializer as not constant (C2099), so an alternate form is used in global initializers + try writer.print("zig_as_constant_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits }); + } else { + try writer.print("zig_as_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits }); + } + }, } const limbs_count_64 = @divExact(64, @bitSizeOf(BigIntLimb)); @@ -6948,16 +7353,34 @@ fn formatIntLiteral( } else { assert(c_bits == 128); const split = std.math.min(int.limbs.len, limbs_count_64); + var twos_comp_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined; + + // Adding a negation in the C code before the doesn't work in all cases: + // - struct versions would require an extra zig_sub_ call to negate, which wouldn't work in constant expressions + // - negating the f80 int representation (i128) doesn't make sense + // Instead we write out the literal as a negative number in twos complement + var limbs = int.limbs; + + if (use_twos_comp) { + var twos_comp = BigInt.Mutable{ + .limbs = &twos_comp_limbs, + .positive = undefined, + .len = undefined, + }; + + twos_comp.convertToTwosComplement(int, .signed, int_info.bits); + limbs = twos_comp.limbs; + } var upper_pl = Value.Payload.BigInt{ .base = .{ .tag = .int_big_positive }, - .data = int.limbs[split..], + .data = limbs[split..], }; const upper_val = Value.initPayload(&upper_pl.base); try formatIntLiteral(.{ .ty = switch (int_info.signedness) { .unsigned => Type.u64, - 
.signed => Type.i64, + .signed => if (use_twos_comp) Type.u64 else Type.i64, }, .val = upper_val, .mod = data.mod, @@ -6967,7 +7390,7 @@ fn formatIntLiteral( var lower_pl = Value.Payload.BigInt{ .base = .{ .tag = .int_big_positive }, - .data = int.limbs[0..split], + .data = limbs[0..split], }; const lower_val = Value.initPayload(&lower_pl.base); try formatIntLiteral(.{ @@ -6976,6 +7399,7 @@ fn formatIntLiteral( .mod = data.mod, }, fmt, options, writer); + if (!int.positive and c_bits > 64 and !use_twos_comp) try writer.writeByte(')'); return writer.writeByte(')'); } diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm index 1ce3a0fb97..88186ff514 100644 Binary files a/stage1/zig1.wasm and b/stage1/zig1.wasm differ diff --git a/test/behavior/align.zig b/test/behavior/align.zig index 780ee06875..8272a852df 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -540,6 +540,7 @@ test "align(N) on functions" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO this is not supported on MSVC // function alignment is a compile error on wasm32/wasm64 if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest; diff --git a/test/behavior/asm.zig b/test/behavior/asm.zig index e9a01226b1..f041963494 100644 --- a/test/behavior/asm.zig +++ b/test/behavior/asm.zig @@ -23,6 +23,7 @@ test "module level assembly" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (is_x86_64_linux) { try expect(this_is_my_alias() == 1234); @@ -35,6 +36,7 @@ test "output 
constraint modifiers" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO // This is only testing compilation. var a: u32 = 3; @@ -56,6 +58,7 @@ test "alternative constraints" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO // Make sure we allow commas as a separator for alternative constraints. var a: u32 = 3; @@ -72,6 +75,7 @@ test "sized integer/float in asm input" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO asm volatile ("" : @@ -121,6 +125,7 @@ test "struct/array/union types as input values" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO asm volatile ("" : diff --git a/test/behavior/atomics.zig b/test/behavior/atomics.zig index 094be62bc7..2ca0e9ff15 100644 --- a/test/behavior/atomics.zig +++ b/test/behavior/atomics.zig @@ -250,31 +250,142 @@ test "atomicrmw with ints" { return error.SkipZigTest; } - try testAtomicRmwInt(); - comptime try testAtomicRmwInt(); + try testAtomicRmwInts(); + comptime try testAtomicRmwInts(); } -fn testAtomicRmwInt() 
!void { - var x: u8 = 1; - var res = @atomicRmw(u8, &x, .Xchg, 3, .SeqCst); - try expect(x == 3 and res == 1); - _ = @atomicRmw(u8, &x, .Add, 3, .SeqCst); - try expect(x == 6); - _ = @atomicRmw(u8, &x, .Sub, 1, .SeqCst); - try expect(x == 5); - _ = @atomicRmw(u8, &x, .And, 4, .SeqCst); - try expect(x == 4); - _ = @atomicRmw(u8, &x, .Nand, 4, .SeqCst); - try expect(x == 0xfb); - _ = @atomicRmw(u8, &x, .Or, 6, .SeqCst); - try expect(x == 0xff); - _ = @atomicRmw(u8, &x, .Xor, 2, .SeqCst); - try expect(x == 0xfd); +fn testAtomicRmwInts() !void { + // TODO: Use the max atomic bit size for the target, maybe builtin? + try testAtomicRmwInt(8); - _ = @atomicRmw(u8, &x, .Max, 1, .SeqCst); - try expect(x == 0xfd); - _ = @atomicRmw(u8, &x, .Min, 1, .SeqCst); - try expect(x == 1); + if (builtin.cpu.arch == .x86_64) { + try testAtomicRmwInt(16); + try testAtomicRmwInt(32); + try testAtomicRmwInt(64); + } +} + +fn testAtomicRmwInt(comptime signedness: std.builtin.Signedness, comptime N: usize) !void { + const int = std.meta.Int(signedness, N); + + var x: int = 1; + var res = @atomicRmw(int, &x, .Xchg, 3, .SeqCst); + try expect(x == 3 and res == 1); + + res = @atomicRmw(int, &x, .Add, 3, .SeqCst); + var y: int = 3; + try expect(res == y); + y = y + 3; + try expect(x == y); + + res = @atomicRmw(int, &x, .Sub, 1, .SeqCst); + try expect(res == y); + y = y - 1; + try expect(x == y); + + res = @atomicRmw(int, &x, .And, 4, .SeqCst); + try expect(res == y); + y = y & 4; + try expect(x == y); + + res = @atomicRmw(int, &x, .Nand, 4, .SeqCst); + try expect(res == y); + y = ~(y & 4); + try expect(x == y); + + res = @atomicRmw(int, &x, .Or, 6, .SeqCst); + try expect(res == y); + y = y | 6; + try expect(x == y); + + res = @atomicRmw(int, &x, .Xor, 2, .SeqCst); + try expect(res == y); + y = y ^ 2; + try expect(x == y); + + res = @atomicRmw(int, &x, .Max, 1, .SeqCst); + try expect(res == y); + y = @max(y, 1); + try expect(x == y); + + res = @atomicRmw(int, &x, .Min, 1, .SeqCst); + try 
expect(res == y); + y = @min(y, 1); + try expect(x == y); +} + +test "atomicrmw with 128-bit ints" { + if (builtin.cpu.arch != .x86_64) { + // TODO: Ideally this could use target.atomicPtrAlignment and check for IntTooBig + return error.SkipZigTest; + } + + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + + // TODO "ld.lld: undefined symbol: __sync_lock_test_and_set_16" on -mcpu x86_64 + if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; + + try testAtomicRmwInt128(.unsigned); + comptime try testAtomicRmwInt128(.unsigned); +} + +fn testAtomicRmwInt128(comptime signedness: std.builtin.Signedness) !void { + const int = std.meta.Int(signedness, 128); + + const initial: int = 0xaaaaaaaa_bbbbbbbb_cccccccc_dddddddd; + const replacement: int = 0x00000000_00000005_00000000_00000003; + + var x: int align(16) = initial; + var res = @atomicRmw(int, &x, .Xchg, replacement, .SeqCst); + try expect(x == replacement and res == initial); + + var operator: int = 0x00000001_00000000_20000000_00000000; + res = @atomicRmw(int, &x, .Add, operator, .SeqCst); + var y: int = replacement; + try expect(res == y); + y = y + operator; + try expect(x == y); + + operator = 0x00000000_10000000_00000000_20000000; + res = @atomicRmw(int, &x, .Sub, operator, .SeqCst); + try expect(res == y); + y = y - operator; + try expect(x == y); + + operator = 0x12345678_87654321_12345678_87654321; + res = @atomicRmw(int, &x, .And, operator, .SeqCst); + try expect(res == y); + y = y & operator; + try expect(x == y); + + operator = 0x00000000_10000000_00000000_20000000; + res = @atomicRmw(int, &x, .Nand, operator, .SeqCst); + try expect(res == y); + y = ~(y & operator); + try expect(x == y); + + operator = 0x12340000_56780000_67890000_98760000; + res = @atomicRmw(int, &x, .Or, operator, .SeqCst); + try expect(res == y); + y = y | operator; + try expect(x == y); + + operator = 0x0a0b0c0d_0e0f0102_03040506_0708090a; + res = @atomicRmw(int, &x, .Xor, operator, .SeqCst); 
+ try expect(res == y); + y = y ^ operator; + try expect(x == y); + + operator = 0x00000000_10000000_00000000_20000000; + res = @atomicRmw(int, &x, .Max, operator, .SeqCst); + try expect(res == y); + y = @max(y, operator); + try expect(x == y); + + res = @atomicRmw(int, &x, .Min, operator, .SeqCst); + try expect(res == y); + y = @min(y, operator); + try expect(x == y); } test "atomics with different types" { diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index 6fcef06fc1..442cd02d5a 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -37,6 +37,24 @@ test "truncate to non-power-of-two integers" { try testTrunc(i32, i5, std.math.maxInt(i32), -1); } +test "truncate to non-power-of-two integers from 128-bit" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + + try testTrunc(u128, u1, 0xffffffff_ffffffff_ffffffff_01010101, 0x01); + try testTrunc(u128, u1, 0xffffffff_ffffffff_ffffffff_01010110, 0x00); + try testTrunc(u128, u2, 0xffffffff_ffffffff_ffffffff_01010101, 0x01); + try testTrunc(u128, u2, 0xffffffff_ffffffff_ffffffff_01010102, 0x02); + try testTrunc(i128, i5, -4, -4); + try testTrunc(i128, i5, 4, 4); + try testTrunc(i128, i5, -28, 4); + try testTrunc(i128, i5, 28, -4); + try testTrunc(i128, i5, std.math.maxInt(i128), -1); +} + fn testTrunc(comptime Big: type, comptime Little: type, big: Big, little: Little) !void { try expect(@truncate(Little, big) == little); } diff --git a/test/behavior/int128.zig b/test/behavior/int128.zig index 7da6f7954e..f02795cebe 100644 --- a/test/behavior/int128.zig +++ b/test/behavior/int128.zig @@ -64,11 +64,23 @@ test "int128" { } test "truncate int128" { + if 
(builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - var buff: u128 = maxInt(u128); - try expect(@truncate(u64, buff) == maxInt(u64)); + { + var buff: u128 = maxInt(u128); + try expect(@truncate(u64, buff) == maxInt(u64)); + try expect(@truncate(u90, buff) == maxInt(u90)); + try expect(@truncate(u128, buff) == maxInt(u128)); + } + + { + var buff: i128 = maxInt(i128); + try expect(@truncate(i64, buff) == -1); + try expect(@truncate(i90, buff) == -1); + try expect(@truncate(i128, buff) == maxInt(i128)); + } } diff --git a/test/behavior/math.zig b/test/behavior/math.zig index d6955b0592..2257a116b7 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -377,6 +377,28 @@ fn testBinaryNot(x: u16) !void { try expect(~x == 0b0101010101010101); } +test "binary not 128-bit" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + + try expect(comptime x: { + break :x ~@as(u128, 0x55555555_55555555_55555555_55555555) == 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa; + }); + try expect(comptime x: { + break :x ~@as(i128, 0x55555555_55555555_55555555_55555555) == @bitCast(i128, @as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa)); + }); + + try testBinaryNot128(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa); + try testBinaryNot128(i128, @bitCast(i128, @as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa))); +} + +fn 
testBinaryNot128(comptime Type: type, x: Type) !void { + try expect(~x == @as(Type, 0x55555555_55555555_55555555_55555555)); +} + test "division" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO @@ -632,10 +654,24 @@ test "128-bit multiplication" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - var a: i128 = 3; - var b: i128 = 2; - var c = a * b; - try expect(c == 6); + { + var a: i128 = 3; + var b: i128 = 2; + var c = a * b; + try expect(c == 6); + + a = -3; + b = 2; + c = a * b; + try expect(c == -6); + } + + { + var a: u128 = 0xffffffffffffffff; + var b: u128 = 100; + var c = a * b; + try expect(c == 0x63ffffffffffffff9c); + } } test "@addWithOverflow" {