Merge pull request #14002 from kcbanner/cbe_msvc_compatibility

CBE: MSVC-compatible code generation, and fixes to get behaviour tests passing and zig2.c building
Andrew Kelley 2023-01-02 16:11:17 -05:00 committed by GitHub
commit 4c1007fc04
16 changed files with 1510 additions and 322 deletions


@@ -7,21 +7,8 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
if (builtin.os.tag == .windows) {
switch (arch) {
.x86 => {
@export(__divti3, .{ .name = "__divti3", .linkage = common.linkage, .visibility = common.visibility });
},
.x86_64 => {
// The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
// that LLVM expects compiler-rt to have.
@export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = common.linkage, .visibility = common.visibility });
},
else => {},
}
if (arch.isAARCH64()) {
@export(__divti3, .{ .name = "__divti3", .linkage = common.linkage, .visibility = common.visibility });
}
if (common.want_windows_v2u64_abi) {
@export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = common.linkage, .visibility = common.visibility });
} else {
@export(__divti3, .{ .name = "__divti3", .linkage = common.linkage, .visibility = common.visibility });
}
@@ -31,7 +18,7 @@ pub fn __divti3(a: i128, b: i128) callconv(.C) i128 {
return div(a, b);
}
const v128 = @import("std").meta.Vector(2, u64);
const v128 = @Vector(2, u64);
fn __divti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
return @bitCast(v128, div(@bitCast(i128, a), @bitCast(i128, b)));


@@ -16,7 +16,7 @@ pub fn __fixunshfti(a: f16) callconv(.C) u128 {
return floatToInt(u128, a);
}
const v2u64 = @import("std").meta.Vector(2, u64);
const v2u64 = @Vector(2, u64);
fn __fixunshfti_windows_x86_64(a: f16) callconv(.C) v2u64 {
return @bitCast(v2u64, floatToInt(u128, a));


@@ -129,6 +129,8 @@ pub fn __umodei4(r_p: [*c]u32, u_p: [*c]const u32, v_p: [*c]const u32, bits: usi
}
test "__udivei4/__umodei4" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const RndGen = std.rand.DefaultPrng;
var rnd = RndGen.init(42);
var i: usize = 10000;


@@ -1677,6 +1677,40 @@ pub const Mutable = struct {
y.shiftRight(y.toConst(), norm_shift);
}
/// If a is positive, this passes through to truncate.
/// If a is negative, then r is set to positive with the bit pattern ~(a - 1).
///
/// Asserts `r` has enough storage to store the result.
/// The upper bound is `calcTwosCompLimbCount(a.len)`.
pub fn convertToTwosComplement(r: *Mutable, a: Const, signedness: Signedness, bit_count: usize) void {
if (a.positive) {
r.truncate(a, signedness, bit_count);
return;
}
const req_limbs = calcTwosCompLimbCount(bit_count);
if (req_limbs == 0 or a.eqZero()) {
r.set(0);
return;
}
const bit = @truncate(Log2Limb, bit_count - 1);
const signmask = @as(Limb, 1) << bit;
const mask = (signmask << 1) -% 1;
r.addScalar(a.abs(), -1);
if (req_limbs > r.len) {
mem.set(Limb, r.limbs[r.len..req_limbs], 0);
}
assert(r.limbs.len >= req_limbs);
r.len = req_limbs;
llnot(r.limbs[0..r.len]);
r.limbs[r.len - 1] &= mask;
r.normalize(r.len);
}
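For a single word the doc comment above is the entire algorithm: take the magnitude of a negative value, subtract one, complement, and mask to the requested width. A standalone C sketch of that transformation (names are illustrative, not part of the diff):

#include <assert.h>
#include <stdint.h>

/* Two's-complement bit pattern of -magnitude, truncated to `bits`:
   ~(magnitude - 1) under the bit mask. */
static uint64_t twos_complement(uint64_t magnitude, unsigned bits) {
    uint64_t mask = (bits == 64) ? UINT64_MAX : ((UINT64_C(1) << bits) - 1);
    return ~(magnitude - 1) & mask;
}

int main(void) {
    assert(twos_complement(5, 8) == 0xFB); /* -5 as an i8 */
    assert(twos_complement(1, 8) == 0xFF); /* -1 as an i8 */
    return 0;
}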
/// Truncate an integer to a number of bits, following 2s-complement semantics.
/// r may alias a.
///


@@ -3771,7 +3771,7 @@ pub fn doNotOptimizeAway(val: anytype) void {
.Bool => doNotOptimizeAway(@boolToInt(val)),
.Int => {
const bits = t.Int.bits;
if (bits <= max_gp_register_bits) {
if (bits <= max_gp_register_bits and builtin.zig_backend != .stage2_c) {
const val2 = @as(
std.meta.Int(t.Int.signedness, @max(8, std.math.ceilPowerOfTwoAssert(u16, bits))),
val,
@@ -3783,18 +3783,24 @@ pub fn doNotOptimizeAway(val: anytype) void {
} else doNotOptimizeAway(&val);
},
.Float => {
if (t.Float.bits == 32 or t.Float.bits == 64) {
if ((t.Float.bits == 32 or t.Float.bits == 64) and builtin.zig_backend != .stage2_c) {
asm volatile (""
:
: [val] "rm" (val),
);
} else doNotOptimizeAway(&val);
},
.Pointer => asm volatile (""
:
: [val] "m" (val),
: "memory"
),
.Pointer => {
if (builtin.zig_backend == .stage2_c) {
doNotOptimizeAwayC(val);
} else {
asm volatile (""
:
: [val] "m" (val),
: "memory"
);
}
},
.Array => {
if (t.Array.len * @sizeOf(t.Array.child) <= 64) {
for (val) |v| doNotOptimizeAway(v);
@@ -3804,6 +3810,16 @@ pub fn doNotOptimizeAway(val: anytype) void {
}
}
/// .stage2_c doesn't support asm blocks yet, so use volatile stores instead
var deopt_target: if (builtin.zig_backend == .stage2_c) u8 else void = undefined;
fn doNotOptimizeAwayC(ptr: anytype) void {
const dest = @ptrCast(*volatile u8, &deopt_target);
for (asBytes(ptr)) |b| {
dest.* = b;
}
dest.* = 0;
}
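The volatile-store trick is not Zig-specific; a minimal C sketch of the same idea, with illustrative names:

#include <stddef.h>

static volatile unsigned char deopt_sink;

/* Route every byte of *ptr through a volatile store; the compiler must
   assume each store is observable, so the value stays materialized. */
static void do_not_optimize_away(const void *ptr, size_t len) {
    const unsigned char *bytes = (const unsigned char *)ptr;
    for (size_t i = 0; i < len; i++) deopt_sink = bytes[i];
}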
test "doNotOptimizeAway" {
comptime doNotOptimizeAway("test");


@@ -1776,16 +1776,26 @@ pub fn UnlockFile(
}
}
/// This is a workaround for the C backend until zig has the ability to put
/// C code in inline assembly.
extern fn zig_x86_64_windows_teb() callconv(.C) *anyopaque;
pub fn teb() *TEB {
return switch (native_arch) {
.x86 => asm volatile (
\\ movl %%fs:0x18, %[ptr]
: [ptr] "=r" (-> *TEB),
),
.x86_64 => asm volatile (
\\ movq %%gs:0x30, %[ptr]
: [ptr] "=r" (-> *TEB),
),
.x86_64 => blk: {
if (builtin.zig_backend == .stage2_c) {
break :blk @ptrCast(*TEB, @alignCast(@alignOf(TEB), zig_x86_64_windows_teb()));
} else {
break :blk asm volatile (
\\ movq %%gs:0x30, %[ptr]
: [ptr] "=r" (-> *TEB),
);
}
},
.aarch64 => asm volatile (
\\ mov %[ptr], x18
: [ptr] "=r" (-> *TEB),
@@ -3455,6 +3465,21 @@ pub const ASSEMBLY_STORAGE_MAP = opaque {};
pub const FLS_CALLBACK_INFO = opaque {};
pub const RTL_BITMAP = opaque {};
pub const KAFFINITY = usize;
pub const KPRIORITY = i32;
pub const CLIENT_ID = extern struct {
UniqueProcess: HANDLE,
UniqueThread: HANDLE,
};
pub const THREAD_BASIC_INFORMATION = extern struct {
ExitStatus: NTSTATUS,
TebBaseAddress: PVOID,
ClientId: CLIENT_ID,
AffinityMask: KAFFINITY,
Priority: KPRIORITY,
BasePriority: KPRIORITY,
};
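THREAD_BASIC_INFORMATION mirrors the layout filled in by ntdll's NtQueryInformationThread (information class 0). A hedged C sketch of how it is typically queried; the underscore-suffixed names are local stand-ins, not from the diff:

#include <windows.h>
#include <stdio.h>

typedef struct { HANDLE UniqueProcess; HANDLE UniqueThread; } CLIENT_ID_;
typedef struct {
    LONG       ExitStatus;
    PVOID      TebBaseAddress;
    CLIENT_ID_ ClientId;
    ULONG_PTR  AffinityMask;
    LONG       Priority;
    LONG       BasePriority;
} THREAD_BASIC_INFORMATION_;

typedef LONG (WINAPI *NtQueryInformationThread_t)(HANDLE, ULONG, PVOID, ULONG, PULONG);

int main(void) {
    NtQueryInformationThread_t query = (NtQueryInformationThread_t)GetProcAddress(
        GetModuleHandleA("ntdll.dll"), "NtQueryInformationThread");
    THREAD_BASIC_INFORMATION_ info;
    ULONG len;
    if (query && query(GetCurrentThread(), 0 /* ThreadBasicInformation */,
                       &info, sizeof(info), &len) == 0)
        printf("TEB at %p\n", info.TebBaseAddress);
    return 0;
}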
pub const TEB = extern struct {
Reserved1: [12]PVOID,


@@ -1,4 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
const Target = std.Target;
const CrossTarget = std.zig.CrossTarget;
@@ -527,25 +528,43 @@ const CpuidLeaf = packed struct {
edx: u32,
};
/// This is a workaround for the C backend until zig has the ability to put
/// C code in inline assembly.
extern fn zig_x86_cpuid(leaf_id: u32, subid: u32, eax: *u32, ebx: *u32, ecx: *u32, edx: *u32) callconv(.C) void;
fn cpuid(leaf_id: u32, subid: u32) CpuidLeaf {
// valid for both x86 and x86_64
var eax: u32 = undefined;
var ebx: u32 = undefined;
var ecx: u32 = undefined;
var edx: u32 = undefined;
asm volatile ("cpuid"
: [_] "={eax}" (eax),
[_] "={ebx}" (ebx),
[_] "={ecx}" (ecx),
[_] "={edx}" (edx),
: [_] "{eax}" (leaf_id),
[_] "{ecx}" (subid),
);
if (builtin.zig_backend == .stage2_c) {
zig_x86_cpuid(leaf_id, subid, &eax, &ebx, &ecx, &edx);
} else {
asm volatile ("cpuid"
: [_] "={eax}" (eax),
[_] "={ebx}" (ebx),
[_] "={ecx}" (ecx),
[_] "={edx}" (edx),
: [_] "{eax}" (leaf_id),
[_] "{ecx}" (subid),
);
}
return .{ .eax = eax, .ebx = ebx, .ecx = ecx, .edx = edx };
}
/// This is a workaround for the C backend until zig has the ability to put
/// C code in inline assembly.
extern fn zig_x86_get_xcr0() callconv(.C) u32;
// Read control register 0 (XCR0). Used to detect features such as AVX.
fn getXCR0() u32 {
if (builtin.zig_backend == .stage2_c) {
return zig_x86_get_xcr0();
}
return asm volatile (
\\ xor %%ecx, %%ecx
\\ xgetbv
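Both shims are implemented in lib/zig.h later in this diff. A usage sketch in C, assuming the shims are linked in: cpuid leaf 0 returns the vendor string (in ebx, edx, ecx order), and AVX register state is usable when XCR0 has the XMM and YMM bits (mask 0x6) set.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

void zig_x86_cpuid(uint32_t leaf_id, uint32_t subid,
                   uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
uint32_t zig_x86_get_xcr0(void);

int main(void) {
    uint32_t eax, ebx, ecx, edx;
    char vendor[13] = {0};
    zig_x86_cpuid(0, 0, &eax, &ebx, &ecx, &edx);
    memcpy(vendor + 0, &ebx, 4);
    memcpy(vendor + 4, &edx, 4);
    memcpy(vendor + 8, &ecx, 4);
    printf("vendor: %s, AVX state enabled: %d\n",
           vendor, (zig_x86_get_xcr0() & 0x6) == 0x6);
    return 0;
}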

lib/zig.h

@@ -6,6 +6,12 @@
#include <stddef.h>
#include <stdint.h>
#if _MSC_VER
#include <intrin.h>
#elif defined(__i386__) || defined(__x86_64__)
#include <cpuid.h>
#endif
#if !defined(__cplusplus) && __STDC_VERSION__ <= 201710L
#if __STDC_VERSION__ >= 199901L
#include <stdbool.h>
@@ -38,6 +44,12 @@ typedef char bool;
#define zig_threadlocal zig_threadlocal_unavailable
#endif
#if _MSC_VER
#define zig_const_arr
#else
#define zig_const_arr static const
#endif
#if zig_has_attribute(naked) || defined(__GNUC__)
#define zig_naked __attribute__((naked))
#elif defined(_MSC_VER)
@@ -65,7 +77,7 @@ typedef char bool;
#elif zig_has_attribute(aligned)
#define zig_align(alignment) __attribute__((aligned(alignment)))
#elif _MSC_VER
#define zig_align zig_align_unavailable
#define zig_align(alignment) __declspec(align(alignment))
#else
#define zig_align zig_align_unavailable
#endif
@@ -73,7 +85,7 @@ typedef char bool;
#if zig_has_attribute(aligned)
#define zig_align_fn(alignment) __attribute__((aligned(alignment)))
#elif _MSC_VER
#define zig_align_fn zig_align_fn_unavailable
#define zig_align_fn(alignment)
#else
#define zig_align_fn zig_align_fn_unavailable
#endif
@@ -92,6 +104,9 @@ typedef char bool;
#if zig_has_attribute(alias)
#define zig_export(sig, symbol, name) zig_extern sig __attribute__((alias(symbol)))
#elif _MSC_VER
#define zig_export(sig, symbol, name) sig;\
__pragma(comment(linker, "/alternatename:" name "=" symbol ))
#else
#define zig_export(sig, symbol, name) __asm(name " = " symbol)
#endif
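The MSVC branch delegates aliasing to the linker: /alternatename resolves an otherwise-undefined symbol to another one. A standalone sketch, assuming x64 where C symbol names are unmangled (the names are illustrative):

#include <stdio.h>

int the_impl(int x) { return x + 1; }
int the_alias(int x); /* declared but never defined */
#pragma comment(linker, "/alternatename:the_alias=the_impl")

int main(void) {
    printf("%d\n", the_alias(41)); /* 42: the linker binds the_alias to the_impl */
    return 0;
}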
@@ -136,22 +151,25 @@ typedef char bool;
#define zig_wasm_memory_grow(index, delta) zig_unimplemented()
#endif
#define zig_concat(lhs, rhs) lhs##rhs
#define zig_expand_concat(lhs, rhs) zig_concat(lhs, rhs)
#if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
#include <stdatomic.h>
#define zig_atomic(type) _Atomic(type)
#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) atomic_compare_exchange_strong_explicit(obj, &(expected), desired, succ, fail)
#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) atomic_compare_exchange_weak_explicit (obj, &(expected), desired, succ, fail)
#define zig_atomicrmw_xchg(obj, arg, order) atomic_exchange_explicit (obj, arg, order)
#define zig_atomicrmw_add(obj, arg, order) atomic_fetch_add_explicit (obj, arg, order)
#define zig_atomicrmw_sub(obj, arg, order) atomic_fetch_sub_explicit (obj, arg, order)
#define zig_atomicrmw_or(obj, arg, order) atomic_fetch_or_explicit (obj, arg, order)
#define zig_atomicrmw_xor(obj, arg, order) atomic_fetch_xor_explicit (obj, arg, order)
#define zig_atomicrmw_and(obj, arg, order) atomic_fetch_and_explicit (obj, arg, order)
#define zig_atomicrmw_nand(obj, arg, order) __atomic_fetch_nand (obj, arg, order)
#define zig_atomicrmw_min(obj, arg, order) __atomic_fetch_min (obj, arg, order)
#define zig_atomicrmw_max(obj, arg, order) __atomic_fetch_max (obj, arg, order)
#define zig_atomic_store(obj, arg, order) atomic_store_explicit (obj, arg, order)
#define zig_atomic_load(obj, order) atomic_load_explicit (obj, order)
#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) atomic_compare_exchange_strong_explicit(obj, &(expected), desired, succ, fail)
#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) atomic_compare_exchange_weak_explicit (obj, &(expected), desired, succ, fail)
#define zig_atomicrmw_xchg(obj, arg, order, type) atomic_exchange_explicit (obj, arg, order)
#define zig_atomicrmw_add(obj, arg, order, type) atomic_fetch_add_explicit (obj, arg, order)
#define zig_atomicrmw_sub(obj, arg, order, type) atomic_fetch_sub_explicit (obj, arg, order)
#define zig_atomicrmw_or(obj, arg, order, type) atomic_fetch_or_explicit (obj, arg, order)
#define zig_atomicrmw_xor(obj, arg, order, type) atomic_fetch_xor_explicit (obj, arg, order)
#define zig_atomicrmw_and(obj, arg, order, type) atomic_fetch_and_explicit (obj, arg, order)
#define zig_atomicrmw_nand(obj, arg, order, type) __atomic_fetch_nand (obj, arg, order)
#define zig_atomicrmw_min(obj, arg, order, type) __atomic_fetch_min (obj, arg, order)
#define zig_atomicrmw_max(obj, arg, order, type) __atomic_fetch_max (obj, arg, order)
#define zig_atomic_store(obj, arg, order, type) atomic_store_explicit (obj, arg, order)
#define zig_atomic_load(obj, order, type) atomic_load_explicit (obj, order)
#define zig_fence(order) atomic_thread_fence(order)
#elif defined(__GNUC__)
#define memory_order_relaxed __ATOMIC_RELAXED
@@ -161,20 +179,43 @@ typedef char bool;
#define memory_order_acq_rel __ATOMIC_ACQ_REL
#define memory_order_seq_cst __ATOMIC_SEQ_CST
#define zig_atomic(type) type
#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, false, succ, fail)
#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, true , succ, fail)
#define zig_atomicrmw_xchg(obj, arg, order) __atomic_exchange_n(obj, arg, order)
#define zig_atomicrmw_add(obj, arg, order) __atomic_fetch_add (obj, arg, order)
#define zig_atomicrmw_sub(obj, arg, order) __atomic_fetch_sub (obj, arg, order)
#define zig_atomicrmw_or(obj, arg, order) __atomic_fetch_or (obj, arg, order)
#define zig_atomicrmw_xor(obj, arg, order) __atomic_fetch_xor (obj, arg, order)
#define zig_atomicrmw_and(obj, arg, order) __atomic_fetch_and (obj, arg, order)
#define zig_atomicrmw_nand(obj, arg, order) __atomic_fetch_nand(obj, arg, order)
#define zig_atomicrmw_min(obj, arg, order) __atomic_fetch_min (obj, arg, order)
#define zig_atomicrmw_max(obj, arg, order) __atomic_fetch_max (obj, arg, order)
#define zig_atomic_store(obj, arg, order) __atomic_store_n (obj, arg, order)
#define zig_atomic_load(obj, order) __atomic_load_n (obj, order)
#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) __atomic_compare_exchange_n(obj, &(expected), desired, false, succ, fail)
#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) __atomic_compare_exchange_n(obj, &(expected), desired, true , succ, fail)
#define zig_atomicrmw_xchg(obj, arg, order, type) __atomic_exchange_n(obj, arg, order)
#define zig_atomicrmw_add(obj, arg, order, type) __atomic_fetch_add (obj, arg, order)
#define zig_atomicrmw_sub(obj, arg, order, type) __atomic_fetch_sub (obj, arg, order)
#define zig_atomicrmw_or(obj, arg, order, type) __atomic_fetch_or (obj, arg, order)
#define zig_atomicrmw_xor(obj, arg, order, type) __atomic_fetch_xor (obj, arg, order)
#define zig_atomicrmw_and(obj, arg, order, type) __atomic_fetch_and (obj, arg, order)
#define zig_atomicrmw_nand(obj, arg, order, type) __atomic_fetch_nand(obj, arg, order)
#define zig_atomicrmw_min(obj, arg, order, type) __atomic_fetch_min (obj, arg, order)
#define zig_atomicrmw_max(obj, arg, order, type) __atomic_fetch_max (obj, arg, order)
#define zig_atomic_store(obj, arg, order, type) __atomic_store_n (obj, arg, order)
#define zig_atomic_load(obj, order, type) __atomic_load_n (obj, order)
#define zig_fence(order) __atomic_thread_fence(order)
#elif _MSC_VER && (_M_IX86 || _M_X64)
#define memory_order_relaxed 0
#define memory_order_consume 1
#define memory_order_acquire 2
#define memory_order_release 3
#define memory_order_acq_rel 4
#define memory_order_seq_cst 5
#define zig_atomic(type) type
#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) zig_expand_concat(zig_msvc_cmpxchg_, type)(obj, &(expected), desired)
#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) zig_cmpxchg_strong(obj, expected, desired, succ, fail, type)
#define zig_atomicrmw_xchg(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_xchg_, type)(obj, arg)
#define zig_atomicrmw_add(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_add_, type)(obj, arg)
#define zig_atomicrmw_sub(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_sub_, type)(obj, arg)
#define zig_atomicrmw_or(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_or_, type)(obj, arg)
#define zig_atomicrmw_xor(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_xor_, type)(obj, arg)
#define zig_atomicrmw_and(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_and_, type)(obj, arg)
#define zig_atomicrmw_nand(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_nand_, type)(obj, arg)
#define zig_atomicrmw_min(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_min_, type)(obj, arg)
#define zig_atomicrmw_max(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_max_, type)(obj, arg)
#define zig_atomic_store(obj, arg, order, type) zig_expand_concat(zig_msvc_atomic_store_, type)(obj, arg)
#define zig_atomic_load(obj, order, type) zig_expand_concat(zig_msvc_atomic_load_, type)(obj)
#define zig_fence(order) __faststorefence()
// TODO: _MSC_VER && (_M_ARM || _M_ARM64)
#else
#define memory_order_relaxed 0
#define memory_order_consume 1
@@ -183,19 +224,19 @@ typedef char bool;
#define memory_order_acq_rel 4
#define memory_order_seq_cst 5
#define zig_atomic(type) type
#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) zig_unimplemented()
#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) zig_unimplemented()
#define zig_atomicrmw_xchg(obj, arg, order) zig_unimplemented()
#define zig_atomicrmw_add(obj, arg, order) zig_unimplemented()
#define zig_atomicrmw_sub(obj, arg, order) zig_unimplemented()
#define zig_atomicrmw_or(obj, arg, order) zig_unimplemented()
#define zig_atomicrmw_xor(obj, arg, order) zig_unimplemented()
#define zig_atomicrmw_and(obj, arg, order) zig_unimplemented()
#define zig_atomicrmw_nand(obj, arg, order) zig_unimplemented()
#define zig_atomicrmw_min(obj, arg, order) zig_unimplemented()
#define zig_atomicrmw_max(obj, arg, order) zig_unimplemented()
#define zig_atomic_store(obj, arg, order) zig_unimplemented()
#define zig_atomic_load(obj, order) zig_unimplemented()
#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) zig_unimplemented()
#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) zig_unimplemented()
#define zig_atomicrmw_xchg(obj, arg, order, type) zig_unimplemented()
#define zig_atomicrmw_add(obj, arg, order, type) zig_unimplemented()
#define zig_atomicrmw_sub(obj, arg, order, type) zig_unimplemented()
#define zig_atomicrmw_or(obj, arg, order, type) zig_unimplemented()
#define zig_atomicrmw_xor(obj, arg, order, type) zig_unimplemented()
#define zig_atomicrmw_and(obj, arg, order, type) zig_unimplemented()
#define zig_atomicrmw_nand(obj, arg, order, type) zig_unimplemented()
#define zig_atomicrmw_min(obj, arg, order, type) zig_unimplemented()
#define zig_atomicrmw_max(obj, arg, order, type) zig_unimplemented()
#define zig_atomic_store(obj, arg, order, type) zig_unimplemented()
#define zig_atomic_load(obj, order, type) zig_unimplemented()
#define zig_fence(order) zig_unimplemented()
#endif
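The new trailing `type` parameter exists so the MSVC branch can dispatch on operand width; the memory-order argument is dropped there because the Interlocked intrinsics are always sequentially consistent. For comparison, what a u32 add looks like after the C11 branch expands, as a standalone sketch:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* zig_atomicrmw_add(&x, 1, memory_order_seq_cst, u32) under C11: */
    _Atomic uint32_t x = 5;
    uint32_t prev = atomic_fetch_add_explicit(&x, 1, memory_order_seq_cst);
    /* under MSVC the same call site becomes zig_msvc_atomicrmw_add_u32(&x, 1) */
    printf("%u -> %u\n", (unsigned)prev,
           (unsigned)atomic_load_explicit(&x, memory_order_seq_cst));
    return 0;
}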
@@ -209,9 +250,6 @@ typedef char bool;
#define zig_noreturn void
#endif
#define zig_concat(lhs, rhs) lhs##rhs
#define zig_expand_concat(lhs, rhs) zig_concat(lhs, rhs)
#define zig_bitSizeOf(T) (CHAR_BIT * sizeof(T))
typedef uintptr_t zig_usize;
@@ -1141,6 +1179,8 @@ typedef signed __int128 zig_i128;
#define zig_as_u128(hi, lo) ((zig_u128)(hi)<<64|(lo))
#define zig_as_i128(hi, lo) ((zig_i128)zig_as_u128(hi, lo))
#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo)
#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo)
#define zig_hi_u128(val) ((zig_u64)((val) >> 64))
#define zig_lo_u128(val) ((zig_u64)((val) >> 0))
#define zig_hi_i128(val) ((zig_i64)((val) >> 64))
@@ -1168,6 +1208,8 @@ typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128;
#define zig_as_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) })
#define zig_as_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) })
#define zig_as_constant_u128(hi, lo) { .h##i = (hi), .l##o = (lo) }
#define zig_as_constant_i128(hi, lo) { .h##i = (hi), .l##o = (lo) }
#define zig_hi_u128(val) ((val).hi)
#define zig_lo_u128(val) ((val).lo)
#define zig_hi_i128(val) ((val).hi)
@@ -1289,51 +1331,79 @@ static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) {
}
static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) {
if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.hi << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 };
return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs };
if (rhs == zig_as_u8(0)) return lhs;
if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - zig_as_u8(64)) };
return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (zig_as_u8(64) - rhs) | lhs.lo >> rhs };
}
static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) {
if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.hi << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 };
if (rhs == zig_as_u8(0)) return lhs;
if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.lo << rhs, .lo = zig_minInt_u64 };
return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs };
}
static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) {
if (rhs >= zig_as_u8(64)) return (zig_i128){ .hi = lhs.hi << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 };
if (rhs == zig_as_u8(0)) return lhs;
if (rhs >= zig_as_u8(64)) return (zig_i128){ .hi = lhs.lo << rhs, .lo = zig_minInt_u64 };
return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs };
}
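The added rhs == 0 guards matter because the fallback path otherwise computes a shift by 64, which is undefined behavior on a 64-bit operand in C. The corrected right shift in a standalone sketch (struct name is illustrative):

#include <assert.h>
#include <stdint.h>

typedef struct { uint64_t hi, lo; } u128_s;

/* Double-word logical right shift: bits leaving hi enter lo, and a
   shift of 64 or more moves hi straight into lo. */
static u128_s shr_u128(u128_s v, unsigned rhs) {
    if (rhs == 0) return v;
    if (rhs >= 64) return (u128_s){ .hi = 0, .lo = v.hi >> (rhs - 64) };
    return (u128_s){ .hi = v.hi >> rhs,
                     .lo = (v.hi << (64 - rhs)) | (v.lo >> rhs) };
}

int main(void) {
    u128_s v = { .hi = 0x1, .lo = 0x0 }; /* 2^64 */
    u128_s r = shr_u128(v, 4);           /* 2^60 */
    assert(r.hi == 0 && r.lo == 0x1000000000000000ull);
    return 0;
}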
static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) {
zig_u128 res;
res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, zig_maxInt_u64);
res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, 64);
return res;
}
static inline zig_i128 zig_add_i128(zig_i128 lhs, zig_i128 rhs) {
zig_i128 res;
res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, zig_maxInt_u64);
res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, 64);
return res;
}
static inline zig_u128 zig_sub_u128(zig_u128 lhs, zig_u128 rhs) {
zig_u128 res;
res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, zig_maxInt_u64);
res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, 64);
return res;
}
static inline zig_i128 zig_sub_i128(zig_i128 lhs, zig_i128 rhs) {
zig_i128 res;
res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, zig_maxInt_u64);
res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, 64);
return res;
}
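In these helpers zig_addo_u64 doubles as an add-with-carry: its overflow flag is exactly the carry into the high word. The same shape in plain C, with illustrative names:

#include <assert.h>
#include <stdint.h>

typedef struct { uint64_t hi, lo; } u128_s;

/* Schoolbook add: low halves first, then propagate the carry. */
static u128_s add_u128(u128_s a, u128_s b) {
    u128_s r;
    r.lo = a.lo + b.lo;
    r.hi = a.hi + b.hi + (r.lo < a.lo); /* carry out of the low word */
    return r;
}

int main(void) {
    u128_s r = add_u128((u128_s){ 0, UINT64_MAX }, (u128_s){ 0, 1 });
    assert(r.hi == 1 && r.lo == 0); /* UINT64_MAX + 1 carries into hi */
    return 0;
}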
static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) {
return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), (((lhs.hi ^ rhs.hi) & zig_rem_i128(lhs, rhs).hi) < zig_as_i64(0)) ? zig_as_i128(0, 1) : zig_as_i128(0, 0));
zig_extern zig_i128 __multi3(zig_i128 lhs, zig_i128 rhs);
static zig_i128 zig_mul_i128(zig_i128 lhs, zig_i128 rhs) {
return __multi3(lhs, rhs);
}
zig_extern zig_u128 __udivti3(zig_u128 lhs, zig_u128 rhs);
static zig_u128 zig_div_trunc_u128(zig_u128 lhs, zig_u128 rhs) {
return __udivti3(lhs, rhs);
};
zig_extern zig_i128 __divti3(zig_i128 lhs, zig_i128 rhs);
static zig_i128 zig_div_trunc_i128(zig_i128 lhs, zig_i128 rhs) {
return __divti3(lhs, rhs);
};
zig_extern zig_u128 __umodti3(zig_u128 lhs, zig_u128 rhs);
static zig_u128 zig_rem_u128(zig_u128 lhs, zig_u128 rhs) {
return __umodti3(lhs, rhs);
}
zig_extern zig_i128 __modti3(zig_i128 lhs, zig_i128 rhs);
static zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) {
return __modti3(lhs, rhs);
}
static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
zig_i128 rem = zig_rem_i128(lhs, rhs);
return rem + (((lhs.hi ^ rhs.hi) & rem.hi) < zig_as_i64(0) ? rhs : zig_as_i128(0, 0));
return zig_add_i128(rem, (((lhs.hi ^ rhs.hi) & rem.hi) < zig_as_i64(0) ? rhs : zig_as_i128(0, 0)));
}
static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) {
return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_as_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_as_i128(0, 0)) < zig_as_i32(0)));
}
#endif /* zig_has_int128 */
@@ -1341,6 +1411,10 @@ static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
#define zig_div_floor_u128 zig_div_trunc_u128
#define zig_mod_u128 zig_rem_u128
static inline zig_u128 zig_nand_u128(zig_u128 lhs, zig_u128 rhs) {
return zig_not_u128(zig_and_u128(lhs, rhs), 128);
}
static inline zig_u128 zig_min_u128(zig_u128 lhs, zig_u128 rhs) {
return zig_cmp_u128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs;
}
@@ -1358,7 +1432,7 @@ static inline zig_i128 zig_max_i128(zig_i128 lhs, zig_i128 rhs) {
}
static inline zig_i128 zig_shr_i128(zig_i128 lhs, zig_u8 rhs) {
zig_i128 sign_mask = zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? -zig_as_i128(0, 1) : zig_as_i128(0, 0);
zig_i128 sign_mask = zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_sub_i128(zig_as_i128(0, 0), zig_as_i128(0, 1)) : zig_as_i128(0, 0);
return zig_xor_i128(zig_bitcast_i128(zig_shr_u128(zig_bitcast_u128(zig_xor_i128(lhs, sign_mask)), rhs)), sign_mask);
}
@@ -1375,7 +1449,7 @@ static inline zig_u128 zig_shlw_u128(zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
}
static inline zig_i128 zig_shlw_i128(zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_shl_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
return zig_wrap_i128(zig_bitcast_i128(zig_shl_u128(zig_bitcast_u128(lhs), rhs)), bits);
}
static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
@@ -1394,6 +1468,17 @@ static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_sub_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
#if _MSC_VER
static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) {
zig_u64 lo_carry;
zig_u64 lo = _umul128(lhs.lo, rhs.lo, &lo_carry);
zig_u64 hi = lhs.hi * rhs.lo + lhs.lo * rhs.hi + lo_carry;
return zig_as_u128(hi, lo);
}
#else
static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs); // TODO
#endif
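_umul128 is an MSVC x64 intrinsic that returns the low 64 bits of a full 64x64 multiply and stores the high 64 bits through its out-pointer; with it, only the cross terms remain. A portable stand-in built from 32-bit halves, as a sketch:

#include <stdint.h>
#include <stdio.h>

/* Full 64x64 -> 128 multiply from four 32x32 partial products. */
static uint64_t umul128_portable(uint64_t a, uint64_t b, uint64_t *hi) {
    uint64_t a_lo = (uint32_t)a, a_hi = a >> 32;
    uint64_t b_lo = (uint32_t)b, b_hi = b >> 32;
    uint64_t p0 = a_lo * b_lo, p1 = a_lo * b_hi;
    uint64_t p2 = a_hi * b_lo, p3 = a_hi * b_hi;
    uint64_t mid = p1 + (p0 >> 32) + (uint32_t)p2; /* cannot overflow */
    *hi = p3 + (mid >> 32) + (p2 >> 32);
    return (mid << 32) | (uint32_t)p0;
}

int main(void) {
    uint64_t hi, lo = umul128_portable(UINT64_MAX, UINT64_MAX, &hi);
    printf("%llx %llx\n", (unsigned long long)hi, (unsigned long long)lo);
    /* fffffffffffffffe 1 */
    return 0;
}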
static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
return zig_wrap_u128(zig_mul_u128(lhs, rhs), bits);
}
@@ -1404,18 +1489,6 @@ static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
#if zig_has_int128
static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
*res = zig_shlw_u128(lhs, rhs, bits);
return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0);
}
static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
*res = zig_shlw_i128(lhs, rhs, bits);
zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - zig_as_u8(1)));
return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != zig_as_i32(0) &&
zig_cmp_i128(zig_and_i128(lhs, mask), mask) != zig_as_i32(0);
}
static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
#if zig_has_builtin(add_overflow)
zig_u128 full_res;
@@ -1496,28 +1569,95 @@ static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_
#else /* zig_has_int128 */
static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs) {
return zig_addo_u64(&res->hi, lhs.hi, rhs.hi, UINT64_MAX) |
zig_addo_u64(&res->hi, res->hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, UINT64_MAX));
static inline bool zig_overflow_u128(bool overflow, zig_u128 full_res, zig_u8 bits) {
return overflow ||
zig_cmp_u128(full_res, zig_minInt(u128, bits)) < zig_as_i32(0) ||
zig_cmp_u128(full_res, zig_maxInt(u128, bits)) > zig_as_i32(0);
}
static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs) {
return zig_subo_u64(&res->hi, lhs.hi, rhs.hi, UINT64_MAX) |
zig_subo_u64(&res->hi, res->hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, UINT64_MAX));
static inline bool zig_overflow_i128(bool overflow, zig_i128 full_res, zig_u8 bits) {
return overflow ||
zig_cmp_i128(full_res, zig_minInt(i128, bits)) < zig_as_i32(0) ||
zig_cmp_i128(full_res, zig_maxInt(i128, bits)) > zig_as_i32(0);
}
static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
zig_u128 full_res;
bool overflow =
zig_addo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) |
zig_addo_u64(&full_res.hi, full_res.hi, zig_addo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64);
*res = zig_wrap_u128(full_res, bits);
return zig_overflow_u128(overflow, full_res, bits);
}
zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
zig_c_int overflow_int;
zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int);
*res = zig_wrap_i128(full_res, bits);
return zig_overflow_i128(overflow_int, full_res, bits);
}
static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
zig_u128 full_res;
bool overflow =
zig_subo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) |
zig_subo_u64(&full_res.hi, full_res.hi, zig_subo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64);
*res = zig_wrap_u128(full_res, bits);
return zig_overflow_u128(overflow, full_res, bits);
}
zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
zig_c_int overflow_int;
zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int);
*res = zig_wrap_i128(full_res, bits);
return zig_overflow_i128(overflow_int, full_res, bits);
}
static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
*res = zig_mulw_u128(lhs, rhs, bits);
return zig_cmp_u128(*res, zig_as_u128(0, 0)) != zig_as_i32(0) &&
zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0);
}
zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
zig_c_int overflow_int;
zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int);
*res = zig_wrap_i128(full_res, bits);
return zig_overflow_i128(overflow_int, full_res, bits);
}
#endif /* zig_has_int128 */
static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
*res = zig_shlw_u128(lhs, rhs, bits);
return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0);
}
static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
*res = zig_shlw_i128(lhs, rhs, bits);
zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - zig_as_u8(1)));
return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != zig_as_i32(0) &&
zig_cmp_i128(zig_and_i128(lhs, mask), mask) != zig_as_i32(0);
}
static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
zig_u128 res;
if (zig_cmp_u128(rhs, zig_as_u128(0, bits)) >= zig_as_i32(0))
return zig_cmp_u128(lhs, zig_as_u128(0, 0)) != zig_as_i32(0) ? zig_maxInt(u128, bits) : lhs;
#if zig_has_int128
return zig_shlo_u128(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u128, bits) : res;
#else
return zig_shlo_u128(&res, lhs, (zig_u8)rhs.lo, bits) ? zig_maxInt(u128, bits) : res;
#endif
}
static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
zig_i128 res;
if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_as_u128(0, bits)) < zig_as_i32(0) && !zig_shlo_i128(&res, lhs, rhs, bits)) return res;
if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_as_u128(0, bits)) < zig_as_i32(0) && !zig_shlo_i128(&res, lhs, zig_lo_i128(rhs), bits)) return res;
return zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
}
@@ -1555,8 +1695,9 @@ static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
}
static inline zig_u8 zig_clz_u128(zig_u128 val, zig_u8 bits) {
if (bits <= zig_as_u8(64)) return zig_clz_u64(zig_lo_u128(val), bits);
if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - zig_as_u8(64));
return zig_clz_u64(zig_lo_u128(val), zig_as_u8(64)) + zig_as_u8(64);
return zig_clz_u64(zig_lo_u128(val), zig_as_u8(64)) + (bits - zig_as_u8(64));
}
static inline zig_u8 zig_clz_i128(zig_i128 val, zig_u8 bits) {
@@ -1593,7 +1734,7 @@ static inline zig_u128 zig_byte_swap_u128(zig_u128 val, zig_u8 bits) {
}
static inline zig_i128 zig_byte_swap_i128(zig_i128 val, zig_u8 bits) {
return zig_byte_swap_u128(zig_bitcast_u128(val), bits);
return zig_bitcast_i128(zig_byte_swap_u128(zig_bitcast_u128(val), bits));
}
static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, zig_u8 bits) {
@@ -1603,15 +1744,47 @@ static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, zig_u8 bits) {
}
static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) {
return zig_bit_reverse_u128(zig_bitcast_u128(val), bits);
return zig_bitcast_i128(zig_bit_reverse_u128(zig_bitcast_u128(val), bits));
}
/* ========================= Floating Point Support ========================= */
#if _MSC_VER
#define zig_msvc_flt_inf ((double)(1e+300 * 1e+300))
#define zig_msvc_flt_inff ((float)(1e+300 * 1e+300))
#define zig_msvc_flt_infl ((long double)(1e+300 * 1e+300))
#define zig_msvc_flt_nan ((double)(zig_msvc_flt_inf * 0.f))
#define zig_msvc_flt_nanf ((float)(zig_msvc_flt_inf * 0.f))
#define zig_msvc_flt_nanl ((long double)(zig_msvc_flt_inf * 0.f))
#define __builtin_nan(str) nan(str)
#define __builtin_nanf(str) nanf(str)
#define __builtin_nanl(str) nanl(str)
#define __builtin_inf() zig_msvc_flt_inf
#define __builtin_inff() zig_msvc_flt_inff
#define __builtin_infl() zig_msvc_flt_infl
#endif
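These constants rely on constant folding: 1e300 * 1e300 overflows double's range and folds to +infinity, and infinity times zero folds to NaN; MSVC accepts both in initializers where the GCC-style __builtin_inf and __builtin_nan families are unavailable. A quick check:

#include <stdio.h>

int main(void) {
    const double inf = 1e300 * 1e300; /* overflows to +infinity */
    const double nan_ = inf * 0.0;    /* inf * 0 is NaN */
    printf("%f %f\n", inf, nan_);     /* prints inf and nan (spelling varies) */
    return 0;
}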
#define zig_has_float_builtins (zig_has_builtin(nan) && zig_has_builtin(nans) && zig_has_builtin(inf))
#if zig_has_float_builtins
#define zig_as_special_f16(sign, name, arg, repr) sign zig_as_f16(__builtin_##name, )(arg)
#define zig_as_special_f32(sign, name, arg, repr) sign zig_as_f32(__builtin_##name, )(arg)
#define zig_as_special_f64(sign, name, arg, repr) sign zig_as_f64(__builtin_##name, )(arg)
#define zig_as_special_f80(sign, name, arg, repr) sign zig_as_f80(__builtin_##name, )(arg)
#define zig_as_special_f128(sign, name, arg, repr) sign zig_as_f128(__builtin_##name, )(arg)
#define zig_as_special_c_longdouble(sign, name, arg, repr) sign zig_as_c_longdouble(__builtin_##name, )(arg)
#else
#define zig_as_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr)
#define zig_as_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr)
#define zig_as_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr)
#define zig_as_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr)
#define zig_as_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr)
#define zig_as_special_c_longdouble(sign, name, arg, repr) zig_float_from_repr_c_longdouble(repr)
#endif
#define zig_has_f16 1
#define zig_bitSizeOf_f16 16
#define zig_libc_name_f16(name) __##name##h
#define zig_as_special_f16(sign, name, arg, repr) sign zig_as_f16(__builtin_##name, )(arg)
#define zig_as_special_constant_f16(sign, name, arg, repr) zig_as_special_f16(sign, name, arg, repr)
#if FLT_MANT_DIG == 11
typedef float zig_f16;
#define zig_as_f16(fp, repr) fp##f
@@ -1636,12 +1809,18 @@ typedef zig_i16 zig_f16;
#define zig_as_f16(fp, repr) repr
#undef zig_as_special_f16
#define zig_as_special_f16(sign, name, arg, repr) repr
#undef zig_as_special_constant_f16
#define zig_as_special_constant_f16(sign, name, arg, repr) repr
#endif
#define zig_has_f32 1
#define zig_bitSizeOf_f32 32
#define zig_libc_name_f32(name) name##f
#define zig_as_special_f32(sign, name, arg, repr) sign zig_as_f32(__builtin_##name, )(arg)
#if _MSC_VER
#define zig_as_special_constant_f32(sign, name, arg, repr) sign zig_as_f32(zig_msvc_flt_##name, )
#else
#define zig_as_special_constant_f32(sign, name, arg, repr) zig_as_special_f32(sign, name, arg, repr)
#endif
#if FLT_MANT_DIG == 24
typedef float zig_f32;
#define zig_as_f32(fp, repr) fp##f
@@ -1663,12 +1842,18 @@ typedef zig_i32 zig_f32;
#define zig_as_f32(fp, repr) repr
#undef zig_as_special_f32
#define zig_as_special_f32(sign, name, arg, repr) repr
#undef zig_as_special_constant_f32
#define zig_as_special_constant_f32(sign, name, arg, repr) repr
#endif
#define zig_has_f64 1
#define zig_bitSizeOf_f64 64
#define zig_libc_name_f64(name) name
#define zig_as_special_f64(sign, name, arg, repr) sign zig_as_f64(__builtin_##name, )(arg)
#if _MSC_VER
#define zig_as_special_constant_f64(sign, name, arg, repr) sign zig_as_f64(zig_msvc_flt_##name, )
#else
#define zig_as_special_constant_f64(sign, name, arg, repr) zig_as_special_f64(sign, name, arg, repr)
#endif
#if FLT_MANT_DIG == 53
typedef float zig_f64;
#define zig_as_f64(fp, repr) fp##f
@@ -1693,12 +1878,14 @@ typedef zig_i64 zig_f64;
#define zig_as_f64(fp, repr) repr
#undef zig_as_special_f64
#define zig_as_special_f64(sign, name, arg, repr) repr
#undef zig_as_special_constant_f64
#define zig_as_special_constant_f64(sign, name, arg, repr) repr
#endif
#define zig_has_f80 1
#define zig_bitSizeOf_f80 80
#define zig_libc_name_f80(name) __##name##x
#define zig_as_special_f80(sign, name, arg, repr) sign zig_as_f80(__builtin_##name, )(arg)
#define zig_as_special_constant_f80(sign, name, arg, repr) zig_as_special_f80(sign, name, arg, repr)
#if FLT_MANT_DIG == 64
typedef float zig_f80;
#define zig_as_f80(fp, repr) fp##f
@@ -1726,12 +1913,14 @@ typedef zig_i128 zig_f80;
#define zig_as_f80(fp, repr) repr
#undef zig_as_special_f80
#define zig_as_special_f80(sign, name, arg, repr) repr
#undef zig_as_special_constant_f80
#define zig_as_special_constant_f80(sign, name, arg, repr) repr
#endif
#define zig_has_f128 1
#define zig_bitSizeOf_f128 128
#define zig_libc_name_f128(name) name##q
#define zig_as_special_f128(sign, name, arg, repr) sign zig_as_f128(__builtin_##name, )(arg)
#define zig_as_special_constant_f128(sign, name, arg, repr) zig_as_special_f128(sign, name, arg, repr)
#if FLT_MANT_DIG == 113
typedef float zig_f128;
#define zig_as_f128(fp, repr) fp##f
@@ -1761,13 +1950,57 @@ typedef zig_i128 zig_f128;
#define zig_as_f128(fp, repr) repr
#undef zig_as_special_f128
#define zig_as_special_f128(sign, name, arg, repr) repr
#undef zig_as_special_constant_f128
#define zig_as_special_constant_f128(sign, name, arg, repr) repr
#endif
#define zig_has_c_longdouble 1
#define zig_libc_name_c_longdouble(name) name##l
#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) zig_as_special_c_longdouble(sign, name, arg, repr)
#ifdef zig_bitSizeOf_c_longdouble
typedef long double zig_c_longdouble;
#define zig_as_c_longdouble(fp, repr) fp##l
#define zig_libc_name_c_longdouble(name) name##l
#define zig_as_special_c_longdouble(sign, name, arg, repr) sign __builtin_##name##l(arg)
#else
#undef zig_has_c_longdouble
#define zig_bitSizeOf_c_longdouble 80
#define zig_compiler_rt_abbrev_c_longdouble zig_compiler_rt_abbrev_f80
#define zig_has_c_longdouble 0
#define zig_repr_c_longdouble i128
typedef zig_i128 zig_c_longdouble;
#define zig_as_c_longdouble(fp, repr) repr
#undef zig_as_special_c_longdouble
#define zig_as_special_c_longdouble(sign, name, arg, repr) repr
#undef zig_as_special_constant_c_longdouble
#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) repr
#endif
#if !zig_has_float_builtins
#define zig_float_from_repr(Type, ReprType) \
static inline zig_##Type zig_float_from_repr_##Type(zig_##ReprType repr) { \
return *((zig_##Type*)&repr); \
}
zig_float_from_repr(f16, u16)
zig_float_from_repr(f32, u32)
zig_float_from_repr(f64, u64)
zig_float_from_repr(f80, u128)
zig_float_from_repr(f128, u128)
zig_float_from_repr(c_longdouble, u128)
#endif
#define zig_cast_f16 (zig_f16)
#define zig_cast_f32 (zig_f32)
#define zig_cast_f64 (zig_f64)
#if _MSC_VER && !zig_has_f128
#define zig_cast_f80
#define zig_cast_c_longdouble
#define zig_cast_f128
#else
#define zig_cast_f80 (zig_f80)
#define zig_cast_c_longdouble (zig_c_longdouble)
#define zig_cast_f128 (zig_f128)
#endif
#define zig_convert_builtin(ResType, operation, ArgType, version) \
zig_extern zig_##ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \
@@ -1892,3 +2125,268 @@ zig_float_builtins(f64)
zig_float_builtins(f80)
zig_float_builtins(f128)
zig_float_builtins(c_longdouble)
#if _MSC_VER && (_M_IX86 || _M_X64)
// TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64
#define zig_msvc_atomics(Type, suffix) \
static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \
zig_##Type comparand = *expected; \
zig_##Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \
bool exchanged = initial == comparand; \
if (!exchanged) { \
*expected = initial; \
} \
return exchanged; \
} \
static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \
return _InterlockedExchange##suffix(obj, value); \
} \
static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \
return _InterlockedExchangeAdd##suffix(obj, value); \
} \
static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \
bool success = false; \
zig_##Type new; \
zig_##Type prev; \
while (!success) { \
prev = *obj; \
new = prev - value; \
success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
} \
return prev; \
} \
static inline zig_##Type zig_msvc_atomicrmw_or_##Type(zig_##Type volatile* obj, zig_##Type value) { \
return _InterlockedOr##suffix(obj, value); \
} \
static inline zig_##Type zig_msvc_atomicrmw_xor_##Type(zig_##Type volatile* obj, zig_##Type value) { \
return _InterlockedXor##suffix(obj, value); \
} \
static inline zig_##Type zig_msvc_atomicrmw_and_##Type(zig_##Type volatile* obj, zig_##Type value) { \
return _InterlockedAnd##suffix(obj, value); \
} \
static inline zig_##Type zig_msvc_atomicrmw_nand_##Type(zig_##Type volatile* obj, zig_##Type value) { \
bool success = false; \
zig_##Type new; \
zig_##Type prev; \
while (!success) { \
prev = *obj; \
new = ~(prev & value); \
success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
} \
return prev; \
} \
static inline zig_##Type zig_msvc_atomicrmw_min_##Type(zig_##Type volatile* obj, zig_##Type value) { \
bool success = false; \
zig_##Type new; \
zig_##Type prev; \
while (!success) { \
prev = *obj; \
new = value < prev ? value : prev; \
success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
} \
return prev; \
} \
static inline zig_##Type zig_msvc_atomicrmw_max_##Type(zig_##Type volatile* obj, zig_##Type value) { \
bool success = false; \
zig_##Type new; \
zig_##Type prev; \
while (!success) { \
prev = *obj; \
new = value > prev ? value : prev; \
success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
} \
return prev; \
} \
static inline void zig_msvc_atomic_store_##Type(zig_##Type volatile* obj, zig_##Type value) { \
_InterlockedExchange##suffix(obj, value); \
} \
static inline zig_##Type zig_msvc_atomic_load_##Type(zig_##Type volatile* obj) { \
return _InterlockedOr##suffix(obj, 0); \
}
zig_msvc_atomics(u8, 8)
zig_msvc_atomics(i8, 8)
zig_msvc_atomics(u16, 16)
zig_msvc_atomics(i16, 16)
zig_msvc_atomics(u32, )
zig_msvc_atomics(i32, )
zig_msvc_atomics(u64, 64)
zig_msvc_atomics(i64, 64)
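Operations the Interlocked family lacks (sub, nand, min, max) are synthesized above with a compare-exchange retry loop. The same loop shape in portable C11, shown for min (a sketch, not the header's code):

#include <stdatomic.h>
#include <stdint.h>

/* Retry until the swap from the observed value succeeds; prev ends up
   holding the value seen just before the successful update. */
static uint32_t fetch_min_u32(_Atomic uint32_t *obj, uint32_t value) {
    uint32_t prev = atomic_load(obj);
    uint32_t next;
    do {
        next = value < prev ? value : prev;
    } while (!atomic_compare_exchange_weak(obj, &prev, next));
    return prev;
}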
#define zig_msvc_flt_atomics(Type, ReprType, suffix) \
static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \
zig_##ReprType comparand = *((zig_##ReprType*)expected); \
zig_##ReprType initial = _InterlockedCompareExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&desired), comparand); \
bool exchanged = initial == comparand; \
if (!exchanged) { \
*expected = *((zig_##Type*)&initial); \
} \
return exchanged; \
} \
static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \
zig_##ReprType initial = _InterlockedExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&value)); \
return *((zig_##Type*)&initial); \
} \
static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \
bool success = false; \
zig_##ReprType new; \
zig_##Type prev; \
while (!success) { \
prev = *obj; \
new = prev + value; \
success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \
} \
return prev; \
} \
static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \
bool success = false; \
zig_##ReprType new; \
zig_##Type prev; \
while (!success) { \
prev = *obj; \
new = prev - value; \
success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \
} \
return prev; \
}
zig_msvc_flt_atomics(f32, u32, )
zig_msvc_flt_atomics(f64, u64, 64)
#if _M_IX86
static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, zig_u32* arg) {
return _InterlockedExchangePointer(obj, arg);
}
static inline void zig_msvc_atomic_store_p32(void** obj, zig_u32* arg) {
_InterlockedExchangePointer(obj, arg);
}
static inline void* zig_msvc_atomic_load_p32(void** obj, zig_u32* arg) {
return (void*)_InterlockedOr((void*)obj, 0);
}
static inline bool zig_msvc_cmpxchg_p32(void** obj, void** expected, void* desired) {
void* comparand = *expected;
void* initial = _InterlockedCompareExchangePointer(obj, desired, comparand);
bool exchanged = initial == comparand;
if (!exchanged) {
*expected = initial;
}
return exchanged;
}
#else
static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, zig_u64* arg) {
return _InterlockedExchangePointer(obj, arg);
}
static inline void zig_msvc_atomic_store_p64(void** obj, zig_u64* arg) {
_InterlockedExchangePointer(obj, arg);
}
static inline void* zig_msvc_atomic_load_p64(void** obj) {
return (void*)_InterlockedOr64((void*)obj, 0);
}
static inline bool zig_msvc_cmpxchg_p64(void** obj, void** expected, void* desired) {
void* comparand = *expected;
void* initial = _InterlockedCompareExchangePointer(obj, desired, comparand);
bool exchanged = initial == comparand;
if (!exchanged) {
*expected = initial;
}
return exchanged;
}
#endif
static inline bool zig_msvc_cmpxchg_u128(zig_u128 volatile* obj, zig_u128* expected, zig_u128 desired) {
return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_i64*)expected);
}
static inline bool zig_msvc_cmpxchg_i128(zig_i128 volatile* obj, zig_i128* expected, zig_i128 desired) {
return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_u64*)expected);
}
#define zig_msvc_atomics_128xchg(Type) \
static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \
bool success = false; \
zig_##Type prev; \
while (!success) { \
prev = *obj; \
success = zig_msvc_cmpxchg_##Type(obj, &prev, value); \
} \
return prev; \
}
zig_msvc_atomics_128xchg(u128)
zig_msvc_atomics_128xchg(i128)
#define zig_msvc_atomics_128op(Type, operation) \
static inline zig_##Type zig_msvc_atomicrmw_##operation##_##Type(zig_##Type volatile* obj, zig_##Type value) { \
bool success = false; \
zig_##Type new; \
zig_##Type prev; \
while (!success) { \
prev = *obj; \
new = zig_##operation##_##Type(prev, value); \
success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
} \
return prev; \
}
zig_msvc_atomics_128op(u128, add)
zig_msvc_atomics_128op(u128, sub)
zig_msvc_atomics_128op(u128, or)
zig_msvc_atomics_128op(u128, xor)
zig_msvc_atomics_128op(u128, and)
zig_msvc_atomics_128op(u128, nand)
zig_msvc_atomics_128op(u128, min)
zig_msvc_atomics_128op(u128, max)
#endif /* _MSC_VER && (_M_IX86 || _M_X64) */
/* ========================= Special Case Intrinsics ========================= */
#if (_MSC_VER && _M_X64) || defined(__x86_64__)
static inline void* zig_x86_64_windows_teb(void) {
#if _MSC_VER
return __readgsqword(0x30);
#else
void* teb;
__asm volatile(" movq %%gs:0x30, %[ptr]": [ptr]"=r"(teb)::);
return teb;
#endif
}
#endif
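On x64 Windows the TEB pointer lives at gs:0x30, which is the same slot winnt.h's NtCurrentTeb() reads; a quick MSVC-only sanity check, as a sketch:

#include <windows.h>
#include <intrin.h>
#include <assert.h>

int main(void) {
    assert((void *)__readgsqword(0x30) == (void *)NtCurrentTeb());
    return 0;
}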
#if (_MSC_VER && (_M_IX86 || _M_X64)) || defined(__i386__) || defined(__x86_64__)
static inline void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, zig_u32* ebx, zig_u32* ecx, zig_u32* edx) {
zig_u32 cpu_info[4];
#if _MSC_VER
__cpuidex(cpu_info, leaf_id, subid);
#else
__cpuid_count(leaf_id, subid, cpu_info[0], cpu_info[1], cpu_info[2], cpu_info[3]);
#endif
*eax = cpu_info[0];
*ebx = cpu_info[1];
*ecx = cpu_info[2];
*edx = cpu_info[3];
}
static inline zig_u32 zig_x86_get_xcr0(void) {
#if _MSC_VER
return (zig_u32)_xgetbv(0);
#else
zig_u32 eax;
zig_u32 edx;
__asm__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0));
return eax;
#endif
}
#endif

File diff suppressed because it is too large.

Binary file not shown.


@@ -540,6 +540,7 @@ test "align(N) on functions" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO this is not supported on MSVC
// function alignment is a compile error on wasm32/wasm64
if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;


@@ -23,6 +23,7 @@ test "module level assembly" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (is_x86_64_linux) {
try expect(this_is_my_alias() == 1234);
@@ -35,6 +36,7 @@ test "output constraint modifiers" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
// This is only testing compilation.
var a: u32 = 3;
@@ -56,6 +58,7 @@ test "alternative constraints" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
// Make sure we allow commas as a separator for alternative constraints.
var a: u32 = 3;
@@ -72,6 +75,7 @@ test "sized integer/float in asm input" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
asm volatile (""
:
@@ -121,6 +125,7 @@ test "struct/array/union types as input values" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
asm volatile (""
:


@@ -250,31 +250,142 @@ test "atomicrmw with ints" {
return error.SkipZigTest;
}
try testAtomicRmwInt();
comptime try testAtomicRmwInt();
try testAtomicRmwInts();
comptime try testAtomicRmwInts();
}
fn testAtomicRmwInt() !void {
var x: u8 = 1;
var res = @atomicRmw(u8, &x, .Xchg, 3, .SeqCst);
try expect(x == 3 and res == 1);
_ = @atomicRmw(u8, &x, .Add, 3, .SeqCst);
try expect(x == 6);
_ = @atomicRmw(u8, &x, .Sub, 1, .SeqCst);
try expect(x == 5);
_ = @atomicRmw(u8, &x, .And, 4, .SeqCst);
try expect(x == 4);
_ = @atomicRmw(u8, &x, .Nand, 4, .SeqCst);
try expect(x == 0xfb);
_ = @atomicRmw(u8, &x, .Or, 6, .SeqCst);
try expect(x == 0xff);
_ = @atomicRmw(u8, &x, .Xor, 2, .SeqCst);
try expect(x == 0xfd);
fn testAtomicRmwInts() !void {
// TODO: Use the max atomic bit size for the target, maybe builtin?
try testAtomicRmwInt(8);
_ = @atomicRmw(u8, &x, .Max, 1, .SeqCst);
try expect(x == 0xfd);
_ = @atomicRmw(u8, &x, .Min, 1, .SeqCst);
try expect(x == 1);
if (builtin.cpu.arch == .x86_64) {
try testAtomicRmwInt(16);
try testAtomicRmwInt(32);
try testAtomicRmwInt(64);
}
}
fn testAtomicRmwInt(comptime signedness: std.builtin.Signedness, comptime N: usize) !void {
const int = std.meta.Int(signedness, N);
var x: int = 1;
var res = @atomicRmw(int, &x, .Xchg, 3, .SeqCst);
try expect(x == 3 and res == 1);
res = @atomicRmw(int, &x, .Add, 3, .SeqCst);
var y: int = 3;
try expect(res == y);
y = y + 3;
try expect(x == y);
res = @atomicRmw(int, &x, .Sub, 1, .SeqCst);
try expect(res == y);
y = y - 1;
try expect(x == y);
res = @atomicRmw(int, &x, .And, 4, .SeqCst);
try expect(res == y);
y = y & 4;
try expect(x == y);
res = @atomicRmw(int, &x, .Nand, 4, .SeqCst);
try expect(res == y);
y = ~(y & 4);
try expect(x == y);
res = @atomicRmw(int, &x, .Or, 6, .SeqCst);
try expect(res == y);
y = y | 6;
try expect(x == y);
res = @atomicRmw(int, &x, .Xor, 2, .SeqCst);
try expect(res == y);
y = y ^ 2;
try expect(x == y);
res = @atomicRmw(int, &x, .Max, 1, .SeqCst);
try expect(res == y);
y = @max(y, 1);
try expect(x == y);
res = @atomicRmw(int, &x, .Min, 1, .SeqCst);
try expect(res == y);
y = @min(y, 1);
try expect(x == y);
}
test "atomicrmw with 128-bit ints" {
if (builtin.cpu.arch != .x86_64) {
// TODO: Ideally this could use target.atomicPtrAlignment and check for IntTooBig
return error.SkipZigTest;
}
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
// TODO "ld.lld: undefined symbol: __sync_lock_test_and_set_16" on -mcpu x86_64
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
try testAtomicRmwInt128(.unsigned);
comptime try testAtomicRmwInt128(.unsigned);
}
fn testAtomicRmwInt128(comptime signedness: std.builtin.Signedness) !void {
const int = std.meta.Int(signedness, 128);
const initial: int = 0xaaaaaaaa_bbbbbbbb_cccccccc_dddddddd;
const replacement: int = 0x00000000_00000005_00000000_00000003;
var x: int align(16) = initial;
var res = @atomicRmw(int, &x, .Xchg, replacement, .SeqCst);
try expect(x == replacement and res == initial);
var operator: int = 0x00000001_00000000_20000000_00000000;
res = @atomicRmw(int, &x, .Add, operator, .SeqCst);
var y: int = replacement;
try expect(res == y);
y = y + operator;
try expect(x == y);
operator = 0x00000000_10000000_00000000_20000000;
res = @atomicRmw(int, &x, .Sub, operator, .SeqCst);
try expect(res == y);
y = y - operator;
try expect(x == y);
operator = 0x12345678_87654321_12345678_87654321;
res = @atomicRmw(int, &x, .And, operator, .SeqCst);
try expect(res == y);
y = y & operator;
try expect(x == y);
operator = 0x00000000_10000000_00000000_20000000;
res = @atomicRmw(int, &x, .Nand, operator, .SeqCst);
try expect(res == y);
y = ~(y & operator);
try expect(x == y);
operator = 0x12340000_56780000_67890000_98760000;
res = @atomicRmw(int, &x, .Or, operator, .SeqCst);
try expect(res == y);
y = y | operator;
try expect(x == y);
operator = 0x0a0b0c0d_0e0f0102_03040506_0708090a;
res = @atomicRmw(int, &x, .Xor, operator, .SeqCst);
try expect(res == y);
y = y ^ operator;
try expect(x == y);
operator = 0x00000000_10000000_00000000_20000000;
res = @atomicRmw(int, &x, .Max, operator, .SeqCst);
try expect(res == y);
y = @max(y, operator);
try expect(x == y);
res = @atomicRmw(int, &x, .Min, operator, .SeqCst);
try expect(res == y);
y = @min(y, operator);
try expect(x == y);
}
test "atomics with different types" {


@@ -37,6 +37,24 @@ test "truncate to non-power-of-two integers" {
try testTrunc(i32, i5, std.math.maxInt(i32), -1);
}
test "truncate to non-power-of-two integers from 128-bit" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testTrunc(u128, u1, 0xffffffff_ffffffff_ffffffff_01010101, 0x01);
try testTrunc(u128, u1, 0xffffffff_ffffffff_ffffffff_01010110, 0x00);
try testTrunc(u128, u2, 0xffffffff_ffffffff_ffffffff_01010101, 0x01);
try testTrunc(u128, u2, 0xffffffff_ffffffff_ffffffff_01010102, 0x02);
try testTrunc(i128, i5, -4, -4);
try testTrunc(i128, i5, 4, 4);
try testTrunc(i128, i5, -28, 4);
try testTrunc(i128, i5, 28, -4);
try testTrunc(i128, i5, std.math.maxInt(i128), -1);
}
fn testTrunc(comptime Big: type, comptime Little: type, big: Big, little: Little) !void {
try expect(@truncate(Little, big) == little);
}


@@ -64,11 +64,23 @@ test "int128" {
}
test "truncate int128" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var buff: u128 = maxInt(u128);
try expect(@truncate(u64, buff) == maxInt(u64));
{
var buff: u128 = maxInt(u128);
try expect(@truncate(u64, buff) == maxInt(u64));
try expect(@truncate(u90, buff) == maxInt(u90));
try expect(@truncate(u128, buff) == maxInt(u128));
}
{
var buff: i128 = maxInt(i128);
try expect(@truncate(i64, buff) == -1);
try expect(@truncate(i90, buff) == -1);
try expect(@truncate(i128, buff) == maxInt(i128));
}
}


@@ -377,6 +377,28 @@ fn testBinaryNot(x: u16) !void {
try expect(~x == 0b0101010101010101);
}
test "binary not 128-bit" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(comptime x: {
break :x ~@as(u128, 0x55555555_55555555_55555555_55555555) == 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa;
});
try expect(comptime x: {
break :x ~@as(i128, 0x55555555_55555555_55555555_55555555) == @bitCast(i128, @as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa));
});
try testBinaryNot128(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa);
try testBinaryNot128(i128, @bitCast(i128, @as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa)));
}
fn testBinaryNot128(comptime Type: type, x: Type) !void {
try expect(~x == @as(Type, 0x55555555_55555555_55555555_55555555));
}
test "division" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -632,10 +654,24 @@ test "128-bit multiplication" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var a: i128 = 3;
var b: i128 = 2;
var c = a * b;
try expect(c == 6);
{
var a: i128 = 3;
var b: i128 = 2;
var c = a * b;
try expect(c == 6);
a = -3;
b = 2;
c = a * b;
try expect(c == -6);
}
{
var a: u128 = 0xffffffffffffffff;
var b: u128 = 100;
var c = a * b;
try expect(c == 0x63ffffffffffffff9c);
}
}
test "@addWithOverflow" {