From 34a23db664e0fe50fb21c892f33b0aec8a7a2f7f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 4 Mar 2023 14:21:57 -0700
Subject: [PATCH 1/6] zig.h: lower trap to SIGTRAP instead of SIGILL
---
lib/zig.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/zig.h b/lib/zig.h
index 22a9dbbb9e..65fb21f99a 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -193,7 +193,7 @@ typedef char bool;
#elif defined(__i386__) || defined(__x86_64__)
#define zig_trap() __asm__ volatile("ud2");
#else
-#define zig_trap() raise(SIGILL)
+#define zig_trap() raise(SIGTRAP)
#endif
#if zig_has_builtin(debugtrap)
From fb04ff45cd1b4eca5c56e0295bbbe961557ef820 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 4 Mar 2023 14:22:46 -0700
Subject: [PATCH 2/6] langref: small clarification to `@trap`
---
doc/langref.html.in | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index a413c3aab5..7044fe977f 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -9403,7 +9403,7 @@ fn List(comptime T: type) type {
Unlike for {#syntax#}@breakpoint(){#endsyntax#}, execution does not continue after this point.
- This function is only valid within function scope.
+ Outside function scope, this builtin causes a compile error.
{#see_also|@breakpoint#}
{#header_close#}
From 48e72960a496edc86b231d45bfa39d618b6adfaf Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 4 Mar 2023 14:48:31 -0700
Subject: [PATCH 3/6] llvm: fix lowering of `@trap`
It needed an unreachable instruction after it.
---
src/codegen/llvm.zig | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index baeaeee58f..85a82f4eda 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -8261,6 +8261,7 @@ pub const FuncGen = struct {
_ = inst;
const llvm_fn = self.getIntrinsic("llvm.trap", &.{});
_ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, undefined, 0, .Cold, .Auto, "");
+ _ = self.builder.buildUnreachable();
return null;
}
From c839c180ef1686794c039fc6d3c20a8716e87357 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 5 Mar 2023 12:46:12 -0700
Subject: [PATCH 4/6] stage2: add zig_backend to ZIR cache namespace
---
src/Module.zig | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/Module.zig b/src/Module.zig
index a2502d36d3..7ea69a0a2e 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -3528,6 +3528,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
const digest = hash: {
var path_hash: Cache.HashHelper = .{};
path_hash.addBytes(build_options.version);
+ path_hash.add(builtin.zig_backend);
if (!want_local_cache) {
path_hash.addOptionalBytes(file.pkg.root_src_directory.path);
}
From cdb9cc8f6bda4b4faa270278e3b67c4ef9246a84 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 4 Mar 2023 14:41:12 -0700
Subject: [PATCH 5/6] update zig1.wasm
---
stage1/zig.h | 2759 +++++++++++++++++++++++++++++++---------------
stage1/zig1.wasm | Bin 2408069 -> 2412111 bytes
2 files changed, 1858 insertions(+), 901 deletions(-)
diff --git a/stage1/zig.h b/stage1/zig.h
index 0756d9f731..65fb21f99a 100644
--- a/stage1/zig.h
+++ b/stage1/zig.h
@@ -1,8 +1,11 @@
#undef linux
+#ifndef __STDC_WANT_IEC_60559_TYPES_EXT__
#define __STDC_WANT_IEC_60559_TYPES_EXT__
+#endif
#include
#include
+#include
#include
#include
@@ -34,6 +37,14 @@ typedef char bool;
#define zig_has_attribute(attribute) 0
#endif
+#if __LITTLE_ENDIAN__ || _MSC_VER
+#define zig_little_endian 1
+#define zig_big_endian 0
+#else
+#define zig_little_endian 0
+#define zig_big_endian 1
+#endif
+
#if __STDC_VERSION__ >= 201112L
#define zig_threadlocal _Thread_local
#elif defined(__GNUC__)
@@ -75,6 +86,32 @@ typedef char bool;
#define zig_cold
#endif
+#if zig_has_attribute(flatten)
+#define zig_maybe_flatten __attribute__((flatten))
+#else
+#define zig_maybe_flatten
+#endif
+
+#if zig_has_attribute(noinline)
+#define zig_never_inline __attribute__((noinline)) zig_maybe_flatten
+#elif defined(_MSC_VER)
+#define zig_never_inline __declspec(noinline) zig_maybe_flatten
+#else
+#define zig_never_inline zig_never_inline_unavailable
+#endif
+
+#if zig_has_attribute(not_tail_called)
+#define zig_never_tail __attribute__((not_tail_called)) zig_never_inline
+#else
+#define zig_never_tail zig_never_tail_unavailable
+#endif
+
+#if zig_has_attribute(always_inline)
+#define zig_always_tail __attribute__((musttail))
+#else
+#define zig_always_tail zig_always_tail_unavailable
+#endif
+
#if __STDC_VERSION__ >= 199901L
#define zig_restrict restrict
#elif defined(__GNUC__)
@@ -151,10 +188,16 @@ typedef char bool;
#define zig_export(sig, symbol, name) __asm(name " = " symbol)
#endif
+#if zig_has_builtin(trap)
+#define zig_trap() __builtin_trap()
+#elif defined(__i386__) || defined(__x86_64__)
+#define zig_trap() __asm__ volatile("ud2");
+#else
+#define zig_trap() raise(SIGTRAP)
+#endif
+
#if zig_has_builtin(debugtrap)
#define zig_breakpoint() __builtin_debugtrap()
-#elif zig_has_builtin(trap) || defined(zig_gnuc)
-#define zig_breakpoint() __builtin_trap()
#elif defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__)
#define zig_breakpoint() __debugbreak()
#elif defined(__i386__) || defined(__x86_64__)
@@ -286,701 +329,656 @@ typedef char bool;
#endif
#if __STDC_VERSION__ >= 201112L
-#define zig_noreturn _Noreturn void
+#define zig_noreturn _Noreturn
#elif zig_has_attribute(noreturn) || defined(zig_gnuc)
-#define zig_noreturn __attribute__((noreturn)) void
+#define zig_noreturn __attribute__((noreturn))
#elif _MSC_VER
-#define zig_noreturn __declspec(noreturn) void
+#define zig_noreturn __declspec(noreturn)
#else
-#define zig_noreturn void
+#define zig_noreturn
#endif
#define zig_bitSizeOf(T) (CHAR_BIT * sizeof(T))
-typedef uintptr_t zig_usize;
-typedef intptr_t zig_isize;
-typedef signed short int zig_c_short;
-typedef unsigned short int zig_c_ushort;
-typedef signed int zig_c_int;
-typedef unsigned int zig_c_uint;
-typedef signed long int zig_c_long;
-typedef unsigned long int zig_c_ulong;
-typedef signed long long int zig_c_longlong;
-typedef unsigned long long int zig_c_ulonglong;
+#define zig_compiler_rt_abbrev_uint32_t si
+#define zig_compiler_rt_abbrev_int32_t si
+#define zig_compiler_rt_abbrev_uint64_t di
+#define zig_compiler_rt_abbrev_int64_t di
+#define zig_compiler_rt_abbrev_zig_u128 ti
+#define zig_compiler_rt_abbrev_zig_i128 ti
+#define zig_compiler_rt_abbrev_zig_f16 hf
+#define zig_compiler_rt_abbrev_zig_f32 sf
+#define zig_compiler_rt_abbrev_zig_f64 df
+#define zig_compiler_rt_abbrev_zig_f80 xf
+#define zig_compiler_rt_abbrev_zig_f128 tf
-typedef uint8_t zig_u8;
-typedef int8_t zig_i8;
-typedef uint16_t zig_u16;
-typedef int16_t zig_i16;
-typedef uint32_t zig_u32;
-typedef int32_t zig_i32;
-typedef uint64_t zig_u64;
-typedef int64_t zig_i64;
+zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, size_t);
+zig_extern void *memset (void *, int, size_t);
-#define zig_as_u8(val) UINT8_C(val)
-#define zig_as_i8(val) INT8_C(val)
-#define zig_as_u16(val) UINT16_C(val)
-#define zig_as_i16(val) INT16_C(val)
-#define zig_as_u32(val) UINT32_C(val)
-#define zig_as_i32(val) INT32_C(val)
-#define zig_as_u64(val) UINT64_C(val)
-#define zig_as_i64(val) INT64_C(val)
+/* ===================== 8/16/32/64-bit Integer Support ===================== */
+
+#if __STDC_VERSION__ >= 199901L || _MSC_VER
+#include <stdint.h>
+#else
+
+#if SCHAR_MIN == ~0x7F && SCHAR_MAX == 0x7F && UCHAR_MAX == 0xFF
+typedef unsigned char uint8_t;
+typedef signed char int8_t;
+#define INT8_C(c) c
+#define UINT8_C(c) c##U
+#elif SHRT_MIN == ~0x7F && SHRT_MAX == 0x7F && USHRT_MAX == 0xFF
+typedef unsigned short uint8_t;
+typedef signed short int8_t;
+#define INT8_C(c) c
+#define UINT8_C(c) c##U
+#elif INT_MIN == ~0x7F && INT_MAX == 0x7F && UINT_MAX == 0xFF
+typedef unsigned int uint8_t;
+typedef signed int int8_t;
+#define INT8_C(c) c
+#define UINT8_C(c) c##U
+#elif LONG_MIN == ~0x7F && LONG_MAX == 0x7F && ULONG_MAX == 0xFF
+typedef unsigned long uint8_t;
+typedef signed long int8_t;
+#define INT8_C(c) c##L
+#define UINT8_C(c) c##LU
+#elif LLONG_MIN == ~0x7F && LLONG_MAX == 0x7F && ULLONG_MAX == 0xFF
+typedef unsigned long long uint8_t;
+typedef signed long long int8_t;
+#define INT8_C(c) c##LL
+#define UINT8_C(c) c##LLU
+#endif
+#define INT8_MIN (~INT8_C(0x7F))
+#define INT8_MAX ( INT8_C(0x7F))
+#define UINT8_MAX ( INT8_C(0xFF))
+
+#if SCHAR_MIN == ~0x7FFF && SCHAR_MAX == 0x7FFF && UCHAR_MAX == 0xFFFF
+typedef unsigned char uint16_t;
+typedef signed char int16_t;
+#define INT16_C(c) c
+#define UINT16_C(c) c##U
+#elif SHRT_MIN == ~0x7FFF && SHRT_MAX == 0x7FFF && USHRT_MAX == 0xFFFF
+typedef unsigned short uint16_t;
+typedef signed short int16_t;
+#define INT16_C(c) c
+#define UINT16_C(c) c##U
+#elif INT_MIN == ~0x7FFF && INT_MAX == 0x7FFF && UINT_MAX == 0xFFFF
+typedef unsigned int uint16_t;
+typedef signed int int16_t;
+#define INT16_C(c) c
+#define UINT16_C(c) c##U
+#elif LONG_MIN == ~0x7FFF && LONG_MAX == 0x7FFF && ULONG_MAX == 0xFFFF
+typedef unsigned long uint16_t;
+typedef signed long int16_t;
+#define INT16_C(c) c##L
+#define UINT16_C(c) c##LU
+#elif LLONG_MIN == ~0x7FFF && LLONG_MAX == 0x7FFF && ULLONG_MAX == 0xFFFF
+typedef unsigned long long uint16_t;
+typedef signed long long int16_t;
+#define INT16_C(c) c##LL
+#define UINT16_C(c) c##LLU
+#endif
+#define INT16_MIN (~INT16_C(0x7FFF))
+#define INT16_MAX ( INT16_C(0x7FFF))
+#define UINT16_MAX ( INT16_C(0xFFFF))
+
+#if SCHAR_MIN == ~0x7FFFFFFF && SCHAR_MAX == 0x7FFFFFFF && UCHAR_MAX == 0xFFFFFFFF
+typedef unsigned char uint32_t;
+typedef signed char int32_t;
+#define INT32_C(c) c
+#define UINT32_C(c) c##U
+#elif SHRT_MIN == ~0x7FFFFFFF && SHRT_MAX == 0x7FFFFFFF && USHRT_MAX == 0xFFFFFFFF
+typedef unsigned short uint32_t;
+typedef signed short int32_t;
+#define INT32_C(c) c
+#define UINT32_C(c) c##U
+#elif INT_MIN == ~0x7FFFFFFF && INT_MAX == 0x7FFFFFFF && UINT_MAX == 0xFFFFFFFF
+typedef unsigned int uint32_t;
+typedef signed int int32_t;
+#define INT32_C(c) c
+#define UINT32_C(c) c##U
+#elif LONG_MIN == ~0x7FFFFFFF && LONG_MAX == 0x7FFFFFFF && ULONG_MAX == 0xFFFFFFFF
+typedef unsigned long uint32_t;
+typedef signed long int32_t;
+#define INT32_C(c) c##L
+#define UINT32_C(c) c##LU
+#elif LLONG_MIN == ~0x7FFFFFFF && LLONG_MAX == 0x7FFFFFFF && ULLONG_MAX == 0xFFFFFFFF
+typedef unsigned long long uint32_t;
+typedef signed long long int32_t;
+#define INT32_C(c) c##LL
+#define UINT32_C(c) c##LLU
+#endif
+#define INT32_MIN (~INT32_C(0x7FFFFFFF))
+#define INT32_MAX ( INT32_C(0x7FFFFFFF))
+#define UINT32_MAX ( INT32_C(0xFFFFFFFF))
+
+#if SCHAR_MIN == ~0x7FFFFFFFFFFFFFFF && SCHAR_MAX == 0x7FFFFFFFFFFFFFFF && UCHAR_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned char uint64_t;
+typedef signed char int64_t;
+#define INT64_C(c) c
+#define UINT64_C(c) c##U
+#elif SHRT_MIN == ~0x7FFFFFFFFFFFFFFF && SHRT_MAX == 0x7FFFFFFFFFFFFFFF && USHRT_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned short uint64_t;
+typedef signed short int64_t;
+#define INT64_C(c) c
+#define UINT64_C(c) c##U
+#elif INT_MIN == ~0x7FFFFFFFFFFFFFFF && INT_MAX == 0x7FFFFFFFFFFFFFFF && UINT_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned int uint64_t;
+typedef signed int int64_t;
+#define INT64_C(c) c
+#define UINT64_C(c) c##U
+#elif LONG_MIN == ~0x7FFFFFFFFFFFFFFF && LONG_MAX == 0x7FFFFFFFFFFFFFFF && ULONG_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned long uint64_t;
+typedef signed long int64_t;
+#define INT64_C(c) c##L
+#define UINT64_C(c) c##LU
+#elif LLONG_MIN == ~0x7FFFFFFFFFFFFFFF && LLONG_MAX == 0x7FFFFFFFFFFFFFFF && ULLONG_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned long long uint64_t;
+typedef signed long long int64_t;
+#define INT64_C(c) c##LL
+#define UINT64_C(c) c##LLU
+#endif
+#define INT64_MIN (~INT64_C(0x7FFFFFFFFFFFFFFF))
+#define INT64_MAX ( INT64_C(0x7FFFFFFFFFFFFFFF))
+#define UINT64_MAX ( INT64_C(0xFFFFFFFFFFFFFFFF))
+
+typedef size_t uintptr_t;
+typedef ptrdiff_t intptr_t;
+
+#endif
-#define zig_minInt_u8 zig_as_u8(0)
-#define zig_maxInt_u8 UINT8_MAX
#define zig_minInt_i8 INT8_MIN
#define zig_maxInt_i8 INT8_MAX
-#define zig_minInt_u16 zig_as_u16(0)
-#define zig_maxInt_u16 UINT16_MAX
+#define zig_minInt_u8 UINT8_C(0)
+#define zig_maxInt_u8 UINT8_MAX
#define zig_minInt_i16 INT16_MIN
#define zig_maxInt_i16 INT16_MAX
-#define zig_minInt_u32 zig_as_u32(0)
-#define zig_maxInt_u32 UINT32_MAX
+#define zig_minInt_u16 UINT16_C(0)
+#define zig_maxInt_u16 UINT16_MAX
#define zig_minInt_i32 INT32_MIN
#define zig_maxInt_i32 INT32_MAX
-#define zig_minInt_u64 zig_as_u64(0)
-#define zig_maxInt_u64 UINT64_MAX
+#define zig_minInt_u32 UINT32_C(0)
+#define zig_maxInt_u32 UINT32_MAX
#define zig_minInt_i64 INT64_MIN
#define zig_maxInt_i64 INT64_MAX
+#define zig_minInt_u64 UINT64_C(0)
+#define zig_maxInt_u64 UINT64_MAX
-#define zig_compiler_rt_abbrev_u32 si
-#define zig_compiler_rt_abbrev_i32 si
-#define zig_compiler_rt_abbrev_u64 di
-#define zig_compiler_rt_abbrev_i64 di
-#define zig_compiler_rt_abbrev_u128 ti
-#define zig_compiler_rt_abbrev_i128 ti
-#define zig_compiler_rt_abbrev_f16 hf
-#define zig_compiler_rt_abbrev_f32 sf
-#define zig_compiler_rt_abbrev_f64 df
-#define zig_compiler_rt_abbrev_f80 xf
-#define zig_compiler_rt_abbrev_f128 tf
-
-zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, zig_usize);
-zig_extern void *memset (void *, int, zig_usize);
-
-/* ==================== 8/16/32/64-bit Integer Routines ===================== */
-
-#define zig_maxInt(Type, bits) zig_shr_##Type(zig_maxInt_##Type, (zig_bitSizeOf(zig_##Type) - bits))
-#define zig_expand_maxInt(Type, bits) zig_maxInt(Type, bits)
-#define zig_minInt(Type, bits) zig_not_##Type(zig_maxInt(Type, bits), bits)
-#define zig_expand_minInt(Type, bits) zig_minInt(Type, bits)
+#define zig_intLimit(s, w, limit, bits) zig_shr_##s##w(zig_##limit##Int_##s##w, w - (bits))
+#define zig_minInt_i(w, bits) zig_intLimit(i, w, min, bits)
+#define zig_maxInt_i(w, bits) zig_intLimit(i, w, max, bits)
+#define zig_minInt_u(w, bits) zig_intLimit(u, w, min, bits)
+#define zig_maxInt_u(w, bits) zig_intLimit(u, w, max, bits)
#define zig_int_operator(Type, RhsType, operation, operator) \
- static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##RhsType rhs) { \
+ static inline Type zig_##operation(Type lhs, RhsType rhs) { \
return lhs operator rhs; \
}
#define zig_int_basic_operator(Type, operation, operator) \
- zig_int_operator(Type, Type, operation, operator)
+ zig_int_operator(Type, Type, operation, operator)
#define zig_int_shift_operator(Type, operation, operator) \
- zig_int_operator(Type, u8, operation, operator)
+ zig_int_operator(Type, uint8_t, operation, operator)
#define zig_int_helpers(w) \
- zig_int_basic_operator(u##w, and, &) \
- zig_int_basic_operator(i##w, and, &) \
- zig_int_basic_operator(u##w, or, |) \
- zig_int_basic_operator(i##w, or, |) \
- zig_int_basic_operator(u##w, xor, ^) \
- zig_int_basic_operator(i##w, xor, ^) \
- zig_int_shift_operator(u##w, shl, <<) \
- zig_int_shift_operator(i##w, shl, <<) \
- zig_int_shift_operator(u##w, shr, >>) \
+ zig_int_basic_operator(uint##w##_t, and_u##w, &) \
+ zig_int_basic_operator( int##w##_t, and_i##w, &) \
+ zig_int_basic_operator(uint##w##_t, or_u##w, |) \
+ zig_int_basic_operator( int##w##_t, or_i##w, |) \
+ zig_int_basic_operator(uint##w##_t, xor_u##w, ^) \
+ zig_int_basic_operator( int##w##_t, xor_i##w, ^) \
+ zig_int_shift_operator(uint##w##_t, shl_u##w, <<) \
+ zig_int_shift_operator( int##w##_t, shl_i##w, <<) \
+ zig_int_shift_operator(uint##w##_t, shr_u##w, >>) \
\
- static inline zig_i##w zig_shr_i##w(zig_i##w lhs, zig_u8 rhs) { \
- zig_i##w sign_mask = lhs < zig_as_i##w(0) ? -zig_as_i##w(1) : zig_as_i##w(0); \
+ static inline int##w##_t zig_shr_i##w(int##w##_t lhs, uint8_t rhs) { \
+ int##w##_t sign_mask = lhs < INT##w##_C(0) ? -INT##w##_C(1) : INT##w##_C(0); \
return ((lhs ^ sign_mask) >> rhs) ^ sign_mask; \
} \
\
- static inline zig_u##w zig_not_u##w(zig_u##w val, zig_u8 bits) { \
- return val ^ zig_maxInt(u##w, bits); \
+ static inline uint##w##_t zig_not_u##w(uint##w##_t val, uint8_t bits) { \
+ return val ^ zig_maxInt_u(w, bits); \
} \
\
- static inline zig_i##w zig_not_i##w(zig_i##w val, zig_u8 bits) { \
+ static inline int##w##_t zig_not_i##w(int##w##_t val, uint8_t bits) { \
(void)bits; \
return ~val; \
} \
\
- static inline zig_u##w zig_wrap_u##w(zig_u##w val, zig_u8 bits) { \
- return val & zig_maxInt(u##w, bits); \
+ static inline uint##w##_t zig_wrap_u##w(uint##w##_t val, uint8_t bits) { \
+ return val & zig_maxInt_u(w, bits); \
} \
\
- static inline zig_i##w zig_wrap_i##w(zig_i##w val, zig_u8 bits) { \
- return (val & zig_as_u##w(1) << (bits - zig_as_u8(1))) != 0 \
- ? val | zig_minInt(i##w, bits) : val & zig_maxInt(i##w, bits); \
+ static inline int##w##_t zig_wrap_i##w(int##w##_t val, uint8_t bits) { \
+ return (val & UINT##w##_C(1) << (bits - UINT8_C(1))) != 0 \
+ ? val | zig_minInt_i(w, bits) : val & zig_maxInt_i(w, bits); \
} \
\
- zig_int_basic_operator(u##w, div_floor, /) \
+ zig_int_basic_operator(uint##w##_t, div_floor_u##w, /) \
\
- static inline zig_i##w zig_div_floor_i##w(zig_i##w lhs, zig_i##w rhs) { \
- return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < zig_as_i##w(0)); \
+ static inline int##w##_t zig_div_floor_i##w(int##w##_t lhs, int##w##_t rhs) { \
+ return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < INT##w##_C(0)); \
} \
\
- zig_int_basic_operator(u##w, mod, %) \
+ zig_int_basic_operator(uint##w##_t, mod_u##w, %) \
\
- static inline zig_i##w zig_mod_i##w(zig_i##w lhs, zig_i##w rhs) { \
- zig_i##w rem = lhs % rhs; \
- return rem + (((lhs ^ rhs) & rem) < zig_as_i##w(0) ? rhs : zig_as_i##w(0)); \
+ static inline int##w##_t zig_mod_i##w(int##w##_t lhs, int##w##_t rhs) { \
+ int##w##_t rem = lhs % rhs; \
+ return rem + (((lhs ^ rhs) & rem) < INT##w##_C(0) ? rhs : INT##w##_C(0)); \
} \
\
- static inline zig_u##w zig_shlw_u##w(zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ static inline uint##w##_t zig_shlw_u##w(uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \
return zig_wrap_u##w(zig_shl_u##w(lhs, rhs), bits); \
} \
\
- static inline zig_i##w zig_shlw_i##w(zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \
- return zig_wrap_i##w((zig_i##w)zig_shl_u##w((zig_u##w)lhs, (zig_u##w)rhs), bits); \
+ static inline int##w##_t zig_shlw_i##w(int##w##_t lhs, uint8_t rhs, uint8_t bits) { \
+ return zig_wrap_i##w((int##w##_t)zig_shl_u##w((uint##w##_t)lhs, (uint##w##_t)rhs), bits); \
} \
\
- static inline zig_u##w zig_addw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ static inline uint##w##_t zig_addw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
return zig_wrap_u##w(lhs + rhs, bits); \
} \
\
- static inline zig_i##w zig_addw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs + (zig_u##w)rhs), bits); \
+ static inline int##w##_t zig_addw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs + (uint##w##_t)rhs), bits); \
} \
\
- static inline zig_u##w zig_subw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ static inline uint##w##_t zig_subw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
return zig_wrap_u##w(lhs - rhs, bits); \
} \
\
- static inline zig_i##w zig_subw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs - (zig_u##w)rhs), bits); \
+ static inline int##w##_t zig_subw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs - (uint##w##_t)rhs), bits); \
} \
\
- static inline zig_u##w zig_mulw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ static inline uint##w##_t zig_mulw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
return zig_wrap_u##w(lhs * rhs, bits); \
} \
\
- static inline zig_i##w zig_mulw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs * (zig_u##w)rhs), bits); \
+ static inline int##w##_t zig_mulw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs * (uint##w##_t)rhs), bits); \
}
zig_int_helpers(8)
zig_int_helpers(16)
zig_int_helpers(32)
zig_int_helpers(64)
-static inline bool zig_addo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+static inline bool zig_addo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
- return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
+ return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits);
#else
*res = zig_addw_u32(lhs, rhs, bits);
return *res < lhs;
#endif
}
-static inline void zig_vaddo_u32(zig_u8 *ov, zig_u32 *res, int n,
- const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i32 __addosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
-static inline bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+zig_extern int32_t __addosi4(int32_t lhs, int32_t rhs, int *overflow);
+static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i32 full_res = __addosi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int32_t full_res = __addosi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i32(full_res, bits);
- return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
+ return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vaddo_i32(zig_u8 *ov, zig_i32 *res, int n,
- const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_u64 full_res;
+ uint64_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
- return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
+ return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits);
#else
*res = zig_addw_u64(lhs, rhs, bits);
return *res < lhs;
#endif
}
-static inline void zig_vaddo_u64(zig_u8 *ov, zig_u64 *res, int n,
- const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i64 __addodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
-static inline bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+zig_extern int64_t __addodi4(int64_t lhs, int64_t rhs, int *overflow);
+static inline bool zig_addo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_i64 full_res;
+ int64_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i64 full_res = __addodi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int64_t full_res = __addodi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i64(full_res, bits);
- return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
+ return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vaddo_i64(zig_u8 *ov, zig_i64 *res, int n,
- const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_addo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_u8 full_res;
+ uint8_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
- return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
+ return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u8)full_res;
+ *res = (uint8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vaddo_u8(zig_u8 *ov, zig_u8 *res, int n,
- const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+static inline bool zig_addo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_i8 full_res;
+ int8_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
- return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
+ return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i8)full_res;
+ *res = (int8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vaddo_i8(zig_u8 *ov, zig_i8 *res, int n,
- const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+static inline bool zig_addo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_u16 full_res;
+ uint16_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
- return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
+ return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u16)full_res;
+ *res = (uint16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vaddo_u16(zig_u8 *ov, zig_u16 *res, int n,
- const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+static inline bool zig_addo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_i16 full_res;
+ int16_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
- return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
+ return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i16)full_res;
+ *res = (int16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vaddo_i16(zig_u8 *ov, zig_i16 *res, int n,
- const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
- return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
+ return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits);
#else
*res = zig_subw_u32(lhs, rhs, bits);
return *res > lhs;
#endif
}
-static inline void zig_vsubo_u32(zig_u8 *ov, zig_u32 *res, int n,
- const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i32 __subosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
-static inline bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+zig_extern int32_t __subosi4(int32_t lhs, int32_t rhs, int *overflow);
+static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i32 full_res = __subosi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int32_t full_res = __subosi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i32(full_res, bits);
- return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
+ return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vsubo_i32(zig_u8 *ov, zig_i32 *res, int n,
- const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_u64 full_res;
+ uint64_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
- return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
+ return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits);
#else
*res = zig_subw_u64(lhs, rhs, bits);
return *res > lhs;
#endif
}
-static inline void zig_vsubo_u64(zig_u8 *ov, zig_u64 *res, int n,
- const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i64 __subodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
-static inline bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+zig_extern int64_t __subodi4(int64_t lhs, int64_t rhs, int *overflow);
+static inline bool zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_i64 full_res;
+ int64_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i64 full_res = __subodi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int64_t full_res = __subodi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i64(full_res, bits);
- return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
+ return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vsubo_i64(zig_u8 *ov, zig_i64 *res, int n,
- const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_subo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_u8 full_res;
+ uint8_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
- return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
+ return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u8)full_res;
+ *res = (uint8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vsubo_u8(zig_u8 *ov, zig_u8 *res, int n,
- const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+static inline bool zig_subo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_i8 full_res;
+ int8_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
- return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
+ return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i8)full_res;
+ *res = (int8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vsubo_i8(zig_u8 *ov, zig_i8 *res, int n,
- const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
-
-static inline bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+static inline bool zig_subo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_u16 full_res;
+ uint16_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
- return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
+ return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u16)full_res;
+ *res = (uint16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vsubo_u16(zig_u8 *ov, zig_u16 *res, int n,
- const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
-
-static inline bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+static inline bool zig_subo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_i16 full_res;
+ int16_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
- return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
+ return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i16)full_res;
+ *res = (int16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vsubo_i16(zig_u8 *ov, zig_i16 *res, int n,
- const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
- return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
+ return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits);
#else
*res = zig_mulw_u32(lhs, rhs, bits);
- return rhs != zig_as_u32(0) && lhs > zig_maxInt(u32, bits) / rhs;
+ return rhs != UINT32_C(0) && lhs > zig_maxInt_u(32, bits) / rhs;
#endif
}
-static inline void zig_vmulo_u32(zig_u8 *ov, zig_u32 *res, int n,
- const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i32 __mulosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
-static inline bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+zig_extern int32_t __mulosi4(int32_t lhs, int32_t rhs, int *overflow);
+static inline bool zig_mulo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i32 full_res = __mulosi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int32_t full_res = __mulosi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i32(full_res, bits);
- return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
+ return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vmulo_i32(zig_u8 *ov, zig_i32 *res, int n,
- const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_u64 full_res;
+ uint64_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
- return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
+ return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits);
#else
*res = zig_mulw_u64(lhs, rhs, bits);
- return rhs != zig_as_u64(0) && lhs > zig_maxInt(u64, bits) / rhs;
+ return rhs != UINT64_C(0) && lhs > zig_maxInt_u(64, bits) / rhs;
#endif
}
-static inline void zig_vmulo_u64(zig_u8 *ov, zig_u64 *res, int n,
- const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i64 __mulodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
-static inline bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+zig_extern int64_t __mulodi4(int64_t lhs, int64_t rhs, int *overflow);
+static inline bool zig_mulo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_i64 full_res;
+ int64_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i64 full_res = __mulodi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int64_t full_res = __mulodi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i64(full_res, bits);
- return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
+ return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vmulo_i64(zig_u8 *ov, zig_i64 *res, int n,
- const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_u8 full_res;
+ uint8_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
- return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
+ return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u8)full_res;
+ *res = (uint8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vmulo_u8(zig_u8 *ov, zig_u8 *res, int n,
- const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+static inline bool zig_mulo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_i8 full_res;
+ int8_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
- return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
+ return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i8)full_res;
+ *res = (int8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vmulo_i8(zig_u8 *ov, zig_i8 *res, int n,
- const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_u16 full_res;
+ uint16_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
- return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
+ return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u16)full_res;
+ *res = (uint16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vmulo_u16(zig_u8 *ov, zig_u16 *res, int n,
- const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_i16 full_res;
+ int16_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
- return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
+ return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i16)full_res;
+ *res = (int16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vmulo_i16(zig_u8 *ov, zig_i16 *res, int n,
- const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
#define zig_int_builtins(w) \
- static inline bool zig_shlo_u##w(zig_u##w *res, zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ static inline bool zig_shlo_u##w(uint##w##_t *res, uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \
*res = zig_shlw_u##w(lhs, rhs, bits); \
- return lhs > zig_maxInt(u##w, bits) >> rhs; \
+ return lhs > zig_maxInt_u(w, bits) >> rhs; \
} \
\
- static inline bool zig_shlo_i##w(zig_i##w *res, zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ static inline bool zig_shlo_i##w(int##w##_t *res, int##w##_t lhs, uint8_t rhs, uint8_t bits) { \
*res = zig_shlw_i##w(lhs, rhs, bits); \
- zig_i##w mask = (zig_i##w)(zig_maxInt_u##w << (bits - rhs - 1)); \
- return (lhs & mask) != zig_as_i##w(0) && (lhs & mask) != mask; \
+ int##w##_t mask = (int##w##_t)(UINT##w##_MAX << (bits - rhs - 1)); \
+ return (lhs & mask) != INT##w##_C(0) && (lhs & mask) != mask; \
} \
\
- static inline zig_u##w zig_shls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
- zig_u##w res; \
- if (rhs >= bits) return lhs != zig_as_u##w(0) ? zig_maxInt(u##w, bits) : lhs; \
- return zig_shlo_u##w(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u##w, bits) : res; \
+ static inline uint##w##_t zig_shls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
+ uint##w##_t res; \
+ if (rhs >= bits) return lhs != UINT##w##_C(0) ? zig_maxInt_u(w, bits) : lhs; \
+ return zig_shlo_u##w(&res, lhs, (uint8_t)rhs, bits) ? zig_maxInt_u(w, bits) : res; \
} \
\
- static inline zig_i##w zig_shls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- zig_i##w res; \
- if ((zig_u##w)rhs < (zig_u##w)bits && !zig_shlo_i##w(&res, lhs, rhs, bits)) return res; \
- return lhs < zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ static inline int##w##_t zig_shls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ int##w##_t res; \
+ if ((uint##w##_t)rhs < (uint##w##_t)bits && !zig_shlo_i##w(&res, lhs, (uint8_t)rhs, bits)) return res; \
+ return lhs < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
} \
\
- static inline zig_u##w zig_adds_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
- zig_u##w res; \
- return zig_addo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \
+ static inline uint##w##_t zig_adds_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
+ uint##w##_t res; \
+ return zig_addo_u##w(&res, lhs, rhs, bits) ? zig_maxInt_u(w, bits) : res; \
} \
\
- static inline zig_i##w zig_adds_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- zig_i##w res; \
+ static inline int##w##_t zig_adds_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ int##w##_t res; \
if (!zig_addo_i##w(&res, lhs, rhs, bits)) return res; \
- return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ return res >= INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
} \
\
- static inline zig_u##w zig_subs_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
- zig_u##w res; \
- return zig_subo_u##w(&res, lhs, rhs, bits) ? zig_minInt(u##w, bits) : res; \
+ static inline uint##w##_t zig_subs_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
+ uint##w##_t res; \
+ return zig_subo_u##w(&res, lhs, rhs, bits) ? zig_minInt_u(w, bits) : res; \
} \
\
- static inline zig_i##w zig_subs_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- zig_i##w res; \
+ static inline int##w##_t zig_subs_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ int##w##_t res; \
if (!zig_subo_i##w(&res, lhs, rhs, bits)) return res; \
- return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ return res >= INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
} \
\
- static inline zig_u##w zig_muls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
- zig_u##w res; \
- return zig_mulo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \
+ static inline uint##w##_t zig_muls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
+ uint##w##_t res; \
+ return zig_mulo_u##w(&res, lhs, rhs, bits) ? zig_maxInt_u(w, bits) : res; \
} \
\
- static inline zig_i##w zig_muls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- zig_i##w res; \
+ static inline int##w##_t zig_muls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ int##w##_t res; \
if (!zig_mulo_i##w(&res, lhs, rhs, bits)) return res; \
- return (lhs ^ rhs) < zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ return (lhs ^ rhs) < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
}
zig_int_builtins(8)
zig_int_builtins(16)
@@ -988,89 +986,89 @@ zig_int_builtins(32)
zig_int_builtins(64)
#define zig_builtin8(name, val) __builtin_##name(val)
-typedef zig_c_uint zig_Builtin8;
+typedef unsigned int zig_Builtin8;
#define zig_builtin16(name, val) __builtin_##name(val)
-typedef zig_c_uint zig_Builtin16;
+typedef unsigned int zig_Builtin16;
#if INT_MIN <= INT32_MIN
#define zig_builtin32(name, val) __builtin_##name(val)
-typedef zig_c_uint zig_Builtin32;
+typedef unsigned int zig_Builtin32;
#elif LONG_MIN <= INT32_MIN
#define zig_builtin32(name, val) __builtin_##name##l(val)
-typedef zig_c_ulong zig_Builtin32;
+typedef unsigned long zig_Builtin32;
#endif
#if INT_MIN <= INT64_MIN
#define zig_builtin64(name, val) __builtin_##name(val)
-typedef zig_c_uint zig_Builtin64;
+typedef unsigned int zig_Builtin64;
#elif LONG_MIN <= INT64_MIN
#define zig_builtin64(name, val) __builtin_##name##l(val)
-typedef zig_c_ulong zig_Builtin64;
+typedef unsigned long zig_Builtin64;
#elif LLONG_MIN <= INT64_MIN
#define zig_builtin64(name, val) __builtin_##name##ll(val)
-typedef zig_c_ulonglong zig_Builtin64;
+typedef unsigned long long zig_Builtin64;
#endif
-static inline zig_u8 zig_byte_swap_u8(zig_u8 val, zig_u8 bits) {
+static inline uint8_t zig_byte_swap_u8(uint8_t val, uint8_t bits) {
return zig_wrap_u8(val >> (8 - bits), bits);
}
-static inline zig_i8 zig_byte_swap_i8(zig_i8 val, zig_u8 bits) {
- return zig_wrap_i8((zig_i8)zig_byte_swap_u8((zig_u8)val, bits), bits);
+static inline int8_t zig_byte_swap_i8(int8_t val, uint8_t bits) {
+ return zig_wrap_i8((int8_t)zig_byte_swap_u8((uint8_t)val, bits), bits);
}
-static inline zig_u16 zig_byte_swap_u16(zig_u16 val, zig_u8 bits) {
- zig_u16 full_res;
+static inline uint16_t zig_byte_swap_u16(uint16_t val, uint8_t bits) {
+ uint16_t full_res;
#if zig_has_builtin(bswap16) || defined(zig_gnuc)
full_res = __builtin_bswap16(val);
#else
- full_res = (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 0), 8) << 8 |
- (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 8), 8) >> 0;
+ full_res = (uint16_t)zig_byte_swap_u8((uint8_t)(val >> 0), 8) << 8 |
+ (uint16_t)zig_byte_swap_u8((uint8_t)(val >> 8), 8) >> 0;
#endif
return zig_wrap_u16(full_res >> (16 - bits), bits);
}
-static inline zig_i16 zig_byte_swap_i16(zig_i16 val, zig_u8 bits) {
- return zig_wrap_i16((zig_i16)zig_byte_swap_u16((zig_u16)val, bits), bits);
+static inline int16_t zig_byte_swap_i16(int16_t val, uint8_t bits) {
+ return zig_wrap_i16((int16_t)zig_byte_swap_u16((uint16_t)val, bits), bits);
}
-static inline zig_u32 zig_byte_swap_u32(zig_u32 val, zig_u8 bits) {
- zig_u32 full_res;
+static inline uint32_t zig_byte_swap_u32(uint32_t val, uint8_t bits) {
+ uint32_t full_res;
#if zig_has_builtin(bswap32) || defined(zig_gnuc)
full_res = __builtin_bswap32(val);
#else
- full_res = (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 0), 16) << 16 |
- (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 16), 16) >> 0;
+ full_res = (uint32_t)zig_byte_swap_u16((uint16_t)(val >> 0), 16) << 16 |
+ (uint32_t)zig_byte_swap_u16((uint16_t)(val >> 16), 16) >> 0;
#endif
return zig_wrap_u32(full_res >> (32 - bits), bits);
}
-static inline zig_i32 zig_byte_swap_i32(zig_i32 val, zig_u8 bits) {
- return zig_wrap_i32((zig_i32)zig_byte_swap_u32((zig_u32)val, bits), bits);
+static inline int32_t zig_byte_swap_i32(int32_t val, uint8_t bits) {
+ return zig_wrap_i32((int32_t)zig_byte_swap_u32((uint32_t)val, bits), bits);
}
-static inline zig_u64 zig_byte_swap_u64(zig_u64 val, zig_u8 bits) {
- zig_u64 full_res;
+static inline uint64_t zig_byte_swap_u64(uint64_t val, uint8_t bits) {
+ uint64_t full_res;
#if zig_has_builtin(bswap64) || defined(zig_gnuc)
full_res = __builtin_bswap64(val);
#else
- full_res = (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 0), 32) << 32 |
- (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 32), 32) >> 0;
+ full_res = (uint64_t)zig_byte_swap_u32((uint32_t)(val >> 0), 32) << 32 |
+ (uint64_t)zig_byte_swap_u32((uint32_t)(val >> 32), 32) >> 0;
#endif
return zig_wrap_u64(full_res >> (64 - bits), bits);
}
-static inline zig_i64 zig_byte_swap_i64(zig_i64 val, zig_u8 bits) {
- return zig_wrap_i64((zig_i64)zig_byte_swap_u64((zig_u64)val, bits), bits);
+static inline int64_t zig_byte_swap_i64(int64_t val, uint8_t bits) {
+ return zig_wrap_i64((int64_t)zig_byte_swap_u64((uint64_t)val, bits), bits);
}
-static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) {
- zig_u8 full_res;
+static inline uint8_t zig_bit_reverse_u8(uint8_t val, uint8_t bits) {
+ uint8_t full_res;
#if zig_has_builtin(bitreverse8)
full_res = __builtin_bitreverse8(val);
#else
- static zig_u8 const lut[0x10] = {
+ static uint8_t const lut[0x10] = {
0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf
};
@@ -1079,62 +1077,62 @@ static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) {
return zig_wrap_u8(full_res >> (8 - bits), bits);
}
-static inline zig_i8 zig_bit_reverse_i8(zig_i8 val, zig_u8 bits) {
- return zig_wrap_i8((zig_i8)zig_bit_reverse_u8((zig_u8)val, bits), bits);
+static inline int8_t zig_bit_reverse_i8(int8_t val, uint8_t bits) {
+ return zig_wrap_i8((int8_t)zig_bit_reverse_u8((uint8_t)val, bits), bits);
}
-static inline zig_u16 zig_bit_reverse_u16(zig_u16 val, zig_u8 bits) {
- zig_u16 full_res;
+static inline uint16_t zig_bit_reverse_u16(uint16_t val, uint8_t bits) {
+ uint16_t full_res;
#if zig_has_builtin(bitreverse16)
full_res = __builtin_bitreverse16(val);
#else
- full_res = (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 0), 8) << 8 |
- (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 8), 8) >> 0;
+ full_res = (uint16_t)zig_bit_reverse_u8((uint8_t)(val >> 0), 8) << 8 |
+ (uint16_t)zig_bit_reverse_u8((uint8_t)(val >> 8), 8) >> 0;
#endif
return zig_wrap_u16(full_res >> (16 - bits), bits);
}
-static inline zig_i16 zig_bit_reverse_i16(zig_i16 val, zig_u8 bits) {
- return zig_wrap_i16((zig_i16)zig_bit_reverse_u16((zig_u16)val, bits), bits);
+static inline int16_t zig_bit_reverse_i16(int16_t val, uint8_t bits) {
+ return zig_wrap_i16((int16_t)zig_bit_reverse_u16((uint16_t)val, bits), bits);
}
-static inline zig_u32 zig_bit_reverse_u32(zig_u32 val, zig_u8 bits) {
- zig_u32 full_res;
+static inline uint32_t zig_bit_reverse_u32(uint32_t val, uint8_t bits) {
+ uint32_t full_res;
#if zig_has_builtin(bitreverse32)
full_res = __builtin_bitreverse32(val);
#else
- full_res = (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 0), 16) << 16 |
- (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 16), 16) >> 0;
+ full_res = (uint32_t)zig_bit_reverse_u16((uint16_t)(val >> 0), 16) << 16 |
+ (uint32_t)zig_bit_reverse_u16((uint16_t)(val >> 16), 16) >> 0;
#endif
return zig_wrap_u32(full_res >> (32 - bits), bits);
}
-static inline zig_i32 zig_bit_reverse_i32(zig_i32 val, zig_u8 bits) {
- return zig_wrap_i32((zig_i32)zig_bit_reverse_u32((zig_u32)val, bits), bits);
+static inline int32_t zig_bit_reverse_i32(int32_t val, uint8_t bits) {
+ return zig_wrap_i32((int32_t)zig_bit_reverse_u32((uint32_t)val, bits), bits);
}
-static inline zig_u64 zig_bit_reverse_u64(zig_u64 val, zig_u8 bits) {
- zig_u64 full_res;
+static inline uint64_t zig_bit_reverse_u64(uint64_t val, uint8_t bits) {
+ uint64_t full_res;
#if zig_has_builtin(bitreverse64)
full_res = __builtin_bitreverse64(val);
#else
- full_res = (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 0), 32) << 32 |
- (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 32), 32) >> 0;
+ full_res = (uint64_t)zig_bit_reverse_u32((uint32_t)(val >> 0), 32) << 32 |
+ (uint64_t)zig_bit_reverse_u32((uint32_t)(val >> 32), 32) >> 0;
#endif
return zig_wrap_u64(full_res >> (64 - bits), bits);
}
-static inline zig_i64 zig_bit_reverse_i64(zig_i64 val, zig_u8 bits) {
- return zig_wrap_i64((zig_i64)zig_bit_reverse_u64((zig_u64)val, bits), bits);
+static inline int64_t zig_bit_reverse_i64(int64_t val, uint8_t bits) {
+ return zig_wrap_i64((int64_t)zig_bit_reverse_u64((uint64_t)val, bits), bits);
}
#define zig_builtin_popcount_common(w) \
- static inline zig_u8 zig_popcount_i##w(zig_i##w val, zig_u8 bits) { \
- return zig_popcount_u##w((zig_u##w)val, bits); \
+ static inline uint8_t zig_popcount_i##w(int##w##_t val, uint8_t bits) { \
+ return zig_popcount_u##w((uint##w##_t)val, bits); \
}
#if zig_has_builtin(popcount) || defined(zig_gnuc)
#define zig_builtin_popcount(w) \
- static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_popcount_u##w(uint##w##_t val, uint8_t bits) { \
(void)bits; \
return zig_builtin##w(popcount, val); \
} \
@@ -1142,12 +1140,12 @@ static inline zig_i64 zig_bit_reverse_i64(zig_i64 val, zig_u8 bits) {
zig_builtin_popcount_common(w)
#else
#define zig_builtin_popcount(w) \
- static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_popcount_u##w(uint##w##_t val, uint8_t bits) { \
(void)bits; \
- zig_u##w temp = val - ((val >> 1) & (zig_maxInt_u##w / 3)); \
- temp = (temp & (zig_maxInt_u##w / 5)) + ((temp >> 2) & (zig_maxInt_u##w / 5)); \
- temp = (temp + (temp >> 4)) & (zig_maxInt_u##w / 17); \
- return temp * (zig_maxInt_u##w / 255) >> (w - 8); \
+ uint##w##_t temp = val - ((val >> 1) & (UINT##w##_MAX / 3)); \
+ temp = (temp & (UINT##w##_MAX / 5)) + ((temp >> 2) & (UINT##w##_MAX / 5)); \
+ temp = (temp + (temp >> 4)) & (UINT##w##_MAX / 17); \
+ return temp * (UINT##w##_MAX / 255) >> (w - 8); \
} \
\
zig_builtin_popcount_common(w)
@@ -1158,12 +1156,12 @@ zig_builtin_popcount(32)
zig_builtin_popcount(64)
#define zig_builtin_ctz_common(w) \
- static inline zig_u8 zig_ctz_i##w(zig_i##w val, zig_u8 bits) { \
- return zig_ctz_u##w((zig_u##w)val, bits); \
+ static inline uint8_t zig_ctz_i##w(int##w##_t val, uint8_t bits) { \
+ return zig_ctz_u##w((uint##w##_t)val, bits); \
}
#if zig_has_builtin(ctz) || defined(zig_gnuc)
#define zig_builtin_ctz(w) \
- static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_ctz_u##w(uint##w##_t val, uint8_t bits) { \
if (val == 0) return bits; \
return zig_builtin##w(ctz, val); \
} \
@@ -1171,7 +1169,7 @@ zig_builtin_popcount(64)
zig_builtin_ctz_common(w)
#else
#define zig_builtin_ctz(w) \
- static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_ctz_u##w(uint##w##_t val, uint8_t bits) { \
return zig_popcount_u##w(zig_not_u##w(val, bits) & zig_subw_u##w(val, 1, bits), bits); \
} \
\
@@ -1183,12 +1181,12 @@ zig_builtin_ctz(32)
zig_builtin_ctz(64)
#define zig_builtin_clz_common(w) \
- static inline zig_u8 zig_clz_i##w(zig_i##w val, zig_u8 bits) { \
- return zig_clz_u##w((zig_u##w)val, bits); \
+ static inline uint8_t zig_clz_i##w(int##w##_t val, uint8_t bits) { \
+ return zig_clz_u##w((uint##w##_t)val, bits); \
}
#if zig_has_builtin(clz) || defined(zig_gnuc)
#define zig_builtin_clz(w) \
- static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_clz_u##w(uint##w##_t val, uint8_t bits) { \
if (val == 0) return bits; \
return zig_builtin##w(clz, val) - (zig_bitSizeOf(zig_Builtin##w) - bits); \
} \
@@ -1196,7 +1194,7 @@ zig_builtin_ctz(64)
zig_builtin_clz_common(w)
#else
#define zig_builtin_clz(w) \
- static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_clz_u##w(uint##w##_t val, uint8_t bits) { \
return zig_ctz_u##w(zig_bit_reverse_u##w(val, bits), bits); \
} \
\
@@ -1207,7 +1205,7 @@ zig_builtin_clz(16)
zig_builtin_clz(32)
zig_builtin_clz(64)
-/* ======================== 128-bit Integer Routines ======================== */
+/* ======================== 128-bit Integer Support ========================= */
#if !defined(zig_has_int128)
# if defined(__SIZEOF_INT128__)
@@ -1222,18 +1220,18 @@ zig_builtin_clz(64)
typedef unsigned __int128 zig_u128;
typedef signed __int128 zig_i128;
-#define zig_as_u128(hi, lo) ((zig_u128)(hi)<<64|(lo))
-#define zig_as_i128(hi, lo) ((zig_i128)zig_as_u128(hi, lo))
-#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo)
-#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo)
-#define zig_hi_u128(val) ((zig_u64)((val) >> 64))
-#define zig_lo_u128(val) ((zig_u64)((val) >> 0))
-#define zig_hi_i128(val) ((zig_i64)((val) >> 64))
-#define zig_lo_i128(val) ((zig_u64)((val) >> 0))
+#define zig_make_u128(hi, lo) ((zig_u128)(hi)<<64|(lo))
+#define zig_make_i128(hi, lo) ((zig_i128)zig_make_u128(hi, lo))
+#define zig_init_u128(hi, lo) zig_make_u128(hi, lo)
+#define zig_init_i128(hi, lo) zig_make_i128(hi, lo)
+#define zig_hi_u128(val) ((uint64_t)((val) >> 64))
+#define zig_lo_u128(val) ((uint64_t)((val) >> 0))
+#define zig_hi_i128(val) (( int64_t)((val) >> 64))
+#define zig_lo_i128(val) ((uint64_t)((val) >> 0))
#define zig_bitcast_u128(val) ((zig_u128)(val))
#define zig_bitcast_i128(val) ((zig_i128)(val))
#define zig_cmp_int128(Type) \
- static inline zig_i32 zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ static inline int32_t zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \
return (lhs > rhs) - (lhs < rhs); \
}
#define zig_bit_int128(Type, operation, operator) \
@@ -1243,32 +1241,32 @@ typedef signed __int128 zig_i128;
#else /* zig_has_int128 */
-#if __LITTLE_ENDIAN__ || _MSC_VER
-typedef struct { zig_align(16) zig_u64 lo; zig_u64 hi; } zig_u128;
-typedef struct { zig_align(16) zig_u64 lo; zig_i64 hi; } zig_i128;
+#if zig_little_endian
+typedef struct { zig_align(16) uint64_t lo; uint64_t hi; } zig_u128;
+typedef struct { zig_align(16) uint64_t lo; int64_t hi; } zig_i128;
#else
-typedef struct { zig_align(16) zig_u64 hi; zig_u64 lo; } zig_u128;
-typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128;
+typedef struct { zig_align(16) uint64_t hi; uint64_t lo; } zig_u128;
+typedef struct { zig_align(16) int64_t hi; uint64_t lo; } zig_i128;
#endif
-#define zig_as_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) })
-#define zig_as_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) })
+#define zig_make_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) })
+#define zig_make_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) })
-#if _MSC_VER
-#define zig_as_constant_u128(hi, lo) { .h##i = (hi), .l##o = (lo) }
-#define zig_as_constant_i128(hi, lo) { .h##i = (hi), .l##o = (lo) }
-#else
-#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo)
-#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo)
+#if _MSC_VER /* MSVC doesn't allow struct literals in constant expressions */
+#define zig_init_u128(hi, lo) { .h##i = (hi), .l##o = (lo) }
+#define zig_init_i128(hi, lo) { .h##i = (hi), .l##o = (lo) }
+#else /* But non-MSVC doesn't like the unprotected commas */
+#define zig_init_u128(hi, lo) zig_make_u128(hi, lo)
+#define zig_init_i128(hi, lo) zig_make_i128(hi, lo)
#endif
#define zig_hi_u128(val) ((val).hi)
#define zig_lo_u128(val) ((val).lo)
#define zig_hi_i128(val) ((val).hi)
#define zig_lo_i128(val) ((val).lo)
-#define zig_bitcast_u128(val) zig_as_u128((zig_u64)(val).hi, (val).lo)
-#define zig_bitcast_i128(val) zig_as_i128((zig_i64)(val).hi, (val).lo)
+#define zig_bitcast_u128(val) zig_make_u128((uint64_t)(val).hi, (val).lo)
+#define zig_bitcast_i128(val) zig_make_i128(( int64_t)(val).hi, (val).lo)
#define zig_cmp_int128(Type) \
- static inline zig_i32 zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ static inline int32_t zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \
return (lhs.hi == rhs.hi) \
? (lhs.lo > rhs.lo) - (lhs.lo < rhs.lo) \
: (lhs.hi > rhs.hi) - (lhs.hi < rhs.hi); \
@@ -1280,10 +1278,10 @@ typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128;
#endif /* zig_has_int128 */
-#define zig_minInt_u128 zig_as_u128(zig_minInt_u64, zig_minInt_u64)
-#define zig_maxInt_u128 zig_as_u128(zig_maxInt_u64, zig_maxInt_u64)
-#define zig_minInt_i128 zig_as_i128(zig_minInt_i64, zig_minInt_u64)
-#define zig_maxInt_i128 zig_as_i128(zig_maxInt_i64, zig_maxInt_u64)
+#define zig_minInt_u128 zig_make_u128(zig_minInt_u64, zig_minInt_u64)
+#define zig_maxInt_u128 zig_make_u128(zig_maxInt_u64, zig_maxInt_u64)
+#define zig_minInt_i128 zig_make_i128(zig_minInt_i64, zig_minInt_u64)
+#define zig_maxInt_i128 zig_make_i128(zig_maxInt_i64, zig_maxInt_u64)
zig_cmp_int128(u128)
zig_cmp_int128(i128)
@@ -1297,28 +1295,33 @@ zig_bit_int128(i128, or, |)
zig_bit_int128(u128, xor, ^)
zig_bit_int128(i128, xor, ^)
-static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs);
+static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs);
#if zig_has_int128
-static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) {
- return val ^ zig_maxInt(u128, bits);
+static inline zig_u128 zig_not_u128(zig_u128 val, uint8_t bits) {
+ return val ^ zig_maxInt_u(128, bits);
}
-static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) {
+static inline zig_i128 zig_not_i128(zig_i128 val, uint8_t bits) {
(void)bits;
return ~val;
}
-static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) {
+static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs) {
return lhs >> rhs;
}
-static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) {
+static inline zig_u128 zig_shl_u128(zig_u128 lhs, uint8_t rhs) {
return lhs << rhs;
}
-static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) {
+static inline zig_i128 zig_shr_i128(zig_i128 lhs, uint8_t rhs) {
+ zig_i128 sign_mask = lhs < zig_make_i128(0, 0) ? -zig_make_i128(0, 1) : zig_make_i128(0, 0);
+ return ((lhs ^ sign_mask) >> rhs) ^ sign_mask;
+}
+
+static inline zig_i128 zig_shl_i128(zig_i128 lhs, uint8_t rhs) {
return lhs << rhs;
}
@@ -1363,40 +1366,46 @@ static inline zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) {
}
static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_div_trunc_i128(lhs, rhs) - (((lhs ^ rhs) & zig_rem_i128(lhs, rhs)) < zig_as_i128(0, 0));
+ return zig_div_trunc_i128(lhs, rhs) - (((lhs ^ rhs) & zig_rem_i128(lhs, rhs)) < zig_make_i128(0, 0));
}
static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
zig_i128 rem = zig_rem_i128(lhs, rhs);
- return rem + (((lhs ^ rhs) & rem) < zig_as_i128(0, 0) ? rhs : zig_as_i128(0, 0));
+ return rem + (((lhs ^ rhs) & rem) < zig_make_i128(0, 0) ? rhs : zig_make_i128(0, 0));
}
#else /* zig_has_int128 */
-static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) {
- return (zig_u128){ .hi = zig_not_u64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) };
+static inline zig_u128 zig_not_u128(zig_u128 val, uint8_t bits) {
+ return (zig_u128){ .hi = zig_not_u64(val.hi, bits - UINT8_C(64)), .lo = zig_not_u64(val.lo, UINT8_C(64)) };
}
-static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) {
- return (zig_i128){ .hi = zig_not_i64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) };
+static inline zig_i128 zig_not_i128(zig_i128 val, uint8_t bits) {
+ return (zig_i128){ .hi = zig_not_i64(val.hi, bits - UINT8_C(64)), .lo = zig_not_u64(val.lo, UINT8_C(64)) };
}
-static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) {
- if (rhs == zig_as_u8(0)) return lhs;
- if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - zig_as_u8(64)) };
- return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (zig_as_u8(64) - rhs) | lhs.lo >> rhs };
+static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs) {
+ if (rhs == UINT8_C(0)) return lhs;
+ if (rhs >= UINT8_C(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - UINT8_C(64)) };
+ return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (UINT8_C(64) - rhs) | lhs.lo >> rhs };
}
-static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) {
- if (rhs == zig_as_u8(0)) return lhs;
- if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 };
- return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs };
+static inline zig_u128 zig_shl_u128(zig_u128 lhs, uint8_t rhs) {
+ if (rhs == UINT8_C(0)) return lhs;
+ if (rhs >= UINT8_C(64)) return (zig_u128){ .hi = lhs.lo << (rhs - UINT8_C(64)), .lo = zig_minInt_u64 };
+ return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (UINT8_C(64) - rhs), .lo = lhs.lo << rhs };
}
-static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) {
- if (rhs == zig_as_u8(0)) return lhs;
- if (rhs >= zig_as_u8(64)) return (zig_i128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 };
- return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs };
+static inline zig_i128 zig_shr_i128(zig_i128 lhs, uint8_t rhs) {
+ if (rhs == UINT8_C(0)) return lhs;
+ if (rhs >= UINT8_C(64)) return (zig_i128){ .hi = zig_shr_i64(lhs.hi, 63), .lo = zig_shr_i64(lhs.hi, (rhs - UINT8_C(64))) };
+ return (zig_i128){ .hi = zig_shr_i64(lhs.hi, rhs), .lo = lhs.lo >> rhs | (uint64_t)lhs.hi << (UINT8_C(64) - rhs) };
+}
+
+static inline zig_i128 zig_shl_i128(zig_i128 lhs, uint8_t rhs) {
+ if (rhs == UINT8_C(0)) return lhs;
+ if (rhs >= UINT8_C(64)) return (zig_i128){ .hi = lhs.lo << (rhs - UINT8_C(64)), .lo = zig_minInt_u64 };
+ return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (UINT8_C(64) - rhs), .lo = lhs.lo << rhs };
}
static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) {
@@ -1424,14 +1433,14 @@ static inline zig_i128 zig_sub_i128(zig_i128 lhs, zig_i128 rhs) {
}
zig_extern zig_i128 __multi3(zig_i128 lhs, zig_i128 rhs);
-static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) {
- return zig_bitcast_u128(__multi3(zig_bitcast_i128(lhs), zig_bitcast_i128(rhs)));
-}
-
static zig_i128 zig_mul_i128(zig_i128 lhs, zig_i128 rhs) {
return __multi3(lhs, rhs);
}
+static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) {
+ return zig_bitcast_u128(zig_mul_i128(zig_bitcast_i128(lhs), zig_bitcast_i128(rhs)));
+}
+
zig_extern zig_u128 __udivti3(zig_u128 lhs, zig_u128 rhs);
static zig_u128 zig_div_trunc_u128(zig_u128 lhs, zig_u128 rhs) {
return __udivti3(lhs, rhs);
@@ -1454,11 +1463,11 @@ static zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) {
static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
zig_i128 rem = zig_rem_i128(lhs, rhs);
- return zig_add_i128(rem, (((lhs.hi ^ rhs.hi) & rem.hi) < zig_as_i64(0) ? rhs : zig_as_i128(0, 0)));
+ return zig_add_i128(rem, ((lhs.hi ^ rhs.hi) & rem.hi) < INT64_C(0) ? rhs : zig_make_i128(0, 0));
}
static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_as_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_as_i128(0, 0)) < zig_as_i32(0)));
+ return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_make_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_make_i128(0, 0)) < INT32_C(0)));
}
#endif /* zig_has_int128 */
@@ -1471,326 +1480,1265 @@ static inline zig_u128 zig_nand_u128(zig_u128 lhs, zig_u128 rhs) {
}
static inline zig_u128 zig_min_u128(zig_u128 lhs, zig_u128 rhs) {
- return zig_cmp_u128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs;
+ return zig_cmp_u128(lhs, rhs) < INT32_C(0) ? lhs : rhs;
}
static inline zig_i128 zig_min_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_cmp_i128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs;
+ return zig_cmp_i128(lhs, rhs) < INT32_C(0) ? lhs : rhs;
}
static inline zig_u128 zig_max_u128(zig_u128 lhs, zig_u128 rhs) {
- return zig_cmp_u128(lhs, rhs) > zig_as_i32(0) ? lhs : rhs;
+ return zig_cmp_u128(lhs, rhs) > INT32_C(0) ? lhs : rhs;
}
static inline zig_i128 zig_max_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_cmp_i128(lhs, rhs) > zig_as_i32(0) ? lhs : rhs;
+ return zig_cmp_i128(lhs, rhs) > INT32_C(0) ? lhs : rhs;
}
-static inline zig_i128 zig_shr_i128(zig_i128 lhs, zig_u8 rhs) {
- zig_i128 sign_mask = zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_sub_i128(zig_as_i128(0, 0), zig_as_i128(0, 1)) : zig_as_i128(0, 0);
- return zig_xor_i128(zig_bitcast_i128(zig_shr_u128(zig_bitcast_u128(zig_xor_i128(lhs, sign_mask)), rhs)), sign_mask);
+static inline zig_u128 zig_wrap_u128(zig_u128 val, uint8_t bits) {
+ return zig_and_u128(val, zig_maxInt_u(128, bits));
}
-static inline zig_u128 zig_wrap_u128(zig_u128 val, zig_u8 bits) {
- return zig_and_u128(val, zig_maxInt(u128, bits));
+static inline zig_i128 zig_wrap_i128(zig_i128 val, uint8_t bits) {
+ if (bits > UINT8_C(64)) return zig_make_i128(zig_wrap_i64(zig_hi_i128(val), bits - UINT8_C(64)), zig_lo_i128(val));
+ int64_t lo = zig_wrap_i64((int64_t)zig_lo_i128(val), bits);
+ return zig_make_i128(zig_shr_i64(lo, 63), (uint64_t)lo);
}
-static inline zig_i128 zig_wrap_i128(zig_i128 val, zig_u8 bits) {
- return zig_as_i128(zig_wrap_i64(zig_hi_i128(val), bits - zig_as_u8(64)), zig_lo_i128(val));
-}
-
-static inline zig_u128 zig_shlw_u128(zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline zig_u128 zig_shlw_u128(zig_u128 lhs, uint8_t rhs, uint8_t bits) {
return zig_wrap_u128(zig_shl_u128(lhs, rhs), bits);
}
-static inline zig_i128 zig_shlw_i128(zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline zig_i128 zig_shlw_i128(zig_i128 lhs, uint8_t rhs, uint8_t bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_shl_u128(zig_bitcast_u128(lhs), rhs)), bits);
}
-static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
return zig_wrap_u128(zig_add_u128(lhs, rhs), bits);
}
-static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_add_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
-static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
return zig_wrap_u128(zig_sub_u128(lhs, rhs), bits);
}
-static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_sub_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
-static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
return zig_wrap_u128(zig_mul_u128(lhs, rhs), bits);
}
-static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_mul_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
#if zig_has_int128
-static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow)
zig_u128 full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u128(full_res, bits);
- return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
+ return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits);
#else
*res = zig_addw_u128(lhs, rhs, bits);
return *res < lhs;
#endif
}
-zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
+static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow)
zig_i128 full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
+ int overflow_int;
zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i128(full_res, bits);
- return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
+ return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits);
}
-static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow)
zig_u128 full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u128(full_res, bits);
- return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
+ return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits);
#else
*res = zig_subw_u128(lhs, rhs, bits);
return *res > lhs;
#endif
}
-zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
+static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow)
zig_i128 full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
+ int overflow_int;
zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i128(full_res, bits);
- return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
+ return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits);
}
-static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow)
zig_u128 full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u128(full_res, bits);
- return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
+ return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits);
#else
*res = zig_mulw_u128(lhs, rhs, bits);
- return rhs != zig_as_u128(0, 0) && lhs > zig_maxInt(u128, bits) / rhs;
+ return rhs != zig_make_u128(0, 0) && lhs > zig_maxInt_u(128, bits) / rhs;
#endif
}
-zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
+static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow)
zig_i128 full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
+ int overflow_int;
zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i128(full_res, bits);
- return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
+ return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits);
}
#else /* zig_has_int128 */
-static inline bool zig_overflow_u128(bool overflow, zig_u128 full_res, zig_u8 bits) {
- return overflow ||
- zig_cmp_u128(full_res, zig_minInt(u128, bits)) < zig_as_i32(0) ||
- zig_cmp_u128(full_res, zig_maxInt(u128, bits)) > zig_as_i32(0);
+static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
+ uint64_t hi;
+ bool overflow = zig_addo_u64(&hi, lhs.hi, rhs.hi, bits - 64);
+ return overflow ^ zig_addo_u64(&res->hi, hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64);
}
-static inline bool zig_overflow_i128(bool overflow, zig_i128 full_res, zig_u8 bits) {
- return overflow ||
- zig_cmp_i128(full_res, zig_minInt(i128, bits)) < zig_as_i32(0) ||
- zig_cmp_i128(full_res, zig_maxInt(i128, bits)) > zig_as_i32(0);
+static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
+ int64_t hi;
+ bool overflow = zig_addo_i64(&hi, lhs.hi, rhs.hi, bits - 64);
+ return overflow ^ zig_addo_i64(&res->hi, hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64);
}
-static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
- zig_u128 full_res;
- bool overflow =
- zig_addo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) |
- zig_addo_u64(&full_res.hi, full_res.hi, zig_addo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64);
- *res = zig_wrap_u128(full_res, bits);
- return zig_overflow_u128(overflow, full_res, bits);
+static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
+ uint64_t hi;
+ bool overflow = zig_subo_u64(&hi, lhs.hi, rhs.hi, bits - 64);
+ return overflow ^ zig_subo_u64(&res->hi, hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64);
}
-zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
- zig_c_int overflow_int;
- zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int);
- *res = zig_wrap_i128(full_res, bits);
- return zig_overflow_i128(overflow_int, full_res, bits);
+static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
+ int64_t hi;
+ bool overflow = zig_subo_i64(&hi, lhs.hi, rhs.hi, bits - 64);
+ return overflow ^ zig_subo_i64(&res->hi, hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64);
}
-static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
- zig_u128 full_res;
- bool overflow =
- zig_subo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) |
- zig_subo_u64(&full_res.hi, full_res.hi, zig_subo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64);
- *res = zig_wrap_u128(full_res, bits);
- return zig_overflow_u128(overflow, full_res, bits);
-}
-
-zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
- zig_c_int overflow_int;
- zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int);
- *res = zig_wrap_i128(full_res, bits);
- return zig_overflow_i128(overflow_int, full_res, bits);
-}
-
-static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
*res = zig_mulw_u128(lhs, rhs, bits);
- return zig_cmp_u128(*res, zig_as_u128(0, 0)) != zig_as_i32(0) &&
- zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0);
+ return zig_cmp_u128(*res, zig_make_u128(0, 0)) != INT32_C(0) &&
+ zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt_u(128, bits), rhs)) > INT32_C(0);
}
-zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
- zig_c_int overflow_int;
+zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
+static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
+ int overflow_int;
zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int);
+ bool overflow = overflow_int != 0 ||
+ zig_cmp_i128(full_res, zig_minInt_i(128, bits)) < INT32_C(0) ||
+ zig_cmp_i128(full_res, zig_maxInt_i(128, bits)) > INT32_C(0);
*res = zig_wrap_i128(full_res, bits);
- return zig_overflow_i128(overflow_int, full_res, bits);
+ return overflow;
}
#endif /* zig_has_int128 */
-static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, uint8_t rhs, uint8_t bits) {
*res = zig_shlw_u128(lhs, rhs, bits);
- return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0);
+ return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt_u(128, bits), rhs)) > INT32_C(0);
}
-static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, uint8_t rhs, uint8_t bits) {
*res = zig_shlw_i128(lhs, rhs, bits);
- zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - zig_as_u8(1)));
- return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != zig_as_i32(0) &&
- zig_cmp_i128(zig_and_i128(lhs, mask), mask) != zig_as_i32(0);
+ zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - UINT8_C(1)));
+ return zig_cmp_i128(zig_and_i128(lhs, mask), zig_make_i128(0, 0)) != INT32_C(0) &&
+ zig_cmp_i128(zig_and_i128(lhs, mask), mask) != INT32_C(0);
}
-static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
- if (zig_cmp_u128(rhs, zig_as_u128(0, bits)) >= zig_as_i32(0))
- return zig_cmp_u128(lhs, zig_as_u128(0, 0)) != zig_as_i32(0) ? zig_maxInt(u128, bits) : lhs;
-
-#if zig_has_int128
- return zig_shlo_u128(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u128, bits) : res;
-#else
- return zig_shlo_u128(&res, lhs, (zig_u8)rhs.lo, bits) ? zig_maxInt(u128, bits) : res;
-#endif
+ if (zig_cmp_u128(rhs, zig_make_u128(0, bits)) >= INT32_C(0))
+ return zig_cmp_u128(lhs, zig_make_u128(0, 0)) != INT32_C(0) ? zig_maxInt_u(128, bits) : lhs;
+ return zig_shlo_u128(&res, lhs, (uint8_t)zig_lo_u128(rhs), bits) ? zig_maxInt_u(128, bits) : res;
}
-static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
zig_i128 res;
- if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_as_u128(0, bits)) < zig_as_i32(0) && !zig_shlo_i128(&res, lhs, zig_lo_i128(rhs), bits)) return res;
- return zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+ if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_i128(&res, lhs, (uint8_t)zig_lo_i128(rhs), bits)) return res;
+ return zig_cmp_i128(lhs, zig_make_i128(0, 0)) < INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
}
-static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
- return zig_addo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res;
+ return zig_addo_u128(&res, lhs, rhs, bits) ? zig_maxInt_u(128, bits) : res;
}
-static inline zig_i128 zig_adds_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_adds_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
zig_i128 res;
if (!zig_addo_i128(&res, lhs, rhs, bits)) return res;
- return zig_cmp_i128(res, zig_as_i128(0, 0)) >= zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+ return zig_cmp_i128(res, zig_make_i128(0, 0)) >= INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
}
-static inline zig_u128 zig_subs_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_subs_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
- return zig_subo_u128(&res, lhs, rhs, bits) ? zig_minInt(u128, bits) : res;
+ return zig_subo_u128(&res, lhs, rhs, bits) ? zig_minInt_u(128, bits) : res;
}
-static inline zig_i128 zig_subs_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_subs_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
zig_i128 res;
if (!zig_subo_i128(&res, lhs, rhs, bits)) return res;
- return zig_cmp_i128(res, zig_as_i128(0, 0)) >= zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+ return zig_cmp_i128(res, zig_make_i128(0, 0)) >= INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
}
-static inline zig_u128 zig_muls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_muls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
- return zig_mulo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res;
+ return zig_mulo_u128(&res, lhs, rhs, bits) ? zig_maxInt_u(128, bits) : res;
}
-static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
zig_i128 res;
if (!zig_mulo_i128(&res, lhs, rhs, bits)) return res;
- return zig_cmp_i128(zig_xor_i128(lhs, rhs), zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+ return zig_cmp_i128(zig_xor_i128(lhs, rhs), zig_make_i128(0, 0)) < INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
}
-static inline zig_u8 zig_clz_u128(zig_u128 val, zig_u8 bits) {
- if (bits <= zig_as_u8(64)) return zig_clz_u64(zig_lo_u128(val), bits);
- if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - zig_as_u8(64));
- return zig_clz_u64(zig_lo_u128(val), zig_as_u8(64)) + (bits - zig_as_u8(64));
+static inline uint8_t zig_clz_u128(zig_u128 val, uint8_t bits) {
+ if (bits <= UINT8_C(64)) return zig_clz_u64(zig_lo_u128(val), bits);
+ if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - UINT8_C(64));
+ return zig_clz_u64(zig_lo_u128(val), UINT8_C(64)) + (bits - UINT8_C(64));
}
-static inline zig_u8 zig_clz_i128(zig_i128 val, zig_u8 bits) {
+static inline uint8_t zig_clz_i128(zig_i128 val, uint8_t bits) {
return zig_clz_u128(zig_bitcast_u128(val), bits);
}
-static inline zig_u8 zig_ctz_u128(zig_u128 val, zig_u8 bits) {
- if (zig_lo_u128(val) != 0) return zig_ctz_u64(zig_lo_u128(val), zig_as_u8(64));
- return zig_ctz_u64(zig_hi_u128(val), bits - zig_as_u8(64)) + zig_as_u8(64);
+static inline uint8_t zig_ctz_u128(zig_u128 val, uint8_t bits) {
+ if (zig_lo_u128(val) != 0) return zig_ctz_u64(zig_lo_u128(val), UINT8_C(64));
+ return zig_ctz_u64(zig_hi_u128(val), bits - UINT8_C(64)) + UINT8_C(64);
}
-static inline zig_u8 zig_ctz_i128(zig_i128 val, zig_u8 bits) {
+static inline uint8_t zig_ctz_i128(zig_i128 val, uint8_t bits) {
return zig_ctz_u128(zig_bitcast_u128(val), bits);
}
-static inline zig_u8 zig_popcount_u128(zig_u128 val, zig_u8 bits) {
- return zig_popcount_u64(zig_hi_u128(val), bits - zig_as_u8(64)) +
- zig_popcount_u64(zig_lo_u128(val), zig_as_u8(64));
+static inline uint8_t zig_popcount_u128(zig_u128 val, uint8_t bits) {
+ return zig_popcount_u64(zig_hi_u128(val), bits - UINT8_C(64)) +
+ zig_popcount_u64(zig_lo_u128(val), UINT8_C(64));
}
-static inline zig_u8 zig_popcount_i128(zig_i128 val, zig_u8 bits) {
+static inline uint8_t zig_popcount_i128(zig_i128 val, uint8_t bits) {
return zig_popcount_u128(zig_bitcast_u128(val), bits);
}
-static inline zig_u128 zig_byte_swap_u128(zig_u128 val, zig_u8 bits) {
+static inline zig_u128 zig_byte_swap_u128(zig_u128 val, uint8_t bits) {
zig_u128 full_res;
#if zig_has_builtin(bswap128)
full_res = __builtin_bswap128(val);
#else
- full_res = zig_as_u128(zig_byte_swap_u64(zig_lo_u128(val), zig_as_u8(64)),
- zig_byte_swap_u64(zig_hi_u128(val), zig_as_u8(64)));
+ full_res = zig_make_u128(zig_byte_swap_u64(zig_lo_u128(val), UINT8_C(64)),
+ zig_byte_swap_u64(zig_hi_u128(val), UINT8_C(64)));
#endif
- return zig_shr_u128(full_res, zig_as_u8(128) - bits);
+ return zig_shr_u128(full_res, UINT8_C(128) - bits);
}
-static inline zig_i128 zig_byte_swap_i128(zig_i128 val, zig_u8 bits) {
+static inline zig_i128 zig_byte_swap_i128(zig_i128 val, uint8_t bits) {
return zig_bitcast_i128(zig_byte_swap_u128(zig_bitcast_u128(val), bits));
}
-static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, zig_u8 bits) {
- return zig_shr_u128(zig_as_u128(zig_bit_reverse_u64(zig_lo_u128(val), zig_as_u8(64)),
- zig_bit_reverse_u64(zig_hi_u128(val), zig_as_u8(64))),
- zig_as_u8(128) - bits);
+static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, uint8_t bits) {
+ return zig_shr_u128(zig_make_u128(zig_bit_reverse_u64(zig_lo_u128(val), UINT8_C(64)),
+ zig_bit_reverse_u64(zig_hi_u128(val), UINT8_C(64))),
+ UINT8_C(128) - bits);
}
-static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) {
+static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
return zig_bitcast_i128(zig_bit_reverse_u128(zig_bitcast_u128(val), bits));
}
+/* ========================== Big Integer Support =========================== */
+
+static inline uint16_t zig_int_bytes(uint16_t bits) {
+ uint16_t bytes = (bits + CHAR_BIT - 1) / CHAR_BIT;
+ uint16_t alignment = ZIG_TARGET_MAX_INT_ALIGNMENT;
+ while (alignment / 2 >= bytes) alignment /= 2;
+ return (bytes + alignment - 1) / alignment * alignment;
+}
+
+static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ bool do_signed = is_signed;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+
+#if zig_little_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ int32_t limb_cmp;
+
+#if zig_little_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_cmp = zig_cmp_i128(lhs_limb, rhs_limb);
+ do_signed = false;
+ } else {
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_cmp = zig_cmp_u128(lhs_limb, rhs_limb);
+ }
+
+ if (limb_cmp != 0) return limb_cmp;
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return 0;
+}
+
+/* Big-integer addition with overflow detection: *res = *lhs + *rhs, where
+ * the operands are zig_int_bytes(bits)-byte, native-endian limb arrays.
+ * Limbs are processed from least significant to most significant so the
+ * pending carry (kept in `overflow`) ripples upward; the final value of
+ * `overflow` reports whether the whole `bits`-wide addition overflowed.
+ * Only the topmost limb is added as signed (when is_signed), and only it
+ * uses a reduced width (limb_bits) excluding the `top_bits` of storage
+ * padding above `bits`. */
+static inline bool zig_addo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+    uint8_t *res_bytes = res;
+    const uint8_t *lhs_bytes = lhs;
+    const uint8_t *rhs_bytes = rhs;
+    uint16_t byte_offset = 0;
+    uint16_t remaining_bytes = zig_int_bytes(bits);
+    uint16_t top_bits = remaining_bytes * 8 - bits;
+    bool overflow = false;
+
+    /* Start at the least-significant limb: offset 0 on little-endian
+     * targets, the end of the buffer on big-endian targets. */
+#if zig_big_endian
+    byte_offset = remaining_bytes;
+#endif
+
+    /* Consume 128-bit limbs; the narrower loops below repeat the same
+     * pattern for whatever bytes are left over. */
+    while (remaining_bytes >= 128 / CHAR_BIT) {
+        /* Only the most-significant limb (the last one this loop can see,
+         * i.e. remaining_bytes == limb size) drops the padding bits. */
+        uint16_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+        byte_offset -= 128 / CHAR_BIT;
+#endif
+
+        if (remaining_bytes == 128 / CHAR_BIT && is_signed) {
+            zig_i128 res_limb;
+            zig_i128 tmp_limb;
+            zig_i128 lhs_limb;
+            zig_i128 rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            /* Add the operands, then the incoming carry; XOR of the two
+             * step overflows equals the overflow of the combined addition
+             * (they can also cancel: e.g. MIN + -1 wraps to MAX, and the
+             * subsequent +1 wraps back, leaving a representable result). */
+            limb_overflow = zig_addo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_addo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        } else {
+            zig_u128 res_limb;
+            zig_u128 tmp_limb;
+            zig_u128 lhs_limb;
+            zig_u128 rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_addo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_addo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        }
+
+        remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 128 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 64 / CHAR_BIT) {
+        uint16_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+        byte_offset -= 64 / CHAR_BIT;
+#endif
+
+        if (remaining_bytes == 64 / CHAR_BIT && is_signed) {
+            int64_t res_limb;
+            int64_t tmp_limb;
+            int64_t lhs_limb;
+            int64_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_addo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_addo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        } else {
+            uint64_t res_limb;
+            uint64_t tmp_limb;
+            uint64_t lhs_limb;
+            uint64_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_addo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_addo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        }
+
+        remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 64 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 32 / CHAR_BIT) {
+        uint16_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+        byte_offset -= 32 / CHAR_BIT;
+#endif
+
+        if (remaining_bytes == 32 / CHAR_BIT && is_signed) {
+            int32_t res_limb;
+            int32_t tmp_limb;
+            int32_t lhs_limb;
+            int32_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_addo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_addo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        } else {
+            uint32_t res_limb;
+            uint32_t tmp_limb;
+            uint32_t lhs_limb;
+            uint32_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_addo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_addo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        }
+
+        remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 32 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 16 / CHAR_BIT) {
+        uint16_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+        byte_offset -= 16 / CHAR_BIT;
+#endif
+
+        if (remaining_bytes == 16 / CHAR_BIT && is_signed) {
+            int16_t res_limb;
+            int16_t tmp_limb;
+            int16_t lhs_limb;
+            int16_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_addo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_addo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        } else {
+            uint16_t res_limb;
+            uint16_t tmp_limb;
+            uint16_t lhs_limb;
+            uint16_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_addo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_addo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        }
+
+        remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 16 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 8 / CHAR_BIT) {
+        uint16_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+        byte_offset -= 8 / CHAR_BIT;
+#endif
+
+        if (remaining_bytes == 8 / CHAR_BIT && is_signed) {
+            int8_t res_limb;
+            int8_t tmp_limb;
+            int8_t lhs_limb;
+            int8_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_addo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_addo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        } else {
+            uint8_t res_limb;
+            uint8_t tmp_limb;
+            uint8_t lhs_limb;
+            uint8_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_addo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_addo_u8(&res_limb, tmp_limb, overflow ? UINT8_C(1) : UINT8_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        }
+
+        remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 8 / CHAR_BIT;
+#endif
+    }
+
+    return overflow;
+}
+
+/* Big-integer subtraction with overflow detection: *res = *lhs - *rhs.
+ * Mirrors zig_addo_big: operands are zig_int_bytes(bits)-byte native-endian
+ * limb arrays, processed least-significant limb first so the pending borrow
+ * (kept in `overflow`) propagates upward.  Only the topmost limb is treated
+ * as signed (when is_signed) and uses a reduced width (limb_bits) that
+ * excludes the `top_bits` of storage padding above `bits`.  Returns true if
+ * the full `bits`-wide subtraction overflowed. */
+static inline bool zig_subo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+    uint8_t *res_bytes = res;
+    const uint8_t *lhs_bytes = lhs;
+    const uint8_t *rhs_bytes = rhs;
+    uint16_t byte_offset = 0;
+    uint16_t remaining_bytes = zig_int_bytes(bits);
+    uint16_t top_bits = remaining_bytes * 8 - bits;
+    bool overflow = false;
+
+    /* Least-significant limb first: offset 0 on little-endian, buffer end
+     * on big-endian. */
+#if zig_big_endian
+    byte_offset = remaining_bytes;
+#endif
+
+    /* Consume 128-bit limbs; the narrower loops below repeat the same
+     * pattern for the leftover bytes. */
+    while (remaining_bytes >= 128 / CHAR_BIT) {
+        uint16_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+        byte_offset -= 128 / CHAR_BIT;
+#endif
+
+        if (remaining_bytes == 128 / CHAR_BIT && is_signed) {
+            zig_i128 res_limb;
+            zig_i128 tmp_limb;
+            zig_i128 lhs_limb;
+            zig_i128 rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            /* Subtract the operand, then the incoming borrow; XOR of the
+             * two step overflows equals the overflow of the combined
+             * subtraction (the two wraps can also cancel each other). */
+            limb_overflow = zig_subo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        } else {
+            zig_u128 res_limb;
+            zig_u128 tmp_limb;
+            zig_u128 lhs_limb;
+            zig_u128 rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        }
+
+        remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 128 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 64 / CHAR_BIT) {
+        uint16_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+        byte_offset -= 64 / CHAR_BIT;
+#endif
+
+        if (remaining_bytes == 64 / CHAR_BIT && is_signed) {
+            int64_t res_limb;
+            int64_t tmp_limb;
+            int64_t lhs_limb;
+            int64_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        } else {
+            uint64_t res_limb;
+            uint64_t tmp_limb;
+            uint64_t lhs_limb;
+            uint64_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        }
+
+        remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 64 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 32 / CHAR_BIT) {
+        uint16_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+        byte_offset -= 32 / CHAR_BIT;
+#endif
+
+        if (remaining_bytes == 32 / CHAR_BIT && is_signed) {
+            int32_t res_limb;
+            int32_t tmp_limb;
+            int32_t lhs_limb;
+            int32_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        } else {
+            uint32_t res_limb;
+            uint32_t tmp_limb;
+            uint32_t lhs_limb;
+            uint32_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        }
+
+        remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 32 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 16 / CHAR_BIT) {
+        uint16_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+        byte_offset -= 16 / CHAR_BIT;
+#endif
+
+        if (remaining_bytes == 16 / CHAR_BIT && is_signed) {
+            int16_t res_limb;
+            int16_t tmp_limb;
+            int16_t lhs_limb;
+            int16_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        } else {
+            uint16_t res_limb;
+            uint16_t tmp_limb;
+            uint16_t lhs_limb;
+            uint16_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        }
+
+        remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 16 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 8 / CHAR_BIT) {
+        uint16_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+        byte_offset -= 8 / CHAR_BIT;
+#endif
+
+        if (remaining_bytes == 8 / CHAR_BIT && is_signed) {
+            int8_t res_limb;
+            int8_t tmp_limb;
+            int8_t lhs_limb;
+            int8_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        } else {
+            uint8_t res_limb;
+            uint8_t tmp_limb;
+            uint8_t lhs_limb;
+            uint8_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_u8(&res_limb, tmp_limb, overflow ? UINT8_C(1) : UINT8_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        }
+
+        remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 8 / CHAR_BIT;
+#endif
+    }
+
+    return overflow;
+}
+
+/* Wrapping (modular) big-integer addition: same computation as
+ * zig_addo_big, but the overflow flag is discarded and only the
+ * truncated result is stored in *res. */
+static inline void zig_addw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+    (void)zig_addo_big(res, lhs, rhs, is_signed, bits);
+}
+
+/* Wrapping (modular) big-integer subtraction: same computation as
+ * zig_subo_big, but the overflow flag is discarded and only the
+ * truncated result is stored in *res. */
+static inline void zig_subw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+    (void)zig_subo_big(res, lhs, rhs, is_signed, bits);
+}
+
+/* Counts the leading zero bits of a big integer stored as a native-endian
+ * limb array, scanning from the most-significant limb downward.  `skip_bits`
+ * is the storage padding above `bits` in the topmost limb; it is excluded
+ * from that limb's count and then zeroed so lower limbs use their full
+ * width.  Returns as soon as a limb contains a set bit. */
+static inline uint16_t zig_clz_big(const void *val, bool is_signed, uint16_t bits) {
+    const uint8_t *val_bytes = val;
+    uint16_t byte_offset = 0;
+    uint16_t remaining_bytes = zig_int_bytes(bits);
+    uint16_t skip_bits = remaining_bytes * 8 - bits;
+    uint16_t total_lz = 0;
+    uint16_t limb_lz;
+    (void)is_signed;
+
+    /* Most-significant limb first: on little-endian that limb sits at the
+     * end of the buffer, so start past the end and pre-decrement. */
+#if zig_little_endian
+    byte_offset = remaining_bytes;
+#endif
+
+    while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_little_endian
+        byte_offset -= 128 / CHAR_BIT;
+#endif
+
+        {
+            zig_u128 val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_lz = zig_clz_u128(val_limb, 128 - skip_bits);
+        }
+
+        total_lz += limb_lz;
+        /* A set bit was found inside this limb: the count is final. */
+        if (limb_lz < 128 - skip_bits) return total_lz;
+        skip_bits = 0;
+        remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_big_endian
+        byte_offset += 128 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_little_endian
+        byte_offset -= 64 / CHAR_BIT;
+#endif
+
+        {
+            uint64_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_lz = zig_clz_u64(val_limb, 64 - skip_bits);
+        }
+
+        total_lz += limb_lz;
+        if (limb_lz < 64 - skip_bits) return total_lz;
+        skip_bits = 0;
+        remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_big_endian
+        byte_offset += 64 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_little_endian
+        byte_offset -= 32 / CHAR_BIT;
+#endif
+
+        {
+            uint32_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_lz = zig_clz_u32(val_limb, 32 - skip_bits);
+        }
+
+        total_lz += limb_lz;
+        if (limb_lz < 32 - skip_bits) return total_lz;
+        skip_bits = 0;
+        remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_big_endian
+        byte_offset += 32 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_little_endian
+        byte_offset -= 16 / CHAR_BIT;
+#endif
+
+        {
+            uint16_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_lz = zig_clz_u16(val_limb, 16 - skip_bits);
+        }
+
+        total_lz += limb_lz;
+        if (limb_lz < 16 - skip_bits) return total_lz;
+        skip_bits = 0;
+        remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_big_endian
+        byte_offset += 16 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_little_endian
+        byte_offset -= 8 / CHAR_BIT;
+#endif
+
+        {
+            uint8_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_lz = zig_clz_u8(val_limb, 8 - skip_bits);
+        }
+
+        total_lz += limb_lz;
+        if (limb_lz < 8 - skip_bits) return total_lz;
+        skip_bits = 0;
+        remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_big_endian
+        byte_offset += 8 / CHAR_BIT;
+#endif
+    }
+
+    return total_lz;
+}
+
+/* Counts the trailing zero bits of a big integer stored as a native-endian
+ * limb array, scanning from the least-significant limb upward and stopping
+ * at the first limb that contains a set bit.
+ * NOTE(review): each limb is counted with its full storage width (128, 64,
+ * ...), so for an all-zero value the result is the total storage width,
+ * which can exceed `bits` when `bits` is not a multiple of the limb size;
+ * zig_clz_big, by contrast, excludes the padding via skip_bits — confirm
+ * callers tolerate this asymmetry. */
+static inline uint16_t zig_ctz_big(const void *val, bool is_signed, uint16_t bits) {
+    const uint8_t *val_bytes = val;
+    uint16_t byte_offset = 0;
+    uint16_t remaining_bytes = zig_int_bytes(bits);
+    uint16_t total_tz = 0;
+    uint16_t limb_tz;
+    (void)is_signed;
+
+    /* Least-significant limb first: on big-endian that limb sits at the
+     * end of the buffer, so start past the end and pre-decrement. */
+#if zig_big_endian
+    byte_offset = remaining_bytes;
+#endif
+
+    while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 128 / CHAR_BIT;
+#endif
+
+        {
+            zig_u128 val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_tz = zig_ctz_u128(val_limb, 128);
+        }
+
+        total_tz += limb_tz;
+        /* A set bit was found inside this limb: the count is final. */
+        if (limb_tz < 128) return total_tz;
+        remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 128 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 64 / CHAR_BIT;
+#endif
+
+        {
+            uint64_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_tz = zig_ctz_u64(val_limb, 64);
+        }
+
+        total_tz += limb_tz;
+        if (limb_tz < 64) return total_tz;
+        remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 64 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 32 / CHAR_BIT;
+#endif
+
+        {
+            uint32_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_tz = zig_ctz_u32(val_limb, 32);
+        }
+
+        total_tz += limb_tz;
+        if (limb_tz < 32) return total_tz;
+        remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 32 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 16 / CHAR_BIT;
+#endif
+
+        {
+            uint16_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_tz = zig_ctz_u16(val_limb, 16);
+        }
+
+        total_tz += limb_tz;
+        if (limb_tz < 16) return total_tz;
+        remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 16 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 8 / CHAR_BIT;
+#endif
+
+        {
+            uint8_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_tz = zig_ctz_u8(val_limb, 8);
+        }
+
+        total_tz += limb_tz;
+        if (limb_tz < 8) return total_tz;
+        remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 8 / CHAR_BIT;
+#endif
+    }
+
+    return total_tz;
+}
+
+/* Counts the set bits of a big integer stored as a native-endian limb
+ * array by summing the popcount of every limb.  Limb order does not matter
+ * for popcount; the storage padding bits above `bits` are assumed zero.
+ * Bug fix: the 16- and 8-bit tail loops previously used `total_pc =`
+ * instead of `total_pc +=`, discarding the counts accumulated by the
+ * wider limb loops. */
+static inline uint16_t zig_popcount_big(const void *val, bool is_signed, uint16_t bits) {
+    const uint8_t *val_bytes = val;
+    uint16_t byte_offset = 0;
+    uint16_t remaining_bytes = zig_int_bytes(bits);
+    uint16_t total_pc = 0;
+    (void)is_signed;
+
+#if zig_big_endian
+    byte_offset = remaining_bytes;
+#endif
+
+    while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 128 / CHAR_BIT;
+#endif
+
+        {
+            zig_u128 val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            total_pc += zig_popcount_u128(val_limb, 128);
+        }
+
+        remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 128 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 64 / CHAR_BIT;
+#endif
+
+        {
+            uint64_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            total_pc += zig_popcount_u64(val_limb, 64);
+        }
+
+        remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 64 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 32 / CHAR_BIT;
+#endif
+
+        {
+            uint32_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            total_pc += zig_popcount_u32(val_limb, 32);
+        }
+
+        remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 32 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 16 / CHAR_BIT;
+#endif
+
+        {
+            uint16_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            /* was `total_pc =`, which dropped all previously counted bits */
+            total_pc += zig_popcount_u16(val_limb, 16);
+        }
+
+        remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 16 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 8 / CHAR_BIT;
+#endif
+
+        {
+            uint8_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            /* was `total_pc =`, which dropped all previously counted bits */
+            total_pc += zig_popcount_u8(val_limb, 8);
+        }
+
+        remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 8 / CHAR_BIT;
+#endif
+    }
+
+    return total_pc;
+}
+
/* ========================= Floating Point Support ========================= */
#if _MSC_VER
@@ -1810,252 +2758,253 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) {
#if (zig_has_builtin(nan) && zig_has_builtin(nans) && zig_has_builtin(inf)) || defined(zig_gnuc)
#define zig_has_float_builtins 1
-#define zig_as_special_f16(sign, name, arg, repr) sign zig_as_f16(__builtin_##name, )(arg)
-#define zig_as_special_f32(sign, name, arg, repr) sign zig_as_f32(__builtin_##name, )(arg)
-#define zig_as_special_f64(sign, name, arg, repr) sign zig_as_f64(__builtin_##name, )(arg)
-#define zig_as_special_f80(sign, name, arg, repr) sign zig_as_f80(__builtin_##name, )(arg)
-#define zig_as_special_f128(sign, name, arg, repr) sign zig_as_f128(__builtin_##name, )(arg)
-#define zig_as_special_c_longdouble(sign, name, arg, repr) sign zig_as_c_longdouble(__builtin_##name, )(arg)
+#define zig_make_special_f16(sign, name, arg, repr) sign zig_make_f16(__builtin_##name, )(arg)
+#define zig_make_special_f32(sign, name, arg, repr) sign zig_make_f32(__builtin_##name, )(arg)
+#define zig_make_special_f64(sign, name, arg, repr) sign zig_make_f64(__builtin_##name, )(arg)
+#define zig_make_special_f80(sign, name, arg, repr) sign zig_make_f80(__builtin_##name, )(arg)
+#define zig_make_special_f128(sign, name, arg, repr) sign zig_make_f128(__builtin_##name, )(arg)
#else
#define zig_has_float_builtins 0
-#define zig_as_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr)
-#define zig_as_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr)
-#define zig_as_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr)
-#define zig_as_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr)
-#define zig_as_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr)
-#define zig_as_special_c_longdouble(sign, name, arg, repr) zig_float_from_repr_c_longdouble(repr)
+#define zig_make_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr)
+#define zig_make_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr)
+#define zig_make_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr)
+#define zig_make_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr)
+#define zig_make_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr)
#endif
#define zig_has_f16 1
#define zig_bitSizeOf_f16 16
+typedef int16_t zig_repr_f16;
#define zig_libc_name_f16(name) __##name##h
-#define zig_as_special_constant_f16(sign, name, arg, repr) zig_as_special_f16(sign, name, arg, repr)
+#define zig_init_special_f16(sign, name, arg, repr) zig_make_special_f16(sign, name, arg, repr)
#if FLT_MANT_DIG == 11
typedef float zig_f16;
-#define zig_as_f16(fp, repr) fp##f
+#define zig_make_f16(fp, repr) fp##f
#elif DBL_MANT_DIG == 11
typedef double zig_f16;
-#define zig_as_f16(fp, repr) fp
+#define zig_make_f16(fp, repr) fp
#elif LDBL_MANT_DIG == 11
#define zig_bitSizeOf_c_longdouble 16
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f16 zig_repr_c_longdouble;
+#endif
typedef long double zig_f16;
-#define zig_as_f16(fp, repr) fp##l
+#define zig_make_f16(fp, repr) fp##l
#elif FLT16_MANT_DIG == 11 && (zig_has_builtin(inff16) || defined(zig_gnuc))
typedef _Float16 zig_f16;
-#define zig_as_f16(fp, repr) fp##f16
+#define zig_make_f16(fp, repr) fp##f16
#elif defined(__SIZEOF_FP16__)
typedef __fp16 zig_f16;
-#define zig_as_f16(fp, repr) fp##f16
+#define zig_make_f16(fp, repr) fp##f16
#else
#undef zig_has_f16
#define zig_has_f16 0
-#define zig_repr_f16 i16
-typedef zig_i16 zig_f16;
-#define zig_as_f16(fp, repr) repr
-#undef zig_as_special_f16
-#define zig_as_special_f16(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f16
-#define zig_as_special_constant_f16(sign, name, arg, repr) repr
+#define zig_bitSizeOf_repr_f16 16
+typedef int16_t zig_f16;
+#define zig_make_f16(fp, repr) repr
+#undef zig_make_special_f16
+#define zig_make_special_f16(sign, name, arg, repr) repr
+#undef zig_init_special_f16
+#define zig_init_special_f16(sign, name, arg, repr) repr
#endif
#define zig_has_f32 1
#define zig_bitSizeOf_f32 32
+typedef int32_t zig_repr_f32;
#define zig_libc_name_f32(name) name##f
#if _MSC_VER
-#define zig_as_special_constant_f32(sign, name, arg, repr) sign zig_as_f32(zig_msvc_flt_##name, )
+#define zig_init_special_f32(sign, name, arg, repr) sign zig_make_f32(zig_msvc_flt_##name, )
#else
-#define zig_as_special_constant_f32(sign, name, arg, repr) zig_as_special_f32(sign, name, arg, repr)
+#define zig_init_special_f32(sign, name, arg, repr) zig_make_special_f32(sign, name, arg, repr)
#endif
#if FLT_MANT_DIG == 24
typedef float zig_f32;
-#define zig_as_f32(fp, repr) fp##f
+#define zig_make_f32(fp, repr) fp##f
#elif DBL_MANT_DIG == 24
typedef double zig_f32;
-#define zig_as_f32(fp, repr) fp
+#define zig_make_f32(fp, repr) fp
#elif LDBL_MANT_DIG == 24
#define zig_bitSizeOf_c_longdouble 32
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f32 zig_repr_c_longdouble;
+#endif
typedef long double zig_f32;
-#define zig_as_f32(fp, repr) fp##l
+#define zig_make_f32(fp, repr) fp##l
#elif FLT32_MANT_DIG == 24
typedef _Float32 zig_f32;
-#define zig_as_f32(fp, repr) fp##f32
+#define zig_make_f32(fp, repr) fp##f32
#else
#undef zig_has_f32
#define zig_has_f32 0
-#define zig_repr_f32 i32
-typedef zig_i32 zig_f32;
-#define zig_as_f32(fp, repr) repr
-#undef zig_as_special_f32
-#define zig_as_special_f32(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f32
-#define zig_as_special_constant_f32(sign, name, arg, repr) repr
+#define zig_bitSizeOf_repr_f32 32
+typedef int32_t zig_f32;
+#define zig_make_f32(fp, repr) repr
+#undef zig_make_special_f32
+#define zig_make_special_f32(sign, name, arg, repr) repr
+#undef zig_init_special_f32
+#define zig_init_special_f32(sign, name, arg, repr) repr
#endif
#define zig_has_f64 1
#define zig_bitSizeOf_f64 64
+typedef int64_t zig_repr_f64;
#define zig_libc_name_f64(name) name
#if _MSC_VER
#ifdef ZIG_TARGET_ABI_MSVC
#define zig_bitSizeOf_c_longdouble 64
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f64 zig_repr_c_longdouble;
#endif
-#define zig_as_special_constant_f64(sign, name, arg, repr) sign zig_as_f64(zig_msvc_flt_##name, )
+#endif
+#define zig_init_special_f64(sign, name, arg, repr) sign zig_make_f64(zig_msvc_flt_##name, )
#else /* _MSC_VER */
-#define zig_as_special_constant_f64(sign, name, arg, repr) zig_as_special_f64(sign, name, arg, repr)
+#define zig_init_special_f64(sign, name, arg, repr) zig_make_special_f64(sign, name, arg, repr)
#endif /* _MSC_VER */
#if FLT_MANT_DIG == 53
typedef float zig_f64;
-#define zig_as_f64(fp, repr) fp##f
+#define zig_make_f64(fp, repr) fp##f
#elif DBL_MANT_DIG == 53
typedef double zig_f64;
-#define zig_as_f64(fp, repr) fp
+#define zig_make_f64(fp, repr) fp
#elif LDBL_MANT_DIG == 53
#define zig_bitSizeOf_c_longdouble 64
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f64 zig_repr_c_longdouble;
+#endif
typedef long double zig_f64;
-#define zig_as_f64(fp, repr) fp##l
+#define zig_make_f64(fp, repr) fp##l
#elif FLT64_MANT_DIG == 53
typedef _Float64 zig_f64;
-#define zig_as_f64(fp, repr) fp##f64
+#define zig_make_f64(fp, repr) fp##f64
#elif FLT32X_MANT_DIG == 53
typedef _Float32x zig_f64;
-#define zig_as_f64(fp, repr) fp##f32x
+#define zig_make_f64(fp, repr) fp##f32x
#else
#undef zig_has_f64
#define zig_has_f64 0
-#define zig_repr_f64 i64
-typedef zig_i64 zig_f64;
-#define zig_as_f64(fp, repr) repr
-#undef zig_as_special_f64
-#define zig_as_special_f64(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f64
-#define zig_as_special_constant_f64(sign, name, arg, repr) repr
+#define zig_bitSizeOf_repr_f64 64
+typedef int64_t zig_f64;
+#define zig_make_f64(fp, repr) repr
+#undef zig_make_special_f64
+#define zig_make_special_f64(sign, name, arg, repr) repr
+#undef zig_init_special_f64
+#define zig_init_special_f64(sign, name, arg, repr) repr
#endif
#define zig_has_f80 1
#define zig_bitSizeOf_f80 80
+typedef zig_i128 zig_repr_f80;
#define zig_libc_name_f80(name) __##name##x
-#define zig_as_special_constant_f80(sign, name, arg, repr) zig_as_special_f80(sign, name, arg, repr)
+#define zig_init_special_f80(sign, name, arg, repr) zig_make_special_f80(sign, name, arg, repr)
#if FLT_MANT_DIG == 64
typedef float zig_f80;
-#define zig_as_f80(fp, repr) fp##f
+#define zig_make_f80(fp, repr) fp##f
#elif DBL_MANT_DIG == 64
typedef double zig_f80;
-#define zig_as_f80(fp, repr) fp
+#define zig_make_f80(fp, repr) fp
#elif LDBL_MANT_DIG == 64
#define zig_bitSizeOf_c_longdouble 80
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f80 zig_repr_c_longdouble;
+#endif
typedef long double zig_f80;
-#define zig_as_f80(fp, repr) fp##l
+#define zig_make_f80(fp, repr) fp##l
#elif FLT80_MANT_DIG == 64
typedef _Float80 zig_f80;
-#define zig_as_f80(fp, repr) fp##f80
+#define zig_make_f80(fp, repr) fp##f80
#elif FLT64X_MANT_DIG == 64
typedef _Float64x zig_f80;
-#define zig_as_f80(fp, repr) fp##f64x
+#define zig_make_f80(fp, repr) fp##f64x
#elif defined(__SIZEOF_FLOAT80__)
typedef __float80 zig_f80;
-#define zig_as_f80(fp, repr) fp##l
+#define zig_make_f80(fp, repr) fp##l
#else
#undef zig_has_f80
#define zig_has_f80 0
-#define zig_repr_f80 i128
+#define zig_bitSizeOf_repr_f80 128
typedef zig_i128 zig_f80;
-#define zig_as_f80(fp, repr) repr
-#undef zig_as_special_f80
-#define zig_as_special_f80(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f80
-#define zig_as_special_constant_f80(sign, name, arg, repr) repr
+#define zig_make_f80(fp, repr) repr
+#undef zig_make_special_f80
+#define zig_make_special_f80(sign, name, arg, repr) repr
+#undef zig_init_special_f80
+#define zig_init_special_f80(sign, name, arg, repr) repr
#endif
#define zig_has_f128 1
#define zig_bitSizeOf_f128 128
+typedef zig_i128 zig_repr_f128;
#define zig_libc_name_f128(name) name##q
-#define zig_as_special_constant_f128(sign, name, arg, repr) zig_as_special_f128(sign, name, arg, repr)
+#define zig_init_special_f128(sign, name, arg, repr) zig_make_special_f128(sign, name, arg, repr)
#if FLT_MANT_DIG == 113
typedef float zig_f128;
-#define zig_as_f128(fp, repr) fp##f
+#define zig_make_f128(fp, repr) fp##f
#elif DBL_MANT_DIG == 113
typedef double zig_f128;
-#define zig_as_f128(fp, repr) fp
+#define zig_make_f128(fp, repr) fp
#elif LDBL_MANT_DIG == 113
#define zig_bitSizeOf_c_longdouble 128
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f128 zig_repr_c_longdouble;
+#endif
typedef long double zig_f128;
-#define zig_as_f128(fp, repr) fp##l
+#define zig_make_f128(fp, repr) fp##l
#elif FLT128_MANT_DIG == 113
typedef _Float128 zig_f128;
-#define zig_as_f128(fp, repr) fp##f128
+#define zig_make_f128(fp, repr) fp##f128
#elif FLT64X_MANT_DIG == 113
typedef _Float64x zig_f128;
-#define zig_as_f128(fp, repr) fp##f64x
+#define zig_make_f128(fp, repr) fp##f64x
#elif defined(__SIZEOF_FLOAT128__)
typedef __float128 zig_f128;
-#define zig_as_f128(fp, repr) fp##q
-#undef zig_as_special_f128
-#define zig_as_special_f128(sign, name, arg, repr) sign __builtin_##name##f128(arg)
+#define zig_make_f128(fp, repr) fp##q
+#undef zig_make_special_f128
+#define zig_make_special_f128(sign, name, arg, repr) sign __builtin_##name##f128(arg)
#else
#undef zig_has_f128
#define zig_has_f128 0
-#define zig_repr_f128 i128
+#define zig_bitSizeOf_repr_f128 128
typedef zig_i128 zig_f128;
-#define zig_as_f128(fp, repr) repr
-#undef zig_as_special_f128
-#define zig_as_special_f128(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f128
-#define zig_as_special_constant_f128(sign, name, arg, repr) repr
+#define zig_make_f128(fp, repr) repr
+#undef zig_make_special_f128
+#define zig_make_special_f128(sign, name, arg, repr) repr
+#undef zig_init_special_f128
+#define zig_init_special_f128(sign, name, arg, repr) repr
#endif
-#define zig_has_c_longdouble 1
-
-#ifdef ZIG_TARGET_ABI_MSVC
-#define zig_libc_name_c_longdouble(name) name
-#else
-#define zig_libc_name_c_longdouble(name) name##l
-#endif
-
-#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) zig_as_special_c_longdouble(sign, name, arg, repr)
#ifdef zig_bitSizeOf_c_longdouble
+#define zig_has_c_longdouble 1
#ifdef ZIG_TARGET_ABI_MSVC
-typedef double zig_c_longdouble;
#undef zig_bitSizeOf_c_longdouble
#define zig_bitSizeOf_c_longdouble 64
-#define zig_as_c_longdouble(fp, repr) fp
+typedef zig_f64 zig_c_longdouble;
+typedef zig_repr_f64 zig_repr_c_longdouble;
#else
typedef long double zig_c_longdouble;
-#define zig_as_c_longdouble(fp, repr) fp##l
#endif
#else /* zig_bitSizeOf_c_longdouble */
-#undef zig_has_c_longdouble
#define zig_has_c_longdouble 0
-#define zig_bitSizeOf_c_longdouble 80
-#define zig_compiler_rt_abbrev_c_longdouble zig_compiler_rt_abbrev_f80
-#define zig_repr_c_longdouble i128
-typedef zig_i128 zig_c_longdouble;
-#define zig_as_c_longdouble(fp, repr) repr
-#undef zig_as_special_c_longdouble
-#define zig_as_special_c_longdouble(sign, name, arg, repr) repr
-#undef zig_as_special_constant_c_longdouble
-#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) repr
+#define zig_bitSizeOf_repr_c_longdouble 128
+typedef zig_f128 zig_c_longdouble;
+typedef zig_repr_f128 zig_repr_c_longdouble;
#endif /* zig_bitSizeOf_c_longdouble */
#if !zig_has_float_builtins
-#define zig_float_from_repr(Type, ReprType) \
- static inline zig_##Type zig_float_from_repr_##Type(zig_##ReprType repr) { \
- return *((zig_##Type*)&repr); \
+#define zig_float_from_repr(Type) \
+ static inline zig_##Type zig_float_from_repr_##Type(zig_repr_##Type repr) { \
+ zig_##Type result; \
+ memcpy(&result, &repr, sizeof(result)); \
+ return result; \
}
-zig_float_from_repr(f16, u16)
-zig_float_from_repr(f32, u32)
-zig_float_from_repr(f64, u64)
-zig_float_from_repr(f80, u128)
-zig_float_from_repr(f128, u128)
-#if zig_bitSizeOf_c_longdouble == 80
-zig_float_from_repr(c_longdouble, u128)
-#else
-#define zig_expand_float_from_repr(Type, ReprType) zig_float_from_repr(Type, ReprType)
-zig_expand_float_from_repr(c_longdouble, zig_expand_concat(u, zig_bitSizeOf_c_longdouble))
-#endif
+zig_float_from_repr(f16)
+zig_float_from_repr(f32)
+zig_float_from_repr(f64)
+zig_float_from_repr(f80)
+zig_float_from_repr(f128)
#endif
#define zig_cast_f16 (zig_f16)
@@ -2064,41 +3013,42 @@ zig_expand_float_from_repr(c_longdouble, zig_expand_concat(u, zig_bitSizeOf_c_lo
#if _MSC_VER && !zig_has_f128
#define zig_cast_f80
-#define zig_cast_c_longdouble
#define zig_cast_f128
#else
#define zig_cast_f80 (zig_f80)
-#define zig_cast_c_longdouble (zig_c_longdouble)
#define zig_cast_f128 (zig_f128)
#endif
#define zig_convert_builtin(ResType, operation, ArgType, version) \
- zig_extern zig_##ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \
- zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(zig_##ArgType);
-zig_convert_builtin(f16, trunc, f32, 2)
-zig_convert_builtin(f16, trunc, f64, 2)
-zig_convert_builtin(f16, trunc, f80, 2)
-zig_convert_builtin(f16, trunc, f128, 2)
-zig_convert_builtin(f32, extend, f16, 2)
-zig_convert_builtin(f32, trunc, f64, 2)
-zig_convert_builtin(f32, trunc, f80, 2)
-zig_convert_builtin(f32, trunc, f128, 2)
-zig_convert_builtin(f64, extend, f16, 2)
-zig_convert_builtin(f64, extend, f32, 2)
-zig_convert_builtin(f64, trunc, f80, 2)
-zig_convert_builtin(f64, trunc, f128, 2)
-zig_convert_builtin(f80, extend, f16, 2)
-zig_convert_builtin(f80, extend, f32, 2)
-zig_convert_builtin(f80, extend, f64, 2)
-zig_convert_builtin(f80, trunc, f128, 2)
-zig_convert_builtin(f128, extend, f16, 2)
-zig_convert_builtin(f128, extend, f32, 2)
-zig_convert_builtin(f128, extend, f64, 2)
-zig_convert_builtin(f128, extend, f80, 2)
+ zig_extern ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \
+ zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(ArgType);
+zig_convert_builtin(zig_f16, trunc, zig_f32, 2)
+zig_convert_builtin(zig_f16, trunc, zig_f64, 2)
+zig_convert_builtin(zig_f16, trunc, zig_f80, 2)
+zig_convert_builtin(zig_f16, trunc, zig_f128, 2)
+zig_convert_builtin(zig_f32, extend, zig_f16, 2)
+zig_convert_builtin(zig_f32, trunc, zig_f64, 2)
+zig_convert_builtin(zig_f32, trunc, zig_f80, 2)
+zig_convert_builtin(zig_f32, trunc, zig_f128, 2)
+zig_convert_builtin(zig_f64, extend, zig_f16, 2)
+zig_convert_builtin(zig_f64, extend, zig_f32, 2)
+zig_convert_builtin(zig_f64, trunc, zig_f80, 2)
+zig_convert_builtin(zig_f64, trunc, zig_f128, 2)
+zig_convert_builtin(zig_f80, extend, zig_f16, 2)
+zig_convert_builtin(zig_f80, extend, zig_f32, 2)
+zig_convert_builtin(zig_f80, extend, zig_f64, 2)
+zig_convert_builtin(zig_f80, trunc, zig_f128, 2)
+zig_convert_builtin(zig_f128, extend, zig_f16, 2)
+zig_convert_builtin(zig_f128, extend, zig_f32, 2)
+zig_convert_builtin(zig_f128, extend, zig_f64, 2)
+zig_convert_builtin(zig_f128, extend, zig_f80, 2)
#define zig_float_negate_builtin_0(Type) \
static inline zig_##Type zig_neg_##Type(zig_##Type arg) { \
- return zig_expand_concat(zig_xor_, zig_repr_##Type)(arg, zig_expand_minInt(zig_repr_##Type, zig_bitSizeOf_##Type)); \
+ return zig_expand_concat(zig_xor_i, zig_bitSizeOf_repr_##Type)( \
+ arg, \
+ zig_minInt_i(zig_bitSizeOf_repr_##Type, zig_bitSizeOf_##Type) \
+ ); \
}
#define zig_float_negate_builtin_1(Type) \
static inline zig_##Type zig_neg_##Type(zig_##Type arg) { \
@@ -2106,28 +3056,28 @@ zig_convert_builtin(f128, extend, f80, 2)
}
#define zig_float_less_builtin_0(Type, operation) \
- zig_extern zig_i32 zig_expand_concat(zig_expand_concat(__##operation, \
- zig_compiler_rt_abbrev_##Type), 2)(zig_##Type, zig_##Type); \
- static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
- return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_##Type), 2)(lhs, rhs); \
+ zig_extern int32_t zig_expand_concat(zig_expand_concat(__##operation, \
+ zig_compiler_rt_abbrev_zig_##Type), 2)(zig_##Type, zig_##Type); \
+ static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_zig_##Type), 2)(lhs, rhs); \
}
#define zig_float_less_builtin_1(Type, operation) \
- static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
return (!(lhs <= rhs) - (lhs < rhs)); \
}
#define zig_float_greater_builtin_0(Type, operation) \
zig_float_less_builtin_0(Type, operation)
#define zig_float_greater_builtin_1(Type, operation) \
- static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
return ((lhs > rhs) - !(lhs >= rhs)); \
}
#define zig_float_binary_builtin_0(Type, operation, operator) \
zig_extern zig_##Type zig_expand_concat(zig_expand_concat(__##operation, \
- zig_compiler_rt_abbrev_##Type), 3)(zig_##Type, zig_##Type); \
+ zig_compiler_rt_abbrev_zig_##Type), 3)(zig_##Type, zig_##Type); \
static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
- return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_##Type), 3)(lhs, rhs); \
+ return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_zig_##Type), 3)(lhs, rhs); \
}
#define zig_float_binary_builtin_1(Type, operation, operator) \
static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
@@ -2135,18 +3085,18 @@ zig_convert_builtin(f128, extend, f80, 2)
}
#define zig_float_builtins(Type) \
- zig_convert_builtin(i32, fix, Type, ) \
- zig_convert_builtin(u32, fixuns, Type, ) \
- zig_convert_builtin(i64, fix, Type, ) \
- zig_convert_builtin(u64, fixuns, Type, ) \
- zig_convert_builtin(i128, fix, Type, ) \
- zig_convert_builtin(u128, fixuns, Type, ) \
- zig_convert_builtin(Type, float, i32, ) \
- zig_convert_builtin(Type, floatun, u32, ) \
- zig_convert_builtin(Type, float, i64, ) \
- zig_convert_builtin(Type, floatun, u64, ) \
- zig_convert_builtin(Type, float, i128, ) \
- zig_convert_builtin(Type, floatun, u128, ) \
+ zig_convert_builtin( int32_t, fix, zig_##Type, ) \
+ zig_convert_builtin(uint32_t, fixuns, zig_##Type, ) \
+ zig_convert_builtin( int64_t, fix, zig_##Type, ) \
+ zig_convert_builtin(uint64_t, fixuns, zig_##Type, ) \
+ zig_convert_builtin(zig_i128, fix, zig_##Type, ) \
+ zig_convert_builtin(zig_u128, fixuns, zig_##Type, ) \
+ zig_convert_builtin(zig_##Type, float, int32_t, ) \
+ zig_convert_builtin(zig_##Type, floatun, uint32_t, ) \
+ zig_convert_builtin(zig_##Type, float, int64_t, ) \
+ zig_convert_builtin(zig_##Type, floatun, uint64_t, ) \
+ zig_convert_builtin(zig_##Type, float, zig_i128, ) \
+ zig_convert_builtin(zig_##Type, floatun, zig_u128, ) \
zig_expand_concat(zig_float_negate_builtin_, zig_has_##Type)(Type) \
zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, cmp) \
zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, ne) \
@@ -2194,155 +3144,162 @@ zig_float_builtins(f32)
zig_float_builtins(f64)
zig_float_builtins(f80)
zig_float_builtins(f128)
-zig_float_builtins(c_longdouble)
#if _MSC_VER && (_M_IX86 || _M_X64)
// TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64
-#define zig_msvc_atomics(Type, suffix) \
- static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \
- zig_##Type comparand = *expected; \
- zig_##Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \
+#define zig_msvc_atomics(ZigType, Type, suffix) \
+ static inline bool zig_msvc_cmpxchg_##ZigType(Type volatile* obj, Type* expected, Type desired) { \
+ Type comparand = *expected; \
+ Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \
bool exchanged = initial == comparand; \
if (!exchanged) { \
*expected = initial; \
} \
return exchanged; \
} \
- static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_xchg_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedExchange##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_add_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedExchangeAdd##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_sub_##ZigType(Type volatile* obj, Type value) { \
bool success = false; \
- zig_##Type new; \
- zig_##Type prev; \
+ Type new; \
+ Type prev; \
while (!success) { \
prev = *obj; \
new = prev - value; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \
} \
return prev; \
} \
- static inline zig_##Type zig_msvc_atomicrmw_or_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_or_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedOr##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_xor_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_xor_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedXor##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_and_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_and_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedAnd##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_nand_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_nand_##ZigType(Type volatile* obj, Type value) { \
bool success = false; \
- zig_##Type new; \
- zig_##Type prev; \
+ Type new; \
+ Type prev; \
while (!success) { \
prev = *obj; \
new = ~(prev & value); \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \
} \
return prev; \
} \
- static inline zig_##Type zig_msvc_atomicrmw_min_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_min_##ZigType(Type volatile* obj, Type value) { \
bool success = false; \
- zig_##Type new; \
- zig_##Type prev; \
+ Type new; \
+ Type prev; \
while (!success) { \
prev = *obj; \
new = value < prev ? value : prev; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \
} \
return prev; \
} \
- static inline zig_##Type zig_msvc_atomicrmw_max_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_max_##ZigType(Type volatile* obj, Type value) { \
bool success = false; \
- zig_##Type new; \
- zig_##Type prev; \
+ Type new; \
+ Type prev; \
while (!success) { \
prev = *obj; \
new = value > prev ? value : prev; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \
} \
return prev; \
} \
- static inline void zig_msvc_atomic_store_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline void zig_msvc_atomic_store_##ZigType(Type volatile* obj, Type value) { \
_InterlockedExchange##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomic_load_##Type(zig_##Type volatile* obj) { \
+ static inline Type zig_msvc_atomic_load_##ZigType(Type volatile* obj) { \
return _InterlockedOr##suffix(obj, 0); \
}
-zig_msvc_atomics(u8, 8)
-zig_msvc_atomics(i8, 8)
-zig_msvc_atomics(u16, 16)
-zig_msvc_atomics(i16, 16)
-zig_msvc_atomics(u32, )
-zig_msvc_atomics(i32, )
+zig_msvc_atomics( u8, uint8_t, 8)
+zig_msvc_atomics( i8, int8_t, 8)
+zig_msvc_atomics(u16, uint16_t, 16)
+zig_msvc_atomics(i16, int16_t, 16)
+zig_msvc_atomics(u32, uint32_t, )
+zig_msvc_atomics(i32, int32_t, )
#if _M_X64
-zig_msvc_atomics(u64, 64)
-zig_msvc_atomics(i64, 64)
+zig_msvc_atomics(u64, uint64_t, 64)
+zig_msvc_atomics(i64, int64_t, 64)
#endif
#define zig_msvc_flt_atomics(Type, ReprType, suffix) \
static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \
- zig_##ReprType comparand = *((zig_##ReprType*)expected); \
- zig_##ReprType initial = _InterlockedCompareExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&desired), comparand); \
- bool exchanged = initial == comparand; \
- if (!exchanged) { \
- *expected = *((zig_##Type*)&initial); \
- } \
- return exchanged; \
+ ReprType exchange; \
+ ReprType comparand; \
+ ReprType initial; \
+ bool success; \
+ memcpy(&comparand, expected, sizeof(comparand)); \
+ memcpy(&exchange, &desired, sizeof(exchange)); \
+ initial = _InterlockedCompareExchange##suffix((ReprType volatile*)obj, exchange, comparand); \
+ success = initial == comparand; \
+ if (!success) memcpy(expected, &initial, sizeof(*expected)); \
+ return success; \
} \
static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \
- zig_##ReprType initial = _InterlockedExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&value)); \
- return *((zig_##Type*)&initial); \
+ ReprType repr; \
+ ReprType initial; \
+ zig_##Type result; \
+ memcpy(&repr, &value, sizeof(repr)); \
+ initial = _InterlockedExchange##suffix((ReprType volatile*)obj, repr); \
+ memcpy(&result, &initial, sizeof(result)); \
+ return result; \
} \
static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \
- bool success = false; \
- zig_##ReprType new; \
- zig_##Type prev; \
- while (!success) { \
- prev = *obj; \
- new = prev + value; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \
- } \
- return prev; \
+ ReprType repr; \
+ zig_##Type expected; \
+ zig_##Type desired; \
+ repr = *(ReprType volatile*)obj; \
+ memcpy(&expected, &repr, sizeof(expected)); \
+ do { \
+ desired = expected + value; \
+ } while (!zig_msvc_cmpxchg_##Type(obj, &expected, desired)); \
+ return expected; \
} \
static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \
- bool success = false; \
- zig_##ReprType new; \
- zig_##Type prev; \
- while (!success) { \
- prev = *obj; \
- new = prev - value; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \
- } \
- return prev; \
+ ReprType repr; \
+ zig_##Type expected; \
+ zig_##Type desired; \
+ repr = *(ReprType volatile*)obj; \
+ memcpy(&expected, &repr, sizeof(expected)); \
+ do { \
+ desired = expected - value; \
+ } while (!zig_msvc_cmpxchg_##Type(obj, &expected, desired)); \
+ return expected; \
}
-zig_msvc_flt_atomics(f32, u32, )
+zig_msvc_flt_atomics(f32, uint32_t, )
#if _M_X64
-zig_msvc_flt_atomics(f64, u64, 64)
+zig_msvc_flt_atomics(f64, uint64_t, 64)
#endif
#if _M_IX86
static inline void zig_msvc_atomic_barrier() {
- zig_i32 barrier;
+ int32_t barrier;
__asm {
xchg barrier, eax
}
}
-static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, zig_u32* arg) {
+static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, void* arg) {
return _InterlockedExchangePointer(obj, arg);
}
-static inline void zig_msvc_atomic_store_p32(void** obj, zig_u32* arg) {
+static inline void zig_msvc_atomic_store_p32(void** obj, void* arg) {
_InterlockedExchangePointer(obj, arg);
}
@@ -2360,11 +3317,11 @@ static inline bool zig_msvc_cmpxchg_p32(void** obj, void** expected, void* desir
return exchanged;
}
#else /* _M_IX86 */
-static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, zig_u64* arg) {
+static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, void* arg) {
return _InterlockedExchangePointer(obj, arg);
}
-static inline void zig_msvc_atomic_store_p64(void** obj, zig_u64* arg) {
+static inline void zig_msvc_atomic_store_p64(void** obj, void* arg) {
_InterlockedExchangePointer(obj, arg);
}
@@ -2383,11 +3340,11 @@ static inline bool zig_msvc_cmpxchg_p64(void** obj, void** expected, void* desir
}
static inline bool zig_msvc_cmpxchg_u128(zig_u128 volatile* obj, zig_u128* expected, zig_u128 desired) {
- return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_i64*)expected);
+ return _InterlockedCompareExchange128((int64_t volatile*)obj, desired.hi, desired.lo, (int64_t*)expected);
}
static inline bool zig_msvc_cmpxchg_i128(zig_i128 volatile* obj, zig_i128* expected, zig_i128 desired) {
- return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_u64*)expected);
+ return _InterlockedCompareExchange128((int64_t volatile*)obj, desired.hi, desired.lo, (uint64_t*)expected);
}
#define zig_msvc_atomics_128xchg(Type) \
@@ -2429,7 +3386,7 @@ zig_msvc_atomics_128op(u128, max)
#endif /* _MSC_VER && (_M_IX86 || _M_X64) */
-/* ========================= Special Case Intrinsics ========================= */
+/* ======================== Special Case Intrinsics ========================= */
#if (_MSC_VER && _M_X64) || defined(__x86_64__)
@@ -2459,8 +3416,8 @@ static inline void* zig_x86_windows_teb(void) {
#if (_MSC_VER && (_M_IX86 || _M_X64)) || defined(__i386__) || defined(__x86_64__)
-static inline void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, zig_u32* ebx, zig_u32* ecx, zig_u32* edx) {
- zig_u32 cpu_info[4];
+static inline void zig_x86_cpuid(uint32_t leaf_id, uint32_t subid, uint32_t* eax, uint32_t* ebx, uint32_t* ecx, uint32_t* edx) {
+ uint32_t cpu_info[4];
#if _MSC_VER
__cpuidex(cpu_info, leaf_id, subid);
#else
@@ -2472,12 +3429,12 @@ static inline void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, z
*edx = cpu_info[3];
}
-static inline zig_u32 zig_x86_get_xcr0(void) {
+static inline uint32_t zig_x86_get_xcr0(void) {
#if _MSC_VER
- return (zig_u32)_xgetbv(0);
+ return (uint32_t)_xgetbv(0);
#else
- zig_u32 eax;
- zig_u32 edx;
+ uint32_t eax;
+ uint32_t edx;
__asm__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0));
return eax;
#endif
diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm
index d7bf519b41a74966a3bc46b133fd9b34aa200f75..fa2b2efa03b4ed138a7f8c52e72648d828f2116a 100644
GIT binary patch
delta 933731
zcmc${349dA_AfqFJzFv}NoOSsfu2c#1PEahP(WxD5fQ=Vx?C4{pndt=YeZTkl|NrkzV7jZ$sk7Co
zQ)~B({PQigtxRJ<+oS(_j@fKBHqUYIKu3^1wt<|!BynpG=dj>Fk-(KexG?&15(447
zj2qmUSpXUY1*&bJ+e8qwhYqz3j0y%P1ru{XW+<`5w<9$v1CzX7H@8VHDd=SqrA>frp|t8%QF@ExB-kq+Yw%=0!emT~U&a0}K{+<^Wi7vZyGx%kJVr
ze(qtA)8%qND+x3hJcSFz=u6y*8!usdQP}C^EQPY550qgyaBg!_CbQY?PVRQwxFow=
z5Z?{e;#@$4&6wRuqQM$2kIQb4cG{yo5W&Gv+6EKbUCimVyBuE07Gz!*kAksKoO2gd
zgUe%!lH4!?EWs#;K9cx`=wL-(QUfq(06F40i*D+KFm^jc0(<6>Jdy+d823VCpmgp+
zHN)+k*-#HKjRRJNeVEVZ3xY9sF$sPyA$eu8k%k4yU6HU7Z9~LJCKGqG50n*%k1=;E+7Zq6muWpkiHY}{=l
z7q!`=Tuz(A=aXaylM!xg?r2n)%Y&MTcDZ}l+)h{@N|R{ePf*-7NKQ_6_-u|Sd?Z(t
z5(VM$k)j<@vCJ(&F}Rf5hU_R*u-)N`MV1TbkPVi$#X>$G@=y%O__j%ua!KNYyHuCW
z=8znyeaU~5lzOvGf-V%wf^NIZ7aQwvB9b62;V%k4Y;&W4Tavsmv(3%j9<*nhgox&J
zM!_()SZEIGN{B%8?Q+2*9WWuoR6t;La#dtfcrPo5$VjRvU{VBiTLS
z#)(FYq&<+?q6J)5LQ66}P~1UPfPihcN5L!(=8%Gj@Gv6F)uwnOr0H*+iS2bel4%HAM07O@rNU5K5Wa=53r6a~|uZ-!69dEhY~
zw-50Nts$xb1u$4J=t6izmw?VH3hbR|lW;+s6on4XiK>7X*(v_mAsn?SyYOirq_oFN
z5?Z(u9kT=kIE({viQ*ud3E7#+h&wjfCc^=29y@{nLMqE}N8QFCyvq%zb-9B6Sd^tc
zC(6fL!d$Uvo>Xn5+K6J*S)>2QmF(_=dyotWK(HR1P4sLI4t)_U(OEfB8z6^P
zf~bhN;sXu^Yh-00Qql3Vg1_`y)Hs3Y1L
zbVO5R!zX~ExH#%ZQK2wc9i8tQAXZ#q3#LZ$gtMF}L@P$f|Lwq)ctMZGg{E{lY7?9M8O;hB2
zNUKQds!du{N`P+~spY2RaxGCW>EZ~m3N4tD##U<6Qd)XZ7%G4Om)9@-CgpaFMzqYd
zOg2;Nh3_@m;I#IX}0AP-Yg}sX4R0FiRJCjnc(lOX=^k9`cggfvYyHO?HCA(4|jyNv5Id
zF;m!+7Gcx1GxJBtz17%IkrG1^V)Awc#BC4)
zOpXA?Y=L_jdVp4G1Td;Xd$H{Ud_sw~t!-ZX;u5q=IovMQno><}m%-PT2zTw>u2qgr
zg*qyKQ7zkAB3fyH4-R+~kIMU`t1RCi4QKH_t1j89&1yH&dG!-iaeML?QRV#8rP|r&
z(zKV_H(>L$@$K6(U8`*0o*y}){oej*j;1o;x?aw!MGzuWI2nJ>XfF$>y)tgFDou##D+EDA*u@^BY=-A6yFcu728VnYX6%BIvSYpu1
zWHA2(tYx-5(|I<&gIY?c!}HsvuXY;7LT>3Z+R#Z~qz#4K(noHo^|_&|iCb;r)?ACb
z8@OM#(iYv}N;)!mRH*5OBErTWwBK%M%eU{-ayly}v7#8W@y1-kxG+XAp58gc_=3s!
z^0kc5MHtWj0oohh6=GcRgVwW4TfTFj_F9(^BP666l-deP!?le!zCm7bYqwtf
z{74a@XO1F=VDITed@Nm)yC>!BS`)5%Ut7fs7pLfalyVcT)ZWtW=q{*j?%vC}g`BvV
z@G*8?zdDz-<9`;Bk2P1BUs2vLsr>jh8f?Q56|cXcq7#@HT-v8qswvrf-v>s#iDCNZ
zsB`FI>vk5EZ}sTF7Hb`QbV!;p8p`d4){Ib>L{+qiu
zEhYl8!O|vV*?9;qVUR>uNzz@OwRH5&4>EFz!#zV3i(*Y*jE(rB*65ZSlTOb9MWYf!
zB5gdhbjU3bY4<+un_eLTr6z$gD*^e|8xvp+>KC=;2ndLTH%9*WiU94yX
zp59G4^(ei=JxcG_OgmJVg;!Y$Yv1?2F=@_LXzi|J$m83Wwz;h+)aa|tk~Nl+CfnU$
zYc%BSU)Q1rqwA0*7h!>p3dED6_B
z$j*Cz(l*@jDmnR0{dzgCjD}kV47Z$Pxa9}rmVOMF68g90U+&X-^>52gYya+_#rA76
z`v=f3ukD|SaQ##N03W?ki@viRzp!2Fc4u3@VY~LPJN=<jOeiiiP+m46W2yfz8xY|)5ctz6*XTk-9FdiOn~+meU<1QL
z1btcHU!YuSG)>qLUt5HYZp!O^19b2hf?|wS(0N~J-T&6nG=4^AO+VANmazJChxnqvlHf44F$1RA9^x;O2y^IKqmP%#)FQz>Hp
zTSVCt)LzX&4Ozgj|5?g48kDd=Yvnl;@||86zqO!pP@8sdM#`E)5IRIlwfS!nu&)nk
z+wN@`{Jac6b0|cNnh4+)$_)igNS36$U_xdbfl_97LikAne*xu&Dy&CgK|z7+Kf;tD
zCiucf7PIvL)BwX}Bz5d!?YaBXQpWE@jaaHMPS7pgsm;7ECFRg+ln?h6QT}8B{C%~y
z4ZyusFupOG5tZLY0Dr2|t^)Y>n7W=3FMxB$XnFUiG?=^^z;JCw`5Ot~``RP-r>2}6
z34@x=iu8;XuuCJgiT5|WZt*mWwPDdz0o+BomfGo0gOyC!g5TV+IWA;{_Kp7Hdwzlx^jS?o)kRhxH<*(BA
z|2>D#*sHl8^e0Tf#5Yn^1m3B2c`zsOZORY32IYTJtG)JM78|a;^I%)PvR2#qU?wly
zq@73J_gce;+VXjuwEhoe@)@6MFFlmS)@sv`xA0SK>qD7ruJ+4AP1rFl<>9t$sn+M=
zEWUlNHss+fwp^R~a9dt8SKIh-CND44&LD5DmiS0p{&k^t>m!+Lj`r*$P1ptPtw-9j
zkG1uWv~RFr9V$Ac5bEneq3lde9*~i;{1QerA=gDky9Dg3OIptX3jbog_Vj?P4!_b&
zGTf4F{yPND=o;jPUDoT*P`ditDLqZ-iMiUE0g35X<^m}k5}yT<*PkYe@6u`qBsDzp
z8AJ=YHtM3EGT4M~Kdt!(CN?}wqpoB#^flF9&ipt7f|k_Okw_qMai`!RJf+R{upH{!Kv);vmlQ-10uhV5xEm)BR4G+_Xw{<
zOMJB5pyRV5p3)9HZc`>Q|KAk1;!uBGh2Um3AOiJJN(fG%k}+U!H>kCPN^ye8dUjOyVW2XAY-_cq>p3-~K6^pAtrF)cd
z%>RHe&^t$92#c|AREh~smYQe=-TTx4iqcGrb*)Lm-}p6~`&k&}nr@fCj}JguRi`2HEl9j@*uGj|H*
zh8_P=Gk5&kR(E6mI|YT&Kf=c>>fNUd4}s><_yg{zb$Bv?A6~BYc`~)(7oWo5p}-15
z`IV8(f0v;2wpQ?DTEe7BKuEXx(?K&u2Xq8VgsLYg(<`@{G+2GMx6h{9)M%uz(owh{t6*4;L`J3u-dudg(D&3
znx!sVOI7`&kdOLAyJJXOEZo*Um#&o!X^vGKNy!nqZHuhpkZw{P-726j%fcg{%HmY6?`EXsA(^Xi?+&7YDKNTJ?)3_|Em(
z`j>9t)iX5q@>|Z+AAn0s?cl~F?W33b@s-20t1q|ZM=6@6X?*4Ij`slok?N|b84xMO}jKL;q_j8>Jsgt*SoS3z4hvhWNpdotyqb6;B{2t66l?z
z<-F03|6Hm)@CMm(+M?tY^StI^`#OvZX}_M0gzp$AhHg$-UbYF>b;
zZkX8JXX)KTU*vq&E^R}>b*80iO-r4#S}JAOqwtD<4|~o;DoHZTIXTHVXK``Q&OTQoIWWh~ejFoe3wi|3%3s*ne
zXv&fkpW3M45d)lzC>71(S-R#>hl6Ap
z4pLPUagb;wTFt8CAO-t{gG`y275vd^Lt&ehYb^o^+pM7>vR5)UrE_i5K-}GBMErY(h=cBpDCP5b|7-G-_*XjlIa
z=p6}ZA-zKpEts$VU(q|HcE38I{Qm==?mi_is9F77wA&Nb9MNviSaU?Xy=u*wghZs>
z{v1k)?&?%1CED$up_FL1MH}k0+hIu7YquJ*!tHhv5`s;n-A+SRz0fpslcmUvO02@dpC%2*5^w>C;;Aqa(CU)1Gz?r%}AvyEpb?-!(?@!>2zLUqsu!5t>x+
zkLj&=af#Nv>}lt$auiI84F4;Pxy_-n7R_mn^p4mLE-BOg%Ot6?OzSY?=HND~KB9i+
zZL?@7>Suv9N7TS^NY<;LZO97O&z4Z(
zNd0_+ta|mcZ-=GGUhTejZcD9i%5&psen}Sn?44$0(f%{jv_Id8SL!3~`x;9M9w1Jp
zmD9>@TAOzp8~81+!7U@ARPh3Ld!@GK9VJA&dgisXYiQZu5yG!nW5vH^R{S+Wf42e?
zY{J)u%nA#=>@N_nZp6BZ(C$D4F);gDiUk|6%EE|Z-`Qb`C4Yffy_iu0A=QnFV
ze|!sjTWkGE=j2HzQN$M}ffFN~@P>Vo;5DAhqWgXJ@5vl@I
zZ#~qBr?l?N8?Y_f{ma|?R((ENMz5i!__?}GdwY3fc1c^Y+%Hpg_;FAjXH)?BWT$p~
z`3=c4sm|;ngXe?{e#56tTfR>q76z$*kZQl+OYx%~hoBzSMAfC>g8C@_^e*g^9{4nc
zkFC}&etLa+!7i9Q8M2B|oTV=Ij9=Qfe1+8N;LH*_I2Q1!?w9=;>JMfzhLV};VKW&^
z$;RqWTKknJnteW>+OM(Ph;ET#!gGSQB+boDN#wjWM!RKI_w>udfoq;9u%#(6=pZu%
z{NiM7_NoS{^EaT$nwuwp2UMGYu0NpdSoIo~llpxAWoRM6Z}i&h{FYuj`-@rb;w>X(
ze=2-(`xniylyl*WM)4niOIaS?op)E#l;*;BS0`#+RwwY&BeXkLCz9_DTb<}6*~M)N
zMmwt5g?99@cJtb7Q_}4w`P~-s
z+NibjO>jx3A?f7Ih@{%BU%tj^WB*HYeP!vsFV`{b1kGBXC+#m7qwQRO8=tdPYxGq%
zmXLdWH7K#(0RfldQ}=4?zG@e{>U|BX{qpMyWkHEUn;Y|!?`zGzek0ignPJEl!dUTj
zGhi(J;p-NR|2#sA-O!fLE)xI%7$B=yXK+$yP(f@
zvSe-0tF4@Sr((>o!$>dO*otqRs?}_4#3pE$Hr~fipVRKy)Jmu}a?>4r{t0c*rfjxZ
zyRvC_>}k@VDKy|=4cz)X?pEu|8EsQF0
z^;UoE??!`tUGXVVWRo*nlVkq~<;7t62ziOyo)zfZGPIf7qLa-cChg2n$<^DMC7UI~
zAd|r5ZLRt6(^{+T{rTJTw1VwT`G@ngZr={2RQ8VRC^cZmOU^$w!=YldA=Qc6=^fX{
zE+oZ#P}XY%bgHgZRL-xxy7S>=6KwKo62dyMvl(F}?s_No3nCCzm%v8BIN1dKRbgPv
z-2HFoZ>P|8c!eN4=Yw>e$Y@Asny?JN2vijk~MGj0Dg9}_FVOyu|E(kM_nn88&aB}zmgIdpfhL9
zUgn~sHaLQ;P5UlGJMvw+YCS)Tfj+jBX+Pd*jRBkd4Z}I+#C+qLZC<;W*X=gkXkGU8
za?UA)k6O0Ib_u_4j`NBbVP)3a1*tL{?q+Ml{d3<#W(2A>!8I1J8G&|Z>Uq(umG)sx
z7c&SPGm%ePkl)(>l8c%(sd>l##@h7#p^#COWrU34EKA6E`1=i**@GcXE|}oUR`Ak81*Ta_o0w6&iAB-AKTJy!J&FT;hfS4l
zv3FN(ep2%C4^1-1!yl>Z7C9JFd2nRL3$&P9{)s&r?vJL}{9|Uy&!jJAKM}3y{5WI}
z{@5(#T4OZ8_6LAp{3yDi>K_|ZaJcwmOH&TecPu_w=6552KBoMFTM>247-ThNxvI#Mpge`=y`)D&DZ2YNj%TqKuixd?#
zQ;W4~`3=RccsuZdKA{2LuZOgoj=r937WfN_92kEd4ecqGG&MV?rj`omct?O7&x
zt`+>ju^!GtmC$9`(zD0z3mtu{YGzQ{+$=)LY!2(1>3`nH^0aBk%jwwJ-6wiE=Z!)`
z6gx;Q_)g=1Wwmj@GX11lx666_qewEd^@sRDB^rb&xyM`&?pU!-K^#b$hQt>!or^4_9sL+Ik{h|tewlgt+w8eB_g;y7Ahb|~uRc&*V4?pmMCZByN#0IG)XG5pSi*ighITkio
z&I&doei3Xw`lXk1^Ej~SYOpCb*t})1dE?g*8|S5OF<2ozE|0e`t(+j3el-D1Zwp!NheECQ#jw?o
z`uSo%)A@>8nk-vdSl;!=7^;>1fAn%KBI4^zjn0i1#QzvCyyE>!e*V*3ZNsG|gj{>6
zm-Em>AX~O9G4ZBt#L<_N31jBvUM`I=zBI}0ohZm1m?*HG`BPvO{MpM{PDjPXapD&I
zf^k&*%2+7WSnGSG)Km)RF!*cDN
z0P2;d4C4$LCQa1X$;SF(#%e-TTA5T@MX2cSarULzGnM2S@X2`*xY-no^F&kBKa__kEL0X&XbIvNjfdx_KFS
zx4zB`>AxF=&1bPg&qo<2){5!Gn&PNzldl%)Q+@1Rc0#{ChNU+;Z(Reb?Ut{a`;Wik
zcEnEopD`@6(a*r4Wh&d=ZfGpOg;GZ-HEgH8G=_CcoVe4nmNc#slaA>-b!RMVA&&<{
zl>U?O-`=UKvFvg7hyGqHt7aeTLuK|4{^wG?R%TB&Jz
z6RdLVB}!pAj!+76tOHJ6ImWFulPUG(n7OXL91AE1
za=emim17cZVu$Nx2Bjd!+0^=Sd`YDtMO(VFgSzs6ON8tA)sK_|`B!GJ2O{!sAXZ^J
z?x`dHAxe$@3dKzM&s-z_B~$*#8(HN)Wy-(15u21;U;c|rF_4Va-Ek9@hMh|ltDK(`KI|Oj{|hCeochySap{v=-;!fg03J|VW(J6DOhz=R((NFn1Wu;vNo>$rl9#v
z{z6bRdSBCeg5KV=zMwjtPN^g49Lj-3$27GHI);v_gaw^KDG2&U)B1w0q|&fxi)QS>
zNHkhR_^AG=&Dex%+;j{*sqq)p|5A2+VUJ!T?60P7=M5vBP
z8s$Kw=UZB<_hKE9M$&;NRPTY7^{aOQm4<2LT&rnj5I*F1J(q1@`}Ll!7@qCaclcSF
z{#GlNoKyd!b>V5(f#pu^<7Sr97ZE-TxTrl_#kC53@O7+h+&6zM?{^k?PhVGG
z-kEfotd6`3CNYz@
z?7B!l(2+G^yA20u+^N2Z@w0}&tUWEJs){#Fr}Qwkz(=~Q3DeN-AEUg=~N
zaSP$Y2L@fw(!xg*+Zsdv&2241{~4XxjeOfNeSc?m(wtLWF!3&1@XR^Yxwc}pPfzR0
zZsQ*m=}&cKWB516^s`;r&!IV1JCjR03zu^@vSlW`puGVvY;S?D?G}cYn(#6!yret(
zCNzOD>6cmPmlvpPho|mL?Eb!(%WN5`&+dyQjNilxO8J?-?8PWI9lY?nF_js7AItKY
z$1onnRvfa*?qk`kP+xx^yAP9Ez0v(FC&gS8e>xCtY;xL^CRkIt{Moz#_v1Y^Gp6Z}
z{+(^(>-OnwAH>_d*6-7we-P8d`5)`M9%OH^VdYOf#D+-x=P87jrvEyCeHBurt2r0z
z8Zkxru7S+OOoSp6q1b|;pL>*TG~pMz7z8eNi4f4MA7ihxDf&H+v(-$~6Q5uMdCkc3
zm!H6U%2LJ+E1)3oG>+q%Z=djrl|p^mQ<#BH)Ym=5o=a28fP`c!q(9d`i?=Y*^LY@
z>|k(MJQE$T<#4&9Bn}JbD>ix+2EN>C%CmAM7QVAN-mrkPFnG-a-ew`MrX4~3hJ4l=
zl~$O~?qP-Hd-K^t%(=J}%~G7GF7N*=d!O;qOY{rRu>owFe*g1qF)Pr&c$qcTTfM-N
z8y2mcOs5~P_F9NLu=Lv%Nr6c6dv1N;3+zr|1H~~o;J_NBqj$O=%eC~U7O&L%b&L(@
zr(R%bp7915#Ua0|>m6e=V!|0Jdl`9$I>sjG{r|z5#909O2F%+jHlq%KkN&}ivd{F~
zA*>@Cu0J`16=2r;6H>gUqP+1x+4U^=4Lv-^<>sR1pZOAN
zkXD2(yF7M}56Ai(
zDls+!3`?w9iTVrcrVdqu0{i%ch$Yhf*qP_*LH)It+0ASOwSm@bgMQ{^cFJ3gwnQ6C
zaQp50u~%3>_+6)0+4Ihy=^-m6vY=Ev9P3%B`hm;LrC)v(j=4#H^EGycjntp}H!ER#
z^(+5onMeg*XD=Z2*6Zv^UQ$zj@pbkNmfFfo3)n)3XUS&lnytc@yjJfwiv67}(mx!<
zI^c3>6sw}tXx0sv`u05>R9^L
z?;0yeE;3SAjnv=AiOl8Wu!OWoPc3E-|6fVV0HesTG{b{7GD-o%=C9FHj9YM-1=$F{@
z^;45sKrb23;sBXAN#I?XB=EYuB~l-~B~oXM)FT?=+#-FxCSVOF3)uaWMe6Ix0v0z#
zqza~p)J7w9{Zw`r$`wx)7Tr9RHDep~b5q$pu@`1+gGzS+#bCexJ^2*$VyEQq~3q
z{$0u*xCZ5iQr42K(_`OeQ`lzxy|>wi#$z6C@tB7jk9pvKpYq41u~)ggfhq2uJ`uO=
zey@5=KVHVZKqxN%a0Yvc$IivcdwOD)g1lO`Qg1bjr6SDTI*WDAs|O1q(%FA0o2TGe
zPYFE$zLb2qn|faVWESg$w@sXw#d<`Y{{shB9O=mIFdJ2JUVnZzdkhEm9EbpTsHy7R
z$ay_Gx*dmcfZmQ;$3soW-qp0(?b7w6ci8~Gd%6DdyBI)Rr6W(K&u@H}eZZp2U!23@
z8LgTY&1F6LqA~imxh#)Y882u-I>=u3F
z0(Pr&!f>?lX8O3T39WbqweJLd>(&HkV+51I4)<`H-ekgo1+^qRhCCv)rF7PFgzZ__av
z?!CdIqBl^#oQqRR+z1gmImRdLH%T
zeZ;}r-bl?fD7W)aZ>4UUkH^)yx2=&HTP-l#c!GlM>2)I6+C#xs{b8$MlxL)t7$UVY
z3Vwo!3o^Mz>eCO0QL3efdN>@w6|fdYYRVizrMZXtGxg|x0n0I9t9})*Y>yfwt*03J
zH1nwFe^
zYS=EJtJ`3)RTp+}8L1zC7PfO5G^ZJUgtx(wcbqpk*ga|tQXd-CBcUcKInAgaPN(xE
zb+17b8~&ulsP+1+Pgwe+qn0BoDC&>&Zl82@JudRjM7-Hj^zC+eYNDqUvydPkia#1?
zcm}Jl7>{tqBZX*J@tyg&kTf_z(Jnp|?IenJhriPsE@eNnca8CI%VjK=W7PZT1Xs2`
zVj0VcL7c#o&4U97LyEJv{@F5YS$*i%<0iUr3dpUuUe0c&YwZe_N!L})k}~v)1#wbSWK~A8a7I&P
z9gbu<%&emK!UbV>18rd>4?`ePa(g6;Iu#P^TqFylA(2)3zV0c9Zlw$L*5$0R_t+sL
z*g4eFh2;;HvmT5eJE+gBVC}vjU5i
z$P@N!`x#rsM(JlhV|&@B`i7OPr`tG0ERWWcSFt>I$xe&_8B%>$vHrnO%0UwW%pDnK
zFBGgR=p`)}N!QOWrgvw!uFYRb_!#Zi$DfD>MI+Pt`JenGCJb$^0gU<#0F0mO7pO%z
zB!Tq4pR-)|FZ7BbVClm@XLn)XyytUFT*v5let~{+jQ;Bv7=ae)H?78me60TLYW8e0
zToDsMyb6$B^M@}9(?F@WetI=ifD&7YaoShqZ7b0&cbGB(@BYJBSV&+alNkM1A?1f8
z!x2y;w+0PZNb>JBSh_i`&sxJC!#qp3uVuHe-^=@~g*CC;`0P41i0#zBS;yWm#yk!&
z=5e4K#{WL$Pp)TEIXk9b{+doB4MHf(PHPdAg{SrCt?X8|N56Y326uaO
zZ7cf}ql??Nv1h{WShI~~#MN!an8E6wFJswy+wCj`Z!6T}ma`NmjP#~yjjut~>*wl%k$V4~tTcE9
zukP|oUeOP7bu&ds$%}q43OHk_QgRe;DUdC?70YVuBzLN_QOHEjWUTFHxU
z(2dl30~5j1fz&DLIwUW;LBcGpMC_KR8zk7VpivP0pcBbYje_V0iN)teiuysMek&A$
zif)jLHx+^mx{}=yt)u6(G_Awby*0CuFww5qVI!3U-X4U
zW+Kk@0~P%t0lzaeM1M%BUulFOQFlm!j@vG@M|Vi&KA$FZLU%~1vi;~)B^*8%wpeNi
zgYJ;1OfUo$-62=sSuiX}sOS&L+%pde_UI3Z#kXkufT2Gm?XMgY>_vCT)kz0L3f&=L
zjxfrhJ0uoc4XdF$q|{`i9J<43q;^(`a_9~{NR?web{ANnKP1$*jJg;7Ay+RL`XVk9
z`|@9ehLXW>eu>qF+nT!%MHrs)fJvM7Dg9@aAW
z9vOp^E6_-)!03SC3T~uyqyL8|(3oo_3B$ec1j76dj~5^XPoS=Pj?w+Y6R6yIqpOA|
z5bV!tF{FVfd~HyG7f}8&q7dZd24WzK*H(cb+~6c4H16O5MC?nW5IleexQ7k4@Bk{a
z#9#{#_@1gJJQ%9R^Cd{3_KDHz--U{(eJZoo7{;LXNsZvH#bQ*0>L=7a1~YRsG~5`T
zirU9mnOu=m?UPtD@nT1$Q2QgX2!cCN^OE{0-Uft}IS`sXQ&`Cy2+hO`@&H54W2r^m
z_%1N-=U!C37s)B_h=S%w=mT;f@m^fgdWn%j%`Yex<7d=539;yiAdgxActK)&gGQSE{DQ>hbpNoUQDY0C*4GkRpx?EZeZ|J1q4e3uz{Pk(4jLV9d^B
zOw$B>&OY`4t+<{!%Tgt*@93Ui*z=U}<{^v=P9D{l)UbItBGmn7%)ZC#BlokmYy!!1
zhocl4PSic$V^QOh{=)YHUiLlnw=mfiNcn*%md)q>PhHu^l6CnIyF0`m;)kpi<}Zx$^C5i>@KBS&4D(@z
zN&0O+usIPS(tnJYsY;k>_K#sR1?hOLpGKB6sjh`2kfj!)Ms?uZ@#0=ojVV8tf&$T2
z`6(eOz$=hchIiZ|=D><2v}%Zh{5Yc-t3>aY>PYyxdCRAT?nE6<#{>5R~yiBh-
z%$^SETp6)By~9b^{NW?8d5J#!2#h^h|CBDJ`f<9H=|hgPB)#2FEGeS;|EB#%7%QHy
z|5y8mWx4$*WEnwh?+KZ|T9`le82cNV%L~Uaj1tY|4#z!%}N
zY0OuVmiq5AFv=Kx<{37fy`^`qMQ3H2U>4*WuP>@)og>;OpABh$hhqug6ZNvQ7P&7k
z49oTUFJO%H>|d;mt4HYUv=Jf-e+9yrYW>4s**Mzf=y(oqs4l3%MDa`gnR6_|NzY+o
z3C7>l+d#klnC#X+JjddL^w1@uEyB8lC`^x48eyF#pY+fqQZDL=(M{rUL!=PZ>6u5|
zp)bd7gGzG}q!7`G0zJKm6rwt%=n+MvL|=~1l=^ZJ*@>Q*^y_mzVR7ZR|Hl5wf(z()
ztE9e1mnxh39nK)rs4|J7x{@!pW^mUqU&b1>XCZ$7!+6(w0bWYv#MY5h?T(c!S!0+0
zJM-AB0T-6sorx9k(;2JExOSFFGRCot|KQ7K}esci#_t1SXn|BW&8ubV2L7>v&
z8SScac`t7B*fRl15M;DgDa|J^q75Phm)oPf6y|f87WoZ6_7412gWZd`fcQU%=M&gj
z!eyYJUKUmW8bqr$XyH}F>M$!Igc2Yk3J7NZD;auVhp3DSUBnt_2VRO>1+q-0D?}rD
zgt9DxvV;WzgH(kqfKFi{VIoz)0-g?hK9gWdtUnrJ$gdG7J0Q`^yPS$4%t4~%CJc(f
z-%$#|7T;+4gpNf@Y}?6?BBv8&Z)fyIDZ@<89I1*0J4&s?)Q(a4V<_oCoFwxaOlm2+
z_8LQ6pzI6C7L`GZ!{F7@AB9{jkqU{>^^i>2Jj?H+i-H%!In|VWY{m#}QUdtJlZfKDOLMySb=TGPe`+Bw%ZUt;T=yXYkZmVLC}=_LfI<=96X&Ug*e4?Z5BSpLct
zEID8g)@TS_i@m{SMb9d_32Lj6qZTwi(o@zmjG1+GxCJ_JN1b!p2lkz
z=nGtYFh5hJr?~l}{IeqcbvHkC4c%9wc%v5k=_lG0%;y-zXSj&kZHVgZN59q3P(Fao
zx*^z3cleXkqZN8(6z|6ut=HqCd8Vs^_M%Vj)H_G>*6gJIY&7qcT2FFQwpYEfJ}inS
z2Ei-VW22dm-B>h_H>M_bRV;AFQy=C4OsrbR)6~R?lvsC;r#_6zi7keB>cb?Y#HK_%
z^=IS`Iitm+oS4c)5dTI<
zdw>*vwlhJIjq$0jdHEn3&|LBIX2~YE@Y7HRQ-WpaAI0zNIoO-0)FQH5saUlq8zf0rXz6d|)r5!c><(DN;>XPe`Jw5<4gH0*0zOh}A}aJUJT;H^eT7k-|iB
zjea_o2SRd-DhkUTsiM*EhO0=(eWT3NnwC-~4d=tv5iS|gxD1aV@aMwTHwlFmi#O~L
zvA#((j8NbD7|c(Mc1rLjB6TtqeLDs5Po96>)A>CNP`R1vU@7&s_oW)qOHSylhEET
zlxHRLhd5tfs}E1%ZFtc;`tlUM=YdUEar7WnzQ~j)`45^9@m$+drz#D(!8WTuqP?Wf
z!Z-`R9Kr}kfHyM)5)*9ZM=3m;&t(BT-Y%YTx62pxGpRf|HB{zY{W5x^G~UyD32c>@
zdniToWK4|Hcst)^%8}H`U@BkM7pL(D6fd=ToUOb47Si>Phl|qYC3Pw(jcgoVd~mSz9LAf2eA^6XC|^aG>2UaIIP*DArc>UZ;VOMM>OQ^<3I;P>jMGQFxH&r7N~ijx}!Jm3)P7_`NoAXn?&4Bk2j
z!5m6UlFtEcMXHl=fqoNQ;oFI!+ibqufu5w=L7k_3m5M{K8p=Cp&C9jD6)KWz)6Cn>
zL{Cyn(A7KSVVH{JxdXJH@*}#d5%0mO_1hZpyKtG&h>yc1r!jBaa0;Cb^2ivsN_ng>
zH9#kHT(Tog=$_MpvnLHbr%QE@R?o(#w)GXf7Kups6W?_!CbG`mx7QZzOV>b!&
zr+hgNi(fIF@$;7SP896PFV#QK;=R-VBOE&VoA6sfG`|VYz@-QmeqyUWuL-{y$&*c>
zHnliQ;m-mHJmAhM^@>A*<(m{r{
zdxY~`pU2ao#ozMyCajP6Tk{OpaLh@lZS-u-+ad2YYu=33{I=F#BeIEBL<4tqKI#v1
z*+QcVgJ#ohV+?68ja{{E94(O*~j?Ks4>rU!4TpH}%uBdc4CU7=?a`HP~l3ixak
zdi*B%zbMpAueyo*%#!7oZ{o8V0PpmK%tv(3Eoed`^>(*#e>heC<}I)-lACVDuHMM<
zlehAp7r3NOC(r4d}S`|Vz)X(0|Q@}R5F9xGBe=rf`|?)*mBO69yhDh>?|pfq
zA$VE`w_5NKw6n{YuHaW0)eGCuuum}cRzJr~k7K?OU=(+;F;q>zR7T;lv|4}g4&E5=
z($?R&gLeU`6?edhC-f6{@IKBG;K|4JF8z2fe+?FE)cyEEDRT&)MQ>?Ru|t8k`^YEB
z%IW&*e!Qo%)F?-0*8~0eo3yn#yFWk2__`V8`dw(^4ZdE?xj*O-KfqT(uZpH$;FA~V
zSMKI%IZ3=b{n2u~96%qi-41u=@=0t`(?lPK5&)@T0UQ##ho|tWjrxQ4@bn;7XO_@`
zqJrTCZm}ZEXq^@z0fT@JJPVM@jU}Q!`6_{j!(>>#sK7lTZeNTW1*eIr5bgzcNuA4S
zzJfzl35OixO6GInsa1i44XB~Q1%oKDx%vD=2jH~dPvBrHD
zhb6Zek}FBne_(up1%GV!azWOLWx%PI03x-jY^>pn)fI=HaUX9MG-_(@c(N)Pjp|Zx
z$M*SP6u_~X(9#MMs~X~MEgt|in;&F;7hMlCzXPq<^fsd=iH?hei;T7mR+~iyhFIZs
zq^YPhII&O@M#3$5tzyr$rKmJEEfr2Fd`!hXj+kS3jf3)rsI(wwhm2&;kKql5)GpPI
zUpo{FLg7}a9+vWHxQGU-7TC?(AWmwA?rmyCo%@enN>YajXYC*eeE
zYhsimUtSgh<=Vaz_B2XiOk5`wtg$4iA}cWrR@WK+5#~jJDgtb-2gtQuwUp1b6$tMO
zu`--0l-geh^LrzOL)D05xwZp!fCuZO4%JEhP$%`HB^BT~QuiJ2bw)tD|X4H}*eYNl#dcU_u&W~-
z|Iwie;U0B8%4c<{B)!ky5vu>Bm3L}5m`~tWXM57C#6r$SZ*)u)&W5RklU9l3ULm;g
zW0@(yXc)qPLRg|jPL`Py^4n0mg`41HlhJMwA}AZD8BHt@L>#|BJ93x9DBNiCcJ(CY
zKY!713`r@(h^b%5{fV+VADPRk>CtMa$k~pZa$)vtj&UFwb4`#LPA=d@Ay=kUuI*>j
z)JEzzONv|+9T!)z+Xqyd_}B%DT-#B*{1LG*3LUo;qWuY@5ea6sA{7jyp0=VEU^QN}
zH$o+DCDb4$TN;F*QEI4Ya3i$2LMUrwGz?={llCNtxJU&Nv(&f#vlC6a#)+&x
zREO+B1Q9s3!348Bvu`jAG1-y=$8jN!L5lZv1L0VQ>ZyCDVMO*w|{X<(yfp*1R4KWFMk#}3VLb+8?S
z5ir^jZtG*{#~3|LypmK?hub@|3ZW!g#u9i_e+=V{bTHSp1Az$D@|i#_G5S3~X}n>w
z!ng@(`C~@Ip~=+qsm@*ymaD1a;={;I#Xx2!F~gNafoD&oO-baG3Yx^3Rx-k5)yj7?
zIcQQX6a8tFwLFT^6po4CH!Hz!oN;VRcwzmB5|xG$VI7VEtj;o204%}_*$2J(@()6T
zxk7`~TWGI{asBN9TnVC32fmD}-|WL=Ug)5xd7?5JJ
zT?GH^@!_5i_e8_NX^;xvR0|Y&F$VX>_)*kWp)cSTBPwCvA|DMwg=>TKF~rue%OU~I
z(5d(oDPQ41?BxWU@~6VGn-|jnOd>Fxp8|hA4uraK##tR!P>|XWYGVq8I}FCufY_LZ
zdy_9wlcmiIAs%`PM6$4e=(;?#F(oKtxk~amuxDVR1|_*1LE4}%`mlq>h#zVOxI7
zRrZtL9vhFhtFdBT(lpWsTOuO{KZ7lh$D(QC*T#TX$+6#w_DvZnxT!@Y`|;SvV>}R~jccInW_~+j
z0ruDW7#Zkzyzqc$^Ziak&I
z8c}~sDuSG-qb(aVNAXx~1^fg91`Z_Z9`aaaMFb1Lj*?ALs91_>c{~hm6?#?-b6B(q
z>THyB#4okQ;26o~WJWU~;`C$KibC7Q?N8G&P;npNJrTPJ6mU?f1GZaywA8EMoWu5UW`VRfD-7@ZFG<5
z9yS>d*g@k`m?%doVSy~<-$jpNX7;e%rDWkR^UXl!-~I*unf?H^Nle_tObE*;GzC&6
zzdq|p?h5`VpN#T|?!wY8V10|v;#652dOB)rG`d9evvjgd#o7e0Z?9_`gZ^<9bQMbPhJhnqS@k%bsLZ=@^rK9wZp5p!v5%ueIR^d289q@K8!4pD%
z1k$fQ#Z!ZB92%i&^*)mr*5_AZ7bUa6z74!u%J4&*VD)P}2Pz
zLp_j|Xv@VMj{2BhFo-uuiFh1!G+smtzt?N?*z}tQVSzCM#VPvG;0bLFB8j-8DDr9Q
z*z*`VLyW6PezlMm(mFG$U$|g_Pi;oiMbUE>_&Ojz4ND&e9$rl@5b2l=D4^NEF+kb~
zc2ZF26OO0=kmKM;40%(!2tVW}*o7N{FQpH5NTI0&c=y2pMXdPX)N4QCB*O4;R@5l9
z64{J%$o7G6u5IID3?WD{csXYBqA4YZunK%YLaWb3G*q}gJD+D*Y(t7hx=&-E6Yjt3
z0!_{0G3=qq0{tn`B#(&coCyO8e~zY@9*@`{*9s3tChfL2!!Zd|Chy)z0C6x3<08V1
z1bKRXEM9+bFjt;%BkEJYGSqTgqdOhJa{I+bicrh%f+%nfHbnHWU=mHC9%Y`zdpO)L
zQ#i-_ILOk218#BSrXN0Z>jwt&mT|(o(b$kRq^(-4H++^i|Bpt(*k5e7Sbe*$rR?wl
zRAfu42#AW2WUeh6uj^H0C6!KLDF`P{BaY!PcuTkdNm!5GGnRTm8^v+18*H@yf|k`1
zxV7eJk}3;iabgahe1R4k4V#54ks3InMm9eOAkG$M87Y*fp|qkn_1x!pli+p{Co8EK
z`W$MSX#3VEyUrZonRBdq(i+14i0Tz{tN&>rpm`X{R@oHPG487tQkcutNQA<5NeQGy
z5Y>kb!Wkbs1p})9<_JBkGk_NB!L}X*ZhwG+{jrJM--K?!#Ghq2G9|Y|fJ{1w<-QAE
zi34Cck~~rgDCsCUvJhhdL;J)=^d`s6TYXYG0h
z0#8Y&A%zbQtr;B^3SpA2+C2U^#jgM31>TUI(#OBRTeyc|jU3~318wNBCyNDJV>Kdu0$QN*s?mxK2^uWw5vf~d^X>XZ3IxnIwQJbLYxHA^VnfY
zv~QfmQ=B;cScj6ncL)!}qA$Ko)qtVT1uRqP(f{Pl{~zMs2HdKu+W+5c&Are2;Vj?)
z2M#ZLZ%`pY@g+4iV^OK3mG*Ya-@RpBe`R1@EqmAYQ9-eMAw?&e<Irgo|+FTy@M=A;7~>4g5y>#w$n%}yc4_Rh%e
zBpzj*y9K6&aaM>CWN1RT!
znHA8VPgWak3bZ4uq~v@R&Y^X;fU4!VAH&raKR#fbN2j(~=PRYTQCcp?O6`PRG;VyoyyQ
zj_fD|eKWQdn9$yBOc|2uJ0F!+_
zI|YW%TMcx1gD%cmTKf`O0v)e1-q$c#Y|B-!S3%BgjfkCxfq1tu4E*7rkV?86-ftJV
zNdRikID8@QFKE;rxW8STTjze|3E2axEaqPPAL^-*3uuH|_=2PQ2*_FsJXFFgNbd9x
ze*%lEshyX^^DuYH8Ua1smFLgMd46%`U}-S@QQgRdoPpy8o3`T0;8lm1enQPy-avg(
zo;StLT5L3?wutL>9}cOWf0=G!@K3XzM_<@{n2EtLIXXU4DD
z8>yB&<)Ae}eQ~!aZMyLefna)~4BzegRrS}M8PAHQ_y^96-%txKU>e#Yy)oTz@!54>
z@RZC0EKO*zqpq##tLs&G?#SE?{>bOgs=L9d75v|He9(#!#<$dNx>$Fxul!&@!J3P?
z-#02#VWFN1Qv-H`liK*1WsB^07}m^b{@%~V`_}pcPZKZ5T+$)}VcXqcmMkvrL%GEw
z=tmHcLn9Pn?X_tg`ATzK$Fkn8l+j}=jBvPeh?rsLLTTebWn9CMu>;~12?C`1+sv07ld4pQtJtyOb?%o6LNh1=y&g+414
zB_xXyI%+|j_%SlD7@c-ZW%4&D8~&+<+LmtSej{z@x%Hkqx49ZS}DButFBNgBO_^i0U#I%z<5WVW1McdH|(8*3T#;+w3G9pjL+O?e20|7mtr%j+)FSE)!ABP5@DC#W@aItC2(#)
zg+Asv=jP1NINDuj>y=T;hpB9aB;0+xRxzPY7?ULICC|bjvkDhERRzU0@I0v^llF9ElC*F6a&HHRs*dlFGVI1;2N>i~Z
z7d0-H-J|i;a*TiL!uWvcCgD)EaTGHd)4?6ILCb(BM^t~hh4{+kAI{&jFrG5aG>OnI
z8%xoy*+zp+?Yd+96SUhk%Ix@b6~5a(KYLL;X=+p5o<`kM8z52j)J5?=QIEfR5f)Q}
zSBEg@!+$@Et>==@q>&Iglw&~ROW_z+2q^g?JOe74hDG+(I;|lVz5&o=6x9ST{)bBc
zh5>pdeb3C}?T91^?c!l}6Y4z&m7E%OpELc^-d&NjpR`
zv`fA!oh-~1YRhDLU%4k4;M3wC6WxNP+=EEAud$ep&ww_wd8G%(WV&!&76q?t(C5CH
z{g?=VZ_DR5=|u&NNQ*qO`EYEo)Ub`qW2He?`;sLww~xv*ub!P8A{tMzHE1ntRoE@J
z(vkYlKHoR4hK|)S=dJe;{2mOnTq>$lEc<+mYl@WP(epMH0>MH$dQmtH}rV4niInF*<8D$FkIle2Ug2
zoBQDw3-<02Hn;$$N-5@fW*llVvA$%e!eDv5oPRrm7j`~*J>GG4S)%=w8a(|?_>)E>
zmzwZr=+mtJ0_Y>+JD`u2c1B+fh#~6k@b^36ujfVZ*Ni?z=wwX#!sr9H$Rv;hZLThw24eKnoyf%WI
zV2y4{?3R1v_cpG@Y#++s4_-kQRUh|ry
z*?2ciGQ-awp)|f?c0X4H9BblTvIurYD=u)w3fkI3`t2*(Ia>PM?lGu%jiDH)}g2YZ{n|Ml!f8?xG;bAg3K}jONLgd
zL=($hFyJVbzZVv>`Lnyowys(wgdOE$QchRk#&nC;AoQ6q9>5qY5K8&Lw5dz_lHFlN
z?26}Hq&Q7SQUP|)!?%lvdm9Q1Y|_wi;me`uk87Iqc18coVBk6^i?R*=$Lx-QGq5u1
zP$XU6tucHeJc^Uo;wpp>ajMg!CH6>2%kd?OTxDx7nVp6D!B8<-MB-%jgZ{Gh2tR@y
zIM(ks&^xBK$fn7SnV1g>jpVyl{9!dhQ&g=S?$O{jBT?)(e
z99p>6P(H>Ysd|$02nN&I8$)Cl%}+|1sR7~#NgLQf7aJ8)31AASK#oFC4})^J11*q{
z7aL|FM)94BPn#b9Yo#Zx5!({afkLh2o)8Q`V_NWT&Go}>#o0lIWO02cb6&ReW3bG1
zRmh%knpnbeqLy8fdzut1qpTvi3YF1JQOKIL5!dCkTcbzJUUFhqx&UKzxlds9BS1#y
z5R6amf$g--IJ%JHsFVg&NLUaoj}3<42xa+?D
zMSXc;#wF*&_Oj?|sUl=QtshIet3{ox!tFqJ_Lv9A#xR>)0OVyJw5-Uq)|Ml?aU>aA
zmy4E0VeqUG4a|MPCj+RTQ~HN=_iG}({5jb(QOO@OKaX=0Ph
zlPOju(xa@Us^kKoo>f@bt@SJe6XB}q9Z?$#TZGK8H%_tA0VcCY;A0eHtX7^3CQeUl
zA3cpaGD_JxrVuj5>4r9viEHw=5pz~1iMGInP>2PGMM~mjgm!44N|OkRazNt?!FpjG
z)(oZ1utujVbR&lqBnsLZPj?G0QO=4cv(6ZMg)ot%Xp4)ebrV*h34^PQYrOX(t{<
zW!m5en`7vXSuJe{ijl;*$oJe{9(mNejD_p`LF^r$1y@5j!WgArY*lH-DmE4iH=sT?
z)Z-REEfv_EfK3F*t1crb2v7t^OQqfBzi}+B!V8#@K=?dCPA>t({|$tbb_1dqp%X@E
z0#R`61fmqwVF^wggqX7m?KShHH|JdH|}
zHP;5nuZh}>c0`Y;ZIN)g#L!S0%TU%7wcAp{oS8FrCbnh;n5EJJeOUbqvaU{RSe?)Ihl81{;INsy&;PkG8(x
z`{WcIgd=@Z09pC9c&53fGl*;lXmRq@+yMgq>09VPc|vkIcE34BaN*QftwkjNre#Ds
z{ntV*Hh=^S5-kJht~@_E8x^pS-bW}b4lCTZ3WSFsXby666_X{#L&WBTQMkw+Ctt|1
z#k|<~mhzU1YV$_d8N!n+V#`?@2hS=Hs-^O3MK79LL074RJfxY(f9OggcP{atyE2|M
z?h=SrFDHns3pe`Yi^%GYYV$@%>x1>Xfp7f-R^)bA4nm&0)3NdhN;fD&nP(k`*fxDUWweF)kj=0Dv*juMyuT=iBYA(5`
zCLc~O=x0_s+W~k_ve4075uB*Efi;BuWqc5En^1yjJPYq+;3caKc?M*1lkNvX&bWX#
zt;QCJKoF>=Um-k52`?)tfd?5wiaV{(tW_21KkKdIA_K+w0Y=kDLqgxzZJ9-88*5Y8
zZH4j#X%vPC)~Jw0!YDUdG|p0p2Gs&i5iZG9l-xJEiw~+UyYx|=111Y8?4Q}SCn(jd~D1z>6$tpq_a2R)5%
zzpm2B;OvC+BW)I#lt+uLJ7`gQn-nn7DdhG;Cv+Zt!l?MDK}`SG$XefKU=Iem@B^$)
zj}OtG!->y)uK-_JkE~RcWZx{}&>DMkNMqai)yFQ8eL>ZuRT0lpWw`LxT4$lM>Q*P4
z45QBpE7px&8(%1$nTi3@0sX=UpAl%Wlwta$3tq|O2i6?9fcP?YZcp_nkBwz5Laimb
z30ZKx+{N#3n-E!dcj`2@`CeQNBwUxqLF}c@@S+ZIvwvu9q<#cVpg%xY9txgnmw{H{@
z)M!sFQskhRL?wRF{q@#;91-~-q7k0WPkv}ME#8$+@PnbGnnF*Ok6>Tv$bfl7q4O%4
zevgS!%PtZ0r=(PXVD59VDvljU!YHzEd@7lP29JQ6XN#-_xF<(zdK-;?%O%Xw%GBB@
z>0QS(gS?jw5Qm`CxZQd;m0cn+l&BTh{Sn-Z{NNFOrp!Af-vJabew6bl6CqE^YeY{a
zzqYbMyV6#{u&fPB5*NRtUGlwTYpWpnUEI3#e)?Vr*xKHOlB2F~4MR$coLxDxGVw+|
z-Wug>Vx%yodS4ko8fT;iuL22`gwQ
zWRw{8``ulnRiHCr$szx#h~8%UT1fX3cL267%*^0X6MViY$!3j$Ugh4{L(1v(+v;3o
zJ{`l-(juvq5N}DomUKZk`{|jLDtz%q53I=I?oKk!dKC~`;XIBCQ=FT_?2A1pz1dXK
zKD17qEn_Nm5k~+BDA=mdNKiMj+^?{C=0TIUSj4bernXNYYbskKs-S^mUvnu`y5l(n
zb^Gd`lSliR%j3@41!7cTc2=Q4g>(rUM#(-Z7mo>X++aftWGwEI?@~Hf9E0+NTqRZl
zmPeGsR{226t6=e7dtsbtQJ)1p#U`r=lPGeDX$nbDhajtBCsB?D9J(jY%Ei-}V@zzwSY*4=BXRupz&i2iHM0dv
zHaD6TNJuTCuEpj~EDlWX4zs(3%xWU5+ajDpx5-c@&o*A>{~iC^IM?VK@mSV6{y*$%DeLP??y7sY`U-UPwU+f%(uf%lKWKkqM)c{rM^%y-
zmSEUk2v6i8@%+e{1g2}aiSV(>PtQ3{s-BT<3RyYGW=yvYuAxtNWH)W%WV8$3`_pzeqvcRhVvZ~=t&1b9Vg$3eD+`+A`ruj7x5$G5Oyp8Bb~gD#X}bB
znTbq)^%}O3?z<@-QL}IwYqQZvYw$KVwFNIElBP;{;G50;kewe(vO0=rJd$dJFE(_d
zN1d>&&a4Xzb+JN^HG@)Kqpp%a9hLy+&3?Q}mQq15TQN?rl13C34&a*wU{WK1Z)GFj
z{I=y0^D`avWz&E~tl@|tor^$jBLcHeP(6o|K^hojkOnp^6-vWWfrYBre`O6Gkzt6S
z=65B6Xsu7aBqFF2J?aERpcDF|E`VP(4jE{utHy$DP!}^gBu#WsxtBqQy2HONhj$%7
zojVP>S};y>;th0HOgb;bM(Nqj$RR{V8rh194IwAiw1W~l%B6e_piK_a>I!IPDx`Vn
z#N_PtuQdQOE|r!pT(%#yw~1Di^&nc|B?eAALq`Kg;yN2P;4AiLEpwdy9uRh&@
z<~&U&10g`HPh^eShwOWi<-eQMHehX;!eD47A-P(0AwUp65p}J3Z#uKyw1?gRm%1@d
ztL!Z*iC!$RAK>An`}0b@3Px8U@^}cEU=Xozfur75;KB$p^K%3dV}iGnW!*}Oc%CVf
zcu8)^VXxUg41_kurQR)jL5poH;?|5cw%FNLx7HAcVQ$W?H706IF?$?lT&sh%Mz;JA
zYt00ytTi0ijc;ckFZmVHxHEcDUoMV$OnVq(ivYOdxObol&Y3=21`E?}Goa{6f
z)e$#=1(HQ_ROuqX#e}}xqtZ|^iO)&2niIeu)+6qPJBk^WSwQ5
z>wGO+7TNQc-m5z6G;t1V${c0)KkM)*7~VS*RFmb)C8*}&5n?jvX|pgr))YpMHTz~g
zuDmXuQfq$x(mPJ*9g#K`61-8SSOtgJkDWXw9DE%dC;01RX);P|)yIr7nPb9?tg;YP
zR+Hn};^c79;=h}md6X=OM@o$XaF&^WiJRa&?goX1k6HEDMtt@a!?j-
zBceYGL6yK(Iz0m!7&5iFX8Ly?f{HE-Fic=C5~RH|P$qHEM!~#C?DxSXO*G8M{~z^F(W*dLjrxyxrr0BSdMkWM>~2Jkh;W
z$pa&)2Ug^CKop8b!DE->w}v2w95gSYq-$?u@Ar|(XVjq=z+!OqK`((7q#-{Vp}a2@
zj|auIT|b`TjO7-?h&i;u5ReCfOz&7Vq>E)CNTChLnj{2vVK!;$9B(7ikc{?Dp~|GY
zBCH5P<|q4kQ3^Db`8AoJCfS?9ywaI=#;*dIeaEfjlM+~&V?GYl*}{{II$H7-2@1eL
zvX8BW2f05^j+hKVl6)qD`Q8xQtSz(68rBz2tj{vcEM9y(%~WjOvN89Tz@p=CPd`GeGR~QnJ}+yJ=|US~jzHnb?M)Wd~4ow1gnVi6y~+y(_?QTF@mM
z3`L+ZQ`^8J#54dy)Mdu5Y+Q+rdW`rktIbDnrGO9kuZ`g4o%Qdkj2I?UHCxWiqH>R9
z4Et3=$ZVYHgVG8q)RD<6Dfv2$R)zhdtOW!NW8nm`Wl^7qI3J4H=SCyFAo)#+gD`uA$xVJ^MHTU-K&?4GVG#
zntG1+rZQUUL{MYk3x?UC@@+`RZP}%4#hx+7ougrrIs`
zXi5HvCmZa^GIRcirqNjjnE}#Y-=O#@3+m?7ghyP!sFLaiiU-HbRb;bOULKm$%4534
zp0I0k9g3~?Rv)@bnN{5=YVm>!
zC)S5J?$ohSBv_UC;s@`vs7AvfYgN*d~?l3m3uCBMD(3CR0bxmBU0x#W1
zfuJ&RLa1omyZ}IX!s*BcR9S#Prl%52gHIZTq}HK3S+`<;#Nc{CWkF$F_<>K4@!qtX
zq7;0kFZ0h-d<$u^N}1-kl1l8NC@iy!3wlhedLlj6Mv|&6UIL?-rbwEGz?iDuG(6`l
zHYg^B67!SGpu)bbsolXn;56}TEBXo~;jM3Cy`oP(oMmsf`ZGr-Hz(5(4$$t4W7-*Mgp`~EpfS&X@
zwG%hmA?@V!H2E0P5TT&_=>4U}B9QXz=wVtE1DU3uo^_7@{$25Y!@zycIYZ%w7lvuE
z0pks-(*)ygbGUZ~aWa&yV@V)qmBN(Iw2|?6#
z6CgWS*@4Z(ZPARv+D?MIKPguOV|;}4K(RBh5y?Wmw3J+FEX`st<}Cpr67&SthmcXm
z7Xt?ifk4Z2$HGXPI%m#CF2)r3b*&U2JAPF_Y?
zSxJ3%tiXMU71Oexxs`&<)UcUVVEd2I3#$0B_T>(X7IQ>oY9ehlI10K#D6LhNF*ANa7bS+Jw@Pz_;2+IWE)cCPlA~x}r#5vw?%>6QDvpI!e5)Jnj4|2ZI++6*4?SJ
zusR&w?V&B6iCb$gon1#5w}Pve$K_pCtr;zBDynlo%4McfIV-nRvrG18O_*=OG
z@HzPAHI^D%JYHohY3gfcEaqJ_d>zSVvCW`~&sh4^hcy6WeKWQJh(|XJ!1m5U4d@J|
za#v*4WlYGUWM7v)Y%%~FLkwI16p0l@G-9V*r7IyX%eac?35ifE3`?Tv8mN-75cp1E
zE}~;5DL8q=79W}*-C0y%HB#LSm;&h7jI4el1V$H7+bmvP0rB|kkd&nJBV*5_EBi*P
zh+5=@Y<2dX=eip~{}yoD9IrRJs~6zX^)xjBVP_J6ijtuBp>j)tZqjvvX>)6J8x*^Z
z+_GU-SEa>&BcQ6a4@*P
z`bY7^8WJ4cS}WI_?tokmMb@$(!qErEVdE!sM5R+j=@SSUg*Ie|gj7rlvg0^hiXCKo
z1}kBVRVK}#EoiIS+q~-VtyZS8kC1Ca!6P%Ij7a7(4H;E)Y`Du@!po#G4W{msKd`a~
zElLcV4{65ybgyiCtF1tw{aZzSHf|*ZgB!vLw%g4}0#EIRg|=;WgMtxsb$O?GqJ*Ek
zEi9Df7qX3`DcejXprKqpjD-=F>BWKsiN-HkHUnYGp?Lk!m2UC{c?0e!Bxzjb7uWg8
z)mgr|Nsz2Sn7K4%rGMp2g<2XV9|*K%57QT_*bvd_->KMCZ!Y`Yo>`t&-eYdztqMG>
z@?ekhGC5!%CkQk<;>vwSf$!EPug8-bU60XDkk>J-8d{o84toX*p$TXqo5em5HzwhEkS{n8JM0p6WQp$xF4CY@j~D7K)i>pvmS`|
zsSOq3(llOnp`%Pe6_QPrnhf+k>BcyDxt+F75+*hs*5R4Sas!
zLH6_NVa+4G<=ahr`mD--usevYfYHmaC($ZWxj+OH9EvDbtTX`$DWey(gQG#xPG+gC
z?0~DfU0RKEuBCi!cdLAc?aWiMMx2+q7tBpica&Ugz`V%vUI0p;&zv?Ib(xQ(IH#*~
z&!B{K%et@yk=F7^X)sFgWX>vbQ7EsrLF?73ApgjRG2G|dEZMk+nIQ+qz_=iI7V{a@
z>85QU`b<{{<1y=xwDl}Xps{8I7z*@r{b5UdffZJn+-TO9kf@9k#c*4=X^(&RPf_2Q
z)8rZ?Vq9FBMT~2yYj+@sN*wsZ=v
zWzLeY;~#S$rVU`T39w{?YmyIAO^(cL_U{7s`IZ7V1>0D564Z^l+
zYrcai0pHq*Q~(EjEboA5IH_
z%t$#BtH3o1H?O8@5elI+bwy>QNqaF=Nb1+|>HM08Y!P>b6$1=cI>@JCqXrrHk%z_L
zipoU+smD-U+}@Zq^DTe?gVBKbsHC;@!X<>nB!t8eRG80P+8Zd_GLxd_9KkS}oX1u8IJcg01$Y8@pZ)v~smx;x{@
zCH2~p_RTxuHElUZI-fSA3+t9$*_tKZ4a5l&!E{|#t7D2-1Q6V(;b!|gbehFePve23
zsTR_BbPsnB5rY&}2oSDng#dNtaVgj@8cik^4RFRwu5N`rQcai)8jJ5rqj%MrlCYWJ
zCz52KO*3oi`z1<<@Jx)8aEhy(DBbY>u02NDhsypHzmEIIj8C6qcI3#Jbin$Htcm9P
zwtofZ1?NFz3wMjETvRj=uPj
zN8(@Xt0y5!acU*0>?~wGC{TcF5z-2jCSIC(Rm#W6
zkY-0YT-<;yXz*{qXmnQpFIP~ei>X@lmy}cwaAk0pu1c@DD%8!!bQD@
zSBUJAzNy$BWxk3kaFMbnfi#PkF78gb-#83R3=KFuXfYZy&5TUCNVZ)R>T%HS#Xw?P
zoY7)ZSW#w$Y4_SaDrm{Tx2tlGk}Tyu@ZV0Fvh^g(mk0{8aa#@CXRP)=0lx`)ojh+w
ziGfWha06@y*sWkzhfA~~G8%i`;Ek1bk76(FzI7gVP5Lh|bJ9v+5CgK#1*5gUc#t(3
z9csTp$Be?IZ2PSS>3I@D{}8ZFlrGMxp*IOAFf1N-8IzRyc?PjNEYw;jNCqZA#nHYs
z<0ZZZ@{quRpng{sDjPDkO47PzfXR5$W{hEX_1*Am?S+`53A-S>z#6hL
zv@KV#p^$%Ka!bKdDoqn>G&sXF36nx~GXvE)bXCY+Z&rpPhYH!Q#Oku90ELC}IB@^&
zZ)L{Av6Ax;pXq&PuKD8d^*M1q5)Ai+cOSzVenY7lS)v!IOz
z0X{^U?JUg3D;I5X+-c{Q<*o`#xy3PH32A=PZA`*1Fu(QHqeNYp&RE_yr^v{Ld=Nl&
z`Jk+T7eF}p-lfU+zi68bmI(}B(oaI{WqUn|s~!WNj|t`Sp|*>~6lWR2Lq`#i*5bGP
zKAv0~rks5tM1d+L5b;s(m279@OYICH6j#}BI)}vO=2BA>XphC>Yh}@x9WpVuEBez}
z^k-LiUv_%z&)OD`ecKSKHTP7`lXh@h%@$^DaZh4mcYPp?62B`VJoNQX+u~V`$*mzW
zn$bg*tlNCavi4t6w*{6>%yb(?Lw~~jtlSZ~U&92zq1x7UVm(9G>A7M(t>=}tToO}Vg0d^<5IN#B=8Q>7W!7s7
z(A7c+rC}Fgm67uA$k!&5QuwqCD$C|>M0{c&%U?ZAlR|8*snuqPHH~UMf}7ZT4N#ON
zW&l)sGI+C87UCV7!DEV2@tuLJ$9-a*WDJT8pGGHyz|X7^eT!LL=x+%ppUUrTI)y@=
zi2DR9I$XFZq_a;GNr?NL5RecycS!ud{&~{)3HCaR_|$tuT}Y@6@AnDSX|!m&6w41J
z&uK2EB`4(e%q)7p?N9NH*M&9LG-3j?HA4$0#jrCAe&?5F^Y8l7zx~m8M$Ji5u`;Mc
zZ>=ITWou3N19N_v%;F^PHN2bpc)b6p^}qOBf|p3y2xO+tPdEO;f8gEH=Nu(E9qU}S#BUvQbFms$Wmw7(D};jA
zJV9r#_uqMfl@?%s?1}iJ(Hj5Wzr?TOGuQtmel3?j{v|$Y#uc~dY@AJ+$#r@Nc;(oj
z<~Ey~?DZf1Ydj%Z>CgXbd`j=1iwf>XcA#6=OSL~4@7a5s!Tu!PwT@{+=h}w?yCflb(vFN00j>pMrppi>!$38$4BGnM2MrvLK1S+
zy!q+)2^;r~Kk);{*7$A^iINvmp3ye(&e(-8-I(*YpBS9&Xq`*7$$KQz`fAe^|Mb
z{}I2gxt#beP!ndGGjU}xaqd1cQkO>D%Z#T_uT9He{?qu-cw*NHDKEYv`vVd7P%ARpZ(4c
zMDBzAKl|OnBpsExXLkBB)Eet2#O{dUzC6~S9lL*npj%_N|3vJxl4ML(k>f$P+2h
zaueh8*7_fo+%wS^{iYT-W$aaL)q;&S^zJca!L;IjPQK(%Yb9$qX?5*wkNtD8+=XBE
z%t~XTtL@ERn`QcJX1V=|>}^vk>>;32?`XyfPi%WiWnx;mtkq4~r}={_Wnm~^ZmO?`
z={e75b$-+8_KdFb?QL!fml6^peobq4GbwU>JD9n~
zf4Uv?Ugf{o?hfVMPurOL4_Tw_WgCf`Z<2zir)&PetiU0BV4r(hCj^j)Ji9(WG
zTZgStVx>xOoLBm*LNd6U_iXyys?DTnfzdUxJYoS@w&(y_oY&uI({Jx^V*v?RM*9h!
zq&{5br+2zn$6K!UALw)k^7E=r>&fk%5bwJy{4YA)C*!-8__uet{o;F<_{Cjr-+1#9
ze`}ZPpZJwJEZB{5Z`29(;1a)`646)uP2FyMyk&{stJ}RYKF9lace}nrhOhWJ&*^Qm
zlG!<%V1{WCtTiR(Cdpd6&HEp86QKHl_m6eEL+H<*BMj)HM!46}g$wlSTEBXPJ3Kz;
zdH<&oZV#T0>~SBBuJns~tmySU?iJAO$33pXyTA6hx!_>k!HJ8bvENx@;Uu#w#>|N(
z*QoOj|K^cy@A#qh{*00CgzimDe?YYnq~G@o|J#x74SKcjD0f}F<$nKD!cU`z{J7D;
zbD96w(e71%V
zvXDN=arYV%5lTR7x^*h3?;If=t(dO;;i4q@YHw#RKfxJIe{$MWK5#dg!y!ooNWt&TFbApV<7&PGM9u89ymW)
zf!3!Tb#)W7O=mUYH)jJ;SgT7hF+0p*B&Tp(P&h<^hYGZ-ouoymn7v-{gJ?@C3Wd{$
z6OuytaY2+fqWYZEIJEv5s$Q?jp=lEQp(Z=KXP;NitEd^};ONUsBURPmj^V3F6|0Mq
zA2CVJampK3Ab_)jEfE&{*nk+AV4*6yS2#7^s|D2Sv_Q7gPtg(*CQQYt0Xo3;>=BpZ
z{Fw#h&6_}EDeYQ7DTgA+JaYoef%7X7#Fzcm#nBOhhR6$I;PH#;hYJhZBBDWfV7at_
z5^eL7m5`>Xy40RP6~n0i;<)mJmbCYj`N>BAt_g0hypsGax5U}8^&Dx6OEfg+2PZf}
zBSOUr4S`4M6AK?&QqzT?ISXu&o@LAYhyZ$hNHtP+GIs~Ul-x&D
zP~!@F2<>w~n7BoHjKB=+wG)`D1X=hv3>yGlZ?IH%g0#R2vwv!$>#4~kV^vtjv^Eti
z;!zN?DjjmfwjnNHVfJzdj+`=qj_ME!zKyh~&c*?SfJh4gMAd|V?sV%|Rn}jJ5Xm6z
z=Xs6gZl^VF$U+BWN@Y}O=6gnt5Fytv6mHtxr+^d+A1F3`0E$h|Kkx$W7m^#}BShYZ
z2xvjj!J>t9Q-g2sGPcd&=nJ%LJJ^-!Ir`SVTsHf3vU|GsQj1AB9CIFe9a5ny$x{EO
zDQ;}>vQm1lKY5Cq(e(ucxUB)D&;H(jX^LCba?4$))+_v*%kH>|7k_~{_bDAcUk-R8elc6*c;UP=iR@kR;I1#$U{>ci}L%?y;9Q(ocP*WQ1xN=t(cD8rm|)(hcqQl$q52
z{bhdS)Ow4Zi>2t3Cz+mmxj(k?+|Sy>wYPlba$0ToY1Q@mtEW)js7)z)0s<3_)BFEp#{10q}h
z&s1|xP;p={!D^;CVoc`CNzJ}(6Kg0$HeGY{*D=tAcOJo8Nm3gD)l#4GS<%PjP6`1
z6ry0J%U1jSUcq-)u2!!abm`xy2SdJVW2oMQ7qL)3-UN#E}CMx^U+3Pk$aO;jBdh_p%fyAY|V86(oxn?$5uzj<$0%&}mUdRoN&iGH}oA2Q3mDtKEO{#t5IZ>}j@NET4`O%c6+vr6sLTuK9^QL4cW17Go#{Tt9p
z#>Urt#lLg^99u!^#rJ)I_|`$q-*V}8!yzhX
znBKBBE?llQ4vBwwh5zIMEOS5QS03P|Px_{KXj(D_r9w8H<;DOl2NV>NEBs>zxPRN@
zTWu9vN01G+td!f>QcR`Bn7)?Y;Q!-5H)i7QUSl7j*QBfcASI4iR-!S~%%P2;t!0hn
zr7uCwYhuqFKPdnDy8r8e?n79U?|r2^zV_VB=Oz_4F<=+p^nu^QoN-zr7Cyc0XJ_kP
z`{|_Lxcxlcx3E`uy5c|4B~+%nGyT$?S>3MA^uM-dWjGRR@F}}55saSx@w;~4&Qe(V
z*e|pDw$AkNjduU$b~0PjZ8v2v*u~}ME3^AjXS(E<_JMYdtQ&uA&pR}(p8bIV(Wb#U
z_&o!<4Sb}JKIR7wa^w4cxhU&+@GoOmN#94Ex%nVB^{^{1#^tmZXvyA6^#V`nqbn{Y
z?jzaI)_4KFj5>W#6KB%Q_T_p7M0F!%a_fk;=0lA07&LgAwoSIzMn`ZFi@oN{PcwWVK=j`f4bOr99s>2E(~@?rk!*SaHK{Z4A*
zz|6p+Q&r*GV2ey4xBV<^p-4Gdeb`dkn}daXmu~e_UgxI!d9QO*#?<#_kLJyX(@RjZ
zbd3MZ>)b+
z^bm^pTPIBJk6QeD4=j%ExHH>imUQ52Du6AEAkn30
zwrS2$!_ZFscp=(Z`tR0)+#m~Oh)pKzkPGS|CbvQgbU?9!Ufv6S|lH1SOS6A
z*7?)ctv|_K5mhV>7sU!!U^_dma9-U(@yWo87cb>d@znc|yx;vtd%c+7{y|jHf4<+f
zp$WBqz+F5edxg)i%NigFeKwV6&rff-A3JyiOS7c(Yx?;12VCDCx9MRBtJZq$QsM&g
zYA0vkyyJfV+y~sh$5eUm$*wXsYo{JRoHfV036oEx4#1~bv-L8H{|xmf7_@D7=)0xl
zL%#e0__z0iZq{ou$nl2OyPh|<{92~KQ5O82L0K=7LwTzo_@FykK>qQAt~xfWu`>X?
z%K*4lO%`(i-ap^X?yqB2r-BF6XDGPS-#j0eI*pHC;HDw?k67SNoZJ{p2teApcx9e>yTEPUevjyK7m+sy7)
z_wmFgXvSW;Ie)!Xmf|*cpxx5^`l0;wqk8=$uOIOzpXy#ccI5-)xRGEw>fm^(te~q1
zuUPwl|JJFlyvGuxldZL{(wkd&vn+q2Pj2AB;60b0=8j3Whi@rrUvIYg|8trahkibCeI0K0qYlAOi^G!X$8rYz@iqL@D}3bE^1{eyVYfx{9)phLOR_tGZey*
z*IE|mMbmQqZV9a}2Hfd!q1qv^ikfiQ5jNbz;CW~4bBCxep}YF6Z_Wu9wnMX@eY7wT
zid07*>=px2nd@;p&JiX5IioXO8@?O9v@&nDjRV2_n?C9$)fxcnJNke;O6gS(ph>@E
zw4~}`z*~+XT?Ln6>Z%86ZMsPv>w1o@O@-t0dJJ<_5pE8t-!)K3Pa5shPnFG(LPzxr
zoaoZ#4?o=<&FQqHu8TW2v2<2Um-(fqyS+s5yi+c{uSIX5IAjUDa+f`U@L5y`d}U9d
zfBnfVdNKnhkV_d}4?h><-qp`7Rvv8B+kD8Lh+U>XSL-OK&;>-j8XD?_7V60icU)+5
zN%%DZku$G7SaOel;^S`8$Xlon#8sOo
zT)OHfe#$41z)fR(s~X-$ea84ZKH(1NU7vq)p&Td|v=dSD6aS@8xcz$X%HLe2H_Lf*
z-%tE+KHng1ft2I2$AbNLe^KMxlE)Gz;(du?s$Pn$&I1^h6HxmER+<-eZO
zuZ3*pJ=k1$dH#B-Ozo?9y{h^3N*=6ley}z#KS+7TK)jx-fB&al^2WQG%WcTZtx~(2
z@?Ur9*L^g)xw-Iz`RmPky@l5gt^4k$-I8eZ*8I)ZlI9qs|2F^rGu)BMlc9bfc`1F=
zZ#ctMkK2(KU)V}{Q~V##B=+HH-~L&5Y&6@N+*=}6lSmO`
zEo*Q?U8QH=GjfglD;=u7-Y>2ZEb+Ae?q}UmTsmvaqyO_(zEQ*QdD=hwvRWZG2F*~g
zd~(25sD9;uwX$`W8f@yw%1D8ScD|5Sd}lDeq1qT6$-^O^vZvc(=j+u5bD_NQ~LhnJo(Z&1Yec-|7GJ
zEO+1`4^yzrlJj86BtPcwE}Wo<5G-_MX9`@lsYPb`IcK{u^P1Qrhgxc`K646MFc;&7
zcm?Y?{5Uzl-{6aCd+6Uu{_3+0m3N-)=68z~as+Jhhn(Z4^yH6NbEch5{{82;%E*lo
zrxx&YLsY@FAc7pT2-WOqf9fK9ygxa|y_@?#EOJ%#&NQgoiH
z?A`PwL;d;)7ixKg))2c1fz?j27AfADV&0tRK1FNaKhLd>aSng%bM8Qk?Jy$;Q-i}7
zg>(G5>$w`W8C>eJ865O9`3*1jjZPve!*F;ad+fBG?w-k
zD^{GRd}DXcmfv9xk$_vb?&+*tEYc!!7mJGETxs^Jv<1HkY{OkCdn1_GUxq)i(pz#Xee=EewH`uI#!Bz35c8o9Ccj70geVrQK-762
zZS$=cxs&!swI3+kpLr~`3VUqUll&pvDgLl*2bKdO%d@D%$shceE@GYH4}RT680mlT
z4`1ZAL>>ONi`|d;b-@z%zEONFtWZiM^R?tpmw&;Xu-9Wz=62<2QD6~e@@F_=R>5pt
zttjKsdfcD&1=mZYCw1Pwdl$nKKDxxcIr@t~{t`m9p71AM;@%QJ9{G14HR@Xb?n~Vp
z6n%OraYax1KVRw=;{4@O^_MMmlRCGo0zbiv;cs6`4<29l`=yxi(c_n2?*7I89=mS&
z@PJHqkDJ$(~iKjXz%E&!OZ+%O#HkN9%yOyYCD{U!HkxE$h_T-~}l
zzrW{d_d9U7cF>IiyPoUY{J3lJ*gd}P!E4>OqgS5KU@eOkxpBe!w^tsdRE=QjXf8^+
zMyNss(zfL3s2ush^gT{^f}*u
zz5CbbI{(S*-7#G5y51ehrE`Uw$K~xS+*{|3_;6(mGBsLI9)DV;&nbptyiyTLa|g~|
zylms(S?9D8zdoDCW9H+)=p#kEJmSsT$Etk+v~597us?Ts#617R3iqno^~#Hrf0P)l
za9hONklnVVt@&*Mi#>bF*v)T?X-j@vYLtx{WlN0$1YqQ4UA^qlIN;AeH2L|HMi+I`54=3B4HtK)nl1wVS$tUn_6E2?_QzM
zHSk#PO%s}$;0}nE$E^kNJ%D}_gMvC>P2X+8coVoyFgEov`)p05Y=A?
zlC_m4XomYjQ&V;NH+?{#&GFv^deg^3H-pPC?|Re6npp4!>nq4@yq*8!=EQ7@oj&iB
z`#Xj}sZx3TG3=6Ey3y@XgZo>={U{)CfA{L@Y$7NZ;w8YzSDsz&*6?7f2?hft=JhXV
zly11Aj&EW>!ehIt`((DFxm+-
zMW-WX`?ys5U=KXnJo<RY^P>OUO>RP+G8~izYG?-TxFJJ&t1ty;H_;oF)iRt_jP8h2)>~gU8mD}<
zWGQz5-Ru8xlj}QdG}!=Sk{d(DN44$4)v?5YRuj_Quw+lKPU7{Xd9sDt(zA;5sudln
z+LjI!kEvGuaccAb2Rp*2m{C^h8hpDp=PhJ
z*P5SK`qK!bjZ`xIKSn9kTy@NOYo*5qJCoRvP?BBkYvO#Q$bR$bt1)G@n|WPIEgA;9
zQ_&sLU-#p-+g3w&h!|7Y;dnno&JIJ)Vybs0t4h`G@)Wj*Wn&Hd90||bZ?e16BW{Qh
zGmPH$^4MdE0j;kpow#p_7Qn~<8X?ctteVkvgK)FTKU8ucJ+sl)VV);e9kbvXW
zTqn)1_J;Rv`)s;Lo+0ZntbIq7ap19B9<9xBZO6QEJS)=%Ir$6TXwC4IP5$Av%-)m0
zM!kw!X=jR0sxc{kpr;aOV$JhH%jtRZ9B*#&m*_7cp=t?(~W$)(&m5KD7
z^8}Bw5j2r4E5T{H%k57C<%ySn&5e75It8-8U^!U;x)>kesHHq<0Ud0q^ct$jGU_T@NqC4CS
zYAo*VEANffYGbvvK2{r|seeIb@3OWTVMOzu!3`A5qjo#4GTKH(YODm;sU5J)ll65S3$3txoi}f5Q>>?6-cyjX{19
zdcaOSCNKKSv*d?{Y60n8+H->ej}(vuK)J@(zhyrloCvO^Zj|`&M9nvhGw?3q0c_k(
zOm2ep$aNJLce0NGgc=Ksoq_^5j_K#n;mU-59*t#iK?u;z{_u=hio=AJet+>dU4Lu8
zI?6nE^Ech3vi)F7)Ka=-zxm0U5{Id=W0@U_D}g)K|CuUdt>f(FVK8?91YJmO@>ABi
z_e7I@ZJpaAVIl|+hw(hw-?GllXkTkjOXbP_zt_2`wVUZdcX={Uop9=qoPi!^HHX}vv$;S2pvYtjU{QQDjiIu(1Uwx}P
zaY{BCVGJnOi=6CDW9@DjriNZW`8GE&QX0jAa`NG78{gvpF@DW$ZZDen*=_Eqo;4+W
zL){AqgJ;Ji_;%K}+^5)1sSJx#B?r&1`xcSG*ZFPVa<7fP=4WnjQ
zU-HrUjHuR1XOIoOLTs-;c>_Ukqy3@{yAph~zioq?(YhAhhb@ZyA2+zEM?mfzKWt0X
zkW{EuG}n`UHWOB@N<(34CGAXez$Tjmdi*iByTeW%t=x3Yq##E}thFydovI5nn-(R?
zG%{MX=}@-sfyja^
zp)sp)NN^1y!;`1|@priXk(l&_vhp|eryJ@a=8akiX%A>l~}p)A|nktJNy@IcE-E=9OmC-r6;Wv815
z1mzlWX;_(t*?KSj|pZDx?D;rzEBo
zHBCy3vR}a6LGelIr{#3vEIv{6Q2+>k@nt5Tqx7u8slq4Db{?{_soRr7iWKf}(mOy|
z?+|ol!1#IJaVORY=Zq5&SV&i~Y7{4z3jh+Vh3bj8eA4^BcTR#s!`r78M&Vln!eaKZ
zPwIEh(?RY=Eo`()HpPC}$OxMoPe@#*#=taVk&%nKdXLUJ%ia`K2dn
z&JiqmoAQq5ZGQd70}9#*FfBKj@^%O)pq;B|hd3K{I%Ke_mPp!Yt=o1zY$Zf<%qF~_G6Ne-WeE0X<
zxHnpr#^4V-fNdW-_*ok>yx8F9M>BS;yNqqak^l7f+=N5xa6_j@h4&|IG~hxH30FIY
z861H0V?a9ke+yDhFn`APT;EB}&^#w-IQK?4%%NG_w54CD8HQ?E!tw&vLIZ1*0}7@M
z>HLr{O>F+s1>(D