From c746cbc686c46904a5d381725079a69e38b201cd Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:06:25 +0100
Subject: [PATCH 001/294] codegen: move gen logic for typed values, consts and
decl ref to common codegen
---
src/arch/aarch64/CodeGen.zig | 6 +-
src/arch/arm/CodeGen.zig | 6 +-
src/arch/riscv64/CodeGen.zig | 14 +-
src/arch/sparc64/CodeGen.zig | 6 +-
src/arch/wasm/CodeGen.zig | 2 -
src/arch/x86_64/CodeGen.zig | 216 +++---------------------
src/codegen.zig | 307 ++++++++++++++++++++++++++++++++---
src/link/Coff.zig | 2 +-
src/link/Elf.zig | 2 +-
src/link/MachO.zig | 2 +-
src/link/Plan9.zig | 2 +-
src/link/Wasm.zig | 2 +-
src/register_manager.zig | 3 +
13 files changed, 323 insertions(+), 247 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 818b04f890..23f458f910 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -41,11 +41,7 @@ const c_abi_int_param_regs = abi.c_abi_int_param_regs;
const c_abi_int_return_regs = abi.c_abi_int_return_regs;
const gp = abi.RegisterClass.gp;
-const InnerError = error{
- OutOfMemory,
- CodegenFail,
- OutOfRegisters,
-};
+const InnerError = codegen.CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index ceabe70438..87806223e3 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -42,11 +42,7 @@ const c_abi_int_param_regs = abi.c_abi_int_param_regs;
const c_abi_int_return_regs = abi.c_abi_int_return_regs;
const gp = abi.RegisterClass.gp;
-const InnerError = error{
- OutOfMemory,
- CodegenFail,
- OutOfRegisters,
-};
+const InnerError = codegen.CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index afcf4b0bb7..fad5482cbc 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -21,10 +21,10 @@ const DW = std.dwarf;
const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
+const codegen = @import("../../codegen.zig");
-const Result = @import("../../codegen.zig").Result;
-const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
-const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
+const Result = codegen.Result;
+const DebugInfoOutput = codegen.DebugInfoOutput;
const bits = @import("bits.zig");
const abi = @import("abi.zig");
@@ -35,11 +35,7 @@ const Instruction = abi.Instruction;
const callee_preserved_regs = abi.callee_preserved_regs;
const gp = abi.RegisterClass.gp;
-const InnerError = error{
- OutOfMemory,
- CodegenFail,
- OutOfRegisters,
-};
+const InnerError = codegen.CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
@@ -225,7 +221,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) codegen.CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index c8f77fe702..5a108eca85 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -38,11 +38,7 @@ const gp = abi.RegisterClass.gp;
const Self = @This();
-const InnerError = error{
- OutOfMemory,
- CodegenFail,
- OutOfRegisters,
-};
+const InnerError = codegen.CodeGenError || error{OutOfRegisters};
const RegisterView = enum(u1) {
caller,
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 2f191fd834..511a10769e 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -733,8 +733,6 @@ const InnerError = error{
OutOfMemory,
/// An error occurred when trying to lower AIR to MIR.
CodegenFail,
- /// Can occur when dereferencing a pointer that points to a `Decl` of which the analysis has failed
- AnalysisFail,
/// Compiler implementation could not handle a large integer.
Overflow,
};
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 53d38f520a..2ec1a33619 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -40,11 +40,7 @@ const Register = bits.Register;
const gp = abi.RegisterClass.gp;
const sse = abi.RegisterClass.sse;
-const InnerError = error{
- OutOfMemory,
- CodegenFail,
- OutOfRegisters,
-};
+const InnerError = codegen.CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
@@ -6683,7 +6679,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand });
}
-pub fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
+fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// First section of indexes correspond to a set number of constant values.
const ref_int = @enumToInt(inst);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
@@ -6752,200 +6748,26 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV
return mcv;
}
-fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
- log.debug("lowerDeclRef: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
-
- // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
- if (tv.ty.zigTypeTag() == .Pointer) blk: {
- if (tv.ty.castPtrToFn()) |_| break :blk;
- if (!tv.ty.elemType2().hasRuntimeBits()) {
- return MCValue.none;
- }
- }
-
- const module = self.bin_file.options.module.?;
- const decl = module.declPtr(decl_index);
- module.markDeclAlive(decl);
-
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
- const atom = elf_file.getAtom(atom_index);
- return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
- } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
- const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
- return MCValue{ .linker_load = .{
- .type = .got,
- .sym_index = sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
- const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
- return MCValue{ .linker_load = .{
- .type = .got,
- .sym_index = sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- const decl_block_index = try p9.seeDecl(decl_index);
- const decl_block = p9.getDeclBlock(decl_block_index);
- const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else {
- return self.fail("TODO codegen non-ELF const Decl pointer", .{});
- }
-}
-
-fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
- log.debug("lowerUnnamedConst: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
- const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
- return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
- };
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- return MCValue{ .linker_load = .{
- .type = .direct,
- .sym_index = local_sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- return MCValue{ .linker_load = .{
- .type = .direct,
- .sym_index = local_sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const got_index = local_sym_index; // the plan9 backend returns the got_index
- const got_addr = p9.bases.data + got_index * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else {
- return self.fail("TODO lower unnamed const", .{});
- }
-}
-
fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
- var typed_value = arg_tv;
- if (typed_value.val.castTag(.runtime_value)) |rt| {
- typed_value.val = rt.data;
- }
- log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });
- if (typed_value.val.isUndef())
- return MCValue{ .undef = {} };
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
-
- if (typed_value.val.castTag(.decl_ref)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data);
- }
- if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data.decl_index);
- }
-
- const target = self.target.*;
-
- switch (typed_value.ty.zigTypeTag()) {
- .Void => return MCValue{ .none = {} },
- .Pointer => switch (typed_value.ty.ptrSize()) {
- .Slice => {},
- else => {
- switch (typed_value.val.tag()) {
- .int_u64 => {
- return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
- },
- else => {},
- }
- },
+ const mcv: MCValue = switch (try codegen.genTypedValue(
+ self.bin_file,
+ self.src_loc,
+ arg_tv,
+ self.mod_fn.owner_decl,
+ )) {
+ .mcv => |mcv| switch (mcv) {
+ .none => .none,
+ .undef => .undef,
+ .linker_load => |ll| .{ .linker_load = ll },
+ .immediate => |imm| .{ .immediate = imm },
+ .memory => |addr| .{ .memory = addr },
},
- .Int => {
- const info = typed_value.ty.intInfo(self.target.*);
- if (info.bits <= ptr_bits and info.signedness == .signed) {
- return MCValue{ .immediate = @bitCast(u64, typed_value.val.toSignedInt(target)) };
- }
- if (!(info.bits > ptr_bits or info.signedness == .signed)) {
- return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
- }
+ .fail => |msg| {
+ self.err_msg = msg;
+ return error.CodegenFail;
},
- .Bool => {
- return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
- },
- .Optional => {
- if (typed_value.ty.isPtrLikeOptional()) {
- if (typed_value.val.isNull())
- return MCValue{ .immediate = 0 };
-
- var buf: Type.Payload.ElemType = undefined;
- return self.genTypedValue(.{
- .ty = typed_value.ty.optionalChild(&buf),
- .val = typed_value.val,
- });
- } else if (typed_value.ty.abiSize(self.target.*) == 1) {
- return MCValue{ .immediate = @boolToInt(!typed_value.val.isNull()) };
- }
- },
- .Enum => {
- if (typed_value.val.castTag(.enum_field_index)) |field_index| {
- switch (typed_value.ty.tag()) {
- .enum_simple => {
- return MCValue{ .immediate = field_index.data };
- },
- .enum_full, .enum_nonexhaustive => {
- const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
- } else {
- return MCValue{ .immediate = field_index.data };
- }
- },
- else => unreachable,
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
- return self.genTypedValue(.{ .ty = int_tag_ty, .val = typed_value.val });
- }
- },
- .ErrorSet => {
- switch (typed_value.val.tag()) {
- .@"error" => {
- const err_name = typed_value.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return MCValue{ .immediate = 0 };
- },
- }
- },
- .ErrorUnion => {
- const error_type = typed_value.ty.errorUnionSet();
- const payload_type = typed_value.ty.errorUnionPayload();
- const is_pl = typed_value.val.errorUnionIsPayload();
-
- if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
- // We use the error type directly as the type.
- const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero);
- return self.genTypedValue(.{ .ty = error_type, .val = err_val });
- }
- },
-
- .ComptimeInt => unreachable,
- .ComptimeFloat => unreachable,
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
-
- else => {},
- }
-
- return self.lowerUnnamedConst(typed_value);
+ };
+ return mcv;
}
const CallMCValues = struct {
diff --git a/src/codegen.zig b/src/codegen.zig
index df7ceff1f0..245745d6f6 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -29,13 +29,14 @@ pub const Result = union(enum) {
fail: *ErrorMsg,
};
-pub const GenerateSymbolError = error{
+pub const CodeGenError = error{
OutOfMemory,
Overflow,
- /// A Decl that this symbol depends on had a semantic analysis failure.
- AnalysisFail,
+ CodegenFail,
};
+pub const GenerateSymbolError = CodeGenError;
+
pub const DebugInfoOutput = union(enum) {
dwarf: *link.File.Dwarf.DeclState,
/// the plan9 debuginfo output is a bytecode with 4 opcodes
@@ -63,19 +64,6 @@ pub const DebugInfoOutput = union(enum) {
none,
};
-/// Helper struct to denote that the value is in memory but requires a linker relocation fixup:
-/// * got - the value is referenced indirectly via GOT entry index (the linker emits a got-type reloc)
-/// * direct - the value is referenced directly via symbol index index (the linker emits a displacement reloc)
-/// * import - the value is referenced indirectly via import entry index (the linker emits an import-type reloc)
-pub const LinkerLoad = struct {
- type: enum {
- got,
- direct,
- import,
- },
- sym_index: u32,
-};
-
pub fn generateFunction(
bin_file: *link.File,
src_loc: Module.SrcLoc,
@@ -84,7 +72,7 @@ pub fn generateFunction(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
switch (bin_file.options.target.cpu.arch) {
.arm,
.armeb,
@@ -120,7 +108,7 @@ pub fn generateSymbol(
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
const tracy = trace(@src());
defer tracy.end();
@@ -823,7 +811,7 @@ fn lowerDeclRef(
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
const target = bin_file.options.target;
const module = bin_file.options.module.?;
if (typed_value.ty.isSlice()) {
@@ -880,6 +868,287 @@ fn lowerDeclRef(
return Result.ok;
}
+/// Helper struct to denote that the value is in memory but requires a linker relocation fixup:
+/// * got - the value is referenced indirectly via GOT entry index (the linker emits a got-type reloc)
+/// * direct - the value is referenced directly via symbol index (the linker emits a displacement reloc)
+/// * import - the value is referenced indirectly via import entry index (the linker emits an import-type reloc)
+pub const LinkerLoad = struct {
+ type: enum {
+ got,
+ direct,
+ import,
+ },
+ sym_index: u32,
+};
+
+pub const GenResult = union(enum) {
+ mcv: MCValue,
+ fail: *ErrorMsg,
+
+ const MCValue = union(enum) {
+ none,
+ undef,
+ /// The bit-width of the immediate may be smaller than `u64`. For example, on 32-bit targets
+ /// such as ARM, the immediate will never exceed 32-bits.
+ immediate: u64,
+ linker_load: LinkerLoad,
+ /// Direct by-address reference to memory location.
+ memory: u64,
+ };
+
+ fn mcv(val: MCValue) GenResult {
+ return .{ .mcv = val };
+ }
+
+ fn fail(
+ gpa: Allocator,
+ src_loc: Module.SrcLoc,
+ comptime format: []const u8,
+ args: anytype,
+ ) Allocator.Error!GenResult {
+ const msg = try ErrorMsg.create(gpa, src_loc, format, args);
+ return .{ .fail = msg };
+ }
+};
+
+fn genDeclRef(
+ bin_file: *link.File,
+ src_loc: Module.SrcLoc,
+ tv: TypedValue,
+ decl_index: Module.Decl.Index,
+) CodeGenError!GenResult {
+ log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
+
+ const target = bin_file.options.target;
+ const ptr_bits = target.cpu.arch.ptrBitWidth();
+ const ptr_bytes: u64 = @divExact(ptr_bits, 8);
+
+ const module = bin_file.options.module.?;
+ const decl = module.declPtr(decl_index);
+
+ if (decl.ty.zigTypeTag() != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime()) {
+ const imm: u64 = switch (ptr_bytes) {
+ 1 => 0xaa,
+ 2 => 0xaaaa,
+ 4 => 0xaaaaaaaa,
+ 8 => 0xaaaaaaaaaaaaaaaa,
+ else => unreachable,
+ };
+ return GenResult.mcv(.{ .immediate = imm });
+ }
+
+ // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
+ if (tv.ty.zigTypeTag() == .Pointer) blk: {
+ if (tv.ty.castPtrToFn()) |_| break :blk;
+ if (!tv.ty.elemType2().hasRuntimeBits()) {
+ return GenResult.mcv(.none);
+ }
+ }
+
+ module.markDeclAlive(decl);
+
+ if (bin_file.cast(link.File.Elf)) |elf_file| {
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return GenResult.mcv(.{ .memory = atom.getOffsetTableAddress(elf_file) });
+ } else if (bin_file.cast(link.File.MachO)) |macho_file| {
+ const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
+ return GenResult.mcv(.{ .linker_load = .{
+ .type = .got,
+ .sym_index = sym_index,
+ } });
+ } else if (bin_file.cast(link.File.Coff)) |coff_file| {
+ const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
+ return GenResult.mcv(.{ .linker_load = .{
+ .type = .got,
+ .sym_index = sym_index,
+ } });
+ } else if (bin_file.cast(link.File.Plan9)) |p9| {
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
+ return GenResult.mcv(.{ .memory = got_addr });
+ } else {
+ return GenResult.fail(bin_file.allocator, src_loc, "TODO genDeclRef for target {}", .{target});
+ }
+}
+
+fn genUnnamedConst(
+ bin_file: *link.File,
+ src_loc: Module.SrcLoc,
+ tv: TypedValue,
+ owner_decl_index: Module.Decl.Index,
+) CodeGenError!GenResult {
+ log.debug("genUnnamedConst: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
+
+ const target = bin_file.options.target;
+ const local_sym_index = bin_file.lowerUnnamedConst(tv, owner_decl_index) catch |err| {
+ return GenResult.fail(bin_file.allocator, src_loc, "lowering unnamed constant failed: {s}", .{@errorName(err)});
+ };
+ if (bin_file.cast(link.File.Elf)) |elf_file| {
+ return GenResult.mcv(.{ .memory = elf_file.getSymbol(local_sym_index).st_value });
+ } else if (bin_file.cast(link.File.MachO)) |_| {
+ return GenResult.mcv(.{ .linker_load = .{
+ .type = .direct,
+ .sym_index = local_sym_index,
+ } });
+ } else if (bin_file.cast(link.File.Coff)) |_| {
+ return GenResult.mcv(.{ .linker_load = .{
+ .type = .direct,
+ .sym_index = local_sym_index,
+ } });
+ } else if (bin_file.cast(link.File.Plan9)) |p9| {
+ const ptr_bits = target.cpu.arch.ptrBitWidth();
+ const ptr_bytes: u64 = @divExact(ptr_bits, 8);
+ const got_index = local_sym_index; // the plan9 backend returns the got_index
+ const got_addr = p9.bases.data + got_index * ptr_bytes;
+ return GenResult.mcv(.{ .memory = got_addr });
+ } else {
+ return GenResult.fail(bin_file.allocator, src_loc, "TODO genUnnamedConst for target {}", .{target});
+ }
+}
+
+pub fn genTypedValue(
+ bin_file: *link.File,
+ src_loc: Module.SrcLoc,
+ arg_tv: TypedValue,
+ owner_decl_index: Module.Decl.Index,
+) CodeGenError!GenResult {
+ var typed_value = arg_tv;
+ if (typed_value.val.castTag(.runtime_value)) |rt| {
+ typed_value.val = rt.data;
+ }
+
+ log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });
+
+ if (typed_value.val.isUndef())
+ return GenResult.mcv(.undef);
+
+ const target = bin_file.options.target;
+ const ptr_bits = target.cpu.arch.ptrBitWidth();
+
+ if (typed_value.val.castTag(.decl_ref)) |payload| {
+ return genDeclRef(bin_file, src_loc, typed_value, payload.data);
+ }
+ if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
+ return genDeclRef(bin_file, src_loc, typed_value, payload.data.decl_index);
+ }
+
+ switch (typed_value.ty.zigTypeTag()) {
+ .Void => return GenResult.mcv(.none),
+ .Pointer => switch (typed_value.ty.ptrSize()) {
+ .Slice => {},
+ else => {
+ switch (typed_value.val.tag()) {
+ .int_u64 => {
+ return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(target) });
+ },
+ else => {},
+ }
+ },
+ },
+ .Int => {
+ const info = typed_value.ty.intInfo(target);
+ if (info.bits <= ptr_bits and info.signedness == .signed) {
+ return GenResult.mcv(.{ .immediate = @bitCast(u64, typed_value.val.toSignedInt(target)) });
+ }
+ if (!(info.bits > ptr_bits or info.signedness == .signed)) {
+ return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(target) });
+ }
+ },
+ .Bool => {
+ return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool()) });
+ },
+ .Optional => {
+ if (typed_value.ty.isPtrLikeOptional()) {
+ if (typed_value.val.isNull())
+ return GenResult.mcv(.{ .immediate = 0 });
+
+ var buf: Type.Payload.ElemType = undefined;
+ return genTypedValue(bin_file, src_loc, .{
+ .ty = typed_value.ty.optionalChild(&buf),
+ .val = typed_value.val,
+ }, owner_decl_index);
+ } else if (typed_value.ty.abiSize(target) == 1) {
+ return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull()) });
+ }
+ },
+ .Enum => {
+ if (typed_value.val.castTag(.enum_field_index)) |field_index| {
+ switch (typed_value.ty.tag()) {
+ .enum_simple => {
+ return GenResult.mcv(.{ .immediate = field_index.data });
+ },
+ .enum_full, .enum_nonexhaustive => {
+ const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
+ if (enum_full.values.count() != 0) {
+ const tag_val = enum_full.values.keys()[field_index.data];
+ return genTypedValue(bin_file, src_loc, .{
+ .ty = enum_full.tag_ty,
+ .val = tag_val,
+ }, owner_decl_index);
+ } else {
+ return GenResult.mcv(.{ .immediate = field_index.data });
+ }
+ },
+ else => unreachable,
+ }
+ } else {
+ var int_tag_buffer: Type.Payload.Bits = undefined;
+ const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
+ return genTypedValue(bin_file, src_loc, .{
+ .ty = int_tag_ty,
+ .val = typed_value.val,
+ }, owner_decl_index);
+ }
+ },
+ .ErrorSet => {
+ switch (typed_value.val.tag()) {
+ .@"error" => {
+ const err_name = typed_value.val.castTag(.@"error").?.data.name;
+ const module = bin_file.options.module.?;
+ const global_error_set = module.global_error_set;
+ const error_index = global_error_set.get(err_name).?;
+ return GenResult.mcv(.{ .immediate = error_index });
+ },
+ else => {
+ // In this case we are rendering an error union which has a 0 bits payload.
+ return GenResult.mcv(.{ .immediate = 0 });
+ },
+ }
+ },
+ .ErrorUnion => {
+ const error_type = typed_value.ty.errorUnionSet();
+ const payload_type = typed_value.ty.errorUnionPayload();
+ const is_pl = typed_value.val.errorUnionIsPayload();
+
+ if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
+ // We use the error type directly as the type.
+ const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero);
+ return genTypedValue(bin_file, src_loc, .{
+ .ty = error_type,
+ .val = err_val,
+ }, owner_decl_index);
+ }
+ },
+
+ .ComptimeInt => unreachable,
+ .ComptimeFloat => unreachable,
+ .Type => unreachable,
+ .EnumLiteral => unreachable,
+ .NoReturn => unreachable,
+ .Undefined => unreachable,
+ .Null => unreachable,
+ .Opaque => unreachable,
+
+ else => {},
+ }
+
+ return genUnnamedConst(bin_file, src_loc, typed_value, owner_decl_index);
+}
+
pub fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u64 {
const payload_align = payload_ty.abiAlignment(target);
const error_align = Type.anyerror.abiAlignment(target);
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index c0ac7e0b88..f210f2f2b3 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1060,7 +1060,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
log.err("{s}", .{em.msg});
- return error.AnalysisFail;
+ return error.CodegenFail;
},
};
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 1a9d594c56..f499a9952a 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -2618,7 +2618,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
log.err("{s}", .{em.msg});
- return error.AnalysisFail;
+ return error.CodegenFail;
},
};
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 7c1d4776af..eaf16e4009 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -2089,7 +2089,7 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
log.err("{s}", .{em.msg});
- return error.AnalysisFail;
+ return error.CodegenFail;
},
};
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index 87e3ca5c22..cf6e4f8418 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -377,7 +377,7 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
log.err("{s}", .{em.msg});
- return error.AnalysisFail;
+ return error.CodegenFail;
},
};
// duped_code is freed when the unnamed const is freed
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 00a52177f7..ac0c8e9ca5 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -1255,7 +1255,7 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In
.fail => |em| {
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
- return error.AnalysisFail;
+ return error.CodegenFail;
},
};
};
diff --git a/src/register_manager.zig b/src/register_manager.zig
index 2fe0cd2b6a..4d16348c27 100644
--- a/src/register_manager.zig
+++ b/src/register_manager.zig
@@ -19,6 +19,9 @@ pub const AllocateRegistersError = error{
/// Can happen when spilling an instruction in codegen runs out of
/// memory, so we propagate that error
OutOfMemory,
+ /// Can happen when spilling an instruction in codegen triggers integer
+ /// overflow, so we propagate that error
+ Overflow,
/// Can happen when spilling an instruction triggers a codegen
/// error, so we propagate that error
CodegenFail,
From 1024332adc88928299dfc07426f11624ae8ba18b Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:24:58 +0100
Subject: [PATCH 002/294] arm: use common implementation of genTypedValue
helper
---
src/arch/arm/CodeGen.zig | 192 ++++--------------------------------
src/arch/x86_64/CodeGen.zig | 6 +-
2 files changed, 23 insertions(+), 175 deletions(-)
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 87806223e3..7d8708c44d 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -24,7 +24,7 @@ const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
const Result = codegen.Result;
-const GenerateSymbolError = codegen.GenerateSymbolError;
+const CodeGenError = codegen.CodeGenError;
const DebugInfoOutput = codegen.DebugInfoOutput;
const bits = @import("bits.zig");
@@ -42,7 +42,7 @@ const c_abi_int_param_regs = abi.c_abi_int_param_regs;
const c_abi_int_return_regs = abi.c_abi_int_return_regs;
const gp = abi.RegisterClass.gp;
-const InnerError = codegen.CodeGenError || error{OutOfRegisters};
+const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
@@ -339,7 +339,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -6083,178 +6083,26 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
-fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
-
- const mod = self.bin_file.options.module.?;
- const decl = mod.declPtr(decl_index);
- mod.markDeclAlive(decl);
-
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
- const atom = elf_file.getAtom(atom_index);
- return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- unreachable; // unsupported architecture for MachO
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- return self.fail("TODO codegen COFF const Decl pointer", .{});
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- const decl_block_index = try p9.seeDecl(decl_index);
- const decl_block = p9.getDeclBlock(decl_block_index);
- const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else {
- return self.fail("TODO codegen non-ELF const Decl pointer", .{});
- }
-
- _ = tv;
-}
-
-fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
- const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
- return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
- };
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- unreachable;
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- return self.fail("TODO lower unnamed const in COFF", .{});
- } else if (self.bin_file.cast(link.File.Plan9)) |_| {
- return self.fail("TODO lower unnamed const in Plan9", .{});
- } else {
- return self.fail("TODO lower unnamed const", .{});
- }
-}
-
fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
- var typed_value = arg_tv;
- if (typed_value.val.castTag(.runtime_value)) |rt| {
- typed_value.val = rt.data;
- }
- log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });
- if (typed_value.val.isUndef())
- return MCValue{ .undef = {} };
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
-
- if (typed_value.val.castTag(.decl_ref)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data);
- }
- if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data.decl_index);
- }
- const target = self.target.*;
-
- switch (typed_value.ty.zigTypeTag()) {
- .Pointer => switch (typed_value.ty.ptrSize()) {
- .Slice => {},
- else => {
- switch (typed_value.val.tag()) {
- .int_u64 => {
- return MCValue{ .immediate = @intCast(u32, typed_value.val.toUnsignedInt(target)) };
- },
- else => {},
- }
- },
+ const mcv: MCValue = switch (try codegen.genTypedValue(
+ self.bin_file,
+ self.src_loc,
+ arg_tv,
+ self.mod_fn.owner_decl,
+ )) {
+ .mcv => |mcv| switch (mcv) {
+ .none => .none,
+ .undef => .undef,
+ .linker_load => unreachable, // TODO
+ .immediate => |imm| .{ .immediate = @truncate(u32, imm) },
+ .memory => |addr| .{ .memory = addr },
},
- .Int => {
- const info = typed_value.ty.intInfo(self.target.*);
- if (info.bits <= ptr_bits) {
- const unsigned = switch (info.signedness) {
- .signed => blk: {
- const signed = @intCast(i32, typed_value.val.toSignedInt(target));
- break :blk @bitCast(u32, signed);
- },
- .unsigned => @intCast(u32, typed_value.val.toUnsignedInt(target)),
- };
-
- return MCValue{ .immediate = unsigned };
- } else {
- return self.lowerUnnamedConst(typed_value);
- }
+ .fail => |msg| {
+ self.err_msg = msg;
+ return error.CodegenFail;
},
- .Bool => {
- return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
- },
- .Optional => {
- if (typed_value.ty.isPtrLikeOptional()) {
- if (typed_value.val.isNull())
- return MCValue{ .immediate = 0 };
-
- var buf: Type.Payload.ElemType = undefined;
- return self.genTypedValue(.{
- .ty = typed_value.ty.optionalChild(&buf),
- .val = typed_value.val,
- });
- } else if (typed_value.ty.abiSize(self.target.*) == 1) {
- return MCValue{ .immediate = @boolToInt(typed_value.val.isNull()) };
- }
- },
- .Enum => {
- if (typed_value.val.castTag(.enum_field_index)) |field_index| {
- switch (typed_value.ty.tag()) {
- .enum_simple => {
- return MCValue{ .immediate = field_index.data };
- },
- .enum_full, .enum_nonexhaustive => {
- const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
- } else {
- return MCValue{ .immediate = field_index.data };
- }
- },
- else => unreachable,
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
- return self.genTypedValue(.{ .ty = int_tag_ty, .val = typed_value.val });
- }
- },
- .ErrorSet => {
- switch (typed_value.val.tag()) {
- .@"error" => {
- const err_name = typed_value.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return MCValue{ .immediate = 0 };
- },
- }
- },
- .ErrorUnion => {
- const error_type = typed_value.ty.errorUnionSet();
- const payload_type = typed_value.ty.errorUnionPayload();
- const is_pl = typed_value.val.errorUnionIsPayload();
-
- if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
- // We use the error type directly as the type.
- const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero);
- return self.genTypedValue(.{ .ty = error_type, .val = err_val });
- }
- },
-
- .ComptimeInt => unreachable, // semantic analysis prevents this
- .ComptimeFloat => unreachable, // semantic analysis prevents this
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .Void => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
-
- else => {},
- }
-
- return self.lowerUnnamedConst(typed_value);
+ };
+ return mcv;
}
const CallMCValues = struct {
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 2ec1a33619..a2c11b332b 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -12,12 +12,12 @@ const trace = @import("../../tracy.zig").trace;
const Air = @import("../../Air.zig");
const Allocator = mem.Allocator;
+const CodeGenError = codegen.CodeGenError;
const Compilation = @import("../../Compilation.zig");
const DebugInfoOutput = codegen.DebugInfoOutput;
const DW = std.dwarf;
const ErrorMsg = Module.ErrorMsg;
const Result = codegen.Result;
-const GenerateSymbolError = codegen.GenerateSymbolError;
const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Mir = @import("Mir.zig");
@@ -40,7 +40,7 @@ const Register = bits.Register;
const gp = abi.RegisterClass.gp;
const sse = abi.RegisterClass.sse;
-const InnerError = codegen.CodeGenError || error{OutOfRegisters};
+const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
@@ -253,7 +253,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
From c413ac100fa5a4cece5702d3afb6b0898e9c6214 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:40:16 +0100
Subject: [PATCH 003/294] codegen: refactor generating Int as immediate where
appropriate
---
src/codegen.zig | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/src/codegen.zig b/src/codegen.zig
index 245745d6f6..7e7f34f992 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -1051,11 +1051,12 @@ pub fn genTypedValue(
},
.Int => {
const info = typed_value.ty.intInfo(target);
- if (info.bits <= ptr_bits and info.signedness == .signed) {
- return GenResult.mcv(.{ .immediate = @bitCast(u64, typed_value.val.toSignedInt(target)) });
- }
- if (!(info.bits > ptr_bits or info.signedness == .signed)) {
- return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(target) });
+ if (info.bits <= ptr_bits) {
+ const unsigned = switch (info.signedness) {
+ .signed => @bitCast(u64, typed_value.val.toSignedInt(target)),
+ .unsigned => typed_value.val.toUnsignedInt(target),
+ };
+ return GenResult.mcv(.{ .immediate = unsigned });
}
},
.Bool => {
From d8d8842190214cf727611b965e830ccbfffb52d1 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:42:29 +0100
Subject: [PATCH 004/294] arm: skip unimplemented behavior test for
@fieldParentPtr
---
test/behavior/field_parent_ptr.zig | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/test/behavior/field_parent_ptr.zig b/test/behavior/field_parent_ptr.zig
index 6bbd6ad7ef..bf99fd1795 100644
--- a/test/behavior/field_parent_ptr.zig
+++ b/test/behavior/field_parent_ptr.zig
@@ -3,6 +3,7 @@ const builtin = @import("builtin");
test "@fieldParentPtr non-first field" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testParentFieldPtr(&foo.c);
comptime try testParentFieldPtr(&foo.c);
@@ -10,6 +11,7 @@ test "@fieldParentPtr non-first field" {
test "@fieldParentPtr first field" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testParentFieldPtrFirst(&foo.a);
comptime try testParentFieldPtrFirst(&foo.a);
@@ -47,6 +49,7 @@ fn testParentFieldPtrFirst(a: *const bool) !void {
test "@fieldParentPtr untagged union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -73,6 +76,7 @@ fn testFieldParentPtrUnion(c: *const i32) !void {
test "@fieldParentPtr tagged union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -99,6 +103,7 @@ fn testFieldParentPtrTaggedUnion(c: *const i32) !void {
test "@fieldParentPtr extern union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
From 0d2c25ca9d0794b1c822a12f3bdf8e57ede4c840 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:46:08 +0100
Subject: [PATCH 005/294] aarch64: use common implementation of genTypedValue
---
src/arch/aarch64/CodeGen.zig | 215 ++++-------------------------------
1 file changed, 20 insertions(+), 195 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 23f458f910..28f8370bd9 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -23,7 +23,7 @@ const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
-const GenerateSymbolError = codegen.GenerateSymbolError;
+const CodeGenError = codegen.CodeGenError;
const Result = codegen.Result;
const DebugInfoOutput = codegen.DebugInfoOutput;
@@ -41,7 +41,7 @@ const c_abi_int_param_regs = abi.c_abi_int_param_regs;
const c_abi_int_return_regs = abi.c_abi_int_return_regs;
const gp = abi.RegisterClass.gp;
-const InnerError = codegen.CodeGenError || error{OutOfRegisters};
+const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
@@ -333,7 +333,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -6133,201 +6133,26 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
-fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
-
- // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
- if (tv.ty.zigTypeTag() == .Pointer) blk: {
- if (tv.ty.castPtrToFn()) |_| break :blk;
- if (!tv.ty.elemType2().hasRuntimeBits()) {
- return MCValue.none;
- }
- }
-
- const mod = self.bin_file.options.module.?;
- const decl = mod.declPtr(decl_index);
- mod.markDeclAlive(decl);
-
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
- const atom = elf_file.getAtom(atom_index);
- return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
- } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- const atom = try macho_file.getOrCreateAtomForDecl(decl_index);
- const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
- return MCValue{ .linker_load = .{
- .type = .got,
- .sym_index = sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
- const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
- return MCValue{ .linker_load = .{
- .type = .got,
- .sym_index = sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- const decl_block_index = try p9.seeDecl(decl_index);
- const decl_block = p9.getDeclBlock(decl_block_index);
- const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else {
- return self.fail("TODO codegen non-ELF const Decl pointer", .{});
- }
-}
-
-fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
- log.debug("lowerUnnamedConst: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
- const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
- return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
- };
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- return MCValue{ .linker_load = .{
- .type = .direct,
- .sym_index = local_sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- return MCValue{ .linker_load = .{
- .type = .direct,
- .sym_index = local_sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Plan9)) |_| {
- return self.fail("TODO lower unnamed const in Plan9", .{});
- } else {
- return self.fail("TODO lower unnamed const", .{});
- }
-}
-
fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
- var typed_value = arg_tv;
- if (typed_value.val.castTag(.runtime_value)) |rt| {
- typed_value.val = rt.data;
- }
- log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });
- if (typed_value.val.isUndef())
- return MCValue{ .undef = {} };
-
- if (typed_value.val.castTag(.decl_ref)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data);
- }
- if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data.decl_index);
- }
- const target = self.target.*;
-
- switch (typed_value.ty.zigTypeTag()) {
- .Pointer => switch (typed_value.ty.ptrSize()) {
- .Slice => {},
- else => {
- switch (typed_value.val.tag()) {
- .int_u64 => {
- return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
- },
- else => {},
- }
- },
+ const mcv: MCValue = switch (try codegen.genTypedValue(
+ self.bin_file,
+ self.src_loc,
+ arg_tv,
+ self.mod_fn.owner_decl,
+ )) {
+ .mcv => |mcv| switch (mcv) {
+ .none => .none,
+ .undef => .undef,
+ .linker_load => |ll| .{ .linker_load = ll },
+ .immediate => |imm| .{ .immediate = imm },
+ .memory => |addr| .{ .memory = addr },
},
- .Int => {
- const info = typed_value.ty.intInfo(self.target.*);
- if (info.bits <= 64) {
- const unsigned = switch (info.signedness) {
- .signed => blk: {
- const signed = typed_value.val.toSignedInt(target);
- break :blk @bitCast(u64, signed);
- },
- .unsigned => typed_value.val.toUnsignedInt(target),
- };
-
- return MCValue{ .immediate = unsigned };
- }
+ .fail => |msg| {
+ self.err_msg = msg;
+ return error.CodegenFail;
},
- .Bool => {
- return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
- },
- .Optional => {
- if (typed_value.ty.isPtrLikeOptional()) {
- if (typed_value.val.isNull())
- return MCValue{ .immediate = 0 };
-
- var buf: Type.Payload.ElemType = undefined;
- return self.genTypedValue(.{
- .ty = typed_value.ty.optionalChild(&buf),
- .val = typed_value.val,
- });
- } else if (typed_value.ty.abiSize(self.target.*) == 1) {
- return MCValue{ .immediate = @boolToInt(typed_value.val.isNull()) };
- }
- },
- .Enum => {
- if (typed_value.val.castTag(.enum_field_index)) |field_index| {
- switch (typed_value.ty.tag()) {
- .enum_simple => {
- return MCValue{ .immediate = field_index.data };
- },
- .enum_full, .enum_nonexhaustive => {
- const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
- } else {
- return MCValue{ .immediate = field_index.data };
- }
- },
- else => unreachable,
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
- return self.genTypedValue(.{ .ty = int_tag_ty, .val = typed_value.val });
- }
- },
- .ErrorSet => {
- switch (typed_value.val.tag()) {
- .@"error" => {
- const err_name = typed_value.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return MCValue{ .immediate = 0 };
- },
- }
- },
- .ErrorUnion => {
- const error_type = typed_value.ty.errorUnionSet();
- const payload_type = typed_value.ty.errorUnionPayload();
-
- const is_pl = typed_value.val.errorUnionIsPayload();
-
- if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
- // We use the error type directly as the type.
- const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero);
- return self.genTypedValue(.{ .ty = error_type, .val = err_val });
- }
-
- return self.lowerUnnamedConst(typed_value);
- },
-
- .ComptimeInt => unreachable, // semantic analysis prevents this
- .ComptimeFloat => unreachable, // semantic analysis prevents this
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .Void => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
-
- else => {},
- }
-
- return self.lowerUnnamedConst(typed_value);
+ };
+ return mcv;
}
const CallMCValues = struct {
From 5b3ea49806f5d0b9034e3eacbef9e19428a5db8a Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:53:13 +0100
Subject: [PATCH 006/294] riscv64: use common implementation of genTypedValue
---
src/arch/riscv64/CodeGen.zig | 158 +++++------------------------------
1 file changed, 20 insertions(+), 138 deletions(-)
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index fad5482cbc..c7191145f9 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -23,6 +23,7 @@ const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
const codegen = @import("../../codegen.zig");
+const CodeGenError = codegen.CodeGenError;
const Result = codegen.Result;
const DebugInfoOutput = codegen.DebugInfoOutput;
@@ -35,7 +36,7 @@ const Instruction = abi.Instruction;
const callee_preserved_regs = abi.callee_preserved_regs;
const gp = abi.RegisterClass.gp;
-const InnerError = codegen.CodeGenError || error{OutOfRegisters};
+const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
@@ -221,7 +222,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) codegen.CodeGenError!Result {
+) CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -2548,145 +2549,26 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
-fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const mod = self.bin_file.options.module.?;
- const decl = mod.declPtr(decl_index);
- mod.markDeclAlive(decl);
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
- const atom = elf_file.getAtom(atom_index);
- return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- unreachable;
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- return self.fail("TODO codegen COFF const Decl pointer", .{});
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- const decl_block_index = try p9.seeDecl(decl_index);
- const decl_block = p9.getDeclBlock(decl_block_index);
- const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else {
- return self.fail("TODO codegen non-ELF const Decl pointer", .{});
- }
- _ = tv;
-}
-
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
- if (typed_value.val.isUndef())
- return MCValue{ .undef = {} };
-
- if (typed_value.val.castTag(.decl_ref)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data);
- }
- if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data.decl_index);
- }
- const target = self.target.*;
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- switch (typed_value.ty.zigTypeTag()) {
- .Pointer => switch (typed_value.ty.ptrSize()) {
- .Slice => {
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_type = typed_value.ty.slicePtrFieldType(&buf);
- const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val });
- const mod = self.bin_file.options.module.?;
- const slice_len = typed_value.val.sliceLen(mod);
- // Codegen can't handle some kinds of indirection. If the wrong union field is accessed here it may mean
- // the Sema code needs to use anonymous Decls or alloca instructions to store data.
- const ptr_imm = ptr_mcv.memory;
- _ = slice_len;
- _ = ptr_imm;
- // We need more general support for const data being stored in memory to make this work.
- return self.fail("TODO codegen for const slices", .{});
- },
- else => {
- if (typed_value.val.tag() == .int_u64) {
- return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
- }
- return self.fail("TODO codegen more kinds of const pointers", .{});
- },
+ const mcv: MCValue = switch (try codegen.genTypedValue(
+ self.bin_file,
+ self.src_loc,
+ typed_value,
+ self.mod_fn.owner_decl,
+ )) {
+ .mcv => |mcv| switch (mcv) {
+ .none => .none,
+ .undef => .undef,
+ .linker_load => unreachable, // TODO
+ .immediate => |imm| .{ .immediate = imm },
+ .memory => |addr| .{ .memory = addr },
},
- .Int => {
- const info = typed_value.ty.intInfo(self.target.*);
- if (info.bits > ptr_bits or info.signedness == .signed) {
- return self.fail("TODO const int bigger than ptr and signed int", .{});
- }
- return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
+ .fail => |msg| {
+ self.err_msg = msg;
+ return error.CodegenFail;
},
- .Bool => {
- return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
- },
- .ComptimeInt => unreachable, // semantic analysis prevents this
- .ComptimeFloat => unreachable, // semantic analysis prevents this
- .Optional => {
- if (typed_value.ty.isPtrLikeOptional()) {
- if (typed_value.val.isNull())
- return MCValue{ .immediate = 0 };
-
- var buf: Type.Payload.ElemType = undefined;
- return self.genTypedValue(.{
- .ty = typed_value.ty.optionalChild(&buf),
- .val = typed_value.val,
- });
- } else if (typed_value.ty.abiSize(self.target.*) == 1) {
- return MCValue{ .immediate = @boolToInt(typed_value.val.isNull()) };
- }
- return self.fail("TODO non pointer optionals", .{});
- },
- .Enum => {
- if (typed_value.val.castTag(.enum_field_index)) |field_index| {
- switch (typed_value.ty.tag()) {
- .enum_simple => {
- return MCValue{ .immediate = field_index.data };
- },
- .enum_full, .enum_nonexhaustive => {
- const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
- } else {
- return MCValue{ .immediate = field_index.data };
- }
- },
- else => unreachable,
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
- return self.genTypedValue(.{ .ty = int_tag_ty, .val = typed_value.val });
- }
- },
- .ErrorSet => {
- switch (typed_value.val.tag()) {
- .@"error" => {
- const err_name = typed_value.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return MCValue{ .immediate = 0 };
- },
- }
- },
- .ErrorUnion => {
- const error_type = typed_value.ty.errorUnionSet();
- const payload_type = typed_value.ty.errorUnionPayload();
- const sub_val = typed_value.val.castTag(.eu_payload).?.data;
-
- if (!payload_type.hasRuntimeBits()) {
- // We use the error type directly as the type.
- return self.genTypedValue(.{ .ty = error_type, .val = sub_val });
- }
-
- return self.fail("TODO implement error union const of type '{}'", .{typed_value.ty.fmtDebug()});
- },
- else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty.fmtDebug()}),
- }
+ };
+ return mcv;
}
const CallMCValues = struct {
From f6eeb6c8ce83af392dc075e3f80846aefc791f42 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:53:30 +0100
Subject: [PATCH 007/294] sparc64: use common implementation of genTypedValue
---
src/arch/sparc64/CodeGen.zig | 170 +++++------------------------------
1 file changed, 20 insertions(+), 150 deletions(-)
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 5a108eca85..dc1a450e9a 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -19,7 +19,7 @@ const Mir = @import("Mir.zig");
const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Type = @import("../../type.zig").Type;
-const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
+const CodeGenError = codegen.CodeGenError;
const Result = @import("../../codegen.zig").Result;
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
@@ -38,7 +38,7 @@ const gp = abi.RegisterClass.gp;
const Self = @This();
-const InnerError = codegen.CodeGenError || error{OutOfRegisters};
+const InnerError = CodeGenError || error{OutOfRegisters};
const RegisterView = enum(u1) {
caller,
@@ -261,7 +261,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -3894,133 +3894,25 @@ fn genStore(self: *Self, value_reg: Register, addr_reg: Register, comptime off_t
}
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
- var tv = typed_value;
- log.debug("genTypedValue: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
-
- if (tv.val.castTag(.runtime_value)) |rt| {
- tv.val = rt.data;
- }
-
- if (tv.val.isUndef())
- return MCValue{ .undef = {} };
-
- if (tv.val.castTag(.decl_ref)) |payload| {
- return self.lowerDeclRef(tv, payload.data);
- }
- if (tv.val.castTag(.decl_ref_mut)) |payload| {
- return self.lowerDeclRef(tv, payload.data.decl_index);
- }
- const target = self.target.*;
-
- switch (tv.ty.zigTypeTag()) {
- .Pointer => switch (tv.ty.ptrSize()) {
- .Slice => {},
- else => {
- switch (tv.val.tag()) {
- .int_u64 => {
- return MCValue{ .immediate = tv.val.toUnsignedInt(target) };
- },
- else => {},
- }
- },
+ const mcv: MCValue = switch (try codegen.genTypedValue(
+ self.bin_file,
+ self.src_loc,
+ typed_value,
+ self.mod_fn.owner_decl,
+ )) {
+ .mcv => |mcv| switch (mcv) {
+ .none => .none,
+ .undef => .undef,
+ .linker_load => unreachable, // TODO
+ .immediate => |imm| .{ .immediate = imm },
+ .memory => |addr| .{ .memory = addr },
},
- .Bool => {
- return MCValue{ .immediate = @boolToInt(tv.val.toBool()) };
+ .fail => |msg| {
+ self.err_msg = msg;
+ return error.CodegenFail;
},
- .Int => {
- const info = tv.ty.intInfo(self.target.*);
- if (info.bits <= 64) {
- const unsigned = switch (info.signedness) {
- .signed => blk: {
- const signed = tv.val.toSignedInt(target);
- break :blk @bitCast(u64, signed);
- },
- .unsigned => tv.val.toUnsignedInt(target),
- };
-
- return MCValue{ .immediate = unsigned };
- } else {
- return self.fail("TODO implement int genTypedValue of > 64 bits", .{});
- }
- },
- .Optional => {
- if (tv.ty.isPtrLikeOptional()) {
- if (tv.val.isNull())
- return MCValue{ .immediate = 0 };
-
- var buf: Type.Payload.ElemType = undefined;
- return self.genTypedValue(.{
- .ty = tv.ty.optionalChild(&buf),
- .val = tv.val,
- });
- } else if (tv.ty.abiSize(self.target.*) == 1) {
- return MCValue{ .immediate = @boolToInt(tv.val.isNull()) };
- }
- },
- .Enum => {
- if (tv.val.castTag(.enum_field_index)) |field_index| {
- switch (tv.ty.tag()) {
- .enum_simple => {
- return MCValue{ .immediate = field_index.data };
- },
- .enum_full, .enum_nonexhaustive => {
- const enum_full = tv.ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
- } else {
- return MCValue{ .immediate = field_index.data };
- }
- },
- else => unreachable,
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = tv.ty.intTagType(&int_tag_buffer);
- return self.genTypedValue(.{ .ty = int_tag_ty, .val = tv.val });
- }
- },
- .ErrorSet => {
- const err_name = tv.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- .ErrorUnion => {
- const error_type = tv.ty.errorUnionSet();
- const payload_type = tv.ty.errorUnionPayload();
-
- if (tv.val.castTag(.eu_payload)) |pl| {
- if (!payload_type.hasRuntimeBits()) {
- // We use the error type directly as the type.
- return MCValue{ .immediate = 0 };
- }
-
- _ = pl;
- return self.fail("TODO implement error union const of type '{}' (non-error)", .{tv.ty.fmtDebug()});
- } else {
- if (!payload_type.hasRuntimeBits()) {
- // We use the error type directly as the type.
- return self.genTypedValue(.{ .ty = error_type, .val = tv.val });
- }
-
- return self.fail("TODO implement error union const of type '{}' (error)", .{tv.ty.fmtDebug()});
- }
- },
- .ComptimeInt => unreachable, // semantic analysis prevents this
- .ComptimeFloat => unreachable, // semantic analysis prevents this
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .Void => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
- else => {},
- }
-
- return self.fail("TODO implement const of type '{}'", .{tv.ty.fmtDebug()});
+ };
+ return mcv;
}
fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
@@ -4196,28 +4088,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
}
}
-fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
- // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
- if (tv.ty.zigTypeTag() == .Pointer) blk: {
- if (tv.ty.castPtrToFn()) |_| break :blk;
- if (!tv.ty.elemType2().hasRuntimeBits()) {
- return MCValue.none;
- }
- }
-
- const mod = self.bin_file.options.module.?;
- const decl = mod.declPtr(decl_index);
-
- mod.markDeclAlive(decl);
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
- const atom = elf_file.getAtom(atom_index);
- return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
- } else {
- return self.fail("TODO codegen non-ELF const Decl pointer", .{});
- }
-}
-
fn minMax(
self: *Self,
tag: Air.Inst.Tag,
From d23472747eb288e4c2332e03f6185c69e864f67d Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:53:47 +0100
Subject: [PATCH 008/294] elf: fully zero out symbol when appending to freelist
---
src/link/Elf.zig | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index f499a9952a..a91722d072 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -2097,9 +2097,16 @@ fn freeAtom(self: *Elf, atom_index: Atom.Index) void {
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
const local_sym_index = atom.getSymbolIndex().?;
+ log.debug("adding %{d} to local symbols free list", .{local_sym_index});
self.local_symbol_free_list.append(gpa, local_sym_index) catch {};
- self.local_symbols.items[local_sym_index].st_info = 0;
- self.local_symbols.items[local_sym_index].st_shndx = 0;
+ self.local_symbols.items[local_sym_index] = .{
+ .st_name = 0,
+ .st_info = 0,
+ .st_other = 0,
+ .st_shndx = 0,
+ .st_value = 0,
+ .st_size = 0,
+ };
_ = self.atom_by_index_table.remove(local_sym_index);
self.getAtomPtr(atom_index).local_sym_index = 0;
From dc709fbf48798ae74d5c7763cf99dffeb8143795 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:56:57 +0100
Subject: [PATCH 009/294] codegen: rename GenerateSymbolError to CodeGenError
---
src/arch/wasm/CodeGen.zig | 2 +-
src/codegen.zig | 2 --
2 files changed, 1 insertion(+), 3 deletions(-)
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 511a10769e..5cd6c95690 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1162,7 +1162,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: codegen.DebugInfoOutput,
-) codegen.GenerateSymbolError!codegen.Result {
+) codegen.CodeGenError!codegen.Result {
_ = src_loc;
var code_gen: CodeGen = .{
.gpa = bin_file.allocator,
diff --git a/src/codegen.zig b/src/codegen.zig
index 7e7f34f992..a91795841c 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -35,8 +35,6 @@ pub const CodeGenError = error{
CodegenFail,
};
-pub const GenerateSymbolError = CodeGenError;
-
pub const DebugInfoOutput = union(enum) {
dwarf: *link.File.Dwarf.DeclState,
/// the plan9 debuginfo output is a bytecode with 4 opcodes
From d6bd00e85500fa1a7909695ae5943be438f7521d Mon Sep 17 00:00:00 2001
From: r00ster91
Date: Fri, 3 Mar 2023 17:30:18 +0100
Subject: [PATCH 010/294] Zir: move set_cold from Inst.Tag to Inst.Extended
If I could mark a builtin function as cold, I would mark @setCold as cold.
We have run out of `Zir.Inst.Tag`s so I had to move a tag from Zir.Inst.Tag to
Zir.Inst.Extended. This is because a new noreturn builtin will be added and
noreturn builtins cannot be part of Inst.Extended:
```
/// `noreturn` instructions may not go here; they must be part of the main `Tag` enum.
pub const Extended = enum(u16) {
```
Here's another reason I went for @setCold:
```
$ git grep setRuntimeSafety | wc -l
322
$ git grep setCold | wc -l
79
$ git grep setEvalBranchQuota | wc -l
82
```
This also simply removes @setCold from Autodoc and the docs frontend because
as far as I could understand it, builtins represented using Zir extended
instructions are not yet supported because I couldn't find
@setAlignStack or @setFloatMode there, either.
---
lib/docs/main.js | 4 ----
src/AstGen.zig | 13 ++++++++++---
src/Autodoc.zig | 1 -
src/Sema.zig | 18 +++++++++---------
src/Zir.zig | 10 ++++------
src/print_zir.zig | 2 +-
6 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/lib/docs/main.js b/lib/docs/main.js
index a0647bbe61..fc99b2f861 100644
--- a/lib/docs/main.js
+++ b/lib/docs/main.js
@@ -1187,10 +1187,6 @@ const NAV_MODES = {
payloadHtml += "panic";
break;
}
- case "set_cold": {
- payloadHtml += "setCold";
- break;
- }
case "set_runtime_safety": {
payloadHtml += "setRuntimeSafety";
break;
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 41a8ccadb2..679fc2df0c 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -2609,8 +2609,9 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.extended => switch (gz.astgen.instructions.items(.data)[inst].extended.opcode) {
.breakpoint,
.fence,
- .set_align_stack,
.set_float_mode,
+ .set_align_stack,
+ .set_cold,
=> break :b true,
else => break :b false,
},
@@ -2658,7 +2659,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.validate_struct_init_comptime,
.validate_array_init,
.validate_array_init_comptime,
- .set_cold,
.set_runtime_safety,
.closure_capture,
.memcpy,
@@ -8078,6 +8078,14 @@ fn builtinCall(
});
return rvalue(gz, ri, result, node);
},
+ .set_cold => {
+ const order = try expr(gz, scope, ri, params[0]);
+ const result = try gz.addExtendedPayload(.set_cold, Zir.Inst.UnNode{
+ .node = gz.nodeIndexToRelative(node),
+ .operand = order,
+ });
+ return rvalue(gz, ri, result, node);
+ },
.src => {
const token_starts = tree.tokens.items(.start);
@@ -8111,7 +8119,6 @@ fn builtinCall(
.bool_to_int => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .bool_to_int),
.embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .embed_file),
.error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .anyerror_type } }, params[0], .error_name),
- .set_cold => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_cold),
.set_runtime_safety => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_runtime_safety),
.sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt),
.sin => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sin),
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index 3cf3fff4c0..15d90b104b 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -1338,7 +1338,6 @@ fn walkInstruction(
.embed_file,
.error_name,
.panic,
- .set_cold, // @check
.set_runtime_safety, // @check
.sqrt,
.sin,
diff --git a/src/Sema.zig b/src/Sema.zig
index f9a6f39867..4702d10688 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1167,6 +1167,11 @@ fn analyzeBodyInner(
i += 1;
continue;
},
+ .set_cold => {
+ try sema.zirSetCold(block, extended);
+ i += 1;
+ continue;
+ },
.breakpoint => {
if (!block.is_comptime) {
_ = try block.addNoOp(.breakpoint);
@@ -1304,11 +1309,6 @@ fn analyzeBodyInner(
i += 1;
continue;
},
- .set_cold => {
- try sema.zirSetCold(block, inst);
- i += 1;
- continue;
- },
.set_runtime_safety => {
try sema.zirSetRuntimeSafety(block, inst);
i += 1;
@@ -5721,10 +5721,10 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
gop.value_ptr.* = .{ .alignment = alignment, .src = src };
}
-fn zirSetCold(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
- const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
- const is_cold = try sema.resolveConstBool(block, operand_src, inst_data.operand, "operand to @setCold must be comptime-known");
+fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
+ const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
+ const is_cold = try sema.resolveConstBool(block, operand_src, extra.operand, "operand to @setCold must be comptime-known");
const func = sema.func orelse return; // does nothing outside a function
func.is_cold = is_cold;
}
diff --git a/src/Zir.zig b/src/Zir.zig
index 4dd2386c51..c7f2141dcc 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -808,8 +808,6 @@ pub const Inst = struct {
panic,
/// Same as `panic` but forces comptime.
panic_comptime,
- /// Implement builtin `@setCold`. Uses `un_node`.
- set_cold,
/// Implement builtin `@setRuntimeSafety`. Uses `un_node`.
set_runtime_safety,
/// Implement builtin `@sqrt`. Uses `un_node`.
@@ -1187,7 +1185,6 @@ pub const Inst = struct {
.bool_to_int,
.embed_file,
.error_name,
- .set_cold,
.set_runtime_safety,
.sqrt,
.sin,
@@ -1323,7 +1320,6 @@ pub const Inst = struct {
.validate_deref,
.@"export",
.export_value,
- .set_cold,
.set_runtime_safety,
.memcpy,
.memset,
@@ -1561,7 +1557,7 @@ pub const Inst = struct {
=> false,
.extended => switch (data.extended.opcode) {
- .breakpoint, .fence => true,
+ .fence, .set_cold, .breakpoint => true,
else => false,
},
};
@@ -1750,7 +1746,6 @@ pub const Inst = struct {
.error_name = .un_node,
.panic = .un_node,
.panic_comptime = .un_node,
- .set_cold = .un_node,
.set_runtime_safety = .un_node,
.sqrt = .un_node,
.sin = .un_node,
@@ -1979,6 +1974,9 @@ pub const Inst = struct {
/// Implement builtin `@setAlignStack`.
/// `operand` is payload index to `UnNode`.
set_align_stack,
+ /// Implements `@setCold`.
+ /// `operand` is payload index to `UnNode`.
+ set_cold,
/// Implements the `@errSetCast` builtin.
/// `operand` is payload index to `BinNode`. `lhs` is dest type, `rhs` is operand.
err_set_cast,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index fb9031296d..5ec9fbcdfc 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -196,7 +196,6 @@ const Writer = struct {
.error_name,
.panic,
.panic_comptime,
- .set_cold,
.set_runtime_safety,
.sqrt,
.sin,
@@ -503,6 +502,7 @@ const Writer = struct {
.fence,
.set_float_mode,
.set_align_stack,
+ .set_cold,
.wasm_memory_size,
.error_to_int,
.int_to_error,
From e0d390463865340adc8055d1e34c0bc7acf4e4c3 Mon Sep 17 00:00:00 2001
From: r00ster91
Date: Fri, 3 Mar 2023 09:42:34 +0100
Subject: [PATCH 011/294] Ast: properly handle sentinel-terminated slices in
tuple
Co-authored-by: Veikka Tuominen
---
lib/std/zig/Ast.zig | 9 ++++++---
test/behavior/tuple.zig | 19 +++++++++++++++++++
2 files changed, 25 insertions(+), 3 deletions(-)
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index f99d58aafa..cb86696e13 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -1407,7 +1407,8 @@ pub fn containerField(tree: Ast, node: Node.Index) full.ContainerField {
.type_expr = data.lhs,
.value_expr = extra.value_expr,
.align_expr = extra.align_expr,
- .tuple_like = tree.tokens.items(.tag)[main_token + 1] != .colon,
+ .tuple_like = tree.tokens.items(.tag)[main_token] != .identifier or
+ tree.tokens.items(.tag)[main_token + 1] != .colon,
});
}
@@ -1420,7 +1421,8 @@ pub fn containerFieldInit(tree: Ast, node: Node.Index) full.ContainerField {
.type_expr = data.lhs,
.value_expr = data.rhs,
.align_expr = 0,
- .tuple_like = tree.tokens.items(.tag)[main_token + 1] != .colon,
+ .tuple_like = tree.tokens.items(.tag)[main_token] != .identifier or
+ tree.tokens.items(.tag)[main_token + 1] != .colon,
});
}
@@ -1433,7 +1435,8 @@ pub fn containerFieldAlign(tree: Ast, node: Node.Index) full.ContainerField {
.type_expr = data.lhs,
.value_expr = 0,
.align_expr = data.rhs,
- .tuple_like = tree.tokens.items(.tag)[main_token + 1] != .colon,
+ .tuple_like = tree.tokens.items(.tag)[main_token] != .identifier or
+ tree.tokens.items(.tag)[main_token + 1] != .colon,
});
}
diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig
index 13b02b40e8..f7860be34e 100644
--- a/test/behavior/tuple.zig
+++ b/test/behavior/tuple.zig
@@ -397,3 +397,22 @@ test "nested runtime conditionals in tuple initializer" {
};
try expectEqualStrings("up", x[0]);
}
+
+test "sentinel slice in tuple with other fields" {
+ const S = struct {
+ a: u32,
+ b: u32,
+ };
+
+ const Submission = union(enum) {
+ open: struct { *S, [:0]const u8, u32 },
+ };
+
+ _ = Submission;
+}
+
+test "sentinel slice in tuple" {
+ const S = struct { [:0]const u8 };
+
+ _ = S;
+}
From 653814f76ba5d678ebad91f140417cd5829c6aad Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 3 Mar 2023 16:17:23 -0700
Subject: [PATCH 012/294] std.Build.addModule: return the created module
---
lib/std/Build.zig | 23 ++++++++++-------------
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index 26919962e3..120196f972 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -550,17 +550,13 @@ pub fn addAssembly(b: *Build, options: AssemblyOptions) *CompileStep {
return obj_step;
}
-pub const AddModuleOptions = struct {
- name: []const u8,
- source_file: FileSource,
- dependencies: []const ModuleDependency = &.{},
-};
-
-pub fn addModule(b: *Build, options: AddModuleOptions) void {
- b.modules.put(b.dupe(options.name), b.createModule(.{
- .source_file = options.source_file,
- .dependencies = options.dependencies,
- })) catch @panic("OOM");
+/// This function creates a module and adds it to the package's module set, making
+/// it available to other packages which depend on this one.
+/// `createModule` can be used instead to create a private module.
+pub fn addModule(b: *Build, name: []const u8, options: CreateModuleOptions) *Module {
+ const module = b.createModule(options);
+ b.modules.put(b.dupe(name), module) catch @panic("OOM");
+ return module;
}
pub const ModuleDependency = struct {
@@ -573,8 +569,9 @@ pub const CreateModuleOptions = struct {
dependencies: []const ModuleDependency = &.{},
};
-/// Prefer to use `addModule` which will make the module available to other
-/// packages which depend on this package.
+/// This function creates a private module, to be used by the current package,
+/// but not exposed to other packages depending on this one.
+/// `addModule` can be used instead to create a public module.
pub fn createModule(b: *Build, options: CreateModuleOptions) *Module {
const module = b.allocator.create(Module) catch @panic("OOM");
module.* = .{
From 65368683ad92b858d0a391cb29d37c0476784b40 Mon Sep 17 00:00:00 2001
From: r00ster91
Date: Fri, 3 Mar 2023 18:35:03 +0100
Subject: [PATCH 013/294] add @trap builtin
This introduces a new builtin function that compiles down to something that results in an illegal instruction exception/interrupt.
It can be used to exit a program abnormally.
This implements the builtin for all backends.
---
doc/langref.html.in | 17 ++++++++++++++++-
lib/zig.h | 10 ++++++++--
src/Air.zig | 10 +++++++++-
src/AstGen.zig | 8 +++++++-
src/BuiltinFn.zig | 8 ++++++++
src/Liveness.zig | 2 ++
src/Sema.zig | 9 +++++++++
src/Zir.zig | 11 +++++++++--
src/arch/aarch64/CodeGen.zig | 11 ++++++++++-
src/arch/arm/CodeGen.zig | 9 +++++++++
src/arch/arm/Emit.zig | 9 +++++++--
src/arch/arm/Mir.zig | 2 ++
src/arch/arm/bits.zig | 11 +++++++++++
src/arch/riscv64/CodeGen.zig | 9 +++++++++
src/arch/riscv64/Emit.zig | 2 ++
src/arch/riscv64/Mir.zig | 1 +
src/arch/riscv64/bits.zig | 1 +
src/arch/sparc64/CodeGen.zig | 16 ++++++++++++++++
src/arch/wasm/CodeGen.zig | 6 ++++++
src/arch/x86_64/CodeGen.zig | 10 ++++++++++
src/arch/x86_64/Emit.zig | 7 +++++++
src/arch/x86_64/Mir.zig | 3 +++
src/codegen/c.zig | 6 ++++++
src/codegen/llvm.zig | 8 ++++++++
src/print_air.zig | 1 +
src/print_zir.zig | 1 +
26 files changed, 178 insertions(+), 10 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index e016ef13f8..0290d3acd6 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -7818,12 +7818,14 @@ comptime {
This function inserts a platform-specific debug trap instruction which causes
debuggers to break there.
+ Unlike for {#syntax#}@trap(){#endsyntax#}, execution may continue after this point if the program is resumed.
This function is only valid within function scope.
-
+ {#see_also|@trap#}
{#header_close#}
+
{#header_open|@mulAdd#}
{#syntax#}@mulAdd(comptime T: type, a: T, b: T, c: T) T{#endsyntax#}
@@ -9393,6 +9395,19 @@ fn List(comptime T: type) type {
{#header_close#}
+ {#header_open|@trap#}
+ {#syntax#}@trap() noreturn{#endsyntax#}
+
+ This function inserts a platform-specific trap/jam instruction which can be used to exit the program abnormally.
+ This may be implemented by explicitly emitting an invalid instruction which may cause an illegal instruction exception of some sort.
+ Unlike for {#syntax#}@breakpoint(){#endsyntax#}, execution does not continue after this point.
+
+
+ This function is only valid within function scope.
+
+ {#see_also|@breakpoint#}
+ {#header_close#}
+
{#header_open|@truncate#}
{#syntax#}@truncate(comptime T: type, integer: anytype) T{#endsyntax#}
diff --git a/lib/zig.h b/lib/zig.h
index c10720d1bd..f3ad7db8a1 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -180,10 +180,16 @@ typedef char bool;
#define zig_export(sig, symbol, name) __asm(name " = " symbol)
#endif
+#if zig_has_builtin(trap)
+#define zig_trap() __builtin_trap()
+#elif defined(__i386__) || defined(__x86_64__)
+#define zig_trap() __asm__ volatile("ud2");
+#else
+#define zig_trap() raise(SIGILL)
+#endif
+
#if zig_has_builtin(debugtrap)
#define zig_breakpoint() __builtin_debugtrap()
-#elif zig_has_builtin(trap) || defined(zig_gnuc)
-#define zig_breakpoint() __builtin_trap()
#elif defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__)
#define zig_breakpoint() __debugbreak()
#elif defined(__i386__) || defined(__x86_64__)
diff --git a/src/Air.zig b/src/Air.zig
index 3ebdd319de..4646dcc89e 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -232,7 +232,14 @@ pub const Inst = struct {
/// Result type is always noreturn; no instructions in a block follow this one.
/// Uses the `br` field.
br,
- /// Lowers to a hardware trap instruction, or the next best thing.
+ /// Lowers to a trap/jam instruction causing program abortion.
+ /// This may lower to an instruction known to be invalid.
+ /// Sometimes, for the lack of a better instruction, `trap` and `breakpoint` may compile down to the same code.
+ /// Result type is always noreturn; no instructions in a block follow this one.
+ trap,
+ /// Lowers to a trap instruction causing debuggers to break here, or the next best thing.
+ /// The debugger or something else may allow the program to resume after this point.
+ /// Sometimes, for the lack of a better instruction, `trap` and `breakpoint` may compile down to the same code.
/// Result type is always void.
breakpoint,
/// Yields the return address of the current function.
@@ -1186,6 +1193,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.ret,
.ret_load,
.unreach,
+ .trap,
=> return Type.initTag(.noreturn),
.breakpoint,
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 679fc2df0c..fd51e73cf9 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -2631,6 +2631,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.repeat_inline,
.panic,
.panic_comptime,
+ .trap,
.check_comptime_control_flow,
=> {
noreturn_src_node = statement;
@@ -8105,7 +8106,7 @@ fn builtinCall(
.error_return_trace => return rvalue(gz, ri, try gz.addNodeExtended(.error_return_trace, node), node),
.frame => return rvalue(gz, ri, try gz.addNodeExtended(.frame, node), node),
.frame_address => return rvalue(gz, ri, try gz.addNodeExtended(.frame_address, node), node),
- .breakpoint => return rvalue(gz, ri, try gz.addNodeExtended(.breakpoint, node), node),
+ .breakpoint => return rvalue(gz, ri, try gz.addNodeExtended(.breakpoint, node), node),
.type_info => return simpleUnOpType(gz, scope, ri, node, params[0], .type_info),
.size_of => return simpleUnOpType(gz, scope, ri, node, params[0], .size_of),
@@ -8178,6 +8179,11 @@ fn builtinCall(
try emitDbgNode(gz, node);
return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], if (gz.force_comptime) .panic_comptime else .panic);
},
+ .trap => {
+ try emitDbgNode(gz, node);
+ _ = try gz.addNode(.trap, node);
+ return rvalue(gz, ri, .void_value, node);
+ },
.error_to_int => {
const operand = try expr(gz, scope, .{ .rl = .none }, params[0]);
const result = try gz.addExtendedPayload(.error_to_int, Zir.Inst.UnNode{
diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig
index 20edbabe47..79c6617483 100644
--- a/src/BuiltinFn.zig
+++ b/src/BuiltinFn.zig
@@ -109,6 +109,7 @@ pub const Tag = enum {
sub_with_overflow,
tag_name,
This,
+ trap,
truncate,
Type,
type_info,
@@ -915,6 +916,13 @@ pub const list = list: {
.param_count = 0,
},
},
+ .{
+ "@trap",
+ .{
+ .tag = .trap,
+ .param_count = 0,
+ },
+ },
.{
"@truncate",
.{
diff --git a/src/Liveness.zig b/src/Liveness.zig
index 481cf25d04..8dc81aa165 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -226,6 +226,7 @@ pub fn categorizeOperand(
.ret_ptr,
.constant,
.const_ty,
+ .trap,
.breakpoint,
.dbg_stmt,
.dbg_inline_begin,
@@ -848,6 +849,7 @@ fn analyzeInst(
.ret_ptr,
.constant,
.const_ty,
+ .trap,
.breakpoint,
.dbg_stmt,
.dbg_inline_begin,
diff --git a/src/Sema.zig b/src/Sema.zig
index 4702d10688..8940527bc0 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1101,6 +1101,7 @@ fn analyzeBodyInner(
.@"unreachable" => break sema.zirUnreachable(block, inst),
.panic => break sema.zirPanic(block, inst, false),
.panic_comptime => break sema.zirPanic(block, inst, true),
+ .trap => break sema.zirTrap(block, inst),
// zig fmt: on
.extended => ext: {
@@ -5144,6 +5145,14 @@ fn zirPanic(sema: *Sema, block: *Block, inst: Zir.Inst.Index, force_comptime: bo
return always_noreturn;
}
+fn zirTrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
+ const src_node = sema.code.instructions.items(.data)[inst].node;
+ const src = LazySrcLoc.nodeOffset(src_node);
+ sema.src = src;
+ _ = try block.addNoOp(.trap);
+ return always_noreturn;
+}
+
fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
diff --git a/src/Zir.zig b/src/Zir.zig
index c7f2141dcc..b8ea2ea295 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -617,7 +617,7 @@ pub const Inst = struct {
/// Uses the `un_node` field.
typeof_log2_int_type,
/// Asserts control-flow will not reach this instruction (`unreachable`).
- /// Uses the `unreachable` union field.
+ /// Uses the `@"unreachable"` union field.
@"unreachable",
/// Bitwise XOR. `^`
/// Uses the `pl_node` union field. Payload is `Bin`.
@@ -808,6 +808,9 @@ pub const Inst = struct {
panic,
/// Same as `panic` but forces comptime.
panic_comptime,
+ /// Implements `@trap`.
+ /// Uses the `node` field.
+ trap,
/// Implement builtin `@setRuntimeSafety`. Uses `un_node`.
set_runtime_safety,
/// Implement builtin `@sqrt`. Uses `un_node`.
@@ -1274,6 +1277,7 @@ pub const Inst = struct {
.repeat_inline,
.panic,
.panic_comptime,
+ .trap,
.check_comptime_control_flow,
=> true,
};
@@ -1549,6 +1553,7 @@ pub const Inst = struct {
.repeat_inline,
.panic,
.panic_comptime,
+ .trap,
.for_len,
.@"try",
.try_ptr,
@@ -1746,6 +1751,7 @@ pub const Inst = struct {
.error_name = .un_node,
.panic = .un_node,
.panic_comptime = .un_node,
+ .trap = .node,
.set_runtime_safety = .un_node,
.sqrt = .un_node,
.sin = .un_node,
@@ -1982,6 +1988,7 @@ pub const Inst = struct {
err_set_cast,
/// `operand` is payload index to `UnNode`.
await_nosuspend,
+ /// Implements `@breakpoint`.
/// `operand` is `src_node: i32`.
breakpoint,
/// Implements the `@select` builtin.
@@ -1995,7 +2002,7 @@ pub const Inst = struct {
int_to_error,
/// Implement builtin `@Type`.
/// `operand` is payload index to `UnNode`.
- /// `small` contains `NameStrategy
+ /// `small` contains `NameStrategy`.
reify,
/// Implements the `@asyncCall` builtin.
/// `operand` is payload index to `AsyncCall`.
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 818b04f890..a42d0539f2 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -737,6 +737,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.bitcast => try self.airBitCast(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
+ .trap => try self.airTrap(),
.breakpoint => try self.airBreakpoint(),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
@@ -4198,10 +4199,18 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ .none, .none, .none });
}
+fn airTrap(self: *Self) !void {
+ _ = try self.addInst(.{
+ .tag = .brk,
+ .data = .{ .imm16 = 0x0001 },
+ });
+ return self.finishAirBookkeeping();
+}
+
fn airBreakpoint(self: *Self) !void {
_ = try self.addInst(.{
.tag = .brk,
- .data = .{ .imm16 = 1 },
+ .data = .{ .imm16 = 0xf000 },
});
return self.finishAirBookkeeping();
}
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index ceabe70438..cecda8fd4a 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -721,6 +721,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.bitcast => try self.airBitCast(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
+ .trap => try self.airTrap(),
.breakpoint => try self.airBreakpoint(),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
@@ -4146,6 +4147,14 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ .none, .none, .none });
}
+fn airTrap(self: *Self) !void {
+ _ = try self.addInst(.{
+ .tag = .undefined_instruction,
+ .data = .{ .nop = {} },
+ });
+ return self.finishAirBookkeeping();
+}
+
fn airBreakpoint(self: *Self) !void {
_ = try self.addInst(.{
.tag = .bkpt,
diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig
index 17540f0968..17415318de 100644
--- a/src/arch/arm/Emit.zig
+++ b/src/arch/arm/Emit.zig
@@ -1,4 +1,4 @@
-//! This file contains the functionality for lowering AArch64 MIR into
+//! This file contains the functionality for lowering AArch32 MIR into
//! machine code
const Emit = @This();
@@ -15,7 +15,7 @@ const Target = std.Target;
const assert = std.debug.assert;
const Instruction = bits.Instruction;
const Register = bits.Register;
-const log = std.log.scoped(.aarch64_emit);
+const log = std.log.scoped(.aarch32_emit);
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const CodeGen = @import("CodeGen.zig");
@@ -100,6 +100,7 @@ pub fn emitMir(
.b => try emit.mirBranch(inst),
+ .undefined_instruction => try emit.mirUndefinedInstruction(),
.bkpt => try emit.mirExceptionGeneration(inst),
.blx => try emit.mirBranchExchange(inst),
@@ -494,6 +495,10 @@ fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
}
}
+fn mirUndefinedInstruction(emit: *Emit) !void {
+ try emit.writeInstruction(Instruction.undefinedInstruction());
+}
+
fn mirExceptionGeneration(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const imm16 = emit.mir.instructions.items(.data)[inst].imm16;
diff --git a/src/arch/arm/Mir.zig b/src/arch/arm/Mir.zig
index 07a8384c2c..736d0574bb 100644
--- a/src/arch/arm/Mir.zig
+++ b/src/arch/arm/Mir.zig
@@ -35,6 +35,8 @@ pub const Inst = struct {
asr,
/// Branch
b,
+ /// Undefined instruction
+ undefined_instruction,
/// Breakpoint
bkpt,
/// Branch with Link and Exchange
diff --git a/src/arch/arm/bits.zig b/src/arch/arm/bits.zig
index 8e76ae9409..185c4ed921 100644
--- a/src/arch/arm/bits.zig
+++ b/src/arch/arm/bits.zig
@@ -307,6 +307,9 @@ pub const Instruction = union(enum) {
fixed: u4 = 0b1111,
cond: u4,
},
+ undefined_instruction: packed struct {
+ imm32: u32 = 0xe7ffdefe,
+ },
breakpoint: packed struct {
imm4: u4,
fixed_1: u4 = 0b0111,
@@ -613,6 +616,7 @@ pub const Instruction = union(enum) {
.branch => |v| @bitCast(u32, v),
.branch_exchange => |v| @bitCast(u32, v),
.supervisor_call => |v| @bitCast(u32, v),
+ .undefined_instruction => |v| v.imm32,
.breakpoint => |v| @intCast(u32, v.imm4) | (@intCast(u32, v.fixed_1) << 4) | (@intCast(u32, v.imm12) << 8) | (@intCast(u32, v.fixed_2_and_cond) << 20),
};
}
@@ -890,6 +894,13 @@ pub const Instruction = union(enum) {
};
}
+ // This instruction has no official mnemonic equivalent so it is public as-is.
+ pub fn undefinedInstruction() Instruction {
+ return Instruction{
+ .undefined_instruction = .{},
+ };
+ }
+
fn breakpoint(imm: u16) Instruction {
return Instruction{
.breakpoint = .{
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index afcf4b0bb7..0b45982fb3 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -550,6 +550,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.bitcast => try self.airBitCast(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
+ .trap => try self.airTrap(),
.breakpoint => try self.airBreakpoint(),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
@@ -1652,6 +1653,14 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, mcv, .{ .none, .none, .none });
}
+fn airTrap(self: *Self) !void {
+ _ = try self.addInst(.{
+ .tag = .unimp,
+ .data = .{ .nop = {} },
+ });
+ return self.finishAirBookkeeping();
+}
+
fn airBreakpoint(self: *Self) !void {
_ = try self.addInst(.{
.tag = .ebreak,
diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig
index 387c735896..3b330cbd3f 100644
--- a/src/arch/riscv64/Emit.zig
+++ b/src/arch/riscv64/Emit.zig
@@ -51,6 +51,7 @@ pub fn emitMir(
.ebreak => try emit.mirSystem(inst),
.ecall => try emit.mirSystem(inst),
+ .unimp => try emit.mirSystem(inst),
.dbg_line => try emit.mirDbgLine(inst),
@@ -153,6 +154,7 @@ fn mirSystem(emit: *Emit, inst: Mir.Inst.Index) !void {
switch (tag) {
.ebreak => try emit.writeInstruction(Instruction.ebreak),
.ecall => try emit.writeInstruction(Instruction.ecall),
+ .unimp => try emit.writeInstruction(Instruction.unimp),
else => unreachable,
}
}
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index 97accb7642..8905b24c3c 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -32,6 +32,7 @@ pub const Inst = struct {
dbg_epilogue_begin,
/// Pseudo-instruction: Update debug line
dbg_line,
+ unimp,
ebreak,
ecall,
jalr,
diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig
index 6b94927df8..7b3ff0bfe9 100644
--- a/src/arch/riscv64/bits.zig
+++ b/src/arch/riscv64/bits.zig
@@ -380,6 +380,7 @@ pub const Instruction = union(enum) {
pub const ecall = iType(0b1110011, 0b000, .zero, .zero, 0x000);
pub const ebreak = iType(0b1110011, 0b000, .zero, .zero, 0x001);
+ pub const unimp = iType(0, 0, .zero, .zero, 0);
};
pub const Register = enum(u6) {
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index c8f77fe702..1b7290ddce 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -566,6 +566,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.bitcast => try self.airBitCast(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
+ .trap => try self.airTrap(),
.breakpoint => try self.airBreakpoint(),
.ret_addr => @panic("TODO try self.airRetAddr(inst)"),
.frame_addr => @panic("TODO try self.airFrameAddress(inst)"),
@@ -1160,6 +1161,21 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, .dead, .{ branch.operand, .none, .none });
}
+fn airTrap(self: *Self) !void {
+ // ta 0x05
+ _ = try self.addInst(.{
+ .tag = .tcc,
+ .data = .{
+ .trap = .{
+ .is_imm = true,
+ .cond = .al,
+ .rs2_or_imm = .{ .imm = 0x05 },
+ },
+ },
+ });
+ return self.finishAirBookkeeping();
+}
+
fn airBreakpoint(self: *Self) !void {
// ta 0x01
_ = try self.addInst(.{
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 2f191fd834..d388bc8fab 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1829,6 +1829,7 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.arg => func.airArg(inst),
.bitcast => func.airBitcast(inst),
.block => func.airBlock(inst),
+ .trap => func.airTrap(inst),
.breakpoint => func.airBreakpoint(inst),
.br => func.airBr(inst),
.bool_to_int => func.airBoolToInt(inst),
@@ -3289,6 +3290,11 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
func.finishAir(inst, result, &.{ty_op.operand});
}
+fn airTrap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ try func.addTag(.@"unreachable");
+ func.finishAir(inst, .none, &.{});
+}
+
fn airBreakpoint(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// unsupported by wasm itfunc. Can be implemented once we support DWARF
// for wasm
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 53d38f520a..70b51e50fd 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -638,6 +638,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.bitcast => try self.airBitCast(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
+ .trap => try self.airTrap(),
.breakpoint => try self.airBreakpoint(),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
@@ -3917,6 +3918,15 @@ fn genVarDbgInfo(
}
}
+fn airTrap(self: *Self) !void {
+ _ = try self.addInst(.{
+ .tag = .ud,
+ .ops = Mir.Inst.Ops.encode(.{}),
+ .data = undefined,
+ });
+ return self.finishAirBookkeeping();
+}
+
fn airBreakpoint(self: *Self) !void {
_ = try self.addInst(.{
.tag = .interrupt,
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index 12c19915c6..e521de4bd4 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -166,6 +166,7 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.@"test" => try emit.mirTest(inst),
+ .ud => try emit.mirUndefinedInstruction(),
.interrupt => try emit.mirInterrupt(inst),
.nop => {}, // just skip it
@@ -234,6 +235,10 @@ fn fixupRelocs(emit: *Emit) InnerError!void {
}
}
+fn mirUndefinedInstruction(emit: *Emit) InnerError!void {
+ return lowerToZoEnc(.ud2, emit.code);
+}
+
fn mirInterrupt(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .interrupt);
@@ -1279,6 +1284,7 @@ const Tag = enum {
push,
pop,
@"test",
+ ud2,
int3,
nop,
imul,
@@ -1571,6 +1577,7 @@ inline fn getOpCode(tag: Tag, enc: Encoding, is_one_byte: bool) OpCode {
.zo => return switch (tag) {
.ret_near => OpCode.init(&.{0xc3}),
.ret_far => OpCode.init(&.{0xcb}),
+ .ud2 => OpCode.init(&.{ 0x0F, 0x0B }),
.int3 => OpCode.init(&.{0xcc}),
.nop => OpCode.init(&.{0x90}),
.syscall => OpCode.init(&.{ 0x0f, 0x05 }),
diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig
index 112d9a5982..ba71f4cddd 100644
--- a/src/arch/x86_64/Mir.zig
+++ b/src/arch/x86_64/Mir.zig
@@ -329,6 +329,9 @@ pub const Inst = struct {
/// TODO handle more cases
@"test",
+ /// Undefined Instruction
+ ud,
+
/// Breakpoint form:
/// 0b00 int3
interrupt,
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index cf428d4bd6..c0585c3a4a 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -2741,6 +2741,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.const_ty => unreachable, // excluded from function bodies
.arg => try airArg(f, inst),
+ .trap => try airTrap(f.object.writer()),
.breakpoint => try airBreakpoint(f.object.writer()),
.ret_addr => try airRetAddr(f, inst),
.frame_addr => try airFrameAddress(f, inst),
@@ -4428,6 +4429,11 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
}
+fn airTrap(writer: anytype) !CValue {
+ try writer.writeAll("zig_trap();\n");
+ return .none;
+}
+
fn airBreakpoint(writer: anytype) !CValue {
try writer.writeAll("zig_breakpoint();\n");
return .none;
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 6f240b88f5..1f8473ac32 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -4590,6 +4590,7 @@ pub const FuncGen = struct {
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.switch_br => try self.airSwitchBr(inst),
+ .trap => try self.airTrap(inst),
.breakpoint => try self.airBreakpoint(inst),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
@@ -8256,6 +8257,13 @@ pub const FuncGen = struct {
return fg.load(ptr, ptr_ty);
}
+ fn airTrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ _ = inst;
+ const llvm_fn = self.getIntrinsic("llvm.trap", &.{});
+ _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, undefined, 0, .Cold, .Auto, "");
+ return null;
+ }
+
fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
_ = inst;
const llvm_fn = self.getIntrinsic("llvm.debugtrap", &.{});
diff --git a/src/print_air.zig b/src/print_air.zig
index 447af5a9c7..f5c06daae2 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -194,6 +194,7 @@ const Writer = struct {
.c_va_end,
=> try w.writeUnOp(s, inst),
+ .trap,
.breakpoint,
.unreach,
.ret_addr,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 5ec9fbcdfc..5e7d0d45de 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -410,6 +410,7 @@ const Writer = struct {
.alloc_inferred_comptime_mut,
.ret_ptr,
.ret_type,
+ .trap,
=> try self.writeNode(stream, inst),
.error_value,
From 4eb3f50fcf6fcfb6b8013571be00b9eeeb909833 Mon Sep 17 00:00:00 2001
From: r00ster91
Date: Fri, 3 Mar 2023 19:59:18 +0100
Subject: [PATCH 014/294] Wasm @breakpoint: emit unreachable
This should improve the developer debugging experience.
---
src/arch/wasm/CodeGen.zig | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index d388bc8fab..dbabb436c8 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -3298,6 +3298,7 @@ fn airTrap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airBreakpoint(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// unsupported by wasm itfunc. Can be implemented once we support DWARF
// for wasm
+ try func.addTag(.@"unreachable");
func.finishAir(inst, .none, &.{});
}
From 2cf27c571880a607401dca181f8103e855d0c46d Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sat, 4 Mar 2023 02:11:04 -0500
Subject: [PATCH 015/294] llvm: fix incorrectly annotated DIType
Closes #14715
Closes #14783
---
src/codegen/llvm.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 6f240b88f5..937c1cf120 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1773,7 +1773,7 @@ pub const Object = struct {
if (ty.optionalReprIsPayload()) {
const ptr_di_ty = try o.lowerDebugType(child_ty, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .mod = o.module });
+ try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module });
return ptr_di_ty;
}
From 010596c93054543c3c218e7d4b045d5e46384dab Mon Sep 17 00:00:00 2001
From: r00ster91
Date: Sat, 4 Mar 2023 12:51:16 +0100
Subject: [PATCH 016/294] AstGen: compile-error on primitive value export
Fixes #14778
Co-authored-by: Veikka Tuominen
---
src/AstGen.zig | 5 +++-
.../exporting_primitive_values.zig | 29 +++++++++++++++++++
2 files changed, 33 insertions(+), 1 deletion(-)
create mode 100644 test/cases/compile_errors/exporting_primitive_values.zig
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 41a8ccadb2..8e3f11df76 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -7976,6 +7976,9 @@ fn builtinCall(
switch (node_tags[params[0]]) {
.identifier => {
const ident_token = main_tokens[params[0]];
+ if (isPrimitive(tree.tokenSlice(ident_token))) {
+ return astgen.failTok(ident_token, "unable to export primitive value", .{});
+ }
decl_name = try astgen.identAsString(ident_token);
var s = scope;
@@ -8988,7 +8991,7 @@ const primitive_instrs = std.ComptimeStringMap(Zir.Inst.Ref, .{
});
comptime {
- // These checks ensure that std.zig.primitives stays in synce with the primitive->Zir map.
+ // These checks ensure that std.zig.primitives stays in sync with the primitive->Zir map.
const primitives = std.zig.primitives;
for (primitive_instrs.kvs) |kv| {
if (!primitives.isPrimitive(kv.key)) {
diff --git a/test/cases/compile_errors/exporting_primitive_values.zig b/test/cases/compile_errors/exporting_primitive_values.zig
new file mode 100644
index 0000000000..bf3c38a553
--- /dev/null
+++ b/test/cases/compile_errors/exporting_primitive_values.zig
@@ -0,0 +1,29 @@
+pub export fn entry1() void {
+ @export(u100, .{ .name = "a" });
+}
+pub export fn entry3() void {
+ @export(undefined, .{ .name = "b" });
+}
+pub export fn entry4() void {
+ @export(null, .{ .name = "c" });
+}
+pub export fn entry5() void {
+ @export(false, .{ .name = "d" });
+}
+pub export fn entry6() void {
+ @export(u8, .{ .name = "e" });
+}
+pub export fn entry7() void {
+ @export(u65535, .{ .name = "f" });
+}
+
+// error
+// backend=llvm
+// target=native
+//
+// :2:13: error: unable to export primitive value
+// :5:13: error: unable to export primitive value
+// :8:13: error: unable to export primitive value
+// :11:13: error: unable to export primitive value
+// :14:13: error: unable to export primitive value
+// :17:13: error: unable to export primitive value
From 16302578d5a0ca226c7db76bc8e39574dea1dc1d Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 4 Mar 2023 14:04:58 -0700
Subject: [PATCH 017/294] add behavior test case for previous commit
---
test/behavior/slice.zig | 15 +++++++++++++++
1 file changed, 15 insertions(+)
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index 435e1887bb..ed5e2a721d 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -747,3 +747,18 @@ test "slice decays to many pointer" {
const p: [*:0]const u8 = buf[0..7 :0];
try expectEqualStrings(buf[0..7], std.mem.span(p));
}
+
+test "write through pointer to optional slice arg" {
+ const S = struct {
+ fn bar(foo: *?[]const u8) !void {
+ foo.* = try baz();
+ }
+
+ fn baz() ![]const u8 {
+ return "ok";
+ }
+ };
+ var foo: ?[]const u8 = null;
+ try S.bar(&foo);
+ try expectEqualStrings(foo.?, "ok");
+}
From c9d990d79083f117564837f762c3e225d7fbc5cf Mon Sep 17 00:00:00 2001
From: tranquillity-codes
Date: Sat, 4 Mar 2023 10:10:07 +0100
Subject: [PATCH 018/294] fix doc Build Mode
---
doc/langref.html.in | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index e016ef13f8..71d99b3aae 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -9565,9 +9565,10 @@ pub fn build(b: *std.Build) void {
This causes these options to be available:
- - -Drelease-safe=[bool]
- Optimizations on and safety on
- - -Drelease-fast=[bool]
- Optimizations on and safety off
- - -Drelease-small=[bool]
- Size optimizations on and safety off
+ - -Doptimize=Debug
- Optimizations off and safety on (default)
+ - -Doptimize=ReleaseSafe
- Optimizations on and safety on
+ - -Doptimize=ReleaseFast
- Optimizations on and safety off
+ - -Doptimize=ReleaseSmall
- Size optimizations on and safety off
{#header_open|Debug#}
{#shell_samp#}$ zig build-exe example.zig{#end_shell_samp#}
From 874ae81f1b2ae76cea6f5c79203f4baa68263163 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Fri, 3 Mar 2023 00:18:34 -0500
Subject: [PATCH 019/294] CBE: implement big integer literals
---
lib/std/math/big/int.zig | 1 +
lib/zig.h | 12 +-
src/codegen/c.zig | 361 +++++++++++++++++++-------------------
src/codegen/c/type.zig | 348 ++++++++++++++++++++++++++++++++----
test/behavior/bitcast.zig | 1 -
5 files changed, 502 insertions(+), 221 deletions(-)
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index b7725b9ae9..4e4e7c489e 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -1674,6 +1674,7 @@ pub const Mutable = struct {
/// If a is positive, this passes through to truncate.
/// If a is negative, then r is set to positive with the bit pattern ~(a - 1).
+ /// r may alias a.
///
/// Asserts `r` has enough storage to store the result.
/// The upper bound is `calcTwosCompLimbCount(a.len)`.
diff --git a/lib/zig.h b/lib/zig.h
index f3ad7db8a1..7353ea935d 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -1360,8 +1360,8 @@ typedef signed __int128 zig_i128;
#define zig_make_u128(hi, lo) ((zig_u128)(hi)<<64|(lo))
#define zig_make_i128(hi, lo) ((zig_i128)zig_make_u128(hi, lo))
-#define zig_make_constant_u128(hi, lo) zig_make_u128(hi, lo)
-#define zig_make_constant_i128(hi, lo) zig_make_i128(hi, lo)
+#define zig_init_u128(hi, lo) zig_make_u128(hi, lo)
+#define zig_init_i128(hi, lo) zig_make_i128(hi, lo)
#define zig_hi_u128(val) ((uint64_t)((val) >> 64))
#define zig_lo_u128(val) ((uint64_t)((val) >> 0))
#define zig_hi_i128(val) (( int64_t)((val) >> 64))
@@ -1391,11 +1391,11 @@ typedef struct { zig_align(16) int64_t hi; uint64_t lo; } zig_i128;
#define zig_make_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) })
#if _MSC_VER /* MSVC doesn't allow struct literals in constant expressions */
-#define zig_make_constant_u128(hi, lo) { .h##i = (hi), .l##o = (lo) }
-#define zig_make_constant_i128(hi, lo) { .h##i = (hi), .l##o = (lo) }
+#define zig_init_u128(hi, lo) { .h##i = (hi), .l##o = (lo) }
+#define zig_init_i128(hi, lo) { .h##i = (hi), .l##o = (lo) }
#else /* But non-MSVC doesn't like the unprotected commas */
-#define zig_make_constant_u128(hi, lo) zig_make_u128(hi, lo)
-#define zig_make_constant_i128(hi, lo) zig_make_i128(hi, lo)
+#define zig_init_u128(hi, lo) zig_make_u128(hi, lo)
+#define zig_init_i128(hi, lo) zig_make_i128(hi, lo)
#endif
#define zig_hi_u128(val) ((val).hi)
#define zig_lo_u128(val) ((val).lo)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index c0585c3a4a..addd3c8332 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -449,7 +449,7 @@ pub const Function = struct {
}
fn fmtIntLiteral(f: *Function, ty: Type, val: Value) !std.fmt.Formatter(formatIntLiteral) {
- return f.object.dg.fmtIntLiteral(ty, val);
+ return f.object.dg.fmtIntLiteral(ty, val, .Other);
}
fn getLazyFnName(f: *Function, key: LazyFnKey, data: LazyFnValue.Data) ![]const u8 {
@@ -574,9 +574,9 @@ pub const DeclGen = struct {
const len_val = Value.initPayload(&len_pl.base);
if (location == .StaticInitializer) {
- return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val)});
+ return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)});
} else {
- return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val)});
+ return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)});
}
}
@@ -606,7 +606,7 @@ pub const DeclGen = struct {
try writer.writeByte(')');
}
switch (ptr_val.tag()) {
- .int_u64, .one => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val)}),
+ .int_u64, .one => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val, .Other)}),
.decl_ref_mut, .decl_ref, .variable => {
const decl_index = switch (ptr_val.tag()) {
.decl_ref => ptr_val.castTag(.decl_ref).?.data,
@@ -670,7 +670,9 @@ pub const DeclGen = struct {
container_ptr_ty,
location,
);
- try writer.print(" + {})", .{try dg.fmtIntLiteral(Type.usize, byte_offset_val)});
+ try writer.print(" + {})", .{
+ try dg.fmtIntLiteral(Type.usize, byte_offset_val, .Other),
+ });
},
.end => {
try writer.writeAll("((");
@@ -680,7 +682,9 @@ pub const DeclGen = struct {
container_ptr_ty,
location,
);
- try writer.print(") + {})", .{try dg.fmtIntLiteral(Type.usize, Value.one)});
+ try writer.print(") + {})", .{
+ try dg.fmtIntLiteral(Type.usize, Value.one, .Other),
+ });
},
}
},
@@ -746,7 +750,7 @@ pub const DeclGen = struct {
return writer.writeAll("false");
}
},
- .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteralLoc(ty, val, location)}),
+ .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, val, location)}),
.Float => {
const bits = ty.floatBits(target);
var int_pl = Type.Payload.Bits{ .base = .{ .tag = .int_signed }, .data = bits };
@@ -780,11 +784,11 @@ pub const DeclGen = struct {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = ty.slicePtrFieldType(&buf);
try dg.renderType(writer, ptr_ty);
- return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val)});
+ return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
} else {
try writer.writeAll("((");
try dg.renderType(writer, ty);
- return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val)});
+ return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
},
.Optional => {
var opt_buf: Type.Payload.ElemType = undefined;
@@ -831,7 +835,7 @@ pub const DeclGen = struct {
return writer.writeByte('}');
},
- .Packed => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, Value.undef)}),
+ .Packed => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, Value.undef, .Other)}),
},
.Union => {
if (!location.isInitializer()) {
@@ -854,7 +858,7 @@ pub const DeclGen = struct {
if (!field.ty.hasRuntimeBits()) continue;
try dg.renderValue(writer, field.ty, val, initializer_type);
break;
- } else try writer.print("{x}", .{try dg.fmtIntLiteral(Type.u8, Value.undef)});
+ } else try writer.print("{x}", .{try dg.fmtIntLiteral(Type.u8, Value.undef, .Other)});
if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}');
return writer.writeByte('}');
},
@@ -868,7 +872,7 @@ pub const DeclGen = struct {
try writer.writeAll("{ .payload = ");
try dg.renderValue(writer, ty.errorUnionPayload(), val, initializer_type);
return writer.print(", .error = {x} }}", .{
- try dg.fmtIntLiteral(ty.errorUnionSet(), val),
+ try dg.fmtIntLiteral(ty.errorUnionSet(), val, .Other),
});
},
.Array, .Vector => {
@@ -927,7 +931,7 @@ pub const DeclGen = struct {
.decl_ref_mut,
.decl_ref,
=> try dg.renderParentPtr(writer, val, ty, location),
- else => try writer.print("{}", .{try dg.fmtIntLiteralLoc(ty, val, location)}),
+ else => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}),
},
.Float => {
const bits = ty.floatBits(target);
@@ -1020,7 +1024,7 @@ pub const DeclGen = struct {
try writer.writeAll(", ");
empty = false;
}
- try writer.print("{x}", .{try dg.fmtIntLiteralLoc(int_ty, int_val, location)});
+ try writer.print("{x}", .{try dg.fmtIntLiteral(int_ty, int_val, location)});
if (!empty) try writer.writeByte(')');
return;
},
@@ -1069,7 +1073,7 @@ pub const DeclGen = struct {
.int_u64, .one => {
try writer.writeAll("((");
try dg.renderType(writer, ty);
- return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val)});
+ return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
},
.field_ptr,
.elem_ptr,
@@ -1889,11 +1893,11 @@ pub const DeclGen = struct {
const int_info = ty.intInfo(target);
if (int_info.signedness == .signed) {
const min_val = try ty.minInt(stack.get(), target);
- try writer.print(", {x}", .{try dg.fmtIntLiteral(ty, min_val)});
+ try writer.print(", {x}", .{try dg.fmtIntLiteral(ty, min_val, .Other)});
}
const max_val = try ty.maxInt(stack.get(), target);
- try writer.print(", {x}", .{try dg.fmtIntLiteral(ty, max_val)});
+ try writer.print(", {x}", .{try dg.fmtIntLiteral(ty, max_val, .Other)});
},
.Bits => {
var bits_pl = Value.Payload.U64{
@@ -1901,7 +1905,7 @@ pub const DeclGen = struct {
.data = ty.bitSize(target),
};
const bits_val = Value.initPayload(&bits_pl.base);
- try writer.print(", {}", .{try dg.fmtIntLiteral(Type.u8, bits_val)});
+ try writer.print(", {}", .{try dg.fmtIntLiteral(Type.u8, bits_val, .Other)});
},
}
}
@@ -1910,30 +1914,21 @@ pub const DeclGen = struct {
dg: *DeclGen,
ty: Type,
val: Value,
+ loc: ValueRenderLocation,
) !std.fmt.Formatter(formatIntLiteral) {
- const int_info = ty.intInfo(dg.module.getTarget());
- const c_bits = toCIntBits(int_info.bits);
- if (c_bits == null or c_bits.? > 128)
- return dg.fail("TODO implement integer constants larger than 128 bits", .{});
+ const kind: CType.Kind = switch (loc) {
+ .FunctionArgument => .parameter,
+ .Initializer, .Other => .complete,
+ .StaticInitializer => .global,
+ };
return std.fmt.Formatter(formatIntLiteral){ .data = .{
- .ty = ty,
+ .dg = dg,
+ .int_info = ty.intInfo(dg.module.getTarget()),
+ .kind = kind,
+ .cty = try dg.typeToCType(ty, kind),
.val = val,
- .mod = dg.module,
} };
}
-
- fn fmtIntLiteralLoc(
- dg: *DeclGen,
- ty: Type,
- val: Value,
- location: ValueRenderLocation, // TODO: Instead add this as optional arg to fmtIntLiteral
- ) !std.fmt.Formatter(formatIntLiteral) {
- const int_info = ty.intInfo(dg.module.getTarget());
- const c_bits = toCIntBits(int_info.bits);
- if (c_bits == null or c_bits.? > 128)
- return dg.fail("TODO implement integer constants larger than 128 bits", .{});
- return std.fmt.Formatter(formatIntLiteral){ .data = .{ .ty = ty, .val = val, .mod = dg.module, .location = location } };
- }
};
const CTypeFix = enum { prefix, suffix };
@@ -2450,7 +2445,7 @@ pub fn genErrDecls(o: *Object) !void {
const len_val = Value.initPayload(&len_pl.base);
try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{
- fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val),
+ fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val, .Other),
});
}
try writer.writeAll("};\n");
@@ -2501,7 +2496,10 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
var int_pl: Value.Payload.U64 = undefined;
const int_val = tag_val.enumToInt(enum_ty, &int_pl);
- var name_ty_pl = Type.Payload.Len{ .base = .{ .tag = .array_u8_sentinel_0 }, .data = name.len };
+ var name_ty_pl = Type.Payload.Len{
+ .base = .{ .tag = .array_u8_sentinel_0 },
+ .data = name.len,
+ };
const name_ty = Type.initPayload(&name_ty_pl.base);
var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name };
@@ -2510,14 +2508,16 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len };
const len_val = Value.initPayload(&len_pl.base);
- try w.print(" case {}: {{\n static ", .{try o.dg.fmtIntLiteral(enum_ty, int_val)});
+ try w.print(" case {}: {{\n static ", .{
+ try o.dg.fmtIntLiteral(enum_ty, int_val, .Other),
+ });
try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, 0, .complete);
try w.writeAll(" = ");
try o.dg.renderValue(w, name_ty, name_val, .Initializer);
try w.writeAll(";\n return (");
try o.dg.renderType(w, name_slice_ty);
try w.print("){{{}, {}}};\n", .{
- fmtIdent("name"), try o.dg.fmtIntLiteral(Type.usize, len_val),
+ fmtIdent("name"), try o.dg.fmtIntLiteral(Type.usize, len_val, .Other),
});
try w.writeAll(" }\n");
@@ -2535,7 +2535,12 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
const fwd_decl_writer = o.dg.fwd_decl.writer();
try fwd_decl_writer.print("static zig_{s} ", .{@tagName(key)});
- try o.dg.renderFunctionSignature(fwd_decl_writer, fn_decl_index, .forward, .{ .string = fn_name });
+ try o.dg.renderFunctionSignature(
+ fwd_decl_writer,
+ fn_decl_index,
+ .forward,
+ .{ .string = fn_name },
+ );
try fwd_decl_writer.writeAll(";\n");
try w.print("static zig_{s} ", .{@tagName(key)});
@@ -7177,30 +7182,33 @@ fn undefPattern(comptime IntType: type) IntType {
return @bitCast(IntType, @as(UnsignedType, (1 << (int_info.bits | 1)) / 3));
}
-const FormatIntLiteralContext = struct { ty: Type, val: Value, mod: *Module, location: ?ValueRenderLocation = null };
+const FormatIntLiteralContext = struct {
+ dg: *DeclGen,
+ int_info: std.builtin.Type.Int,
+ kind: CType.Kind,
+ cty: CType,
+ val: Value,
+};
fn formatIntLiteral(
data: FormatIntLiteralContext,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
- const target = data.mod.getTarget();
- const int_info = data.ty.intInfo(target);
+ const target = data.dg.module.getTarget();
const ExpectedContents = struct {
const base = 10;
- const limbs_count_128 = BigInt.calcTwosCompLimbCount(128);
- const expected_needed_limbs_count = BigInt.calcToStringLimbsBufferLen(limbs_count_128, base);
- const worst_case_int = BigInt.Const{
- .limbs = &([1]BigIntLimb{std.math.maxInt(BigIntLimb)} ** expected_needed_limbs_count),
- .positive = false,
- };
+ const bits = 128;
+ const limbs_count = BigInt.calcTwosCompLimbCount(bits);
- undef_limbs: [limbs_count_128]BigIntLimb,
- wrap_limbs: [limbs_count_128]BigIntLimb,
+ undef_limbs: [limbs_count]BigIntLimb,
+ wrap_limbs: [limbs_count]BigIntLimb,
+ to_string_buf: [bits]u8,
+ to_string_limbs: [BigInt.calcToStringLimbsBufferLen(limbs_count, base)]BigIntLimb,
};
var stack align(@alignOf(ExpectedContents)) =
- std.heap.stackFallback(@sizeOf(ExpectedContents), data.mod.gpa);
+ std.heap.stackFallback(@sizeOf(ExpectedContents), data.dg.gpa);
const allocator = stack.get();
var undef_limbs: []BigIntLimb = &.{};
@@ -7208,7 +7216,7 @@ fn formatIntLiteral(
var int_buf: Value.BigIntSpace = undefined;
const int = if (data.val.isUndefDeep()) blk: {
- undef_limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(int_info.bits));
+ undef_limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(data.int_info.bits));
std.mem.set(BigIntLimb, undef_limbs, undefPattern(BigIntLimb));
var undef_int = BigInt.Mutable{
@@ -7216,163 +7224,150 @@ fn formatIntLiteral(
.len = undef_limbs.len,
.positive = true,
};
- undef_int.truncate(undef_int.toConst(), int_info.signedness, int_info.bits);
+ undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits);
break :blk undef_int.toConst();
} else data.val.toBigInt(&int_buf, target);
- assert(int.fitsInTwosComp(int_info.signedness, int_info.bits));
+ assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));
- const c_bits = toCIntBits(int_info.bits) orelse unreachable;
+ const c_bits = @intCast(usize, data.cty.byteSize(data.dg.ctypes.set, target) * 8);
var one_limbs: [BigInt.calcLimbLen(1)]BigIntLimb = undefined;
const one = BigInt.Mutable.init(&one_limbs, 1).toConst();
- const wrap_limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(c_bits));
- defer allocator.free(wrap_limbs);
- var wrap = BigInt.Mutable{ .limbs = wrap_limbs, .len = undefined, .positive = undefined };
- if (wrap.addWrap(int, one, int_info.signedness, c_bits) or
- int_info.signedness == .signed and wrap.subWrap(int, one, int_info.signedness, c_bits))
- {
- const abbrev = switch (data.ty.tag()) {
- .c_short, .c_ushort => "SHRT",
- .c_int, .c_uint => "INT",
- .c_long, .c_ulong => "LONG",
- .c_longlong, .c_ulonglong => "LLONG",
- .isize, .usize => "INTPTR",
- else => return writer.print("zig_{s}Int_{c}{d}", .{
- if (int.positive) "max" else "min", signAbbrev(int_info.signedness), c_bits,
+ var wrap = BigInt.Mutable{
+ .limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(c_bits)),
+ .len = undefined,
+ .positive = undefined,
+ };
+ defer allocator.free(wrap.limbs);
+ if (wrap.addWrap(int, one, data.int_info.signedness, c_bits) or
+ data.int_info.signedness == .signed and wrap.subWrap(int, one, data.int_info.signedness, c_bits))
+ return writer.print("{s}_{s}", .{
+ data.cty.getStandardDefineAbbrev() orelse return writer.print("zig_{s}Int_{c}{d}", .{
+ if (int.positive) "max" else "min", signAbbrev(data.int_info.signedness), c_bits,
}),
- };
- if (int_info.signedness == .unsigned) try writer.writeByte('U');
- return writer.print("{s}_{s}", .{ abbrev, if (int.positive) "MAX" else "MIN" });
- }
+ if (int.positive) "MAX" else "MIN",
+ });
- var use_twos_comp = false;
- if (!int.positive) {
- if (c_bits > 64) {
- // TODO: Can this be done for decimal literals as well?
- if (fmt.len == 1 and fmt[0] != 'd') {
- use_twos_comp = true;
- } else {
- // TODO: Use fmtIntLiteral for 0?
- try writer.print("zig_sub_{c}{d}(zig_make_{c}{d}(0, 0), ", .{ signAbbrev(int_info.signedness), c_bits, signAbbrev(int_info.signedness), c_bits });
- }
- } else {
- try writer.writeByte('-');
- }
- }
-
- switch (data.ty.tag()) {
- .c_short, .c_ushort, .c_int, .c_uint, .c_long, .c_ulong, .c_longlong, .c_ulonglong => {},
- else => {
- if (int_info.bits <= 64) {
- try writer.print("{s}INT{d}_C(", .{ switch (int_info.signedness) {
- .signed => "",
- .unsigned => "U",
- }, c_bits });
- } else if (data.location != null and data.location.? == .StaticInitializer) {
- // MSVC treats casting the struct initializer as not constant (C2099), so an alternate form is used in global initializers
- try writer.print("zig_make_constant_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits });
- } else {
- try writer.print("zig_make_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits });
- }
+ const c_limb_info: struct {
+ cty: CType,
+ count: usize,
+ endian: std.builtin.Endian,
+ homogeneous: bool,
+ } = switch (data.cty.tag()) {
+ else => .{
+ .cty = CType.initTag(.void),
+ .count = 1,
+ .endian = .Little,
+ .homogeneous = true,
},
- }
+ .zig_u128, .zig_i128 => .{
+ .cty = CType.initTag(.uint64_t),
+ .count = 2,
+ .endian = .Big,
+ .homogeneous = false,
+ },
+ .array => info: {
+ const array_data = data.cty.castTag(.array).?.data;
+ break :info .{
+ .cty = data.dg.indexToCType(array_data.elem_type),
+ .count = @intCast(usize, array_data.len),
+ .endian = target.cpu.arch.endian(),
+ .homogeneous = true,
+ };
+ },
+ };
+ if (c_limb_info.count == 1) {
+ if (!int.positive) try writer.writeByte('-');
+ try data.cty.renderLiteralPrefix(writer, data.kind);
- const limbs_count_64 = @divExact(64, @bitSizeOf(BigIntLimb));
- if (c_bits <= 64) {
- var base: u8 = undefined;
- var case: std.fmt.Case = undefined;
- switch (fmt.len) {
- 0 => base = 10,
+ const style: struct { base: u8, case: std.fmt.Case = undefined } = switch (fmt.len) {
+ 0 => .{ .base = 10 },
1 => switch (fmt[0]) {
- 'b' => {
- base = 2;
+ 'b' => style: {
try writer.writeAll("0b");
+ break :style .{ .base = 2 };
},
- 'o' => {
- base = 8;
+ 'o' => style: {
try writer.writeByte('0');
+ break :style .{ .base = 8 };
},
- 'd' => base = 10,
- 'x' => {
- base = 16;
- case = .lower;
- try writer.writeAll("0x");
- },
- 'X' => {
- base = 16;
- case = .upper;
+ 'd' => .{ .base = 10 },
+ 'x', 'X' => |base| style: {
try writer.writeAll("0x");
+ break :style .{ .base = 16, .case = switch (base) {
+ 'x' => .lower,
+ 'X' => .upper,
+ else => unreachable,
+ } };
},
else => @compileError("Invalid fmt: " ++ fmt),
},
else => @compileError("Invalid fmt: " ++ fmt),
- }
+ };
- var str: [64]u8 = undefined;
- var limbs_buf: [BigInt.calcToStringLimbsBufferLen(limbs_count_64, 10)]BigIntLimb = undefined;
- try writer.writeAll(str[0..int.abs().toString(&str, base, case, &limbs_buf)]);
+ const string = try int.abs().toStringAlloc(allocator, style.base, style.case);
+ defer allocator.free(string);
+ try writer.writeAll(string);
} else {
- assert(c_bits == 128);
- const split = std.math.min(int.limbs.len, limbs_count_64);
- var twos_comp_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined;
+ try data.cty.renderLiteralPrefix(writer, data.kind);
+ wrap.convertToTwosComplement(int, .unsigned, data.int_info.bits);
+ std.mem.set(BigIntLimb, wrap.limbs[wrap.len..], 0);
+ wrap.len = wrap.limbs.len;
+ const limbs_per_c_limb = @divExact(wrap.len, c_limb_info.count);
- // Adding a negation in the C code before the doesn't work in all cases:
- // - struct versions would require an extra zig_sub_ call to negate, which wouldn't work in constant expressions
- // - negating the f80 int representation (i128) doesn't make sense
- // Instead we write out the literal as a negative number in twos complement
- var limbs = int.limbs;
+ var c_limb_int_info = std.builtin.Type.Int{
+ .signedness = undefined,
+ .bits = @intCast(u16, @divExact(c_bits, c_limb_info.count)),
+ };
+ var c_limb_cty: CType = undefined;
- if (use_twos_comp) {
- var twos_comp = BigInt.Mutable{
- .limbs = &twos_comp_limbs,
- .positive = undefined,
+ var limb_offset: usize = 0;
+ const most_significant_limb_i = wrap.len - limbs_per_c_limb;
+ while (limb_offset < wrap.len) : (limb_offset += limbs_per_c_limb) {
+ const limb_i = switch (c_limb_info.endian) {
+ .Little => limb_offset,
+ .Big => most_significant_limb_i - limb_offset,
+ };
+ var c_limb_mut = BigInt.Mutable{
+ .limbs = wrap.limbs[limb_i..][0..limbs_per_c_limb],
.len = undefined,
+ .positive = true,
+ };
+ c_limb_mut.normalize(limbs_per_c_limb);
+
+ if (limb_i == most_significant_limb_i and
+ !c_limb_info.homogeneous and data.int_info.signedness == .signed)
+ {
+ // most significant limb is actually signed
+ c_limb_int_info.signedness = .signed;
+ c_limb_cty = c_limb_info.cty.toSigned();
+
+ c_limb_mut.positive = wrap.positive;
+ c_limb_mut.convertToTwosComplement(
+ c_limb_mut.toConst(),
+ .signed,
+ data.int_info.bits - limb_i * @bitSizeOf(BigIntLimb),
+ );
+ } else {
+ c_limb_int_info.signedness = .unsigned;
+ c_limb_cty = c_limb_info.cty;
+ }
+ var c_limb_val_pl = Value.Payload.BigInt{
+ .base = .{ .tag = if (c_limb_mut.positive) .int_big_positive else .int_big_negative },
+ .data = c_limb_mut.limbs[0..c_limb_mut.len],
};
- twos_comp.convertToTwosComplement(int, .signed, int_info.bits);
- limbs = twos_comp.limbs;
+ if (limb_offset > 0) try writer.writeAll(", ");
+ try formatIntLiteral(.{
+ .dg = data.dg,
+ .int_info = c_limb_int_info,
+ .kind = data.kind,
+ .cty = c_limb_cty,
+ .val = Value.initPayload(&c_limb_val_pl.base),
+ }, fmt, options, writer);
}
-
- var upper_pl = Value.Payload.BigInt{
- .base = .{ .tag = .int_big_positive },
- .data = limbs[split..],
- };
- const upper_val = Value.initPayload(&upper_pl.base);
- try formatIntLiteral(.{
- .ty = switch (int_info.signedness) {
- .unsigned => Type.u64,
- .signed => if (use_twos_comp) Type.u64 else Type.i64,
- },
- .val = upper_val,
- .mod = data.mod,
- }, fmt, options, writer);
-
- try writer.writeAll(", ");
-
- var lower_pl = Value.Payload.BigInt{
- .base = .{ .tag = .int_big_positive },
- .data = limbs[0..split],
- };
- const lower_val = Value.initPayload(&lower_pl.base);
- try formatIntLiteral(.{
- .ty = Type.u64,
- .val = lower_val,
- .mod = data.mod,
- }, fmt, options, writer);
-
- if (!int.positive and c_bits > 64 and !use_twos_comp) try writer.writeByte(')');
- return writer.writeByte(')');
- }
-
- switch (data.ty.tag()) {
- .c_short, .c_ushort, .c_int => {},
- .c_uint => try writer.writeAll("u"),
- .c_long => try writer.writeAll("l"),
- .c_ulong => try writer.writeAll("ul"),
- .c_longlong => try writer.writeAll("ll"),
- .c_ulonglong => try writer.writeAll("ull"),
- else => try writer.writeByte(')'),
}
+ try data.cty.renderLiteralSuffix(writer);
}
fn isByRef(ty: Type) bool {
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index 1f1a220cd2..a1b11df315 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -496,6 +496,296 @@ pub const CType = extern union {
}
};
+ pub fn toSigned(self: CType) CType {
+ return CType.initTag(switch (self.tag()) {
+ .char, .@"signed char", .@"unsigned char" => .@"signed char",
+ .short, .@"unsigned short" => .short,
+ .int, .@"unsigned int" => .int,
+ .long, .@"unsigned long" => .long,
+ .@"long long", .@"unsigned long long" => .@"long long",
+ .size_t, .ptrdiff_t => .ptrdiff_t,
+ .uint8_t, .int8_t => .int8_t,
+ .uint16_t, .int16_t => .int16_t,
+ .uint32_t, .int32_t => .int32_t,
+ .uint64_t, .int64_t => .int64_t,
+ .uintptr_t, .intptr_t => .intptr_t,
+ .zig_u128, .zig_i128 => .zig_i128,
+ .float,
+ .double,
+ .@"long double",
+ .zig_f16,
+ .zig_f32,
+ .zig_f80,
+ .zig_f128,
+ .zig_c_longdouble,
+ => |t| t,
+ else => unreachable,
+ });
+ }
+
+ pub fn toUnsigned(self: CType) CType {
+ return CType.initTag(switch (self.tag()) {
+ .char, .@"signed char", .@"unsigned char" => .@"unsigned char",
+ .short, .@"unsigned short" => .@"unsigned short",
+ .int, .@"unsigned int" => .@"unsigned int",
+ .long, .@"unsigned long" => .@"unsigned long",
+ .@"long long", .@"unsigned long long" => .@"unsigned long long",
+ .size_t, .ptrdiff_t => .size_t,
+ .uint8_t, .int8_t => .uint8_t,
+ .uint16_t, .int16_t => .uint16_t,
+ .uint32_t, .int32_t => .uint32_t,
+ .uint64_t, .int64_t => .uint64_t,
+ .uintptr_t, .intptr_t => .uintptr_t,
+ .zig_u128, .zig_i128 => .zig_u128,
+ else => unreachable,
+ });
+ }
+
+ pub fn getStandardDefineAbbrev(self: CType) ?[]const u8 {
+ return switch (self.tag()) {
+ .char => "CHAR",
+ .@"signed char" => "SCHAR",
+ .short => "SHRT",
+ .int => "INT",
+ .long => "LONG",
+ .@"long long" => "LLONG",
+ .@"unsigned char" => "UCHAR",
+ .@"unsigned short" => "USHRT",
+ .@"unsigned int" => "UINT",
+ .@"unsigned long" => "ULONG",
+ .@"unsigned long long" => "ULLONG",
+ .float => "FLT",
+ .double => "DBL",
+ .@"long double" => "LDBL",
+ .size_t => "SIZE",
+ .ptrdiff_t => "PTRDIFF",
+ .uint8_t => "UINT8",
+ .int8_t => "INT8",
+ .uint16_t => "UINT16",
+ .int16_t => "INT16",
+ .uint32_t => "UINT32",
+ .int32_t => "INT32",
+ .uint64_t => "UINT64",
+ .int64_t => "INT64",
+ .uintptr_t => "UINTPTR",
+ .intptr_t => "INTPTR",
+ else => null,
+ };
+ }
+
+ pub fn renderLiteralPrefix(self: CType, writer: anytype, kind: Kind) @TypeOf(writer).Error!void {
+ switch (self.tag()) {
+ .void => unreachable,
+ ._Bool,
+ .char,
+ .@"signed char",
+ .short,
+ .@"unsigned short",
+ .bool,
+ .size_t,
+ .ptrdiff_t,
+ .uintptr_t,
+ .intptr_t,
+ => |t| switch (kind) {
+ else => try writer.print("({s})", .{@tagName(t)}),
+ .global => {},
+ },
+ .int,
+ .long,
+ .@"long long",
+ .@"unsigned char",
+ .@"unsigned int",
+ .@"unsigned long",
+ .@"unsigned long long",
+ .float,
+ .double,
+ .@"long double",
+ => {},
+ .uint8_t,
+ .int8_t,
+ .uint16_t,
+ .int16_t,
+ .uint32_t,
+ .int32_t,
+ .uint64_t,
+ .int64_t,
+ => try writer.print("{s}_C(", .{self.getStandardDefineAbbrev().?}),
+ .zig_u128,
+ .zig_i128,
+ .zig_f16,
+ .zig_f32,
+ .zig_f64,
+ .zig_f80,
+ .zig_f128,
+ .zig_c_longdouble,
+ => |t| try writer.print("zig_{s}_{s}(", .{
+ switch (kind) {
+ else => "make",
+ .global => "init",
+ },
+ @tagName(t)["zig_".len..],
+ }),
+ .pointer,
+ .pointer_const,
+ .pointer_volatile,
+ .pointer_const_volatile,
+ => unreachable,
+ .array,
+ .vector,
+ => try writer.writeByte('{'),
+ .fwd_anon_struct,
+ .fwd_anon_union,
+ .fwd_struct,
+ .fwd_union,
+ .unnamed_struct,
+ .unnamed_union,
+ .packed_unnamed_struct,
+ .packed_unnamed_union,
+ .anon_struct,
+ .anon_union,
+ .@"struct",
+ .@"union",
+ .packed_struct,
+ .packed_union,
+ .function,
+ .varargs_function,
+ => unreachable,
+ }
+ }
+
+ pub fn renderLiteralSuffix(self: CType, writer: anytype) @TypeOf(writer).Error!void {
+ switch (self.tag()) {
+ .void => unreachable,
+ ._Bool => {},
+ .char,
+ .@"signed char",
+ .short,
+ .int,
+ => {},
+ .long => try writer.writeByte('l'),
+ .@"long long" => try writer.writeAll("ll"),
+ .@"unsigned char",
+ .@"unsigned short",
+ .@"unsigned int",
+ => try writer.writeByte('u'),
+ .@"unsigned long",
+ .size_t,
+ .uintptr_t,
+ => try writer.writeAll("ul"),
+ .@"unsigned long long" => try writer.writeAll("ull"),
+ .float => try writer.writeByte('f'),
+ .double => {},
+ .@"long double" => try writer.writeByte('l'),
+ .bool,
+ .ptrdiff_t,
+ .intptr_t,
+ => {},
+ .uint8_t,
+ .int8_t,
+ .uint16_t,
+ .int16_t,
+ .uint32_t,
+ .int32_t,
+ .uint64_t,
+ .int64_t,
+ .zig_u128,
+ .zig_i128,
+ .zig_f16,
+ .zig_f32,
+ .zig_f64,
+ .zig_f80,
+ .zig_f128,
+ .zig_c_longdouble,
+ => try writer.writeByte(')'),
+ .pointer,
+ .pointer_const,
+ .pointer_volatile,
+ .pointer_const_volatile,
+ => unreachable,
+ .array,
+ .vector,
+ => try writer.writeByte('}'),
+ .fwd_anon_struct,
+ .fwd_anon_union,
+ .fwd_struct,
+ .fwd_union,
+ .unnamed_struct,
+ .unnamed_union,
+ .packed_unnamed_struct,
+ .packed_unnamed_union,
+ .anon_struct,
+ .anon_union,
+ .@"struct",
+ .@"union",
+ .packed_struct,
+ .packed_union,
+ .function,
+ .varargs_function,
+ => unreachable,
+ }
+ }
+
+ pub fn byteSize(self: CType, store: Store.Set, target: Target) u64 {
+ return switch (self.tag()) {
+ .void => 0,
+ .char, .@"signed char", ._Bool, .@"unsigned char", .bool, .uint8_t, .int8_t => 1,
+ .short => target.c_type_byte_size(.short),
+ .int => target.c_type_byte_size(.int),
+ .long => target.c_type_byte_size(.long),
+ .@"long long" => target.c_type_byte_size(.longlong),
+ .@"unsigned short" => target.c_type_byte_size(.ushort),
+ .@"unsigned int" => target.c_type_byte_size(.uint),
+ .@"unsigned long" => target.c_type_byte_size(.ulong),
+ .@"unsigned long long" => target.c_type_byte_size(.ulonglong),
+ .float => target.c_type_byte_size(.float),
+ .double => target.c_type_byte_size(.double),
+ .@"long double" => target.c_type_byte_size(.longdouble),
+ .size_t,
+ .ptrdiff_t,
+ .uintptr_t,
+ .intptr_t,
+ .pointer,
+ .pointer_const,
+ .pointer_volatile,
+ .pointer_const_volatile,
+ => @divExact(target.cpu.arch.ptrBitWidth(), 8),
+ .uint16_t, .int16_t, .zig_f16 => 2,
+ .uint32_t, .int32_t, .zig_f32 => 4,
+ .uint64_t, .int64_t, .zig_f64 => 8,
+ .zig_u128, .zig_i128, .zig_f128 => 16,
+ .zig_f80 => if (target.c_type_bit_size(.longdouble) == 80)
+ target.c_type_byte_size(.longdouble)
+ else
+ 16,
+ .zig_c_longdouble => target.c_type_byte_size(.longdouble),
+
+ .array,
+ .vector,
+ => {
+ const data = self.cast(Payload.Sequence).?.data;
+ return data.len * store.indexToCType(data.elem_type).byteSize(store, target);
+ },
+
+ .fwd_anon_struct,
+ .fwd_anon_union,
+ .fwd_struct,
+ .fwd_union,
+ .unnamed_struct,
+ .unnamed_union,
+ .packed_unnamed_struct,
+ .packed_unnamed_union,
+ .anon_struct,
+ .anon_union,
+ .@"struct",
+ .@"union",
+ .packed_struct,
+ .packed_union,
+ .function,
+ .varargs_function,
+ => unreachable,
+ };
+ }
+
pub fn isPacked(self: CType) bool {
return switch (self.tag()) {
else => false,
@@ -787,26 +1077,26 @@ pub const CType = extern union {
};
}
- fn tagFromIntInfo(signedness: std.builtin.Signedness, bits: u16) Tag {
- return switch (bits) {
+ fn tagFromIntInfo(int_info: std.builtin.Type.Int) Tag {
+ return switch (int_info.bits) {
0 => .void,
- 1...8 => switch (signedness) {
+ 1...8 => switch (int_info.signedness) {
.unsigned => .uint8_t,
.signed => .int8_t,
},
- 9...16 => switch (signedness) {
+ 9...16 => switch (int_info.signedness) {
.unsigned => .uint16_t,
.signed => .int16_t,
},
- 17...32 => switch (signedness) {
+ 17...32 => switch (int_info.signedness) {
.unsigned => .uint32_t,
.signed => .int32_t,
},
- 33...64 => switch (signedness) {
+ 33...64 => switch (int_info.signedness) {
.unsigned => .uint64_t,
.signed => .int64_t,
},
- 65...128 => switch (signedness) {
+ 65...128 => switch (int_info.signedness) {
.unsigned => .zig_u128,
.signed => .zig_i128,
},
@@ -945,31 +1235,27 @@ pub const CType = extern union {
.c_ulong => self.init(.@"unsigned long"),
.c_longlong => self.init(.@"long long"),
.c_ulonglong => self.init(.@"unsigned long long"),
- else => {
- const info = ty.intInfo(target);
- const t = tagFromIntInfo(info.signedness, info.bits);
- switch (t) {
- .void => unreachable,
- else => self.init(t),
- .array => switch (kind) {
- .forward, .complete, .global => {
- const abi_size = ty.abiSize(target);
- const abi_align = ty.abiAlignment(target);
- self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{
- .len = @divExact(abi_size, abi_align),
- .elem_type = tagFromIntInfo(
- .unsigned,
- @intCast(u16, abi_align * 8),
- ).toIndex(),
- } } };
- self.value = .{ .cty = initPayload(&self.storage.seq) };
- },
- .forward_parameter,
- .parameter,
- => try self.initArrayParameter(ty, kind, lookup),
- .payload => unreachable,
+ else => switch (tagFromIntInfo(ty.intInfo(target))) {
+ .void => unreachable,
+ else => |t| self.init(t),
+ .array => switch (kind) {
+ .forward, .complete, .global => {
+ const abi_size = ty.abiSize(target);
+ const abi_align = ty.abiAlignment(target);
+ self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{
+ .len = @divExact(abi_size, abi_align),
+ .elem_type = tagFromIntInfo(.{
+ .signedness = .unsigned,
+ .bits = @intCast(u16, abi_align * 8),
+ }).toIndex(),
+ } } };
+ self.value = .{ .cty = initPayload(&self.storage.seq) };
},
- }
+ .forward_parameter,
+ .parameter,
+ => try self.initArrayParameter(ty, kind, lookup),
+ .payload => unreachable,
+ },
},
} else switch (ty.zigTypeTag()) {
.Frame => unreachable,
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index f8a1928dd1..70ac38d6fa 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -368,7 +368,6 @@ test "comptime @bitCast packed struct to int and back" {
}
test "comptime bitcast with fields following f80" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
From a8f4ac2b94e7945a5a1623547f258f5f32f12674 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Fri, 3 Mar 2023 00:18:35 -0500
Subject: [PATCH 020/294] CBE: implement big integer and vector comparisons
---
lib/zig.h | 313 +++++++++++++++++++++++++++-------
src/codegen/c.zig | 342 ++++++++++++++++++++++++--------------
src/codegen/c/type.zig | 124 ++++++++++++++
src/type.zig | 2 +-
test/behavior/bitcast.zig | 2 -
test/behavior/math.zig | 1 -
test/behavior/vector.zig | 2 -
7 files changed, 595 insertions(+), 191 deletions(-)
diff --git a/lib/zig.h b/lib/zig.h
index 7353ea935d..c39cffee24 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -37,6 +37,14 @@ typedef char bool;
#define zig_has_attribute(attribute) 0
#endif
+#if __LITTLE_ENDIAN__ || _MSC_VER
+#define zig_little_endian 1
+#define zig_big_endian 0
+#else
+#define zig_little_endian 0
+#define zig_big_endian 1
+#endif
+
#if __STDC_VERSION__ >= 201112L
#define zig_threadlocal _Thread_local
#elif defined(__GNUC__)
@@ -1379,7 +1387,7 @@ typedef signed __int128 zig_i128;
#else /* zig_has_int128 */
-#if __LITTLE_ENDIAN__ || _MSC_VER
+#if zig_little_endian
typedef struct { zig_align(16) uint64_t lo; uint64_t hi; } zig_u128;
typedef struct { zig_align(16) uint64_t lo; int64_t hi; } zig_i128;
#else
@@ -1909,6 +1917,177 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
return zig_bitcast_i128(zig_bit_reverse_u128(zig_bitcast_u128(val), bits));
}
+/* ========================== Big Integer Support =========================== */
+
+static inline uint16_t zig_big_bytes(uint16_t bits) {
+ uint16_t bytes = (bits + CHAR_BIT - 1) / CHAR_BIT;
+ uint16_t alignment = 16;
+ while (alignment / 2 >= bytes) alignment /= 2;
+ return (bytes + alignment - 1) / alignment * alignment;
+}
+
+static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ bool do_signed = is_signed;
+ uint16_t remaining_bytes = zig_big_bytes(bits);
+
+#if zig_little_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ int32_t limb_cmp;
+
+#if zig_little_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_cmp = zig_cmp_i128(lhs_limb, rhs_limb);
+ do_signed = false;
+ } else {
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_cmp = zig_cmp_u128(lhs_limb, rhs_limb);
+ }
+
+ if (limb_cmp != 0) return limb_cmp;
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+ }
+
+ return 0;
+}
+
/* ========================= Floating Point Support ========================= */
#if _MSC_VER
@@ -1933,7 +2112,6 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
#define zig_make_special_f64(sign, name, arg, repr) sign zig_make_f64(__builtin_##name, )(arg)
#define zig_make_special_f80(sign, name, arg, repr) sign zig_make_f80(__builtin_##name, )(arg)
#define zig_make_special_f128(sign, name, arg, repr) sign zig_make_f128(__builtin_##name, )(arg)
-#define zig_make_special_c_longdouble(sign, name, arg, repr) sign zig_make_c_longdouble(__builtin_##name, )(arg)
#else
#define zig_has_float_builtins 0
#define zig_make_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr)
@@ -1941,13 +2119,13 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
#define zig_make_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr)
#define zig_make_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr)
#define zig_make_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr)
-#define zig_make_special_c_longdouble(sign, name, arg, repr) zig_float_from_repr_c_longdouble(repr)
#endif
#define zig_has_f16 1
#define zig_bitSizeOf_f16 16
+typedef int16_t zig_repr_f16;
#define zig_libc_name_f16(name) __##name##h
-#define zig_make_special_constant_f16(sign, name, arg, repr) zig_make_special_f16(sign, name, arg, repr)
+#define zig_init_special_f16(sign, name, arg, repr) zig_make_special_f16(sign, name, arg, repr)
#if FLT_MANT_DIG == 11
typedef float zig_f16;
#define zig_make_f16(fp, repr) fp##f
@@ -1956,7 +2134,9 @@ typedef double zig_f16;
#define zig_make_f16(fp, repr) fp
#elif LDBL_MANT_DIG == 11
#define zig_bitSizeOf_c_longdouble 16
-typedef uint16_t zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f16 zig_repr_c_longdouble;
+#endif
typedef long double zig_f16;
#define zig_make_f16(fp, repr) fp##l
#elif FLT16_MANT_DIG == 11 && (zig_has_builtin(inff16) || defined(zig_gnuc))
@@ -1973,17 +2153,18 @@ typedef int16_t zig_f16;
#define zig_make_f16(fp, repr) repr
#undef zig_make_special_f16
#define zig_make_special_f16(sign, name, arg, repr) repr
-#undef zig_make_special_constant_f16
-#define zig_make_special_constant_f16(sign, name, arg, repr) repr
+#undef zig_init_special_f16
+#define zig_init_special_f16(sign, name, arg, repr) repr
#endif
#define zig_has_f32 1
#define zig_bitSizeOf_f32 32
+typedef int32_t zig_repr_f32;
#define zig_libc_name_f32(name) name##f
#if _MSC_VER
-#define zig_make_special_constant_f32(sign, name, arg, repr) sign zig_make_f32(zig_msvc_flt_##name, )
+#define zig_init_special_f32(sign, name, arg, repr) sign zig_make_f32(zig_msvc_flt_##name, )
#else
-#define zig_make_special_constant_f32(sign, name, arg, repr) zig_make_special_f32(sign, name, arg, repr)
+#define zig_init_special_f32(sign, name, arg, repr) zig_make_special_f32(sign, name, arg, repr)
#endif
#if FLT_MANT_DIG == 24
typedef float zig_f32;
@@ -1993,7 +2174,9 @@ typedef double zig_f32;
#define zig_make_f32(fp, repr) fp
#elif LDBL_MANT_DIG == 24
#define zig_bitSizeOf_c_longdouble 32
-typedef uint32_t zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f32 zig_repr_c_longdouble;
+#endif
typedef long double zig_f32;
#define zig_make_f32(fp, repr) fp##l
#elif FLT32_MANT_DIG == 24
@@ -2007,21 +2190,24 @@ typedef int32_t zig_f32;
#define zig_make_f32(fp, repr) repr
#undef zig_make_special_f32
#define zig_make_special_f32(sign, name, arg, repr) repr
-#undef zig_make_special_constant_f32
-#define zig_make_special_constant_f32(sign, name, arg, repr) repr
+#undef zig_init_special_f32
+#define zig_init_special_f32(sign, name, arg, repr) repr
#endif
#define zig_has_f64 1
#define zig_bitSizeOf_f64 64
+typedef int64_t zig_repr_f64;
#define zig_libc_name_f64(name) name
#if _MSC_VER
#ifdef ZIG_TARGET_ABI_MSVC
#define zig_bitSizeOf_c_longdouble 64
-typedef uint64_t zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f64 zig_repr_c_longdouble;
#endif
-#define zig_make_special_constant_f64(sign, name, arg, repr) sign zig_make_f64(zig_msvc_flt_##name, )
+#endif
+#define zig_init_special_f64(sign, name, arg, repr) sign zig_make_f64(zig_msvc_flt_##name, )
#else /* _MSC_VER */
-#define zig_make_special_constant_f64(sign, name, arg, repr) zig_make_special_f64(sign, name, arg, repr)
+#define zig_init_special_f64(sign, name, arg, repr) zig_make_special_f64(sign, name, arg, repr)
#endif /* _MSC_VER */
#if FLT_MANT_DIG == 53
typedef float zig_f64;
@@ -2031,7 +2217,9 @@ typedef double zig_f64;
#define zig_make_f64(fp, repr) fp
#elif LDBL_MANT_DIG == 53
#define zig_bitSizeOf_c_longdouble 64
-typedef uint64_t zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f64 zig_repr_c_longdouble;
+#endif
typedef long double zig_f64;
#define zig_make_f64(fp, repr) fp##l
#elif FLT64_MANT_DIG == 53
@@ -2048,14 +2236,15 @@ typedef int64_t zig_f64;
#define zig_make_f64(fp, repr) repr
#undef zig_make_special_f64
#define zig_make_special_f64(sign, name, arg, repr) repr
-#undef zig_make_special_constant_f64
-#define zig_make_special_constant_f64(sign, name, arg, repr) repr
+#undef zig_init_special_f64
+#define zig_init_special_f64(sign, name, arg, repr) repr
#endif
#define zig_has_f80 1
#define zig_bitSizeOf_f80 80
+typedef zig_i128 zig_repr_f80;
#define zig_libc_name_f80(name) __##name##x
-#define zig_make_special_constant_f80(sign, name, arg, repr) zig_make_special_f80(sign, name, arg, repr)
+#define zig_init_special_f80(sign, name, arg, repr) zig_make_special_f80(sign, name, arg, repr)
#if FLT_MANT_DIG == 64
typedef float zig_f80;
#define zig_make_f80(fp, repr) fp##f
@@ -2064,7 +2253,9 @@ typedef double zig_f80;
#define zig_make_f80(fp, repr) fp
#elif LDBL_MANT_DIG == 64
#define zig_bitSizeOf_c_longdouble 80
-typedef zig_u128 zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f80 zig_repr_c_longdouble;
+#endif
typedef long double zig_f80;
#define zig_make_f80(fp, repr) fp##l
#elif FLT80_MANT_DIG == 64
@@ -2084,14 +2275,15 @@ typedef zig_i128 zig_f80;
#define zig_make_f80(fp, repr) repr
#undef zig_make_special_f80
#define zig_make_special_f80(sign, name, arg, repr) repr
-#undef zig_make_special_constant_f80
-#define zig_make_special_constant_f80(sign, name, arg, repr) repr
+#undef zig_init_special_f80
+#define zig_init_special_f80(sign, name, arg, repr) repr
#endif
#define zig_has_f128 1
#define zig_bitSizeOf_f128 128
+typedef zig_i128 zig_repr_f128;
#define zig_libc_name_f128(name) name##q
-#define zig_make_special_constant_f128(sign, name, arg, repr) zig_make_special_f128(sign, name, arg, repr)
+#define zig_init_special_f128(sign, name, arg, repr) zig_make_special_f128(sign, name, arg, repr)
#if FLT_MANT_DIG == 113
typedef float zig_f128;
#define zig_make_f128(fp, repr) fp##f
@@ -2100,7 +2292,9 @@ typedef double zig_f128;
#define zig_make_f128(fp, repr) fp
#elif LDBL_MANT_DIG == 113
#define zig_bitSizeOf_c_longdouble 128
-typedef zig_u128 zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f128 zig_repr_c_longdouble;
+#endif
typedef long double zig_f128;
#define zig_make_f128(fp, repr) fp##l
#elif FLT128_MANT_DIG == 113
@@ -2122,63 +2316,44 @@ typedef zig_i128 zig_f128;
#define zig_make_f128(fp, repr) repr
#undef zig_make_special_f128
#define zig_make_special_f128(sign, name, arg, repr) repr
-#undef zig_make_special_constant_f128
-#define zig_make_special_constant_f128(sign, name, arg, repr) repr
+#undef zig_init_special_f128
+#define zig_init_special_f128(sign, name, arg, repr) repr
#endif
-#define zig_has_c_longdouble 1
-
-#ifdef ZIG_TARGET_ABI_MSVC
-#define zig_libc_name_c_longdouble(name) name
-#else
-#define zig_libc_name_c_longdouble(name) name##l
-#endif
-
-#define zig_make_special_constant_c_longdouble(sign, name, arg, repr) zig_make_special_c_longdouble(sign, name, arg, repr)
#ifdef zig_bitSizeOf_c_longdouble
+#define zig_has_c_longdouble 1
#ifdef ZIG_TARGET_ABI_MSVC
#undef zig_bitSizeOf_c_longdouble
#define zig_bitSizeOf_c_longdouble 64
-typedef uint64_t zig_repr_c_longdouble;
typedef zig_f64 zig_c_longdouble;
-#define zig_make_c_longdouble(fp, repr) fp
+typedef zig_repr_f64 zig_repr_c_longdouble;
#else
typedef long double zig_c_longdouble;
-#define zig_make_c_longdouble(fp, repr) fp##l
#endif
#else /* zig_bitSizeOf_c_longdouble */
-#undef zig_has_c_longdouble
#define zig_has_c_longdouble 0
-#define zig_bitSizeOf_c_longdouble 80
-typedef zig_u128 zig_repr_c_longdouble;
-#define zig_compiler_rt_abbrev_c_longdouble zig_compiler_rt_abbrev_f80
#define zig_bitSizeOf_repr_c_longdouble 128
-typedef zig_i128 zig_c_longdouble;
-#define zig_make_c_longdouble(fp, repr) repr
-#undef zig_make_special_c_longdouble
-#define zig_make_special_c_longdouble(sign, name, arg, repr) repr
-#undef zig_make_special_constant_c_longdouble
-#define zig_make_special_constant_c_longdouble(sign, name, arg, repr) repr
+typedef zig_f128 zig_c_longdouble;
+typedef zig_repr_f128 zig_repr_c_longdouble;
#endif /* zig_bitSizeOf_c_longdouble */
#if !zig_has_float_builtins
-#define zig_float_from_repr(Type, ReprType) \
- static inline zig_##Type zig_float_from_repr_##Type(ReprType repr) { \
+#define zig_float_from_repr(Type) \
+ static inline zig_##Type zig_float_from_repr_##Type(zig_repr_##Type repr) { \
zig_##Type result; \
memcpy(&result, &repr, sizeof(result)); \
return result; \
}
-zig_float_from_repr(f16, uint16_t)
-zig_float_from_repr(f32, uint32_t)
-zig_float_from_repr(f64, uint64_t)
-zig_float_from_repr(f80, zig_u128)
-zig_float_from_repr(f128, zig_u128)
-zig_float_from_repr(c_longdouble, zig_repr_c_longdouble)
+zig_float_from_repr(f16)
+zig_float_from_repr(f32)
+zig_float_from_repr(f64)
+zig_float_from_repr(f80)
+zig_float_from_repr(f128)
#endif
#define zig_cast_f16 (zig_f16)
@@ -2187,11 +2362,9 @@ zig_float_from_repr(c_longdouble, zig_repr_c_longdouble)
#if _MSC_VER && !zig_has_f128
#define zig_cast_f80
-#define zig_cast_c_longdouble
#define zig_cast_f128
#else
#define zig_cast_f80 (zig_f80)
-#define zig_cast_c_longdouble (zig_c_longdouble)
#define zig_cast_f128 (zig_f128)
#endif
@@ -2320,7 +2493,6 @@ zig_float_builtins(f32)
zig_float_builtins(f64)
zig_float_builtins(f80)
zig_float_builtins(f128)
-zig_float_builtins(c_longdouble)
#if _MSC_VER && (_M_IX86 || _M_X64)
@@ -2563,6 +2735,29 @@ zig_msvc_atomics_128op(u128, max)
#endif /* _MSC_VER && (_M_IX86 || _M_X64) */
+/* ============================= Vector Support ============================= */
+
+#define zig_cmp_vec(operation, operator) \
+ static inline void zig_##operation##_vec(bool *result, const void *lhs, const void *rhs, uint32_t len, bool is_signed, uint16_t elem_bits) { \
+ uint32_t index = 0; \
+ const uint8_t *lhs_ptr = lhs; \
+ const uint8_t *rhs_ptr = rhs; \
+ uint16_t elem_bytes = zig_big_bytes(elem_bits); \
+ \
+ while (index < len) { \
+ result[index] = zig_cmp_big(lhs_ptr, rhs_ptr, is_signed, elem_bits) operator 0; \
+ lhs_ptr += elem_bytes; \
+ rhs_ptr += elem_bytes; \
+ index += 1; \
+ } \
+ }
+zig_cmp_vec(eq, ==)
+zig_cmp_vec(ne, !=)
+zig_cmp_vec(lt, < )
+zig_cmp_vec(le, <=)
+zig_cmp_vec(gt, > )
+zig_cmp_vec(ge, >=)
+
/* ======================== Special Case Intrinsics ========================= */
#if (_MSC_VER && _M_X64) || defined(__x86_64__)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index addd3c8332..f4a817cecd 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -112,11 +112,7 @@ const ValueRenderLocation = enum {
}
};
-const BuiltinInfo = enum {
- None,
- Range,
- Bits,
-};
+const BuiltinInfo = enum { none, bits };
const reserved_idents = std.ComptimeStringMap(void, .{
// C language
@@ -440,6 +436,10 @@ pub const Function = struct {
return f.object.dg.typeToCType(ty, kind);
}
+ fn byteSize(f: *Function, cty: CType) u64 {
+ return f.object.dg.byteSize(cty);
+ }
+
fn renderType(f: *Function, w: anytype, t: Type) !void {
return f.object.dg.renderType(w, t);
}
@@ -1003,8 +1003,9 @@ pub const DeclGen = struct {
// return dg.fail("Only quiet nans are supported in global variable initializers", .{});
}
- try writer.writeAll("zig_make_special_");
- if (location == .StaticInitializer) try writer.writeAll("constant_");
+ try writer.writeAll("zig_");
+ try writer.writeAll(if (location == .StaticInitializer) "init" else "make");
+ try writer.writeAll("_special_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
if (std.math.signbit(f128_val)) try writer.writeByte('-');
@@ -1565,6 +1566,10 @@ pub const DeclGen = struct {
return dg.ctypes.typeToCType(dg.gpa, ty, dg.module, kind);
}
+ fn byteSize(dg: *DeclGen, cty: CType) u64 {
+ return cty.byteSize(dg.ctypes.set, dg.module.getTarget());
+ }
+
/// Renders a type as a single identifier, generating intermediate typedefs
/// if necessary.
///
@@ -1861,51 +1866,64 @@ pub const DeclGen = struct {
}
fn renderTypeForBuiltinFnName(dg: *DeclGen, writer: anytype, ty: Type) !void {
- const target = dg.module.getTarget();
- if (ty.isAbiInt()) {
- const int_info = ty.intInfo(target);
- const c_bits = toCIntBits(int_info.bits) orelse
- return dg.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
- try writer.print("{c}{d}", .{ signAbbrev(int_info.signedness), c_bits });
- } else if (ty.isRuntimeFloat()) {
- try ty.print(writer, dg.module);
- } else if (ty.isPtrAtRuntime()) {
- try writer.print("p{d}", .{ty.bitSize(target)});
- } else if (ty.zigTypeTag() == .Bool) {
- try writer.print("u8", .{});
- } else return dg.fail("TODO: CBE: implement renderTypeForBuiltinFnName for type {}", .{
- ty.fmt(dg.module),
- });
+ try dg.renderCTypeForBuiltinFnName(writer, try dg.typeToCType(ty, .complete));
+ }
+
+ fn renderCTypeForBuiltinFnName(dg: *DeclGen, writer: anytype, cty: CType) !void {
+ switch (cty.tag()) {
+ else => try writer.print("{c}{d}", .{
+ if (cty.isBool())
+ signAbbrev(.unsigned)
+ else if (cty.isInteger())
+ signAbbrev(cty.signedness() orelse .unsigned)
+ else if (cty.isFloat())
+ @as(u8, 'f')
+ else if (cty.isPointer())
+ @as(u8, 'p')
+ else
+ return dg.fail("TODO: CBE: implement renderTypeForBuiltinFnName for type {}", .{
+ cty.tag(),
+ }),
+ if (cty.isFloat()) cty.floatActiveBits(dg.module.getTarget()) else dg.byteSize(cty) * 8,
+ }),
+ .array => try writer.writeAll("big"),
+ .vector => try writer.writeAll("vec"),
+ }
}
fn renderBuiltinInfo(dg: *DeclGen, writer: anytype, ty: Type, info: BuiltinInfo) !void {
- const target = dg.module.getTarget();
switch (info) {
- .None => {},
- .Range => {
- var arena = std.heap.ArenaAllocator.init(dg.gpa);
- defer arena.deinit();
-
- const ExpectedContents = union { u: Value.Payload.U64, i: Value.Payload.I64 };
- var stack align(@alignOf(ExpectedContents)) =
- std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
-
- const int_info = ty.intInfo(target);
- if (int_info.signedness == .signed) {
- const min_val = try ty.minInt(stack.get(), target);
- try writer.print(", {x}", .{try dg.fmtIntLiteral(ty, min_val, .Other)});
+ .none => {},
+ .bits => {
+ const cty = try dg.typeToCType(ty, .complete);
+ if (cty.castTag(.vector)) |pl| {
+ var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = pl.data.len };
+ try writer.print(", {}", .{try dg.fmtIntLiteral(
+ Type.u32,
+ Value.initPayload(&len_pl.base),
+ .FunctionArgument,
+ )});
}
- const max_val = try ty.maxInt(stack.get(), target);
- try writer.print(", {x}", .{try dg.fmtIntLiteral(ty, max_val, .Other)});
- },
- .Bits => {
- var bits_pl = Value.Payload.U64{
- .base = .{ .tag = .int_u64 },
- .data = ty.bitSize(target),
- };
- const bits_val = Value.initPayload(&bits_pl.base);
- try writer.print(", {}", .{try dg.fmtIntLiteral(Type.u8, bits_val, .Other)});
+ const target = dg.module.getTarget();
+ const elem_ty = ty.shallowElemType();
+ const elem_info = if (elem_ty.isAbiInt())
+ elem_ty.intInfo(target)
+ else
+ std.builtin.Type.Int{
+ .signedness = .unsigned,
+ .bits = @intCast(u16, elem_ty.bitSize(target)),
+ };
+ switch (cty.tag()) {
+ else => {},
+ .array, .vector => try writer.print(", {}", .{elem_info.signedness == .signed}),
+ }
+
+ var bits_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = elem_info.bits };
+ try writer.print(", {}", .{try dg.fmtIntLiteral(switch (cty.tag()) {
+ else => Type.u8,
+ .array, .vector => Type.u16,
+ }, Value.initPayload(&bits_pl.base), .FunctionArgument)});
},
}
}
@@ -2758,35 +2776,35 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
// TODO use a different strategy for add, sub, mul, div
// that communicates to the optimizer that wrapping is UB.
- .add => try airBinOp(f, inst, "+", "add", .None),
- .sub => try airBinOp(f, inst, "-", "sub", .None),
- .mul => try airBinOp(f, inst, "*", "mul", .None),
+ .add => try airBinOp(f, inst, "+", "add", .none),
+ .sub => try airBinOp(f, inst, "-", "sub", .none),
+ .mul => try airBinOp(f, inst, "*", "mul", .none),
.neg => try airFloatNeg(f, inst),
- .div_float => try airBinBuiltinCall(f, inst, "div", .None),
+ .div_float => try airBinBuiltinCall(f, inst, "div", .none),
- .div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .None),
+ .div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .none),
.rem => blk: {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const lhs_ty = f.air.typeOf(bin_op.lhs);
// For binary operations @TypeOf(lhs)==@TypeOf(rhs),
// so we only check one.
break :blk if (lhs_ty.isInt())
- try airBinOp(f, inst, "%", "rem", .None)
+ try airBinOp(f, inst, "%", "rem", .none)
else
try airBinFloatOp(f, inst, "fmod");
},
- .div_floor => try airBinBuiltinCall(f, inst, "div_floor", .None),
- .mod => try airBinBuiltinCall(f, inst, "mod", .None),
+ .div_floor => try airBinBuiltinCall(f, inst, "div_floor", .none),
+ .mod => try airBinBuiltinCall(f, inst, "mod", .none),
- .addwrap => try airBinBuiltinCall(f, inst, "addw", .Bits),
- .subwrap => try airBinBuiltinCall(f, inst, "subw", .Bits),
- .mulwrap => try airBinBuiltinCall(f, inst, "mulw", .Bits),
+ .addwrap => try airBinBuiltinCall(f, inst, "addw", .bits),
+ .subwrap => try airBinBuiltinCall(f, inst, "subw", .bits),
+ .mulwrap => try airBinBuiltinCall(f, inst, "mulw", .bits),
- .add_sat => try airBinBuiltinCall(f, inst, "adds", .Bits),
- .sub_sat => try airBinBuiltinCall(f, inst, "subs", .Bits),
- .mul_sat => try airBinBuiltinCall(f, inst, "muls", .Bits),
- .shl_sat => try airBinBuiltinCall(f, inst, "shls", .Bits),
+ .add_sat => try airBinBuiltinCall(f, inst, "adds", .bits),
+ .sub_sat => try airBinBuiltinCall(f, inst, "subs", .bits),
+ .mul_sat => try airBinBuiltinCall(f, inst, "muls", .bits),
+ .shl_sat => try airBinBuiltinCall(f, inst, "shls", .bits),
.sqrt => try airUnFloatOp(f, inst, "sqrt"),
.sin => try airUnFloatOp(f, inst, "sin"),
@@ -2805,34 +2823,38 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.mul_add => try airMulAdd(f, inst),
- .add_with_overflow => try airOverflow(f, inst, "add", .Bits),
- .sub_with_overflow => try airOverflow(f, inst, "sub", .Bits),
- .mul_with_overflow => try airOverflow(f, inst, "mul", .Bits),
- .shl_with_overflow => try airOverflow(f, inst, "shl", .Bits),
+ .add_with_overflow => try airOverflow(f, inst, "add", .bits),
+ .sub_with_overflow => try airOverflow(f, inst, "sub", .bits),
+ .mul_with_overflow => try airOverflow(f, inst, "mul", .bits),
+ .shl_with_overflow => try airOverflow(f, inst, "shl", .bits),
.min => try airMinMax(f, inst, '<', "fmin"),
.max => try airMinMax(f, inst, '>', "fmax"),
.slice => try airSlice(f, inst),
- .cmp_gt => try airCmpOp(f, inst, ">", "gt"),
- .cmp_gte => try airCmpOp(f, inst, ">=", "ge"),
- .cmp_lt => try airCmpOp(f, inst, "<", "lt"),
- .cmp_lte => try airCmpOp(f, inst, "<=", "le"),
+ .cmp_gt => try airCmpOp(f, inst, .gt),
+ .cmp_gte => try airCmpOp(f, inst, .gte),
+ .cmp_lt => try airCmpOp(f, inst, .lt),
+ .cmp_lte => try airCmpOp(f, inst, .lte),
- .cmp_eq => try airEquality(f, inst, "((", "==", "eq"),
- .cmp_neq => try airEquality(f, inst, "!((", "!=", "ne"),
+ .cmp_eq => try airEquality(f, inst, .eq),
+ .cmp_neq => try airEquality(f, inst, .neq),
- .cmp_vector => return f.fail("TODO: C backend: implement cmp_vector", .{}),
+ .cmp_vector => blk: {
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const extra = f.air.extraData(Air.VectorCmp, ty_pl.payload).data;
+ break :blk try cmpBuiltinCall(f, inst, extra, extra.compareOperator(), .operator, .bits);
+ },
.cmp_lt_errors_len => try airCmpLtErrorsLen(f, inst),
// bool_and and bool_or are non-short-circuit operations
- .bool_and, .bit_and => try airBinOp(f, inst, "&", "and", .None),
- .bool_or, .bit_or => try airBinOp(f, inst, "|", "or", .None),
- .xor => try airBinOp(f, inst, "^", "xor", .None),
- .shr, .shr_exact => try airBinBuiltinCall(f, inst, "shr", .None),
- .shl, => try airBinBuiltinCall(f, inst, "shlw", .Bits),
- .shl_exact => try airBinOp(f, inst, "<<", "shl", .None),
+ .bool_and, .bit_and => try airBinOp(f, inst, "&", "and", .none),
+ .bool_or, .bit_or => try airBinOp(f, inst, "|", "or", .none),
+ .xor => try airBinOp(f, inst, "^", "xor", .none),
+ .shr, .shr_exact => try airBinBuiltinCall(f, inst, "shr", .none),
+ .shl, => try airBinBuiltinCall(f, inst, "shlw", .bits),
+ .shl_exact => try airBinOp(f, inst, "<<", "shl", .none),
.not => try airNot (f, inst),
.optional_payload => try airOptionalPayload(f, inst),
@@ -2877,11 +2899,11 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.memcpy => try airMemcpy(f, inst),
.set_union_tag => try airSetUnionTag(f, inst),
.get_union_tag => try airGetUnionTag(f, inst),
- .clz => try airUnBuiltinCall(f, inst, "clz", .Bits),
- .ctz => try airUnBuiltinCall(f, inst, "ctz", .Bits),
- .popcount => try airUnBuiltinCall(f, inst, "popcount", .Bits),
- .byte_swap => try airUnBuiltinCall(f, inst, "byte_swap", .Bits),
- .bit_reverse => try airUnBuiltinCall(f, inst, "bit_reverse", .Bits),
+ .clz => try airUnBuiltinCall(f, inst, "clz", .bits),
+ .ctz => try airUnBuiltinCall(f, inst, "ctz", .bits),
+ .popcount => try airUnBuiltinCall(f, inst, "popcount", .bits),
+ .byte_swap => try airUnBuiltinCall(f, inst, "byte_swap", .bits),
+ .bit_reverse => try airUnBuiltinCall(f, inst, "bit_reverse", .bits),
.tag_name => try airTagName(f, inst),
.error_name => try airErrorName(f, inst),
.splat => try airSplat(f, inst),
@@ -3349,7 +3371,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValueDeref(writer, operand);
try writer.print(", {})", .{try f.fmtIntLiteral(bit_offset_ty, bit_offset_val)});
if (cant_cast) try writer.writeByte(')');
- try f.object.dg.renderBuiltinInfo(writer, field_ty, .Bits);
+ try f.object.dg.renderBuiltinInfo(writer, field_ty, .bits);
try writer.writeByte(')');
} else {
try f.writeCValue(writer, local, .Other);
@@ -3744,7 +3766,7 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.air.typeOfIndex(inst);
if (inst_ty.tag() != .bool)
- return try airUnBuiltinCall(f, inst, "not", .Bits);
+ return try airUnBuiltinCall(f, inst, "not", .bits);
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
@@ -3803,7 +3825,7 @@ fn airBinOp(
return local;
}
-fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: []const u8, operation: []const u8) !CValue {
+fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: std.math.CompareOperator) !CValue {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
if (f.liveness.isUnused(inst)) {
@@ -3813,10 +3835,11 @@ fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: []const u8, operation:
const operand_ty = f.air.typeOf(bin_op.lhs);
const target = f.object.dg.module.getTarget();
- if (operand_ty.isInt() and operand_ty.bitSize(target) > 64)
- return try cmpBuiltinCall(f, inst, operator, "cmp");
+ const operand_bits = operand_ty.bitSize(target);
+ if (operand_ty.isInt() and operand_bits > 64)
+ return cmpBuiltinCall(f, inst, bin_op, operator, .cmp, if (operand_bits > 128) .bits else .none);
if (operand_ty.isRuntimeFloat())
- return try cmpBuiltinCall(f, inst, operator, operation);
+ return cmpBuiltinCall(f, inst, bin_op, operator, .operator, .none);
const inst_ty = f.air.typeOfIndex(inst);
const lhs = try f.resolveInst(bin_op.lhs);
@@ -3829,7 +3852,7 @@ fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: []const u8, operation:
try writer.writeAll(" = ");
try f.writeCValue(writer, lhs, .Other);
try writer.writeByte(' ');
- try writer.writeAll(operator);
+ try writer.writeAll(compareOperatorC(operator));
try writer.writeByte(' ');
try f.writeCValue(writer, rhs, .Other);
try writer.writeAll(";\n");
@@ -3840,9 +3863,7 @@ fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: []const u8, operation:
fn airEquality(
f: *Function,
inst: Air.Inst.Index,
- negate_prefix: []const u8,
- operator: []const u8,
- operation: []const u8,
+ operator: std.math.CompareOperator,
) !CValue {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
@@ -3853,10 +3874,11 @@ fn airEquality(
const operand_ty = f.air.typeOf(bin_op.lhs);
const target = f.object.dg.module.getTarget();
- if (operand_ty.isInt() and operand_ty.bitSize(target) > 64)
- return try cmpBuiltinCall(f, inst, operator, "cmp");
+ const operand_bits = operand_ty.bitSize(target);
+ if (operand_ty.isInt() and operand_bits > 64)
+ return cmpBuiltinCall(f, inst, bin_op, operator, .cmp, if (operand_bits > 128) .bits else .none);
if (operand_ty.isRuntimeFloat())
- return try cmpBuiltinCall(f, inst, operator, operation);
+ return cmpBuiltinCall(f, inst, bin_op, operator, .operator, .none);
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
@@ -3872,7 +3894,12 @@ fn airEquality(
// (A && B) || (C && (A == B))
// A = lhs.is_null ; B = rhs.is_null ; C = rhs.payload == lhs.payload
- try writer.writeAll(negate_prefix);
+ switch (operator) {
+ .eq => {},
+ .neq => try writer.writeByte('!'),
+ else => unreachable,
+ }
+ try writer.writeAll("((");
try f.writeCValue(writer, lhs, .Other);
try writer.writeAll(".is_null && ");
try f.writeCValue(writer, rhs, .Other);
@@ -3891,7 +3918,7 @@ fn airEquality(
try f.writeCValue(writer, lhs, .Other);
try writer.writeByte(' ');
- try writer.writeAll(operator);
+ try writer.writeAll(compareOperatorC(operator));
try writer.writeByte(' ');
try f.writeCValue(writer, rhs, .Other);
try writer.writeAll(";\n");
@@ -3972,7 +3999,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons
const inst_ty = f.air.typeOfIndex(inst);
const target = f.object.dg.module.getTarget();
if (inst_ty.isInt() and inst_ty.bitSize(target) > 64)
- return try airBinBuiltinCall(f, inst, operation[1..], .None);
+ return try airBinBuiltinCall(f, inst, operation[1..], .none);
if (inst_ty.isRuntimeFloat())
return try airBinFloatOp(f, inst, operation);
@@ -4418,12 +4445,35 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
// Ensure padding bits have the expected value.
if (dest_ty.isAbiInt()) {
+ const dest_cty = try f.typeToCType(dest_ty, .complete);
+ const dest_info = dest_ty.intInfo(target);
+ var wrap_ty_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dest_info.signedness) {
+ .unsigned => .int_unsigned,
+ .signed => .int_signed,
+ } }, .data = dest_info.bits };
+
try f.writeCValue(writer, local, .Other);
+ if (dest_cty.castTag(.array)) |pl| {
+ try writer.print("[{d}]", .{switch (target.cpu.arch.endian()) {
+ .Little => pl.data.len - 1,
+ .Big => 0,
+ }});
+ wrap_ty_pl.data -= 1;
+ wrap_ty_pl.data %= @intCast(u16, f.byteSize(f.indexToCType(pl.data.elem_type)) * 8);
+ wrap_ty_pl.data += 1;
+ }
+ const wrap_ty = Type.initPayload(&wrap_ty_pl.base);
try writer.writeAll(" = zig_wrap_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, dest_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, wrap_ty);
try writer.writeByte('(');
try f.writeCValue(writer, local, .Other);
- try f.object.dg.renderBuiltinInfo(writer, dest_ty, .Bits);
+ if (dest_cty.castTag(.array)) |pl| {
+ try writer.print("[{d}]", .{switch (target.cpu.arch.endian()) {
+ .Little => pl.data.len - 1,
+ .Big => 0,
+ }});
+ }
+ try f.object.dg.renderBuiltinInfo(writer, wrap_ty, .bits);
try writer.writeAll(");\n");
}
@@ -5438,7 +5488,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try writer.writeByte(')');
if (cant_cast) try writer.writeByte(')');
- try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .Bits);
+ try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits);
try writer.writeAll(");\n");
if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local;
@@ -5871,7 +5921,7 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, operand, .FunctionArgument);
try writer.writeByte(')');
if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) {
- try f.object.dg.renderBuiltinInfo(writer, inst_ty, .Bits);
+ try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
try writer.writeByte(')');
}
try writer.writeAll(";\n");
@@ -5972,29 +6022,46 @@ fn airBinBuiltinCall(
fn cmpBuiltinCall(
f: *Function,
inst: Air.Inst.Index,
- operator: []const u8,
- operation: []const u8,
+ data: anytype,
+ operator: std.math.CompareOperator,
+ operation: enum { cmp, operator },
+ info: BuiltinInfo,
) !CValue {
const inst_ty = f.air.typeOfIndex(inst);
- const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const operand_ty = f.air.typeOf(bin_op.lhs);
+ const operand_ty = f.air.typeOf(data.lhs);
- const lhs = try f.resolveInst(bin_op.lhs);
- const rhs = try f.resolveInst(bin_op.rhs);
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
+ const lhs = try f.resolveInst(data.lhs);
+ const rhs = try f.resolveInst(data.rhs);
+ try reap(f, inst, &.{ data.lhs, data.rhs });
+
+ const ref_ret = inst_ty.tag() != .bool;
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = zig_");
- try writer.writeAll(operation);
- try writer.writeByte('_');
+ if (!ref_ret) {
+ try f.writeCValue(writer, local, .Other);
+ try writer.writeAll(" = ");
+ }
+ try writer.print("zig_{s}_", .{switch (operation) {
+ else => @tagName(operation),
+ .operator => compareOperatorAbbrev(operator),
+ }});
try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
try writer.writeByte('(');
+ if (ref_ret) {
+ try f.writeCValue(writer, local, .FunctionArgument);
+ try writer.writeAll(", ");
+ }
try f.writeCValue(writer, lhs, .FunctionArgument);
try writer.writeAll(", ");
try f.writeCValue(writer, rhs, .FunctionArgument);
- try writer.print(") {s} {};\n", .{ operator, try f.fmtIntLiteral(Type.initTag(.i32), Value.zero) });
+ try f.object.dg.renderBuiltinInfo(writer, operand_ty, info);
+ try writer.writeByte(')');
+ if (!ref_ret) try writer.print(" {s} {}", .{
+ compareOperatorC(operator),
+ try f.fmtIntLiteral(Type.initTag(.i32), Value.zero),
+ });
+ try writer.writeAll(";\n");
return local;
}
@@ -6675,7 +6742,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(", ");
try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
- try f.object.dg.renderBuiltinInfo(writer, inst_ty, .Bits);
+ try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
try writer.writeByte(')');
if (!empty) try writer.writeByte(')');
@@ -7094,6 +7161,28 @@ fn compilerRtAbbrev(ty: Type, target: std.Target) []const u8 {
} else unreachable;
}
+fn compareOperatorAbbrev(operator: std.math.CompareOperator) []const u8 {
+ return switch (operator) {
+ .lt => "lt",
+ .lte => "le",
+ .eq => "eq",
+ .gte => "ge",
+ .gt => "gt",
+ .neq => "ne",
+ };
+}
+
+fn compareOperatorC(operator: std.math.CompareOperator) []const u8 {
+ return switch (operator) {
+ .lt => "<",
+ .lte => "<=",
+ .eq => "==",
+ .gte => ">=",
+ .gt => ">",
+ .neq => "!=",
+ };
+}
+
fn StringLiteral(comptime WriterType: type) type {
// MSVC has a length limit of 16380 per string literal (before concatenation)
const max_char_len = 4;
@@ -7239,14 +7328,6 @@ fn formatIntLiteral(
.positive = undefined,
};
defer allocator.free(wrap.limbs);
- if (wrap.addWrap(int, one, data.int_info.signedness, c_bits) or
- data.int_info.signedness == .signed and wrap.subWrap(int, one, data.int_info.signedness, c_bits))
- return writer.print("{s}_{s}", .{
- data.cty.getStandardDefineAbbrev() orelse return writer.print("zig_{s}Int_{c}{d}", .{
- if (int.positive) "max" else "min", signAbbrev(data.int_info.signedness), c_bits,
- }),
- if (int.positive) "MAX" else "MIN",
- });
const c_limb_info: struct {
cty: CType,
@@ -7277,6 +7358,15 @@ fn formatIntLiteral(
},
};
if (c_limb_info.count == 1) {
+ if (wrap.addWrap(int, one, data.int_info.signedness, c_bits) or
+ data.int_info.signedness == .signed and wrap.subWrap(int, one, data.int_info.signedness, c_bits))
+ return writer.print("{s}_{s}", .{
+ data.cty.getStandardDefineAbbrev() orelse return writer.print("zig_{s}Int_{c}{d}", .{
+ if (int.positive) "max" else "min", signAbbrev(data.int_info.signedness), c_bits,
+ }),
+ if (int.positive) "MAX" else "MIN",
+ });
+
if (!int.positive) try writer.writeByte('-');
try data.cty.renderLiteralPrefix(writer, data.kind);
@@ -7310,7 +7400,7 @@ fn formatIntLiteral(
try writer.writeAll(string);
} else {
try data.cty.renderLiteralPrefix(writer, data.kind);
- wrap.convertToTwosComplement(int, .unsigned, data.int_info.bits);
+ wrap.convertToTwosComplement(int, data.int_info.signedness, c_bits);
std.mem.set(BigIntLimb, wrap.limbs[wrap.len..], 0);
wrap.len = wrap.limbs.len;
const limbs_per_c_limb = @divExact(wrap.len, c_limb_info.count);
@@ -7343,7 +7433,7 @@ fn formatIntLiteral(
c_limb_cty = c_limb_info.cty.toSigned();
c_limb_mut.positive = wrap.positive;
- c_limb_mut.convertToTwosComplement(
+ c_limb_mut.truncate(
c_limb_mut.toConst(),
.signed,
data.int_info.bits - limb_i * @bitSizeOf(BigIntLimb),
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index a1b11df315..85e4cc9840 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -496,6 +496,116 @@ pub const CType = extern union {
}
};
+ pub fn isBool(self: CType) bool {
+ return switch (self.tag()) {
+ ._Bool,
+ .bool,
+ => true,
+ else => false,
+ };
+ }
+
+ pub fn isInteger(self: CType) bool {
+ return switch (self.tag()) {
+ .char,
+ .@"signed char",
+ .short,
+ .int,
+ .long,
+ .@"long long",
+ .@"unsigned char",
+ .@"unsigned short",
+ .@"unsigned int",
+ .@"unsigned long",
+ .@"unsigned long long",
+ .size_t,
+ .ptrdiff_t,
+ .uint8_t,
+ .int8_t,
+ .uint16_t,
+ .int16_t,
+ .uint32_t,
+ .int32_t,
+ .uint64_t,
+ .int64_t,
+ .uintptr_t,
+ .intptr_t,
+ .zig_u128,
+ .zig_i128,
+ => true,
+ else => false,
+ };
+ }
+
+ pub fn signedness(self: CType) ?std.builtin.Signedness {
+ return switch (self.tag()) {
+ .char => null, // unknown signedness
+ .@"signed char",
+ .short,
+ .int,
+ .long,
+ .@"long long",
+ .ptrdiff_t,
+ .int8_t,
+ .int16_t,
+ .int32_t,
+ .int64_t,
+ .intptr_t,
+ .zig_i128,
+ => .signed,
+ .@"unsigned char",
+ .@"unsigned short",
+ .@"unsigned int",
+ .@"unsigned long",
+ .@"unsigned long long",
+ .size_t,
+ .uint8_t,
+ .uint16_t,
+ .uint32_t,
+ .uint64_t,
+ .uintptr_t,
+ .zig_u128,
+ => .unsigned,
+ else => unreachable,
+ };
+ }
+
+ pub fn isFloat(self: CType) bool {
+ return switch (self.tag()) {
+ .float,
+ .double,
+ .@"long double",
+ .zig_f16,
+ .zig_f32,
+ .zig_f64,
+ .zig_f80,
+ .zig_f128,
+ .zig_c_longdouble,
+ => true,
+ else => false,
+ };
+ }
+
+ pub fn isPointer(self: CType) bool {
+ return switch (self.tag()) {
+ .pointer,
+ .pointer_const,
+ .pointer_volatile,
+ .pointer_const_volatile,
+ => true,
+ else => false,
+ };
+ }
+
+ pub fn isFunction(self: CType) bool {
+ return switch (self.tag()) {
+ .function,
+ .varargs_function,
+ => true,
+ else => false,
+ };
+ }
+
pub fn toSigned(self: CType) CType {
return CType.initTag(switch (self.tag()) {
.char, .@"signed char", .@"unsigned char" => .@"signed char",
@@ -725,6 +835,20 @@ pub const CType = extern union {
}
}
+ pub fn floatActiveBits(self: CType, target: Target) u16 {
+ return switch (self.tag()) {
+ .float => target.c_type_bit_size(.float),
+ .double => target.c_type_bit_size(.double),
+ .@"long double", .zig_c_longdouble => target.c_type_bit_size(.longdouble),
+ .zig_f16 => 16,
+ .zig_f32 => 32,
+ .zig_f64 => 64,
+ .zig_f80 => 80,
+ .zig_f128 => 128,
+ else => unreachable,
+ };
+ }
+
pub fn byteSize(self: CType, store: Store.Set, target: Target) u64 {
return switch (self.tag()) {
.void => 0,
diff --git a/src/type.zig b/src/type.zig
index 15525f14eb..9e501d893c 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -4213,7 +4213,7 @@ pub const Type = extern union {
};
}
- fn shallowElemType(child_ty: Type) Type {
+ pub fn shallowElemType(child_ty: Type) Type {
return switch (child_ty.zigTypeTag()) {
.Array, .Vector => child_ty.childType(),
else => child_ty,
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index 70ac38d6fa..552080c836 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -34,7 +34,6 @@ test "@bitCast iX -> uX (8, 16, 128)" {
test "@bitCast iX -> uX exotic integers" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -81,7 +80,6 @@ fn conv_uN(comptime N: usize, x: std.meta.Int(.unsigned, N)) std.meta.Int(.signe
test "bitcast uX to bytes" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index 54263e1daf..9ebeca8541 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -1526,7 +1526,6 @@ fn testNanEqNan(comptime F: type) !void {
}
test "vector comparison" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 50fef7f646..d885a7fabc 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -48,7 +48,6 @@ test "vector wrap operators" {
test "vector bin compares with mem.eql" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -403,7 +402,6 @@ test "initialize vector which is a struct field" {
test "vector comparison operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
From 93d696e84ef17a32d5c2f1520a295ebcda968e91 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Fri, 3 Mar 2023 01:18:23 -0500
Subject: [PATCH 021/294] CBE: implement some big integer and vector unary
operations
---
lib/zig.h | 422 ++++++++++++++++++++++++++++++++++-
src/codegen/c.zig | 51 ++++-
test/behavior/bugs/10147.zig | 1 -
test/behavior/math.zig | 8 +-
test/behavior/popcount.zig | 1 -
5 files changed, 460 insertions(+), 23 deletions(-)
diff --git a/lib/zig.h b/lib/zig.h
index c39cffee24..e5cb421c6f 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -1919,7 +1919,7 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
/* ========================== Big Integer Support =========================== */
-static inline uint16_t zig_big_bytes(uint16_t bits) {
+static inline uint16_t zig_int_bytes(uint16_t bits) {
uint16_t bytes = (bits + CHAR_BIT - 1) / CHAR_BIT;
uint16_t alignment = 16;
while (alignment / 2 >= bytes) alignment /= 2;
@@ -1931,7 +1931,7 @@ static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_sign
const uint8_t *rhs_bytes = rhs;
uint16_t byte_offset = 0;
bool do_signed = is_signed;
- uint16_t remaining_bytes = zig_big_bytes(bits);
+ uint16_t remaining_bytes = zig_int_bytes(bits);
#if zig_little_endian
byte_offset = remaining_bytes;
@@ -1965,7 +1965,7 @@ static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_sign
remaining_bytes -= 128 / CHAR_BIT;
#if zig_big_endian
- byte_offset -= 128 / CHAR_BIT;
+ byte_offset += 128 / CHAR_BIT;
#endif
}
@@ -1994,7 +1994,7 @@ static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_sign
remaining_bytes -= 64 / CHAR_BIT;
#if zig_big_endian
- byte_offset -= 64 / CHAR_BIT;
+ byte_offset += 64 / CHAR_BIT;
#endif
}
@@ -2023,7 +2023,7 @@ static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_sign
remaining_bytes -= 32 / CHAR_BIT;
#if zig_big_endian
- byte_offset -= 32 / CHAR_BIT;
+ byte_offset += 32 / CHAR_BIT;
#endif
}
@@ -2052,7 +2052,7 @@ static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_sign
remaining_bytes -= 16 / CHAR_BIT;
#if zig_big_endian
- byte_offset -= 16 / CHAR_BIT;
+ byte_offset += 16 / CHAR_BIT;
#endif
}
@@ -2081,13 +2081,368 @@ static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_sign
remaining_bytes -= 8 / CHAR_BIT;
#if zig_big_endian
- byte_offset -= 8 / CHAR_BIT;
+ byte_offset += 8 / CHAR_BIT;
#endif
}
return 0;
}
+static inline uint16_t zig_clz_big(const void *val, bool is_signed, uint16_t bits) {
+ const uint8_t *val_bytes = val;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t skip_bits = remaining_bytes * 8 - bits;
+ uint16_t total_lz = 0;
+ uint16_t limb_lz;
+ (void)is_signed;
+
+#if zig_little_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ {
+ zig_u128 val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u128(val_limb, 128 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 128 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ {
+ uint64_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u64(val_limb, 64 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 64 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ {
+ uint32_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u32(val_limb, 32 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 32 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ {
+ uint16_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u16(val_limb, 16 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 16 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ {
+ uint8_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u8(val_limb, 8 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 8 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return total_lz;
+}
+
+static inline uint16_t zig_ctz_big(const void *val, bool is_signed, uint16_t bits) {
+ const uint8_t *val_bytes = val;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t total_tz = 0;
+ uint16_t limb_tz;
+ (void)is_signed;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ {
+ zig_u128 val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u128(val_limb, 128);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 128) return total_tz;
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ {
+ uint64_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u64(val_limb, 64);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 64) return total_tz;
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ {
+ uint32_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u32(val_limb, 32);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 32) return total_tz;
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ {
+ uint16_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u16(val_limb, 16);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 16) return total_tz;
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ {
+ uint8_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u8(val_limb, 8);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 8) return total_tz;
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return total_tz;
+}
+
+static inline uint16_t zig_popcount_big(const void *val, bool is_signed, uint16_t bits) {
+ const uint8_t *val_bytes = val;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t total_pc = 0;
+ (void)is_signed;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ {
+ zig_u128 val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u128(val_limb, 128);
+ }
+
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ {
+ uint64_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u64(val_limb, 64);
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ {
+ uint32_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u32(val_limb, 32);
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ {
+ uint16_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc = zig_popcount_u16(val_limb, 16);
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ {
+ uint8_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc = zig_popcount_u8(val_limb, 8);
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return total_pc;
+}
+
/* ========================= Floating Point Support ========================= */
#if _MSC_VER
@@ -2742,7 +3097,7 @@ zig_msvc_atomics_128op(u128, max)
uint32_t index = 0; \
const uint8_t *lhs_ptr = lhs; \
const uint8_t *rhs_ptr = rhs; \
- uint16_t elem_bytes = zig_big_bytes(elem_bits); \
+ uint16_t elem_bytes = zig_int_bytes(elem_bits); \
\
while (index < len) { \
result[index] = zig_cmp_big(lhs_ptr, rhs_ptr, is_signed, elem_bits) operator 0; \
@@ -2758,6 +3113,57 @@ zig_cmp_vec(le, <=)
zig_cmp_vec(gt, > )
zig_cmp_vec(ge, >=)
+static inline void zig_clz_vec(void *result, const void *val, uint32_t len, bool is_signed, uint16_t elem_bits) {
+ uint32_t index = 0;
+ const uint8_t *val_ptr = val;
+ uint16_t elem_bytes = zig_int_bytes(elem_bits);
+
+ while (index < len) {
+ uint16_t lz = zig_clz_big(val_ptr, is_signed, elem_bits);
+ if (elem_bits <= 128) {
+ ((uint8_t *)result)[index] = (uint8_t)lz;
+ } else {
+ ((uint16_t *)result)[index] = lz;
+ }
+ val_ptr += elem_bytes;
+ index += 1;
+ }
+}
+
+static inline void zig_ctz_vec(void *result, const void *val, uint32_t len, bool is_signed, uint16_t elem_bits) {
+ uint32_t index = 0;
+ const uint8_t *val_ptr = val;
+ uint16_t elem_bytes = zig_int_bytes(elem_bits);
+
+ while (index < len) {
+ uint16_t tz = zig_ctz_big(val_ptr, is_signed, elem_bits);
+ if (elem_bits <= 128) {
+ ((uint8_t *)result)[index] = (uint8_t)tz;
+ } else {
+ ((uint16_t *)result)[index] = tz;
+ }
+ val_ptr += elem_bytes;
+ index += 1;
+ }
+}
+
+static inline void zig_popcount_vec(void *result, const void *val, uint32_t len, bool is_signed, uint16_t elem_bits) {
+ uint32_t index = 0;
+ const uint8_t *val_ptr = val;
+ uint16_t elem_bytes = zig_int_bytes(elem_bits);
+
+ while (index < len) {
+ uint16_t pc = zig_popcount_big(val_ptr, is_signed, elem_bits);
+ if (elem_bits <= 128) {
+ ((uint8_t *)result)[index] = (uint8_t)pc;
+ } else {
+ ((uint16_t *)result)[index] = pc;
+ }
+ val_ptr += elem_bytes;
+ index += 1;
+ }
+}
+
/* ======================== Special Case Intrinsics ========================= */
#if (_MSC_VER && _M_X64) || defined(__x86_64__)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index f4a817cecd..4d3e71e78a 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -2844,7 +2844,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.cmp_vector => blk: {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.VectorCmp, ty_pl.payload).data;
- break :blk try cmpBuiltinCall(f, inst, extra, extra.compareOperator(), .operator, .bits);
+ break :blk try airCmpBuiltinCall(f, inst, extra, extra.compareOperator(), .operator, .bits,);
},
.cmp_lt_errors_len => try airCmpLtErrorsLen(f, inst),
@@ -3837,9 +3837,16 @@ fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: std.math.CompareOperat
const target = f.object.dg.module.getTarget();
const operand_bits = operand_ty.bitSize(target);
if (operand_ty.isInt() and operand_bits > 64)
- return cmpBuiltinCall(f, inst, bin_op, operator, .cmp, if (operand_bits > 128) .bits else .none);
+ return airCmpBuiltinCall(
+ f,
+ inst,
+ bin_op,
+ operator,
+ .cmp,
+ if (operand_bits > 128) .bits else .none,
+ );
if (operand_ty.isRuntimeFloat())
- return cmpBuiltinCall(f, inst, bin_op, operator, .operator, .none);
+ return airCmpBuiltinCall(f, inst, bin_op, operator, .operator, .none);
const inst_ty = f.air.typeOfIndex(inst);
const lhs = try f.resolveInst(bin_op.lhs);
@@ -3876,9 +3883,16 @@ fn airEquality(
const target = f.object.dg.module.getTarget();
const operand_bits = operand_ty.bitSize(target);
if (operand_ty.isInt() and operand_bits > 64)
- return cmpBuiltinCall(f, inst, bin_op, operator, .cmp, if (operand_bits > 128) .bits else .none);
+ return airCmpBuiltinCall(
+ f,
+ inst,
+ bin_op,
+ operator,
+ .cmp,
+ if (operand_bits > 128) .bits else .none,
+ );
if (operand_ty.isRuntimeFloat())
- return cmpBuiltinCall(f, inst, bin_op, operator, .operator, .none);
+ return airCmpBuiltinCall(f, inst, bin_op, operator, .operator, .none);
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
@@ -5969,14 +5983,25 @@ fn airUnBuiltinCall(
const inst_ty = f.air.typeOfIndex(inst);
const operand_ty = f.air.typeOf(ty_op.operand);
+ const inst_cty = try f.typeToCType(inst_ty, .complete);
+ const ref_ret = switch (inst_cty.tag()) {
+ else => false,
+ .array, .vector => true,
+ };
+
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = zig_");
- try writer.writeAll(operation);
- try writer.writeByte('_');
+ if (!ref_ret) {
+ try f.writeCValue(writer, local, .Other);
+ try writer.writeAll(" = ");
+ }
+ try writer.print("zig_{s}_", .{operation});
try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
try writer.writeByte('(');
+ if (ref_ret) {
+ try f.writeCValue(writer, local, .FunctionArgument);
+ try writer.writeAll(", ");
+ }
try f.writeCValue(writer, operand, .FunctionArgument);
try f.object.dg.renderBuiltinInfo(writer, operand_ty, info);
try writer.writeAll(");\n");
@@ -6019,7 +6044,7 @@ fn airBinBuiltinCall(
return local;
}
-fn cmpBuiltinCall(
+fn airCmpBuiltinCall(
f: *Function,
inst: Air.Inst.Index,
data: anytype,
@@ -6034,7 +6059,11 @@ fn cmpBuiltinCall(
const rhs = try f.resolveInst(data.rhs);
try reap(f, inst, &.{ data.lhs, data.rhs });
- const ref_ret = inst_ty.tag() != .bool;
+ const inst_cty = try f.typeToCType(inst_ty, .complete);
+ const ref_ret = switch (inst_cty.tag()) {
+ else => false,
+ .array, .vector => true,
+ };
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
diff --git a/test/behavior/bugs/10147.zig b/test/behavior/bugs/10147.zig
index 3ca9085805..77c513caa6 100644
--- a/test/behavior/bugs/10147.zig
+++ b/test/behavior/bugs/10147.zig
@@ -6,7 +6,6 @@ test "test calling @clz on both vector and scalar inputs" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: u32 = 0x1;
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index 9ebeca8541..d7b8e4764b 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -100,7 +100,6 @@ test "@clz vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testClzVectors();
@@ -163,7 +162,6 @@ test "@ctz vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
@@ -1562,6 +1560,12 @@ test "signed zeros are represented properly" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and
+ builtin.zig_backend == .stage2_c)
+ {
+ return error.SkipZigTest;
+ }
+
const S = struct {
fn doTheTest() !void {
try testOne(f16);
diff --git a/test/behavior/popcount.zig b/test/behavior/popcount.zig
index b27d5d77d3..9dce5820cd 100644
--- a/test/behavior/popcount.zig
+++ b/test/behavior/popcount.zig
@@ -67,7 +67,6 @@ fn testPopCountIntegers() !void {
}
test "@popCount vectors" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
From e96a0fd0a1a05fe8c3b4d87df03d78ae99b7dbcb Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Fri, 3 Mar 2023 01:48:13 -0500
Subject: [PATCH 022/294] CBE: "compute" max int alignment the lazy way
---
lib/zig.h | 2 +-
src/link/C.zig | 22 ++++++++++++++--------
2 files changed, 15 insertions(+), 9 deletions(-)
diff --git a/lib/zig.h b/lib/zig.h
index e5cb421c6f..5d77c76c8f 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -1921,7 +1921,7 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
static inline uint16_t zig_int_bytes(uint16_t bits) {
uint16_t bytes = (bits + CHAR_BIT - 1) / CHAR_BIT;
- uint16_t alignment = 16;
+ uint16_t alignment = ZIG_TARGET_MAX_INT_ALIGNMENT;
while (alignment / 2 >= bytes) alignment /= 2;
return (bytes + alignment - 1) / alignment * alignment;
}
diff --git a/src/link/C.zig b/src/link/C.zig
index 5663ba71e2..7e3ad2eddd 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -221,14 +221,19 @@ pub fn flush(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) !void
return self.flushModule(comp, prog_node);
}
-fn abiDefine(comp: *Compilation) ?[]const u8 {
- return switch (comp.getTarget().abi) {
- .msvc => "#define ZIG_TARGET_ABI_MSVC\n",
- else => null,
- };
+fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) {
+ var defines = std.ArrayList(u8).init(self.base.allocator);
+ errdefer defines.deinit();
+ const writer = defines.writer();
+ switch (target.abi) {
+ .msvc => try writer.writeAll("#define ZIG_TARGET_ABI_MSVC\n"),
+ else => {},
+ }
+ try writer.print("#define ZIG_TARGET_MAX_INT_ALIGNMENT {d}\n", .{target.maxIntAlignment()});
+ return defines;
}
-pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) !void {
+pub fn flushModule(self: *C, _: *Compilation, prog_node: *std.Progress.Node) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -245,12 +250,13 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
var f: Flush = .{};
defer f.deinit(gpa);
- const abi_define = abiDefine(comp);
+ const abi_defines = try self.abiDefines(module.getTarget());
+ defer abi_defines.deinit();
// Covers defines, zig.h, ctypes, asm, lazy fwd.
try f.all_buffers.ensureUnusedCapacity(gpa, 5);
- if (abi_define) |buf| f.appendBufAssumeCapacity(buf);
+ f.appendBufAssumeCapacity(abi_defines.items);
f.appendBufAssumeCapacity(zig_h);
const ctypes_index = f.all_buffers.items.len;
From 9e3a5ecd39227aff3b2821d0c0b489eb9713b146 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sat, 4 Mar 2023 15:18:05 -0500
Subject: [PATCH 023/294] CBE: fix behavior test failures on msvc
---
lib/zig.h | 4 +++-
src/codegen/c.zig | 37 +++++++++++++++++++++++++++++--------
src/codegen/c/type.zig | 7 +++++++
3 files changed, 39 insertions(+), 9 deletions(-)
diff --git a/lib/zig.h b/lib/zig.h
index 5d77c76c8f..6b95ba3358 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -1646,7 +1646,9 @@ static inline zig_u128 zig_wrap_u128(zig_u128 val, uint8_t bits) {
}
static inline zig_i128 zig_wrap_i128(zig_i128 val, uint8_t bits) {
- return zig_make_i128(zig_wrap_i64(zig_hi_i128(val), bits - UINT8_C(64)), zig_lo_i128(val));
+ if (bits > UINT8_C(64)) return zig_make_i128(zig_wrap_i64(zig_hi_i128(val), bits - UINT8_C(64)), zig_lo_i128(val));
+ int64_t lo = zig_wrap_i64((int64_t)zig_lo_i128(val), bits);
+ return zig_make_i128(zig_shr_i64(lo, 63), (uint64_t)lo);
}
static inline zig_u128 zig_shlw_u128(zig_u128 lhs, uint8_t rhs, uint8_t bits) {
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 4d3e71e78a..b8606b1a17 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -4461,10 +4461,12 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
if (dest_ty.isAbiInt()) {
const dest_cty = try f.typeToCType(dest_ty, .complete);
const dest_info = dest_ty.intInfo(target);
- var wrap_ty_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dest_info.signedness) {
+ var info_ty_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dest_info.signedness) {
.unsigned => .int_unsigned,
.signed => .int_signed,
} }, .data = dest_info.bits };
+ var wrap_cty: ?CType = null;
+ var need_bitcasts = false;
try f.writeCValue(writer, local, .Other);
if (dest_cty.castTag(.array)) |pl| {
@@ -4472,14 +4474,31 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
.Little => pl.data.len - 1,
.Big => 0,
}});
- wrap_ty_pl.data -= 1;
- wrap_ty_pl.data %= @intCast(u16, f.byteSize(f.indexToCType(pl.data.elem_type)) * 8);
- wrap_ty_pl.data += 1;
+ const elem_cty = f.indexToCType(pl.data.elem_type);
+ wrap_cty = elem_cty.toSignedness(dest_info.signedness);
+ need_bitcasts = wrap_cty.?.tag() == .zig_i128;
+ info_ty_pl.data -= 1;
+ info_ty_pl.data %= @intCast(u16, f.byteSize(elem_cty) * 8);
+ info_ty_pl.data += 1;
}
- const wrap_ty = Type.initPayload(&wrap_ty_pl.base);
- try writer.writeAll(" = zig_wrap_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, wrap_ty);
+ try writer.writeAll(" = ");
+ if (need_bitcasts) {
+ try writer.writeAll("zig_bitcast_");
+ try f.object.dg.renderCTypeForBuiltinFnName(writer, wrap_cty.?.toUnsigned());
+ try writer.writeByte('(');
+ }
+ try writer.writeAll("zig_wrap_");
+ const info_ty = Type.initPayload(&info_ty_pl.base);
+ if (wrap_cty) |cty|
+ try f.object.dg.renderCTypeForBuiltinFnName(writer, cty)
+ else
+ try f.object.dg.renderTypeForBuiltinFnName(writer, info_ty);
try writer.writeByte('(');
+ if (need_bitcasts) {
+ try writer.writeAll("zig_bitcast_");
+ try f.object.dg.renderCTypeForBuiltinFnName(writer, wrap_cty.?);
+ try writer.writeByte('(');
+ }
try f.writeCValue(writer, local, .Other);
if (dest_cty.castTag(.array)) |pl| {
try writer.print("[{d}]", .{switch (target.cpu.arch.endian()) {
@@ -4487,7 +4506,9 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
.Big => 0,
}});
}
- try f.object.dg.renderBuiltinInfo(writer, wrap_ty, .bits);
+ if (need_bitcasts) try writer.writeByte(')');
+ try f.object.dg.renderBuiltinInfo(writer, info_ty, .bits);
+ if (need_bitcasts) try writer.writeByte(')');
try writer.writeAll(");\n");
}
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index 85e4cc9840..313fcc130c 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -651,6 +651,13 @@ pub const CType = extern union {
});
}
+ pub fn toSignedness(self: CType, s: std.builtin.Signedness) CType {
+ return switch (s) {
+ .unsigned => self.toUnsigned(),
+ .signed => self.toSigned(),
+ };
+ }
+
pub fn getStandardDefineAbbrev(self: CType) ?[]const u8 {
return switch (self.tag()) {
.char => "CHAR",
From b2e9c0d0ff1dc6799fe3b5fdbecd53af176f37b7 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sat, 4 Mar 2023 19:02:42 -0500
Subject: [PATCH 024/294] Sema: fix cmp_vector type
---
src/Sema.zig | 40 +++++++++++++++++++---------------------
1 file changed, 19 insertions(+), 21 deletions(-)
diff --git a/src/Sema.zig b/src/Sema.zig
index 8940527bc0..8c6e3cf05c 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -574,11 +574,13 @@ pub const Block = struct {
});
}
- fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator, vector_ty: Air.Inst.Ref) !Air.Inst.Ref {
+ fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator) !Air.Inst.Ref {
return block.addInst(.{
.tag = if (block.float_mode == .Optimized) .cmp_vector_optimized else .cmp_vector,
.data = .{ .ty_pl = .{
- .ty = vector_ty,
+ .ty = try block.sema.addType(
+ try Type.vector(block.sema.arena, block.sema.typeOf(lhs).vectorLen(), Type.bool),
+ ),
.payload = try block.sema.addExtra(Air.VectorCmp{
.lhs = lhs,
.rhs = rhs,
@@ -9412,7 +9414,7 @@ fn intCast(
const ok = if (is_vector) ok: {
const zeros = try Value.Tag.repeated.create(sema.arena, Value.zero);
const zero_inst = try sema.addConstant(sema.typeOf(operand), zeros);
- const is_in_range = try block.addCmpVector(operand, zero_inst, .eq, try sema.addType(operand_ty));
+ const is_in_range = try block.addCmpVector(operand, zero_inst, .eq);
const all_in_range = try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{ .operand = is_in_range, .operation = .And } },
@@ -9466,7 +9468,7 @@ fn intCast(
const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val);
const ok = if (is_vector) ok: {
- const is_in_range = try block.addCmpVector(diff_unsigned, dest_range, .lte, try sema.addType(operand_ty));
+ const is_in_range = try block.addCmpVector(diff_unsigned, dest_range, .lte);
const all_in_range = try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@@ -9483,7 +9485,7 @@ fn intCast(
try sema.addSafetyCheck(block, ok, .cast_truncated_data);
} else {
const ok = if (is_vector) ok: {
- const is_in_range = try block.addCmpVector(diff, dest_max, .lte, try sema.addType(operand_ty));
+ const is_in_range = try block.addCmpVector(diff, dest_max, .lte);
const all_in_range = try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@@ -9504,7 +9506,7 @@ fn intCast(
const ok = if (is_vector) ok: {
const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
const zero_inst = try sema.addConstant(operand_ty, zero_val);
- const is_in_range = try block.addCmpVector(operand, zero_inst, .gte, try sema.addType(operand_ty));
+ const is_in_range = try block.addCmpVector(operand, zero_inst, .gte);
const all_in_range = try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@@ -12016,7 +12018,7 @@ fn zirShl(
const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
- const lt = try block.addCmpVector(rhs, bit_count_inst, .lt, try sema.addType(rhs_ty));
+ const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
break :ok try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
@@ -12172,7 +12174,7 @@ fn zirShr(
const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
- const lt = try block.addCmpVector(rhs, bit_count_inst, .lt, try sema.addType(rhs_ty));
+ const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
break :ok try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
@@ -12191,7 +12193,7 @@ fn zirShr(
const back = try block.addBinOp(.shl, result, rhs);
const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
- const eql = try block.addCmpVector(lhs, back, .eq, try sema.addType(rhs_ty));
+ const eql = try block.addCmpVector(lhs, back, .eq);
break :ok try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@@ -13192,7 +13194,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const floored = try block.addUnOp(.floor, result);
if (resolved_type.zigTypeTag() == .Vector) {
- const eql = try block.addCmpVector(result, floored, .eq, try sema.addType(resolved_type));
+ const eql = try block.addCmpVector(result, floored, .eq);
break :ok try block.addInst(.{
.tag = switch (block.float_mode) {
.Strict => .reduce,
@@ -13216,7 +13218,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (resolved_type.zigTypeTag() == .Vector) {
const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
const zero = try sema.addConstant(resolved_type, zero_val);
- const eql = try block.addCmpVector(remainder, zero, .eq, try sema.addType(resolved_type));
+ const eql = try block.addCmpVector(remainder, zero, .eq);
break :ok try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
@@ -13514,14 +13516,13 @@ fn addDivIntOverflowSafety(
var ok: Air.Inst.Ref = .none;
if (resolved_type.zigTypeTag() == .Vector) {
- const vector_ty_ref = try sema.addType(resolved_type);
if (maybe_lhs_val == null) {
const min_int_ref = try sema.addConstant(resolved_type, min_int);
- ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq, vector_ty_ref);
+ ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq);
}
if (maybe_rhs_val == null) {
const neg_one_ref = try sema.addConstant(resolved_type, neg_one);
- const rhs_ok = try block.addCmpVector(casted_rhs, neg_one_ref, .neq, vector_ty_ref);
+ const rhs_ok = try block.addCmpVector(casted_rhs, neg_one_ref, .neq);
if (ok == .none) {
ok = rhs_ok;
} else {
@@ -13573,7 +13574,7 @@ fn addDivByZeroSafety(
const ok = if (resolved_type.zigTypeTag() == .Vector) ok: {
const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
const zero = try sema.addConstant(resolved_type, zero_val);
- const ok = try block.addCmpVector(casted_rhs, zero, .neq, try sema.addType(resolved_type));
+ const ok = try block.addCmpVector(casted_rhs, zero, .neq);
break :ok try block.addInst(.{
.tag = if (is_int) .reduce else .reduce_optimized,
.data = .{ .reduce = .{
@@ -15202,9 +15203,7 @@ fn cmpSelf(
};
try sema.requireRuntimeBlock(block, src, runtime_src);
if (resolved_type.zigTypeTag() == .Vector) {
- const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.bool);
- const result_ty_ref = try sema.addType(result_ty);
- return block.addCmpVector(casted_lhs, casted_rhs, op, result_ty_ref);
+ return block.addCmpVector(casted_lhs, casted_rhs, op);
}
const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized);
return block.addBinOp(tag, casted_lhs, casted_rhs);
@@ -23035,7 +23034,7 @@ fn panicSentinelMismatch(
const ok = if (sentinel_ty.zigTypeTag() == .Vector) ok: {
const eql =
- try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq, try sema.addType(sentinel_ty));
+ try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq);
break :ok try parent_block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
@@ -29368,8 +29367,7 @@ fn cmpVector(
};
try sema.requireRuntimeBlock(block, src, runtime_src);
- const result_ty_inst = try sema.addType(result_ty);
- return block.addCmpVector(lhs, rhs, op, result_ty_inst);
+ return block.addCmpVector(lhs, rhs, op);
}
fn wrapOptional(
From c478c7609e4529267d1ce030577777e836ffc10b Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 00:01:15 -0500
Subject: [PATCH 025/294] CBE: implement vector operations
Also, bigint add and sub which is all I was actually trying to do.
---
lib/zig.h | 660 ++++++++++++++++++++----------
src/codegen/c.zig | 620 +++++++++++++++++-----------
src/type.zig | 2 +-
src/value.zig | 2 +-
test/behavior/bitreverse.zig | 3 -
test/behavior/byteswap.zig | 3 -
test/behavior/cast.zig | 1 -
test/behavior/floatop.zig | 12 -
test/behavior/maximum_minimum.zig | 2 -
test/behavior/muladd.zig | 5 -
test/behavior/vector.zig | 30 +-
11 files changed, 835 insertions(+), 505 deletions(-)
diff --git a/lib/zig.h b/lib/zig.h
index 6b95ba3358..22a9dbbb9e 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -612,12 +612,6 @@ static inline bool zig_addo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8
#endif
}
-static inline void zig_vaddo_u32(uint8_t *ov, uint32_t *res, int n,
- const uint32_t *lhs, const uint32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int32_t __addosi4(int32_t lhs, int32_t rhs, int *overflow);
static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
@@ -632,12 +626,6 @@ static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vaddo_i32(uint8_t *ov, int32_t *res, int n,
- const int32_t *lhs, const int32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
uint64_t full_res;
@@ -650,12 +638,6 @@ static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8
#endif
}
-static inline void zig_vaddo_u64(uint8_t *ov, uint64_t *res, int n,
- const uint64_t *lhs, const uint64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int64_t __addodi4(int64_t lhs, int64_t rhs, int *overflow);
static inline bool zig_addo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
@@ -670,12 +652,6 @@ static inline bool zig_addo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vaddo_i64(uint8_t *ov, int64_t *res, int n,
- const int64_t *lhs, const int64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_addo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
uint8_t full_res;
@@ -690,12 +666,6 @@ static inline bool zig_addo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t b
#endif
}
-static inline void zig_vaddo_u8(uint8_t *ov, uint8_t *res, int n,
- const uint8_t *lhs, const uint8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_addo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
int8_t full_res;
@@ -710,12 +680,6 @@ static inline bool zig_addo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits
#endif
}
-static inline void zig_vaddo_i8(uint8_t *ov, int8_t *res, int n,
- const int8_t *lhs, const int8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_addo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
uint16_t full_res;
@@ -730,12 +694,6 @@ static inline bool zig_addo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8
#endif
}
-static inline void zig_vaddo_u16(uint8_t *ov, uint16_t *res, int n,
- const uint16_t *lhs, const uint16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_addo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
int16_t full_res;
@@ -750,12 +708,6 @@ static inline bool zig_addo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
#endif
}
-static inline void zig_vaddo_i16(uint8_t *ov, int16_t *res, int n,
- const int16_t *lhs, const int16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
uint32_t full_res;
@@ -768,12 +720,6 @@ static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8
#endif
}
-static inline void zig_vsubo_u32(uint8_t *ov, uint32_t *res, int n,
- const uint32_t *lhs, const uint32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int32_t __subosi4(int32_t lhs, int32_t rhs, int *overflow);
static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
@@ -788,12 +734,6 @@ static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vsubo_i32(uint8_t *ov, int32_t *res, int n,
- const int32_t *lhs, const int32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
uint64_t full_res;
@@ -806,12 +746,6 @@ static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8
#endif
}
-static inline void zig_vsubo_u64(uint8_t *ov, uint64_t *res, int n,
- const uint64_t *lhs, const uint64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int64_t __subodi4(int64_t lhs, int64_t rhs, int *overflow);
static inline bool zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
@@ -826,12 +760,6 @@ static inline bool zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vsubo_i64(uint8_t *ov, int64_t *res, int n,
- const int64_t *lhs, const int64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_subo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
uint8_t full_res;
@@ -846,12 +774,6 @@ static inline bool zig_subo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t b
#endif
}
-static inline void zig_vsubo_u8(uint8_t *ov, uint8_t *res, int n,
- const uint8_t *lhs, const uint8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_subo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
int8_t full_res;
@@ -866,13 +788,6 @@ static inline bool zig_subo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits
#endif
}
-static inline void zig_vsubo_i8(uint8_t *ov, int8_t *res, int n,
- const int8_t *lhs, const int8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
-
static inline bool zig_subo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
uint16_t full_res;
@@ -887,13 +802,6 @@ static inline bool zig_subo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8
#endif
}
-static inline void zig_vsubo_u16(uint8_t *ov, uint16_t *res, int n,
- const uint16_t *lhs, const uint16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
-
static inline bool zig_subo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
int16_t full_res;
@@ -908,12 +816,6 @@ static inline bool zig_subo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
#endif
}
-static inline void zig_vsubo_i16(uint8_t *ov, int16_t *res, int n,
- const int16_t *lhs, const int16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
uint32_t full_res;
@@ -926,12 +828,6 @@ static inline bool zig_mulo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8
#endif
}
-static inline void zig_vmulo_u32(uint8_t *ov, uint32_t *res, int n,
- const uint32_t *lhs, const uint32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int32_t __mulosi4(int32_t lhs, int32_t rhs, int *overflow);
static inline bool zig_mulo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
@@ -946,12 +842,6 @@ static inline bool zig_mulo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vmulo_i32(uint8_t *ov, int32_t *res, int n,
- const int32_t *lhs, const int32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
uint64_t full_res;
@@ -964,12 +854,6 @@ static inline bool zig_mulo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8
#endif
}
-static inline void zig_vmulo_u64(uint8_t *ov, uint64_t *res, int n,
- const uint64_t *lhs, const uint64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int64_t __mulodi4(int64_t lhs, int64_t rhs, int *overflow);
static inline bool zig_mulo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
@@ -984,12 +868,6 @@ static inline bool zig_mulo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vmulo_i64(uint8_t *ov, int64_t *res, int n,
- const int64_t *lhs, const int64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
uint8_t full_res;
@@ -1004,12 +882,6 @@ static inline bool zig_mulo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t b
#endif
}
-static inline void zig_vmulo_u8(uint8_t *ov, uint8_t *res, int n,
- const uint8_t *lhs, const uint8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
int8_t full_res;
@@ -1024,12 +896,6 @@ static inline bool zig_mulo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits
#endif
}
-static inline void zig_vmulo_i8(uint8_t *ov, int8_t *res, int n,
- const int8_t *lhs, const int8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
uint16_t full_res;
@@ -1044,12 +910,6 @@ static inline bool zig_mulo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8
#endif
}
-static inline void zig_vmulo_u16(uint8_t *ov, uint16_t *res, int n,
- const uint16_t *lhs, const uint16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
int16_t full_res;
@@ -1064,12 +924,6 @@ static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
#endif
}
-static inline void zig_vmulo_i16(uint8_t *ov, int16_t *res, int n,
- const int16_t *lhs, const int16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
#define zig_int_builtins(w) \
static inline bool zig_shlo_u##w(uint##w##_t *res, uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \
*res = zig_shlw_u##w(lhs, rhs, bits); \
@@ -2090,6 +1944,446 @@ static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_sign
return 0;
}
+static inline bool zig_addo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ uint8_t *res_bytes = res;
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t top_bits = remaining_bytes * 8 - bits;
+ bool overflow = false;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ uint16_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 128 / CHAR_BIT && is_signed) {
+ zig_i128 res_limb;
+ zig_i128 tmp_limb;
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ zig_u128 res_limb;
+ zig_u128 tmp_limb;
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+ uint16_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 64 / CHAR_BIT && is_signed) {
+ int64_t res_limb;
+ int64_t tmp_limb;
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint64_t res_limb;
+ uint64_t tmp_limb;
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+ uint16_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 32 / CHAR_BIT && is_signed) {
+ int32_t res_limb;
+ int32_t tmp_limb;
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint32_t res_limb;
+ uint32_t tmp_limb;
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+ uint16_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 16 / CHAR_BIT && is_signed) {
+ int16_t res_limb;
+ int16_t tmp_limb;
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint16_t res_limb;
+ uint16_t tmp_limb;
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+ uint16_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 8 / CHAR_BIT && is_signed) {
+ int8_t res_limb;
+ int8_t tmp_limb;
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint8_t res_limb;
+ uint8_t tmp_limb;
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u8(&res_limb, tmp_limb, overflow ? UINT8_C(1) : UINT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return overflow;
+}
+
+static inline bool zig_subo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ uint8_t *res_bytes = res;
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t top_bits = remaining_bytes * 8 - bits;
+ bool overflow = false;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ uint16_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 128 / CHAR_BIT && is_signed) {
+ zig_i128 res_limb;
+ zig_i128 tmp_limb;
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ zig_u128 res_limb;
+ zig_u128 tmp_limb;
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+ uint16_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 64 / CHAR_BIT && is_signed) {
+ int64_t res_limb;
+ int64_t tmp_limb;
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint64_t res_limb;
+ uint64_t tmp_limb;
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+ uint16_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 32 / CHAR_BIT && is_signed) {
+ int32_t res_limb;
+ int32_t tmp_limb;
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint32_t res_limb;
+ uint32_t tmp_limb;
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+ uint16_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 16 / CHAR_BIT && is_signed) {
+ int16_t res_limb;
+ int16_t tmp_limb;
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint16_t res_limb;
+ uint16_t tmp_limb;
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+ uint16_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 8 / CHAR_BIT && is_signed) {
+ int8_t res_limb;
+ int8_t tmp_limb;
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint8_t res_limb;
+ uint8_t tmp_limb;
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u8(&res_limb, tmp_limb, overflow ? UINT8_C(1) : UINT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return overflow;
+}
+
+static inline void zig_addw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ (void)zig_addo_big(res, lhs, rhs, is_signed, bits);
+}
+
+static inline void zig_subw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ (void)zig_subo_big(res, lhs, rhs, is_signed, bits);
+}
+
static inline uint16_t zig_clz_big(const void *val, bool is_signed, uint16_t bits) {
const uint8_t *val_bytes = val;
uint16_t byte_offset = 0;
@@ -3092,80 +3386,6 @@ zig_msvc_atomics_128op(u128, max)
#endif /* _MSC_VER && (_M_IX86 || _M_X64) */
-/* ============================= Vector Support ============================= */
-
-#define zig_cmp_vec(operation, operator) \
- static inline void zig_##operation##_vec(bool *result, const void *lhs, const void *rhs, uint32_t len, bool is_signed, uint16_t elem_bits) { \
- uint32_t index = 0; \
- const uint8_t *lhs_ptr = lhs; \
- const uint8_t *rhs_ptr = rhs; \
- uint16_t elem_bytes = zig_int_bytes(elem_bits); \
- \
- while (index < len) { \
- result[index] = zig_cmp_big(lhs_ptr, rhs_ptr, is_signed, elem_bits) operator 0; \
- lhs_ptr += elem_bytes; \
- rhs_ptr += elem_bytes; \
- index += 1; \
- } \
- }
-zig_cmp_vec(eq, ==)
-zig_cmp_vec(ne, !=)
-zig_cmp_vec(lt, < )
-zig_cmp_vec(le, <=)
-zig_cmp_vec(gt, > )
-zig_cmp_vec(ge, >=)
-
-static inline void zig_clz_vec(void *result, const void *val, uint32_t len, bool is_signed, uint16_t elem_bits) {
- uint32_t index = 0;
- const uint8_t *val_ptr = val;
- uint16_t elem_bytes = zig_int_bytes(elem_bits);
-
- while (index < len) {
- uint16_t lz = zig_clz_big(val_ptr, is_signed, elem_bits);
- if (elem_bits <= 128) {
- ((uint8_t *)result)[index] = (uint8_t)lz;
- } else {
- ((uint16_t *)result)[index] = lz;
- }
- val_ptr += elem_bytes;
- index += 1;
- }
-}
-
-static inline void zig_ctz_vec(void *result, const void *val, uint32_t len, bool is_signed, uint16_t elem_bits) {
- uint32_t index = 0;
- const uint8_t *val_ptr = val;
- uint16_t elem_bytes = zig_int_bytes(elem_bits);
-
- while (index < len) {
- uint16_t tz = zig_ctz_big(val_ptr, is_signed, elem_bits);
- if (elem_bits <= 128) {
- ((uint8_t *)result)[index] = (uint8_t)tz;
- } else {
- ((uint16_t *)result)[index] = tz;
- }
- val_ptr += elem_bytes;
- index += 1;
- }
-}
-
-static inline void zig_popcount_vec(void *result, const void *val, uint32_t len, bool is_signed, uint16_t elem_bits) {
- uint32_t index = 0;
- const uint8_t *val_ptr = val;
- uint16_t elem_bytes = zig_int_bytes(elem_bits);
-
- while (index < len) {
- uint16_t pc = zig_popcount_big(val_ptr, is_signed, elem_bits);
- if (elem_bits <= 128) {
- ((uint8_t *)result)[index] = (uint8_t)pc;
- } else {
- ((uint16_t *)result)[index] = pc;
- }
- val_ptr += elem_bytes;
- index += 1;
- }
-}
-
/* ======================== Special Case Intrinsics ========================= */
#if (_MSC_VER && _M_X64) || defined(__x86_64__)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index b8606b1a17..5e92a6f76c 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -444,8 +444,8 @@ pub const Function = struct {
return f.object.dg.renderType(w, t);
}
- fn renderIntCast(f: *Function, w: anytype, dest_ty: Type, src: CValue, src_ty: Type, location: ValueRenderLocation) !void {
- return f.object.dg.renderIntCast(w, dest_ty, .{ .c_value = .{ .f = f, .value = src } }, src_ty, location);
+ fn renderIntCast(f: *Function, w: anytype, dest_ty: Type, src: CValue, v: Vectorizer, src_ty: Type, location: ValueRenderLocation) !void {
+ return f.object.dg.renderIntCast(w, dest_ty, .{ .c_value = .{ .f = f, .value = src, .v = v } }, src_ty, location);
}
fn fmtIntLiteral(f: *Function, ty: Type, val: Value) !std.fmt.Formatter(formatIntLiteral) {
@@ -1593,6 +1593,7 @@ pub const DeclGen = struct {
c_value: struct {
f: *Function,
value: CValue,
+ v: Vectorizer,
},
value: struct {
value: Value,
@@ -1602,6 +1603,7 @@ pub const DeclGen = struct {
switch (self.*) {
.c_value => |v| {
try v.f.writeCValue(w, v.value, location);
+ try v.v.elem(v.f, w);
},
.value => |v| {
try dg.renderValue(w, value_ty, v.value, location);
@@ -1887,7 +1889,6 @@ pub const DeclGen = struct {
if (cty.isFloat()) cty.floatActiveBits(dg.module.getTarget()) else dg.byteSize(cty) * 8,
}),
.array => try writer.writeAll("big"),
- .vector => try writer.writeAll("vec"),
}
}
@@ -1895,34 +1896,19 @@ pub const DeclGen = struct {
switch (info) {
.none => {},
.bits => {
- const cty = try dg.typeToCType(ty, .complete);
- if (cty.castTag(.vector)) |pl| {
- var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = pl.data.len };
- try writer.print(", {}", .{try dg.fmtIntLiteral(
- Type.u32,
- Value.initPayload(&len_pl.base),
- .FunctionArgument,
- )});
- }
-
const target = dg.module.getTarget();
- const elem_ty = ty.shallowElemType();
- const elem_info = if (elem_ty.isAbiInt())
- elem_ty.intInfo(target)
- else
- std.builtin.Type.Int{
- .signedness = .unsigned,
- .bits = @intCast(u16, elem_ty.bitSize(target)),
- };
- switch (cty.tag()) {
- else => {},
- .array, .vector => try writer.print(", {}", .{elem_info.signedness == .signed}),
- }
+ const int_info = if (ty.isAbiInt()) ty.intInfo(target) else std.builtin.Type.Int{
+ .signedness = .unsigned,
+ .bits = @intCast(u16, ty.bitSize(target)),
+ };
- var bits_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = elem_info.bits };
+ const cty = try dg.typeToCType(ty, .complete);
+ if (cty.tag() == .array) try writer.print(", {}", .{int_info.signedness == .signed});
+
+ var bits_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = int_info.bits };
try writer.print(", {}", .{try dg.fmtIntLiteral(switch (cty.tag()) {
else => Type.u8,
- .array, .vector => Type.u16,
+ .array => Type.u16,
}, Value.initPayload(&bits_pl.base), .FunctionArgument)});
},
}
@@ -2786,10 +2772,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .none),
.rem => blk: {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const lhs_ty = f.air.typeOf(bin_op.lhs);
+ const lhs_scalar_ty = f.air.typeOf(bin_op.lhs).scalarType();
// For binary operations @TypeOf(lhs)==@TypeOf(rhs),
// so we only check one.
- break :blk if (lhs_ty.isInt())
+ break :blk if (lhs_scalar_ty.isInt())
try airBinOp(f, inst, "%", "rem", .none)
else
try airBinFloatOp(f, inst, "fmod");
@@ -2833,10 +2819,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.slice => try airSlice(f, inst),
- .cmp_gt => try airCmpOp(f, inst, .gt),
- .cmp_gte => try airCmpOp(f, inst, .gte),
- .cmp_lt => try airCmpOp(f, inst, .lt),
- .cmp_lte => try airCmpOp(f, inst, .lte),
+ .cmp_gt => try airCmpOp(f, inst, f.air.instructions.items(.data)[inst].bin_op, .gt),
+ .cmp_gte => try airCmpOp(f, inst, f.air.instructions.items(.data)[inst].bin_op, .gte),
+ .cmp_lt => try airCmpOp(f, inst, f.air.instructions.items(.data)[inst].bin_op, .lt),
+ .cmp_lte => try airCmpOp(f, inst, f.air.instructions.items(.data)[inst].bin_op, .lte),
.cmp_eq => try airEquality(f, inst, .eq),
.cmp_neq => try airEquality(f, inst, .neq),
@@ -2844,7 +2830,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.cmp_vector => blk: {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.VectorCmp, ty_pl.payload).data;
- break :blk try airCmpBuiltinCall(f, inst, extra, extra.compareOperator(), .operator, .bits,);
+ break :blk try airCmpOp(f, inst, extra, extra.compareOperator());
},
.cmp_lt_errors_len => try airCmpLtErrorsLen(f, inst),
@@ -3294,7 +3280,10 @@ fn airArg(f: *Function, inst: Air.Inst.Index) !CValue {
fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- const ptr_info = f.air.typeOf(ty_op.operand).ptrInfo().data;
+
+ const ptr_ty = f.air.typeOf(ty_op.operand);
+ const ptr_scalar_ty = ptr_ty.scalarType();
+ const ptr_info = ptr_scalar_ty.ptrInfo().data;
const src_ty = ptr_info.pointee_type;
if (!src_ty.hasRuntimeBitsIgnoreComptime() or
@@ -3312,16 +3301,19 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const is_aligned = ptr_info.@"align" == 0 or ptr_info.@"align" >= src_ty.abiAlignment(target);
const is_array = lowersToArray(src_ty, target);
const need_memcpy = !is_aligned or is_array;
- const writer = f.object.writer();
+ const writer = f.object.writer();
const local = try f.allocLocal(inst, src_ty);
+ const v = try Vectorizer.start(f, inst, writer, ptr_ty);
if (need_memcpy) {
try writer.writeAll("memcpy(");
if (!is_array) try writer.writeByte('&');
- try f.writeCValue(writer, local, .FunctionArgument);
+ try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(", (const char *)");
try f.writeCValue(writer, operand, .Other);
+ try v.elem(f, writer);
try writer.writeAll(", sizeof(");
try f.renderType(writer, src_ty);
try writer.writeAll("))");
@@ -3351,6 +3343,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const field_ty = Type.initPayload(&field_pl.base);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = (");
try f.renderType(writer, src_ty);
try writer.writeAll(")zig_wrap_");
@@ -3369,16 +3362,21 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
try f.writeCValueDeref(writer, operand);
+ try v.elem(f, writer);
try writer.print(", {})", .{try f.fmtIntLiteral(bit_offset_ty, bit_offset_val)});
if (cant_cast) try writer.writeByte(')');
try f.object.dg.renderBuiltinInfo(writer, field_ty, .bits);
try writer.writeByte(')');
} else {
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
try f.writeCValueDeref(writer, operand);
+ try v.elem(f, writer);
}
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -3444,15 +3442,22 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const writer = f.object.writer();
- const inst_ty = f.air.typeOfIndex(inst);
- const local = try f.allocLocal(inst, inst_ty);
- const operand_ty = f.air.typeOf(ty_op.operand);
+ const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+ const operand_ty = f.air.typeOf(ty_op.operand);
+ const scalar_ty = operand_ty.scalarType();
+
+ const writer = f.object.writer();
+ const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
- try f.renderIntCast(writer, inst_ty, operand, operand_ty, .Other);
+ try f.renderIntCast(writer, inst_scalar_ty, operand, v, scalar_ty, .Other);
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -3578,7 +3583,10 @@ fn storeUndefined(f: *Function, lhs_child_ty: Type, dest_ptr: CValue) !CValue {
fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
// *a = b;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const ptr_info = f.air.typeOf(bin_op.lhs).ptrInfo().data;
+
+ const ptr_ty = f.air.typeOf(bin_op.lhs);
+ const ptr_scalar_ty = ptr_ty.scalarType();
+ const ptr_info = ptr_scalar_ty.ptrInfo().data;
if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
@@ -3601,11 +3609,13 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
ptr_info.@"align" >= ptr_info.pointee_type.abiAlignment(target);
const is_array = lowersToArray(ptr_info.pointee_type, target);
const need_memcpy = !is_aligned or is_array;
- const writer = f.object.writer();
const src_val = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
+ const writer = f.object.writer();
+ const v = try Vectorizer.start(f, inst, writer, ptr_ty);
+
if (need_memcpy) {
// For this memcpy to safely work we need the rhs to have the same
// underlying type as the lhs (i.e. they must both be arrays of the same underlying type).
@@ -3626,9 +3636,11 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("memcpy((char *)");
try f.writeCValue(writer, ptr_val, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
if (!is_array) try writer.writeByte('&');
try f.writeCValue(writer, array_src, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", sizeof(");
try f.renderType(writer, src_ty);
try writer.writeAll("))");
@@ -3672,12 +3684,14 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
const mask_val = Value.initPayload(&mask_pl.base);
try f.writeCValueDeref(writer, ptr_val);
+ try v.elem(f, writer);
try writer.writeAll(" = zig_or_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeAll("(zig_and_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
try f.writeCValueDeref(writer, ptr_val);
+ try v.elem(f, writer);
try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(host_ty, mask_val)});
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
@@ -3699,14 +3713,19 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte(')');
}
try f.writeCValue(writer, src_val, .Other);
+ try v.elem(f, writer);
if (cant_cast) try writer.writeByte(')');
try writer.print(", {}))", .{try f.fmtIntLiteral(bit_offset_ty, bit_offset_val)});
} else {
try f.writeCValueDeref(writer, ptr_val);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
try f.writeCValue(writer, src_val, .Other);
+ try v.elem(f, writer);
}
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return .none;
}
@@ -3724,51 +3743,39 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.air.typeOfIndex(inst);
- const vector_ty = f.air.typeOf(bin_op.lhs);
- const scalar_ty = vector_ty.scalarType();
+ const operand_ty = f.air.typeOf(bin_op.lhs);
+ const scalar_ty = operand_ty.scalarType();
+
const w = f.object.writer();
-
const local = try f.allocLocal(inst, inst_ty);
-
- switch (vector_ty.zigTypeTag()) {
- .Vector => {
- try w.writeAll("zig_v");
- try w.writeAll(operation);
- try w.writeAll("o_");
- try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty);
- try w.writeAll("(");
- try f.writeCValueMember(w, local, .{ .field = 1 });
- try w.writeAll(", ");
- try f.writeCValueMember(w, local, .{ .field = 0 });
- try w.print(", {d}, ", .{vector_ty.vectorLen()});
- },
- else => {
- try f.writeCValueMember(w, local, .{ .field = 1 });
- try w.writeAll(" = zig_");
- try w.writeAll(operation);
- try w.writeAll("o_");
- try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty);
- try w.writeAll("(&");
- try f.writeCValueMember(w, local, .{ .field = 0 });
- try w.writeAll(", ");
- },
- }
-
+ const v = try Vectorizer.start(f, inst, w, operand_ty);
+ try f.writeCValueMember(w, local, .{ .field = 1 });
+ try v.elem(f, w);
+ try w.writeAll(" = zig_");
+ try w.writeAll(operation);
+ try w.writeAll("o_");
+ try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty);
+ try w.writeAll("(&");
+ try f.writeCValueMember(w, local, .{ .field = 0 });
+ try v.elem(f, w);
+ try w.writeAll(", ");
try f.writeCValue(w, lhs, .FunctionArgument);
+ try v.elem(f, w);
try w.writeAll(", ");
try f.writeCValue(w, rhs, .FunctionArgument);
+ try v.elem(f, w);
try f.object.dg.renderBuiltinInfo(w, scalar_ty, info);
try w.writeAll(");\n");
+ try v.end(f, inst, w);
return local;
}
fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
- const inst_ty = f.air.typeOfIndex(inst);
- if (inst_ty.tag() != .bool)
- return try airUnBuiltinCall(f, inst, "not", .bits);
-
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const operand_ty = f.air.typeOf(ty_op.operand);
+ const scalar_ty = operand_ty.scalarType();
+ if (scalar_ty.tag() != .bool) return try airUnBuiltinCall(f, inst, "not", .bits);
if (f.liveness.isUnused(inst)) {
try reap(f, inst, &.{ty_op.operand});
@@ -3778,14 +3785,20 @@ fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
const op = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
+ const inst_ty = f.air.typeOfIndex(inst);
+
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
-
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
try writer.writeByte('!');
try f.writeCValue(writer, op, .Other);
+ try v.elem(f, writer);
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -3798,71 +3811,89 @@ fn airBinOp(
) !CValue {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const operand_ty = f.air.typeOf(bin_op.lhs);
+ const scalar_ty = operand_ty.scalarType();
const target = f.object.dg.module.getTarget();
- if ((operand_ty.isInt() and operand_ty.bitSize(target) > 64) or operand_ty.isRuntimeFloat())
+ if ((scalar_ty.isInt() and scalar_ty.bitSize(target) > 64) or scalar_ty.isRuntimeFloat())
return try airBinBuiltinCall(f, inst, operation, info);
- const lhs = try f.resolveInst(bin_op.lhs);
- const rhs = try f.resolveInst(bin_op.rhs);
-
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
-
- if (f.liveness.isUnused(inst)) return .none;
-
- const inst_ty = f.air.typeOfIndex(inst);
-
- const writer = f.object.writer();
- const local = try f.allocLocal(inst, inst_ty);
- try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = ");
- try f.writeCValue(writer, lhs, .Other);
- try writer.writeByte(' ');
- try writer.writeAll(operator);
- try writer.writeByte(' ');
- try f.writeCValue(writer, rhs, .Other);
- try writer.writeAll(";\n");
-
- return local;
-}
-
-fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: std.math.CompareOperator) !CValue {
- const bin_op = f.air.instructions.items(.data)[inst].bin_op;
-
if (f.liveness.isUnused(inst)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
- const operand_ty = f.air.typeOf(bin_op.lhs);
- const target = f.object.dg.module.getTarget();
- const operand_bits = operand_ty.bitSize(target);
- if (operand_ty.isInt() and operand_bits > 64)
- return airCmpBuiltinCall(
- f,
- inst,
- bin_op,
- operator,
- .cmp,
- if (operand_bits > 128) .bits else .none,
- );
- if (operand_ty.isRuntimeFloat())
- return airCmpBuiltinCall(f, inst, bin_op, operator, .operator, .none);
-
- const inst_ty = f.air.typeOfIndex(inst);
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
+ const inst_ty = f.air.typeOfIndex(inst);
+
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
+ try writer.writeByte(' ');
+ try writer.writeAll(operator);
+ try writer.writeByte(' ');
+ try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
+ return local;
+}
+
+fn airCmpOp(
+ f: *Function,
+ inst: Air.Inst.Index,
+ data: anytype,
+ operator: std.math.CompareOperator,
+) !CValue {
+ if (f.liveness.isUnused(inst)) {
+ try reap(f, inst, &.{ data.lhs, data.rhs });
+ return .none;
+ }
+
+ const operand_ty = f.air.typeOf(data.lhs);
+ const scalar_ty = operand_ty.scalarType();
+
+ const target = f.object.dg.module.getTarget();
+ const scalar_bits = scalar_ty.bitSize(target);
+ if (scalar_ty.isInt() and scalar_bits > 64)
+ return airCmpBuiltinCall(
+ f,
+ inst,
+ data,
+ operator,
+ .cmp,
+ if (scalar_bits > 128) .bits else .none,
+ );
+ if (scalar_ty.isRuntimeFloat())
+ return airCmpBuiltinCall(f, inst, data, operator, .operator, .none);
+
+ const inst_ty = f.air.typeOfIndex(inst);
+ const lhs = try f.resolveInst(data.lhs);
+ const rhs = try f.resolveInst(data.rhs);
+ try reap(f, inst, &.{ data.lhs, data.rhs });
+
+ const writer = f.object.writer();
+ const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
+ try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" = ");
+ try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
try writer.writeByte(' ');
try writer.writeAll(compareOperatorC(operator));
try writer.writeByte(' ');
try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
return local;
}
@@ -3974,11 +4005,14 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.air.typeOfIndex(inst);
- const elem_ty = inst_ty.elemType2();
+ const inst_scalar_ty = inst_ty.scalarType();
+ const elem_ty = inst_scalar_ty.elemType2();
const local = try f.allocLocal(inst, inst_ty);
const writer = f.object.writer();
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
if (elem_ty.hasRuntimeBitsIgnoreComptime()) {
@@ -3986,19 +4020,26 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
// results in a NULL pointer, or if LHS is NULL. The operation is only UB
// if the result is NULL and then dereferenced.
try writer.writeByte('(');
- try f.renderType(writer, inst_ty);
+ try f.renderType(writer, inst_scalar_ty);
try writer.writeAll(")(((uintptr_t)");
try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll(") ");
try writer.writeByte(operator);
try writer.writeAll(" (");
try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll("*sizeof(");
try f.renderType(writer, elem_ty);
try writer.writeAll(")))");
- } else try f.writeCValue(writer, lhs, .Initializer);
+ } else {
+ try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
+ }
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -4011,10 +4052,12 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons
}
const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+
const target = f.object.dg.module.getTarget();
- if (inst_ty.isInt() and inst_ty.bitSize(target) > 64)
+ if (inst_scalar_ty.isInt() and inst_scalar_ty.bitSize(target) > 64)
return try airBinBuiltinCall(f, inst, operation[1..], .none);
- if (inst_ty.isRuntimeFloat())
+ if (inst_scalar_ty.isRuntimeFloat())
return try airBinFloatOp(f, inst, operation);
const lhs = try f.resolveInst(bin_op.lhs);
@@ -4023,19 +4066,26 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
// (lhs <> rhs) ? lhs : rhs
try writer.writeAll(" = (");
try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
try writer.writeByte(' ');
try writer.writeByte(operator);
try writer.writeByte(' ');
try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll(") ? ");
try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" : ");
try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
return local;
}
@@ -6002,30 +6052,35 @@ fn airUnBuiltinCall(
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
const operand_ty = f.air.typeOf(ty_op.operand);
+ const scalar_ty = operand_ty.scalarType();
- const inst_cty = try f.typeToCType(inst_ty, .complete);
- const ref_ret = switch (inst_cty.tag()) {
- else => false,
- .array, .vector => true,
- };
+ const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
+ const ref_ret = inst_scalar_cty.tag() == .array;
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
if (!ref_ret) {
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
}
try writer.print("zig_{s}_", .{operation});
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
if (ref_ret) {
try f.writeCValue(writer, local, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
}
try f.writeCValue(writer, operand, .FunctionArgument);
- try f.object.dg.renderBuiltinInfo(writer, operand_ty, info);
+ try v.elem(f, writer);
+ try f.object.dg.renderBuiltinInfo(writer, scalar_ty, info);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -6047,21 +6102,38 @@ fn airBinBuiltinCall(
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
const operand_ty = f.air.typeOf(bin_op.lhs);
+ const scalar_ty = operand_ty.scalarType();
+
+ const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
+ const ref_ret = inst_scalar_cty.tag() == .array;
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = zig_");
- try writer.writeAll(operation);
- try writer.writeByte('_');
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
+ if (!ref_ret) {
+ try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" = ");
+ }
+ try writer.print("zig_{s}_", .{operation});
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
+ if (ref_ret) {
+ try f.writeCValue(writer, local, .FunctionArgument);
+ try v.elem(f, writer);
+ try writer.writeAll(", ");
+ }
try f.writeCValue(writer, lhs, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, rhs, .FunctionArgument);
- try f.object.dg.renderBuiltinInfo(writer, operand_ty, info);
+ try v.elem(f, writer);
+ try f.object.dg.renderBuiltinInfo(writer, scalar_ty, info);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -6073,45 +6145,56 @@ fn airCmpBuiltinCall(
operation: enum { cmp, operator },
info: BuiltinInfo,
) !CValue {
- const inst_ty = f.air.typeOfIndex(inst);
- const operand_ty = f.air.typeOf(data.lhs);
+ if (f.liveness.isUnused(inst)) {
+ try reap(f, inst, &.{ data.lhs, data.rhs });
+ return .none;
+ }
const lhs = try f.resolveInst(data.lhs);
const rhs = try f.resolveInst(data.rhs);
try reap(f, inst, &.{ data.lhs, data.rhs });
- const inst_cty = try f.typeToCType(inst_ty, .complete);
- const ref_ret = switch (inst_cty.tag()) {
- else => false,
- .array, .vector => true,
- };
+ const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+ const operand_ty = f.air.typeOf(data.lhs);
+ const scalar_ty = operand_ty.scalarType();
+
+ const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
+ const ref_ret = inst_scalar_cty.tag() == .array;
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
if (!ref_ret) {
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
}
try writer.print("zig_{s}_", .{switch (operation) {
else => @tagName(operation),
.operator => compareOperatorAbbrev(operator),
}});
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
if (ref_ret) {
try f.writeCValue(writer, local, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
}
try f.writeCValue(writer, lhs, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, rhs, .FunctionArgument);
- try f.object.dg.renderBuiltinInfo(writer, operand_ty, info);
+ try v.elem(f, writer);
+ try f.object.dg.renderBuiltinInfo(writer, scalar_ty, info);
try writer.writeByte(')');
if (!ref_ret) try writer.print(" {s} {}", .{
compareOperatorC(operator),
try f.fmtIntLiteral(Type.initTag(.i32), Value.zero),
});
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -6498,65 +6581,35 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(reduce.operand);
try reap(f, inst, &.{reduce.operand});
const operand_ty = f.air.typeOf(reduce.operand);
- const vector_len = operand_ty.vectorLen();
const writer = f.object.writer();
- const Op = union(enum) {
- call_fn: []const u8,
+ const op: union(enum) {
+ float_op: []const u8,
+ builtin: []const u8,
infix: []const u8,
ternary: []const u8,
- };
- var fn_name_buf: [64]u8 = undefined;
- const op: Op = switch (reduce.operation) {
+ } = switch (reduce.operation) {
.And => .{ .infix = " &= " },
.Or => .{ .infix = " |= " },
.Xor => .{ .infix = " ^= " },
.Min => switch (scalar_ty.zigTypeTag()) {
- .Int => Op{ .ternary = " < " },
- .Float => op: {
- const float_bits = scalar_ty.floatBits(target);
- break :op Op{
- .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "{s}fmin{s}", .{
- libcFloatPrefix(float_bits), libcFloatSuffix(float_bits),
- }) catch unreachable,
- };
- },
+ .Int => .{ .ternary = " < " },
+ .Float => .{ .float_op = "fmin" },
else => unreachable,
},
.Max => switch (scalar_ty.zigTypeTag()) {
- .Int => Op{ .ternary = " > " },
- .Float => op: {
- const float_bits = scalar_ty.floatBits(target);
- break :op Op{
- .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "{s}fmax{s}", .{
- libcFloatPrefix(float_bits), libcFloatSuffix(float_bits),
- }) catch unreachable,
- };
- },
+ .Int => .{ .ternary = " > " },
+ .Float => .{ .float_op = "fmax" },
else => unreachable,
},
.Add => switch (scalar_ty.zigTypeTag()) {
- .Int => Op{ .infix = " += " },
- .Float => op: {
- const float_bits = scalar_ty.floatBits(target);
- break :op Op{
- .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "__add{s}f3", .{
- compilerRtFloatAbbrev(float_bits),
- }) catch unreachable,
- };
- },
+ .Int => .{ .infix = " += " },
+ .Float => .{ .builtin = "add" },
else => unreachable,
},
.Mul => switch (scalar_ty.zigTypeTag()) {
- .Int => Op{ .infix = " *= " },
- .Float => op: {
- const float_bits = scalar_ty.floatBits(target);
- break :op Op{
- .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "__mul{s}f3", .{
- compilerRtFloatAbbrev(float_bits),
- }) catch unreachable,
- };
- },
+ .Int => .{ .infix = " *= " },
+ .Float => .{ .builtin = "mul" },
else => unreachable,
},
};
@@ -6572,75 +6625,94 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
// }
// break :reduce accum;
// }
- const it = try f.allocLocal(inst, Type.usize);
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll(" = 0;\n");
const accum = try f.allocLocal(inst, scalar_ty);
try f.writeCValue(writer, accum, .Other);
try writer.writeAll(" = ");
- const init_val = switch (reduce.operation) {
- .And, .Or, .Xor, .Add => "0",
+ var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa);
+ defer arena.deinit();
+
+ const ExpectedContents = union {
+ u: Value.Payload.U64,
+ i: Value.Payload.I64,
+ f16: Value.Payload.Float_16,
+ f32: Value.Payload.Float_32,
+ f64: Value.Payload.Float_64,
+ f80: Value.Payload.Float_80,
+ f128: Value.Payload.Float_128,
+ };
+ var stack align(@alignOf(ExpectedContents)) =
+ std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
+
+ try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) {
+ .Or, .Xor, .Add => Value.zero,
+ .And => switch (scalar_ty.zigTypeTag()) {
+ .Bool => Value.one,
+ else => switch (scalar_ty.intInfo(target).signedness) {
+ .unsigned => try scalar_ty.maxInt(stack.get(), target),
+ .signed => Value.negative_one,
+ },
+ },
.Min => switch (scalar_ty.zigTypeTag()) {
- .Int => "TODO_intmax",
- .Float => "TODO_nan",
+ .Bool => Value.one,
+ .Int => try scalar_ty.maxInt(stack.get(), target),
+ .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target),
else => unreachable,
},
.Max => switch (scalar_ty.zigTypeTag()) {
- .Int => "TODO_intmin",
- .Float => "TODO_nan",
+ .Bool => Value.zero,
+ .Int => try scalar_ty.minInt(stack.get(), target),
+ .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target),
else => unreachable,
},
- .Mul => "1",
- };
- try writer.writeAll(init_val);
- try writer.writeAll(";");
- try f.object.indent_writer.insertNewline();
- try writer.writeAll("for (;");
- try f.writeCValue(writer, it, .Other);
- try writer.print("<{d};++", .{vector_len});
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll(") ");
- try f.writeCValue(writer, accum, .Other);
+ .Mul => Value.one,
+ }, .Initializer);
+ try writer.writeAll(";\n");
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
+ try f.writeCValue(writer, accum, .Other);
switch (op) {
- .call_fn => |fn_name| {
- try writer.print(" = {s}(", .{fn_name});
+ .float_op => |operation| {
+ try writer.writeAll(" = zig_libc_name_");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
+ try writer.print("({s})(", .{operation});
try f.writeCValue(writer, accum, .FunctionArgument);
try writer.writeAll(", ");
try f.writeCValue(writer, operand, .Other);
- try writer.writeAll("[");
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll("])");
+ try v.elem(f, writer);
+ try writer.writeByte(')');
+ },
+ .builtin => |operation| {
+ try writer.print(" = zig_{s}_", .{operation});
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
+ try writer.writeByte('(');
+ try f.writeCValue(writer, accum, .FunctionArgument);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, operand, .Other);
+ try v.elem(f, writer);
+ try writer.writeByte(')');
},
.infix => |ass| {
try writer.writeAll(ass);
try f.writeCValue(writer, operand, .Other);
- try writer.writeAll("[");
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll("]");
+ try v.elem(f, writer);
},
.ternary => |cmp| {
try writer.writeAll(" = ");
try f.writeCValue(writer, accum, .Other);
try writer.writeAll(cmp);
try f.writeCValue(writer, operand, .Other);
- try writer.writeAll("[");
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll("] ? ");
+ try v.elem(f, writer);
+ try writer.writeAll(" ? ");
try f.writeCValue(writer, accum, .Other);
try writer.writeAll(" : ");
try f.writeCValue(writer, operand, .Other);
- try writer.writeAll("[");
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll("]");
+ try v.elem(f, writer);
},
}
-
try writer.writeAll(";\n");
-
- try freeLocal(f, inst, it.new_local, 0);
+ try v.end(f, inst, writer);
return accum;
}
@@ -6774,7 +6846,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte('(');
if (inst_ty.isAbiInt() and (field_ty.isAbiInt() or field_ty.isPtrAtRuntime())) {
- try f.renderIntCast(writer, inst_ty, element, field_ty, .FunctionArgument);
+ try f.renderIntCast(writer, inst_ty, element, .{}, field_ty, .FunctionArgument);
} else {
try writer.writeByte('(');
try f.renderType(writer, inst_ty);
@@ -6916,7 +6988,6 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
- const inst_ty = f.air.typeOfIndex(inst);
const un_op = f.air.instructions.items(.data)[inst].un_op;
if (f.liveness.isUnused(inst)) {
try reap(f, inst, &.{un_op});
@@ -6925,16 +6996,23 @@ fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
+
const operand_ty = f.air.typeOf(un_op);
+ const scalar_ty = operand_ty.scalarType();
const writer = f.object.writer();
- const local = try f.allocLocal(inst, inst_ty);
+ const local = try f.allocLocal(inst, operand_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = zig_neg_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
try f.writeCValue(writer, operand, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -6944,19 +7022,28 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVal
try reap(f, inst, &.{un_op});
return .none;
}
+
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
- const writer = f.object.writer();
+
const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+
+ const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = zig_libc_name_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, inst_scalar_ty);
try writer.writeByte('(');
try writer.writeAll(operation);
try writer.writeAll(")(");
try f.writeCValue(writer, operand, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -6966,23 +7053,32 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
+
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- const writer = f.object.writer();
const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+
+ const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = zig_libc_name_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, inst_scalar_ty);
try writer.writeByte('(');
try writer.writeAll(operation);
try writer.writeAll(")(");
try f.writeCValue(writer, lhs, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, rhs, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -6993,23 +7089,34 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
return .none;
}
- const inst_ty = f.air.typeOfIndex(inst);
+
const mulend1 = try f.resolveInst(bin_op.lhs);
const mulend2 = try f.resolveInst(bin_op.rhs);
const addend = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
+
+ const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = zig_libc_name_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, inst_scalar_ty);
try writer.writeAll("(fma)(");
try f.writeCValue(writer, mulend1, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, mulend2, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, addend, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -7510,6 +7617,47 @@ fn formatIntLiteral(
try data.cty.renderLiteralSuffix(writer);
}
+const Vectorizer = struct {
+ index: CValue = .none,
+
+ pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorizer {
+ return if (ty.zigTypeTag() == .Vector) index: {
+ var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen() };
+
+ const local = try f.allocLocal(inst, Type.usize);
+
+ try writer.writeAll("for (");
+ try f.writeCValue(writer, local, .Other);
+ try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, Value.zero)});
+ try f.writeCValue(writer, local, .Other);
+ try writer.print(" < {d}; ", .{
+ try f.fmtIntLiteral(Type.usize, Value.initPayload(&len_pl.base)),
+ });
+ try f.writeCValue(writer, local, .Other);
+ try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, Value.one)});
+ f.object.indent_writer.pushIndent();
+
+ break :index .{ .index = local };
+ } else .{};
+ }
+
+ pub fn elem(self: Vectorizer, f: *Function, writer: anytype) !void {
+ if (self.index != .none) {
+ try writer.writeByte('[');
+ try f.writeCValue(writer, self.index, .Other);
+ try writer.writeByte(']');
+ }
+ }
+
+ pub fn end(self: Vectorizer, f: *Function, inst: Air.Inst.Index, writer: anytype) !void {
+ if (self.index != .none) {
+ f.object.indent_writer.popIndent();
+ try writer.writeAll("}\n");
+ try freeLocal(f, inst, self.index.new_local, 0);
+ }
+ }
+};
+
fn isByRef(ty: Type) bool {
_ = ty;
return false;
diff --git a/src/type.zig b/src/type.zig
index 9e501d893c..15525f14eb 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -4213,7 +4213,7 @@ pub const Type = extern union {
};
}
- pub fn shallowElemType(child_ty: Type) Type {
+ fn shallowElemType(child_ty: Type) Type {
return switch (child_ty.zigTypeTag()) {
.Array, .Vector => child_ty.childType(),
else => child_ty,
diff --git a/src/value.zig b/src/value.zig
index 4a5683df36..00bf59ca38 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -3319,7 +3319,7 @@ pub const Value = extern union {
}
}
- fn floatToValue(float: f128, arena: Allocator, dest_ty: Type, target: Target) !Value {
+ pub fn floatToValue(float: f128, arena: Allocator, dest_ty: Type, target: Target) !Value {
switch (dest_ty.floatBits(target)) {
16 => return Value.Tag.float_16.create(arena, @floatCast(f16, float)),
32 => return Value.Tag.float_32.create(arena, @floatCast(f32, float)),
diff --git a/test/behavior/bitreverse.zig b/test/behavior/bitreverse.zig
index aa830144d1..80167b9a17 100644
--- a/test/behavior/bitreverse.zig
+++ b/test/behavior/bitreverse.zig
@@ -96,7 +96,6 @@ fn vector8() !void {
test "bitReverse vectors u8" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@@ -115,7 +114,6 @@ fn vector16() !void {
test "bitReverse vectors u16" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@@ -134,7 +132,6 @@ fn vector24() !void {
test "bitReverse vectors u24" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
diff --git a/test/behavior/byteswap.zig b/test/behavior/byteswap.zig
index fc385e0443..d173c13275 100644
--- a/test/behavior/byteswap.zig
+++ b/test/behavior/byteswap.zig
@@ -62,7 +62,6 @@ fn vector8() !void {
test "@byteSwap vectors u8" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@@ -81,7 +80,6 @@ fn vector16() !void {
test "@byteSwap vectors u16" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@@ -100,7 +98,6 @@ fn vector24() !void {
test "@byteSwap vectors u24" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index 927caa965b..f179cbe525 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -598,7 +598,6 @@ test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
test "vector casts" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig
index 7befa41380..f05901f7d9 100644
--- a/test/behavior/floatop.zig
+++ b/test/behavior/floatop.zig
@@ -141,7 +141,6 @@ fn testSqrt() !void {
test "@sqrt with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -234,7 +233,6 @@ fn testSin() !void {
test "@sin with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -275,7 +273,6 @@ fn testCos() !void {
test "@cos with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -315,7 +312,6 @@ fn testExp() !void {
test "@exp with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -355,7 +351,6 @@ fn testExp2() !void {
test "@exp2" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -409,7 +404,6 @@ test "@log with @vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
{
@@ -447,7 +441,6 @@ test "@log2 with vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
// https://github.com/ziglang/zig/issues/13681
if (builtin.zig_backend == .stage2_llvm and
builtin.cpu.arch == .aarch64 and
@@ -491,7 +484,6 @@ test "@log10 with vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
comptime try testLog10WithVectors();
try testLog10WithVectors();
@@ -537,7 +529,6 @@ fn testFabs() !void {
test "@fabs with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -660,7 +651,6 @@ fn testFloor() !void {
test "@floor with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -754,7 +744,6 @@ fn testCeil() !void {
test "@ceil with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -848,7 +837,6 @@ fn testTrunc() !void {
test "@trunc with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig
index 133a543d42..34a7d0976a 100644
--- a/test/behavior/maximum_minimum.zig
+++ b/test/behavior/maximum_minimum.zig
@@ -25,7 +25,6 @@ test "@max" {
test "@max on vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -75,7 +74,6 @@ test "@min for vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
diff --git a/test/behavior/muladd.zig b/test/behavior/muladd.zig
index a2d9e6d16d..218edc5a2d 100644
--- a/test/behavior/muladd.zig
+++ b/test/behavior/muladd.zig
@@ -100,7 +100,6 @@ fn vector16() !void {
}
test "vector f16" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -124,7 +123,6 @@ fn vector32() !void {
}
test "vector f32" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -148,7 +146,6 @@ fn vector64() !void {
}
test "vector f64" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -171,7 +168,6 @@ fn vector80() !void {
}
test "vector f80" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -195,7 +191,6 @@ fn vector128() !void {
}
test "vector f128" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index d885a7fabc..e74bcdad86 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -25,7 +25,6 @@ test "implicit cast vector to array - bool" {
test "vector wrap operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -116,7 +115,6 @@ test "vector float operators" {
test "vector bit operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -442,7 +440,6 @@ test "vector comparison operators" {
test "vector division operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -525,7 +522,6 @@ test "vector division operators" {
test "vector bitwise not operator" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -557,7 +553,6 @@ test "vector bitwise not operator" {
test "vector shift operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -651,7 +646,6 @@ test "vector shift operators" {
test "vector reduce operation" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -707,7 +701,7 @@ test "vector reduce operation" {
// LLVM 11 ERROR: Cannot select type
// https://github.com/ziglang/zig/issues/7138
- if (builtin.target.cpu.arch != .aarch64) {
+ if (builtin.zig_backend != .stage2_llvm or builtin.target.cpu.arch != .aarch64) {
try testReduce(.Min, [4]i64{ 1234567, -386, 0, 3 }, @as(i64, -386));
try testReduce(.Min, [4]u64{ 99, 9999, 9, 99999 }, @as(u64, 9));
}
@@ -725,7 +719,7 @@ test "vector reduce operation" {
// LLVM 11 ERROR: Cannot select type
// https://github.com/ziglang/zig/issues/7138
- if (builtin.target.cpu.arch != .aarch64) {
+ if (builtin.zig_backend != .stage2_llvm or builtin.target.cpu.arch != .aarch64) {
try testReduce(.Max, [4]i64{ 1234567, -386, 0, 3 }, @as(i64, 1234567));
try testReduce(.Max, [4]u64{ 99, 9999, 9, 99999 }, @as(u64, 99999));
}
@@ -773,14 +767,14 @@ test "vector reduce operation" {
// LLVM 11 ERROR: Cannot select type
// https://github.com/ziglang/zig/issues/7138
- if (false) {
- try testReduce(.Min, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan);
- try testReduce(.Min, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, f32_nan);
- try testReduce(.Min, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, f64_nan);
+ if (builtin.zig_backend != .stage2_llvm) {
+ try testReduce(.Min, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, @as(f16, -1.9));
+ try testReduce(.Min, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, @as(f32, -1.9));
+ try testReduce(.Min, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, @as(f64, -1.9));
- try testReduce(.Max, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan);
- try testReduce(.Max, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, f32_nan);
- try testReduce(.Max, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, f64_nan);
+ try testReduce(.Max, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, @as(f16, 100.0));
+ try testReduce(.Max, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, @as(f32, 100.0));
+ try testReduce(.Max, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, @as(f64, 100.0));
}
try testReduce(.Mul, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan);
@@ -831,7 +825,6 @@ test "mask parameter of @shuffle is comptime scope" {
test "saturating add" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -863,7 +856,6 @@ test "saturating add" {
test "saturating subtraction" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -886,7 +878,6 @@ test "saturating subtraction" {
test "saturating multiplication" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -913,7 +904,6 @@ test "saturating multiplication" {
test "saturating shift-left" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -1047,7 +1037,6 @@ test "@mulWithOverflow" {
}
test "@shlWithOverflow" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -1202,7 +1191,6 @@ test "zero multiplicand" {
test "@intCast to u0" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
From 8f6da78fb1bfc9d5e8b3d5affd33cf6a62f5e8c7 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 00:30:55 -0500
Subject: [PATCH 026/294] CBE: implement vector element pointers
---
src/codegen/c.zig | 10 ++--------
src/codegen/c/type.zig | 2 +-
test/behavior/vector.zig | 3 ---
3 files changed, 3 insertions(+), 12 deletions(-)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 5e92a6f76c..60f93311a4 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -17,12 +17,6 @@ const LazySrcLoc = Module.LazySrcLoc;
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
-const target_util = @import("../target.zig");
-const libcFloatPrefix = target_util.libcFloatPrefix;
-const libcFloatSuffix = target_util.libcFloatSuffix;
-const compilerRtFloatAbbrev = target_util.compilerRtFloatAbbrev;
-const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev;
-
const BigIntLimb = std.math.big.Limb;
const BigInt = std.math.big.int;
@@ -3317,7 +3311,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(", sizeof(");
try f.renderType(writer, src_ty);
try writer.writeAll("))");
- } else if (ptr_info.host_size != 0) {
+ } else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) {
var host_pl = Type.Payload.Bits{
.base = .{ .tag = .int_unsigned },
.data = ptr_info.host_size * 8,
@@ -3647,7 +3641,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
if (src_val == .constant) {
try freeLocal(f, inst, array_src.new_local, 0);
}
- } else if (ptr_info.host_size != 0) {
+ } else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) {
const host_bits = ptr_info.host_size * 8;
var host_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = host_bits };
const host_ty = Type.initPayload(&host_pl.base);
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index 313fcc130c..038f53f186 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -1465,7 +1465,7 @@ pub const CType = extern union {
.base = .{ .tag = .int_unsigned },
.data = info.host_size * 8,
};
- const pointee_ty = if (info.host_size > 0)
+ const pointee_ty = if (info.host_size > 0 and info.vector_index == .none)
Type.initPayload(&host_int_pl.base)
else
info.pointee_type;
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index e74bcdad86..42befa9c0f 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -1118,7 +1118,6 @@ test "byte vector initialized in inline function" {
}
test "byte vector initialized in inline function" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -1233,7 +1232,6 @@ test "load packed vector element" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var x: @Vector(2, u15) = .{ 1, 4 };
try expect((&x[0]).* == 1);
@@ -1246,7 +1244,6 @@ test "store packed vector element" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var v = @Vector(4, u1){ 1, 1, 1, 1 };
try expectEqual(@Vector(4, u1){ 1, 1, 1, 1 }, v);
From ba69ee488baec677d6e206eb0670240b1c2167a6 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 00:44:27 -0500
Subject: [PATCH 027/294] CBE: implement vector truncate
---
src/codegen/c.zig | 34 ++++++++++++++++++++++------------
test/behavior/truncate.zig | 1 -
2 files changed, 22 insertions(+), 13 deletions(-)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 60f93311a4..3fea7c2ef2 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -3465,34 +3465,40 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.air.typeOfIndex(inst);
- const writer = f.object.writer();
- const local = try f.allocLocal(inst, inst_ty);
+ const inst_scalar_ty = inst_ty.scalarType();
const target = f.object.dg.module.getTarget();
- const dest_int_info = inst_ty.intInfo(target);
+ const dest_int_info = inst_scalar_ty.intInfo(target);
const dest_bits = dest_int_info.bits;
const dest_c_bits = toCIntBits(dest_int_info.bits) orelse
return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
const operand_ty = f.air.typeOf(ty_op.operand);
- const operand_int_info = operand_ty.intInfo(target);
+ const scalar_ty = operand_ty.scalarType();
+ const scalar_int_info = scalar_ty.intInfo(target);
+
+ const writer = f.object.writer();
+ const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
if (dest_c_bits < 64) {
try writer.writeByte('(');
- try f.renderType(writer, inst_ty);
+ try f.renderType(writer, inst_scalar_ty);
try writer.writeByte(')');
}
- const needs_lo = operand_int_info.bits > 64 and dest_bits <= 64;
+ const needs_lo = scalar_int_info.bits > 64 and dest_bits <= 64;
if (needs_lo) {
try writer.writeAll("zig_lo_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
}
if (dest_bits >= 8 and std.math.isPowerOfTwo(dest_bits)) {
try f.writeCValue(writer, operand, .Other);
+ try v.elem(f, writer);
} else switch (dest_int_info.signedness) {
.unsigned => {
var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa);
@@ -3502,15 +3508,16 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
- const mask_val = try inst_ty.maxInt(stack.get(), target);
+ const mask_val = try inst_scalar_ty.maxInt(stack.get(), target);
try writer.writeAll("zig_and_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
try f.writeCValue(writer, operand, .FunctionArgument);
- try writer.print(", {x})", .{try f.fmtIntLiteral(operand_ty, mask_val)});
+ try v.elem(f, writer);
+ try writer.print(", {x})", .{try f.fmtIntLiteral(scalar_ty, mask_val)});
},
.signed => {
- const c_bits = toCIntBits(operand_int_info.bits) orelse
+ const c_bits = toCIntBits(scalar_int_info.bits) orelse
return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
var shift_pl = Value.Payload.U64{
.base = .{ .tag = .int_u64 },
@@ -3519,7 +3526,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
const shift_val = Value.initPayload(&shift_pl.base);
try writer.writeAll("zig_shr_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
if (c_bits == 128) {
try writer.print("(zig_bitcast_i{d}(", .{c_bits});
} else {
@@ -3532,6 +3539,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.print("(uint{d}_t)", .{c_bits});
}
try f.writeCValue(writer, operand, .FunctionArgument);
+ try v.elem(f, writer);
if (c_bits == 128) try writer.writeByte(')');
try writer.print(", {})", .{try f.fmtIntLiteral(Type.u8, shift_val)});
if (c_bits == 128) try writer.writeByte(')');
@@ -3541,6 +3549,8 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
if (needs_lo) try writer.writeByte(')');
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
diff --git a/test/behavior/truncate.zig b/test/behavior/truncate.zig
index c81abebe68..e70d33eea2 100644
--- a/test/behavior/truncate.zig
+++ b/test/behavior/truncate.zig
@@ -60,7 +60,6 @@ test "truncate on comptime integer" {
}
test "truncate on vectors" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
From aac47079026d0daf4d5acac08b7d0ad1150002d0 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 01:23:21 -0500
Subject: [PATCH 028/294] CBE: implement splat
---
src/codegen/c.zig | 33 ++++++++++++++++++++++++++++-----
test/behavior/vector.zig | 1 -
2 files changed, 28 insertions(+), 6 deletions(-)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 3fea7c2ef2..f5309918bf 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -438,6 +438,10 @@ pub const Function = struct {
return f.object.dg.renderType(w, t);
}
+ fn renderCType(f: *Function, w: anytype, t: CType.Index) !void {
+ return f.object.dg.renderCType(w, t);
+ }
+
fn renderIntCast(f: *Function, w: anytype, dest_ty: Type, src: CValue, v: Vectorizer, src_ty: Type, location: ValueRenderLocation) !void {
return f.object.dg.renderIntCast(w, dest_ty, .{ .c_value = .{ .f = f, .value = src, .v = v } }, src_ty, location);
}
@@ -1576,9 +1580,12 @@ pub const DeclGen = struct {
/// | `renderType` | "uint8_t *" | "uint8_t *[10]" |
///
fn renderType(dg: *DeclGen, w: anytype, t: Type) error{ OutOfMemory, AnalysisFail }!void {
+ try dg.renderCType(w, try dg.typeToIndex(t, .complete));
+ }
+
+ fn renderCType(dg: *DeclGen, w: anytype, idx: CType.Index) error{ OutOfMemory, AnalysisFail }!void {
const store = &dg.ctypes.set;
const module = dg.module;
- const idx = try dg.typeToIndex(t, .complete);
_ = try renderTypePrefix(dg.decl_index, store.*, module, w, idx, .suffix, .{});
try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix, .{});
}
@@ -6543,21 +6550,37 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+
if (f.liveness.isUnused(inst)) {
try reap(f, inst, &.{ty_op.operand});
return .none;
}
- const inst_ty = f.air.typeOfIndex(inst);
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
+
+ const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+ const inst_scalar_cty = try f.typeToIndex(inst_scalar_ty, .complete);
+ const need_memcpy = f.indexToCType(inst_scalar_cty).tag() == .array;
+
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
+ if (need_memcpy) try writer.writeAll("memcpy(&");
try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = ");
+ try v.elem(f, writer);
+ try writer.writeAll(if (need_memcpy) ", &" else " = ");
+ try f.writeCValue(writer, operand, .Other);
+ if (need_memcpy) {
+ try writer.writeAll(", sizeof(");
+ try f.renderCType(writer, inst_scalar_cty);
+ try writer.writeAll("))");
+ }
+ try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
- _ = operand;
- return f.fail("TODO: C backend: implement airSplat", .{});
+ return local;
}
fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 42befa9c0f..5d569bd815 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -234,7 +234,6 @@ test "vector casts of sizes not divisible by 8" {
}
test "vector @splat" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
From 0b0298aff27a31a7f45828d96d95adfdde61a085 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 02:06:53 -0500
Subject: [PATCH 029/294] CBE: implement select and shuffle
---
src/codegen/c.zig | 79 +++++++++++++++++++++++++++++++++++++--
test/behavior/select.zig | 2 -
test/behavior/shuffle.zig | 2 -
test/behavior/vector.zig | 2 -
4 files changed, 75 insertions(+), 10 deletions(-)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index f5309918bf..5e64823a0d 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -6584,15 +6584,86 @@ fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
- if (f.liveness.isUnused(inst)) return .none;
+ const pl_op = f.air.instructions.items(.data)[inst].pl_op;
+ const extra = f.air.extraData(Air.Bin, pl_op.payload).data;
- return f.fail("TODO: C backend: implement airSelect", .{});
+ if (f.liveness.isUnused(inst)) {
+ try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs });
+ return .none;
+ }
+
+ const pred = try f.resolveInst(pl_op.operand);
+ const lhs = try f.resolveInst(extra.lhs);
+ const rhs = try f.resolveInst(extra.rhs);
+ try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs });
+
+ const inst_ty = f.air.typeOfIndex(inst);
+
+ const writer = f.object.writer();
+ const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
+ try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" = ");
+ try f.writeCValue(writer, pred, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" ? ");
+ try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" : ");
+ try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
+ return local;
}
fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
- if (f.liveness.isUnused(inst)) return .none;
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data;
- return f.fail("TODO: C backend: implement airShuffle", .{});
+ if (f.liveness.isUnused(inst)) {
+ try reap(f, inst, &.{ extra.a, extra.b });
+ return .none;
+ }
+
+ const mask = f.air.values[extra.mask];
+ const lhs = try f.resolveInst(extra.a);
+ const rhs = try f.resolveInst(extra.b);
+
+ const module = f.object.dg.module;
+ const target = module.getTarget();
+ const inst_ty = f.air.typeOfIndex(inst);
+
+ const writer = f.object.writer();
+ const local = try f.allocLocal(inst, inst_ty);
+ try reap(f, inst, &.{ extra.a, extra.b }); // local cannot alias operands
+ for (0..extra.mask_len) |index| {
+ var dst_pl = Value.Payload.U64{
+ .base = .{ .tag = .int_u64 },
+ .data = @intCast(u64, index),
+ };
+
+ try f.writeCValue(writer, local, .Other);
+ try writer.writeByte('[');
+ try f.object.dg.renderValue(writer, Type.usize, Value.initPayload(&dst_pl.base), .Other);
+ try writer.writeAll("] = ");
+
+ var buf: Value.ElemValueBuffer = undefined;
+ const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(target);
+ var src_pl = Value.Payload.U64{
+ .base = .{ .tag = .int_u64 },
+ .data = @intCast(u64, mask_elem ^ mask_elem >> 63),
+ };
+
+ try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other);
+ try writer.writeByte('[');
+ try f.object.dg.renderValue(writer, Type.usize, Value.initPayload(&src_pl.base), .Other);
+ try writer.writeAll("];\n");
+ }
+
+ return local;
}
fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
diff --git a/test/behavior/select.zig b/test/behavior/select.zig
index d09683b67c..73d69c6530 100644
--- a/test/behavior/select.zig
+++ b/test/behavior/select.zig
@@ -4,7 +4,6 @@ const mem = std.mem;
const expect = std.testing.expect;
test "@select vectors" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -33,7 +32,6 @@ fn selectVectors() !void {
}
test "@select arrays" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/shuffle.zig b/test/behavior/shuffle.zig
index bcc4618aee..b591aee2e2 100644
--- a/test/behavior/shuffle.zig
+++ b/test/behavior/shuffle.zig
@@ -8,7 +8,6 @@ test "@shuffle int" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -50,7 +49,6 @@ test "@shuffle bool 1" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 5d569bd815..816bd6c23a 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -804,7 +804,6 @@ test "vector @reduce comptime" {
test "mask parameter of @shuffle is comptime scope" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -1212,7 +1211,6 @@ test "modRem with zero divisor" {
test "array operands to shuffle are coerced to vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
From 33fa25ba4470bf000280a94f0376988b05918b75 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 02:35:32 -0500
Subject: [PATCH 030/294] CBE: ensure uniqueness of more internal identifiers
---
src/codegen/c.zig | 35 +++++++++++++----------------------
test/behavior/vector.zig | 1 -
2 files changed, 13 insertions(+), 23 deletions(-)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 5e64823a0d..f1761ed80d 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -1841,30 +1841,21 @@ pub const DeclGen = struct {
dg.module.markDeclAlive(decl);
if (dg.module.decl_exports.get(decl_index)) |exports| {
- return writer.writeAll(exports.items[export_index].options.name);
+ try writer.writeAll(exports.items[export_index].options.name);
} else if (decl.isExtern()) {
- return writer.writeAll(mem.sliceTo(decl.name, 0));
- } else if (dg.module.test_functions.get(decl_index)) |_| {
- const gpa = dg.gpa;
- const name = try decl.getFullyQualifiedName(dg.module);
- defer gpa.free(name);
- return writer.print("{}_{d}", .{ fmtIdent(name), @enumToInt(decl_index) });
+ try writer.writeAll(mem.sliceTo(decl.name, 0));
} else {
- const gpa = dg.gpa;
- const name = try decl.getFullyQualifiedName(dg.module);
- defer gpa.free(name);
-
- // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), expand
- // to 3x the length of its input
- if (name.len > 1365) {
- var hash = ident_hasher_init;
- hash.update(name);
- const ident_hash = hash.finalInt();
- try writer.writeAll("zig_D_");
- return std.fmt.formatIntValue(ident_hash, "x", .{}, writer);
- } else {
- return writer.print("{}", .{fmtIdent(name)});
- }
+ // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case),
+ // expand to 3x the length of its input, but let's cut it off at a much shorter limit.
+ var name: [100]u8 = undefined;
+ var name_stream = std.io.fixedBufferStream(&name);
+ decl.renderFullyQualifiedName(dg.module, name_stream.writer()) catch |err| switch (err) {
+ error.NoSpaceLeft => {},
+ };
+ try writer.print("{}__{d}", .{
+ fmtIdent(name_stream.getWritten()),
+ @enumToInt(decl_index),
+ });
}
}
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 816bd6c23a..0215572f8f 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -91,7 +91,6 @@ test "vector int operators" {
test "vector float operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
From 7352d461cff72d92b07cf2d2b7ee17714005b9cf Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 03:29:50 -0500
Subject: [PATCH 031/294] behavior: fix comptime issue and disable failing test
---
test/behavior/muladd.zig | 7 +++++++
test/behavior/shuffle.zig | 3 +--
test/behavior/vector.zig | 7 +++++++
3 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/test/behavior/muladd.zig b/test/behavior/muladd.zig
index 218edc5a2d..25ed3641b8 100644
--- a/test/behavior/muladd.zig
+++ b/test/behavior/muladd.zig
@@ -197,6 +197,13 @@ test "vector f128" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and
+ builtin.zig_backend == .stage2_c)
+ {
+ // https://github.com/ziglang/zig/issues/13876
+ return error.SkipZigTest;
+ }
+
comptime try vector128();
try vector128();
}
diff --git a/test/behavior/shuffle.zig b/test/behavior/shuffle.zig
index b591aee2e2..97223cc263 100644
--- a/test/behavior/shuffle.zig
+++ b/test/behavior/shuffle.zig
@@ -69,7 +69,6 @@ test "@shuffle bool 2" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm) {
@@ -81,7 +80,7 @@ test "@shuffle bool 2" {
fn doTheTest() !void {
var x: @Vector(3, bool) = [3]bool{ false, true, false };
var v: @Vector(2, bool) = [2]bool{ true, false };
- const mask: @Vector(4, i32) = [4]i32{ 0, ~@as(i32, 1), 1, 2 };
+ const mask = [4]i32{ 0, ~@as(i32, 1), 1, 2 };
var res = @shuffle(bool, x, v, mask);
try expect(mem.eql(bool, &@as([4]bool, res), &[4]bool{ false, false, true, false }));
}
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 0215572f8f..1d9d517a96 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -96,6 +96,13 @@ test "vector float operators" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and
+ builtin.zig_backend == .stage2_c)
+ {
+ // https://github.com/ziglang/zig/issues/13876
+ return error.SkipZigTest;
+ }
+
inline for ([_]type{ f16, f32, f64, f80, f128 }) |T| {
const S = struct {
fn doTheTest() !void {
From 8ea1c1932e7bd869ec77a161da7876d171d4ef1d Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 04:25:04 -0500
Subject: [PATCH 032/294] behavior: disable failing tests
---
test/behavior/slice.zig | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index ed5e2a721d..6239de2d76 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -749,6 +749,11 @@ test "slice decays to many pointer" {
}
test "write through pointer to optional slice arg" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const S = struct {
fn bar(foo: *?[]const u8) !void {
foo.* = try baz();
From 1efd36cd5c9a1128ae702b081d60ee32f21bc258 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 06:32:23 -0500
Subject: [PATCH 033/294] CBE: fix reduce of emulated integers
---
src/codegen/c.zig | 46 +++++++++++++++++++++++++++++-----------------
1 file changed, 29 insertions(+), 17 deletions(-)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index f1761ed80d..3d059adc15 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -6672,33 +6672,43 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_ty = f.air.typeOf(reduce.operand);
const writer = f.object.writer();
+ const use_operator = scalar_ty.bitSize(target) <= 64;
const op: union(enum) {
- float_op: []const u8,
- builtin: []const u8,
+ const Func = struct { operation: []const u8, info: BuiltinInfo = .none };
+ float_op: Func,
+ builtin: Func,
infix: []const u8,
ternary: []const u8,
} = switch (reduce.operation) {
- .And => .{ .infix = " &= " },
- .Or => .{ .infix = " |= " },
- .Xor => .{ .infix = " ^= " },
+ .And => if (use_operator) .{ .infix = " &= " } else .{ .builtin = .{ .operation = "and" } },
+ .Or => if (use_operator) .{ .infix = " |= " } else .{ .builtin = .{ .operation = "or" } },
+ .Xor => if (use_operator) .{ .infix = " ^= " } else .{ .builtin = .{ .operation = "xor" } },
.Min => switch (scalar_ty.zigTypeTag()) {
- .Int => .{ .ternary = " < " },
- .Float => .{ .float_op = "fmin" },
+ .Int => if (use_operator) .{ .ternary = " < " } else .{
+ .builtin = .{ .operation = "min" },
+ },
+ .Float => .{ .float_op = .{ .operation = "fmin" } },
else => unreachable,
},
.Max => switch (scalar_ty.zigTypeTag()) {
- .Int => .{ .ternary = " > " },
- .Float => .{ .float_op = "fmax" },
+ .Int => if (use_operator) .{ .ternary = " > " } else .{
+ .builtin = .{ .operation = "max" },
+ },
+ .Float => .{ .float_op = .{ .operation = "fmax" } },
else => unreachable,
},
.Add => switch (scalar_ty.zigTypeTag()) {
- .Int => .{ .infix = " += " },
- .Float => .{ .builtin = "add" },
+ .Int => if (use_operator) .{ .infix = " += " } else .{
+ .builtin = .{ .operation = "addw", .info = .bits },
+ },
+ .Float => .{ .builtin = .{ .operation = "add" } },
else => unreachable,
},
.Mul => switch (scalar_ty.zigTypeTag()) {
- .Int => .{ .infix = " *= " },
- .Float => .{ .builtin = "mul" },
+ .Int => if (use_operator) .{ .infix = " *= " } else .{
+ .builtin = .{ .operation = "mulw", .info = .bits },
+ },
+ .Float => .{ .builtin = .{ .operation = "mul" } },
else => unreachable,
},
};
@@ -6762,24 +6772,26 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, accum, .Other);
switch (op) {
- .float_op => |operation| {
+ .float_op => |func| {
try writer.writeAll(" = zig_libc_name_");
try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
- try writer.print("({s})(", .{operation});
+ try writer.print("({s})(", .{func.operation});
try f.writeCValue(writer, accum, .FunctionArgument);
try writer.writeAll(", ");
try f.writeCValue(writer, operand, .Other);
try v.elem(f, writer);
+ try f.object.dg.renderBuiltinInfo(writer, scalar_ty, func.info);
try writer.writeByte(')');
},
- .builtin => |operation| {
- try writer.print(" = zig_{s}_", .{operation});
+ .builtin => |func| {
+ try writer.print(" = zig_{s}_", .{func.operation});
try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
try f.writeCValue(writer, accum, .FunctionArgument);
try writer.writeAll(", ");
try f.writeCValue(writer, operand, .Other);
try v.elem(f, writer);
+ try f.object.dg.renderBuiltinInfo(writer, scalar_ty, func.info);
try writer.writeByte(')');
},
.infix => |ass| {
From a63134a4a56e8683aeee292b641b4e943cbfb999 Mon Sep 17 00:00:00 2001
From: jim price
Date: Sat, 4 Mar 2023 18:03:37 -0800
Subject: [PATCH 034/294] std.os: Add DeviceBusy as a possible write error
On Linux, when writing to various files in the virtual file system
(for example /sys/fs/cgroup), writing an invalid value to a file
yields errno 16 (EBUSY).
This change allows for these specific cases to be caught instead of
being lumped together in UnexpectedError.
---
lib/std/os.zig | 5 +++++
src/link.zig | 1 +
2 files changed, 6 insertions(+)
diff --git a/lib/std/os.zig b/lib/std/os.zig
index fe664302a7..3a3433d819 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -1036,6 +1036,7 @@ pub const WriteError = error{
FileTooBig,
InputOutput,
NoSpaceLeft,
+ DeviceBusy,
/// In WASI, this error may occur when the file descriptor does
/// not hold the required rights to write to it.
@@ -1134,6 +1135,7 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
.PERM => return error.AccessDenied,
.PIPE => return error.BrokenPipe,
.CONNRESET => return error.ConnectionResetByPeer,
+ .BUSY => return error.DeviceBusy,
else => |err| return unexpectedErrno(err),
}
}
@@ -1203,6 +1205,7 @@ pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize {
.PERM => return error.AccessDenied,
.PIPE => return error.BrokenPipe,
.CONNRESET => return error.ConnectionResetByPeer,
+ .BUSY => return error.DeviceBusy,
else => |err| return unexpectedErrno(err),
}
}
@@ -1299,6 +1302,7 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
.NXIO => return error.Unseekable,
.SPIPE => return error.Unseekable,
.OVERFLOW => return error.Unseekable,
+ .BUSY => return error.DeviceBusy,
else => |err| return unexpectedErrno(err),
}
}
@@ -1388,6 +1392,7 @@ pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usiz
.NXIO => return error.Unseekable,
.SPIPE => return error.Unseekable,
.OVERFLOW => return error.Unseekable,
+ .BUSY => return error.DeviceBusy,
else => |err| return unexpectedErrno(err),
}
}
diff --git a/src/link.zig b/src/link.zig
index 4c4915441d..24cc0a3861 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -460,6 +460,7 @@ pub const File = struct {
CurrentWorkingDirectoryUnlinked,
LockViolation,
NetNameDeleted,
+ DeviceBusy,
};
/// Called from within the CodeGen to lower a local variable instantion as an unnamed
From 29c56a8aa74d1b1a19bece5ba5d738af1e3c9f6d Mon Sep 17 00:00:00 2001
From: jiacai2050
Date: Sat, 4 Mar 2023 10:47:25 +0800
Subject: [PATCH 035/294] fix package redeclaration when cache is not found
---
src/Package.zig | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/src/Package.zig b/src/Package.zig
index 2aa5e85294..ed93500980 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -432,6 +432,12 @@ fn fetchAndUnpack(
const build_root = try global_cache_directory.join(gpa, &.{pkg_dir_sub_path});
errdefer gpa.free(build_root);
+ var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) {
+ error.FileNotFound => break :cached,
+ else => |e| return e,
+ };
+ errdefer pkg_dir.close();
+
try build_roots_source.writer().print(" pub const {s} = \"{}\";\n", .{
std.zig.fmtId(fqn), std.zig.fmtEscapes(build_root),
});
@@ -444,12 +450,6 @@ fn fetchAndUnpack(
return gop.value_ptr.*;
}
- var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) {
- error.FileNotFound => break :cached,
- else => |e| return e,
- };
- errdefer pkg_dir.close();
-
const ptr = try gpa.create(Package);
errdefer gpa.destroy(ptr);
From f1ae688d371f49fdbf65f952d655905c74871fdb Mon Sep 17 00:00:00 2001
From: r00ster91
Date: Sun, 5 Mar 2023 15:45:23 +0100
Subject: [PATCH 036/294] AstGen: ensure certain builtin functions return void
Fixes #14779
Co-authored-by: Veikka Tuominen
---
src/AstGen.zig | 32 +++++++++----------
...n_functions_returning_void_or_noreturn.zig | 32 +++++++++++++++++++
2 files changed, 48 insertions(+), 16 deletions(-)
create mode 100644 test/behavior/builtin_functions_returning_void_or_noreturn.zig
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 587b574a01..20f4fb6df3 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -8060,35 +8060,35 @@ fn builtinCall(
},
.fence => {
const order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[0]);
- const result = try gz.addExtendedPayload(.fence, Zir.Inst.UnNode{
+ _ = try gz.addExtendedPayload(.fence, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(node),
.operand = order,
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.set_float_mode => {
const order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .float_mode_type } }, params[0]);
- const result = try gz.addExtendedPayload(.set_float_mode, Zir.Inst.UnNode{
+ _ = try gz.addExtendedPayload(.set_float_mode, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(node),
.operand = order,
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.set_align_stack => {
const order = try expr(gz, scope, align_ri, params[0]);
- const result = try gz.addExtendedPayload(.set_align_stack, Zir.Inst.UnNode{
+ _ = try gz.addExtendedPayload(.set_align_stack, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(node),
.operand = order,
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.set_cold => {
const order = try expr(gz, scope, ri, params[0]);
- const result = try gz.addExtendedPayload(.set_cold, Zir.Inst.UnNode{
+ _ = try gz.addExtendedPayload(.set_cold, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(node),
.operand = order,
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.src => {
@@ -8373,14 +8373,14 @@ fn builtinCall(
},
.atomic_store => {
const int_type = try typeExpr(gz, scope, params[0]);
- const result = try gz.addPlNode(.atomic_store, node, Zir.Inst.AtomicStore{
+ _ = try gz.addPlNode(.atomic_store, node, Zir.Inst.AtomicStore{
// zig fmt: off
.ptr = try expr(gz, scope, .{ .rl = .none }, params[1]),
.operand = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[2]),
.ordering = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[3]),
// zig fmt: on
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.mul_add => {
const float_type = try typeExpr(gz, scope, params[0]);
@@ -8421,20 +8421,20 @@ fn builtinCall(
return rvalue(gz, ri, result, node);
},
.memcpy => {
- const result = try gz.addPlNode(.memcpy, node, Zir.Inst.Memcpy{
+ _ = try gz.addPlNode(.memcpy, node, Zir.Inst.Memcpy{
.dest = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_u8_type } }, params[0]),
.source = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_const_u8_type } }, params[1]),
.byte_count = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, params[2]),
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.memset => {
- const result = try gz.addPlNode(.memset, node, Zir.Inst.Memset{
+ _ = try gz.addPlNode(.memset, node, Zir.Inst.Memset{
.dest = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_u8_type } }, params[0]),
.byte = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u8_type } }, params[1]),
.byte_count = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, params[2]),
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.shuffle => {
const result = try gz.addPlNode(.shuffle, node, Zir.Inst.Shuffle{
@@ -8475,12 +8475,12 @@ fn builtinCall(
.prefetch => {
const ptr = try expr(gz, scope, .{ .rl = .none }, params[0]);
const options = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .prefetch_options_type } }, params[1]);
- const result = try gz.addExtendedPayload(.prefetch, Zir.Inst.BinNode{
+ _ = try gz.addExtendedPayload(.prefetch, Zir.Inst.BinNode{
.node = gz.nodeIndexToRelative(node),
.lhs = ptr,
.rhs = options,
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.c_va_arg => {
if (astgen.fn_block == null) {
diff --git a/test/behavior/builtin_functions_returning_void_or_noreturn.zig b/test/behavior/builtin_functions_returning_void_or_noreturn.zig
new file mode 100644
index 0000000000..072f5576cc
--- /dev/null
+++ b/test/behavior/builtin_functions_returning_void_or_noreturn.zig
@@ -0,0 +1,32 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const testing = std.testing;
+
+var x: u8 = 1;
+
+// This excludes builtin functions that return void or noreturn that cannot be tested.
+test {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
+
+ var val: u8 = undefined;
+ try testing.expectEqual({}, @atomicStore(u8, &val, 0, .Unordered));
+ try testing.expectEqual(void, @TypeOf(@breakpoint()));
+ try testing.expectEqual({}, @export(x, .{ .name = "x" }));
+ try testing.expectEqual({}, @fence(.Acquire));
+ try testing.expectEqual({}, @memcpy(@intToPtr([*]u8, 1), @intToPtr([*]u8, 1), 0));
+ try testing.expectEqual({}, @memset(@intToPtr([*]u8, 1), undefined, 0));
+ try testing.expectEqual(noreturn, @TypeOf(if (true) @panic("") else {}));
+ try testing.expectEqual({}, @prefetch(&val, .{}));
+ try testing.expectEqual({}, @setAlignStack(16));
+ try testing.expectEqual({}, @setCold(true));
+ try testing.expectEqual({}, @setEvalBranchQuota(0));
+ try testing.expectEqual({}, @setFloatMode(.Optimized));
+ try testing.expectEqual({}, @setRuntimeSafety(true));
+ try testing.expectEqual(noreturn, @TypeOf(if (true) @trap() else {}));
+}
From 34a23db664e0fe50fb21c892f33b0aec8a7a2f7f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 4 Mar 2023 14:21:57 -0700
Subject: [PATCH 037/294] zig.h: lower trap to SIGTRAP instead of SIGILL
---
lib/zig.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/zig.h b/lib/zig.h
index 22a9dbbb9e..65fb21f99a 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -193,7 +193,7 @@ typedef char bool;
#elif defined(__i386__) || defined(__x86_64__)
#define zig_trap() __asm__ volatile("ud2");
#else
-#define zig_trap() raise(SIGILL)
+#define zig_trap() raise(SIGTRAP)
#endif
#if zig_has_builtin(debugtrap)
From fb04ff45cd1b4eca5c56e0295bbbe961557ef820 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 4 Mar 2023 14:22:46 -0700
Subject: [PATCH 038/294] langref: small clarification to `@trap`
---
doc/langref.html.in | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index a413c3aab5..7044fe977f 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -9403,7 +9403,7 @@ fn List(comptime T: type) type {
Unlike for {#syntax#}@breakpoint(){#endsyntax#}, execution does not continue after this point.
- This function is only valid within function scope.
+ Outside function scope, this builtin causes a compile error.
{#see_also|@breakpoint#}
{#header_close#}
From 48e72960a496edc86b231d45bfa39d618b6adfaf Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 4 Mar 2023 14:48:31 -0700
Subject: [PATCH 039/294] llvm: fix lowering of `@trap`
It needed an unreachable instruction after it.
---
src/codegen/llvm.zig | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index baeaeee58f..85a82f4eda 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -8261,6 +8261,7 @@ pub const FuncGen = struct {
_ = inst;
const llvm_fn = self.getIntrinsic("llvm.trap", &.{});
_ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, undefined, 0, .Cold, .Auto, "");
+ _ = self.builder.buildUnreachable();
return null;
}
From c839c180ef1686794c039fc6d3c20a8716e87357 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 5 Mar 2023 12:46:12 -0700
Subject: [PATCH 040/294] stage2: add zig_backend to ZIR cache namespace
---
src/Module.zig | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/Module.zig b/src/Module.zig
index a2502d36d3..7ea69a0a2e 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -3528,6 +3528,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
const digest = hash: {
var path_hash: Cache.HashHelper = .{};
path_hash.addBytes(build_options.version);
+ path_hash.add(builtin.zig_backend);
if (!want_local_cache) {
path_hash.addOptionalBytes(file.pkg.root_src_directory.path);
}
From cdb9cc8f6bda4b4faa270278e3b67c4ef9246a84 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 4 Mar 2023 14:41:12 -0700
Subject: [PATCH 041/294] update zig1.wasm
---
stage1/zig.h | 2759 +++++++++++++++++++++++++++++++---------------
stage1/zig1.wasm | Bin 2408069 -> 2412111 bytes
2 files changed, 1858 insertions(+), 901 deletions(-)
diff --git a/stage1/zig.h b/stage1/zig.h
index 0756d9f731..65fb21f99a 100644
--- a/stage1/zig.h
+++ b/stage1/zig.h
@@ -1,8 +1,11 @@
#undef linux
+#ifndef __STDC_WANT_IEC_60559_TYPES_EXT__
#define __STDC_WANT_IEC_60559_TYPES_EXT__
+#endif
#include
#include
+#include
#include
#include
@@ -34,6 +37,14 @@ typedef char bool;
#define zig_has_attribute(attribute) 0
#endif
+#if __LITTLE_ENDIAN__ || _MSC_VER
+#define zig_little_endian 1
+#define zig_big_endian 0
+#else
+#define zig_little_endian 0
+#define zig_big_endian 1
+#endif
+
#if __STDC_VERSION__ >= 201112L
#define zig_threadlocal _Thread_local
#elif defined(__GNUC__)
@@ -75,6 +86,32 @@ typedef char bool;
#define zig_cold
#endif
+#if zig_has_attribute(flatten)
+#define zig_maybe_flatten __attribute__((flatten))
+#else
+#define zig_maybe_flatten
+#endif
+
+#if zig_has_attribute(noinline)
+#define zig_never_inline __attribute__((noinline)) zig_maybe_flatten
+#elif defined(_MSC_VER)
+#define zig_never_inline __declspec(noinline) zig_maybe_flatten
+#else
+#define zig_never_inline zig_never_inline_unavailable
+#endif
+
+#if zig_has_attribute(not_tail_called)
+#define zig_never_tail __attribute__((not_tail_called)) zig_never_inline
+#else
+#define zig_never_tail zig_never_tail_unavailable
+#endif
+
+#if zig_has_attribute(always_inline)
+#define zig_always_tail __attribute__((musttail))
+#else
+#define zig_always_tail zig_always_tail_unavailable
+#endif
+
#if __STDC_VERSION__ >= 199901L
#define zig_restrict restrict
#elif defined(__GNUC__)
@@ -151,10 +188,16 @@ typedef char bool;
#define zig_export(sig, symbol, name) __asm(name " = " symbol)
#endif
+#if zig_has_builtin(trap)
+#define zig_trap() __builtin_trap()
+#elif defined(__i386__) || defined(__x86_64__)
+#define zig_trap() __asm__ volatile("ud2");
+#else
+#define zig_trap() raise(SIGTRAP)
+#endif
+
#if zig_has_builtin(debugtrap)
#define zig_breakpoint() __builtin_debugtrap()
-#elif zig_has_builtin(trap) || defined(zig_gnuc)
-#define zig_breakpoint() __builtin_trap()
#elif defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__)
#define zig_breakpoint() __debugbreak()
#elif defined(__i386__) || defined(__x86_64__)
@@ -286,701 +329,656 @@ typedef char bool;
#endif
#if __STDC_VERSION__ >= 201112L
-#define zig_noreturn _Noreturn void
+#define zig_noreturn _Noreturn
#elif zig_has_attribute(noreturn) || defined(zig_gnuc)
-#define zig_noreturn __attribute__((noreturn)) void
+#define zig_noreturn __attribute__((noreturn))
#elif _MSC_VER
-#define zig_noreturn __declspec(noreturn) void
+#define zig_noreturn __declspec(noreturn)
#else
-#define zig_noreturn void
+#define zig_noreturn
#endif
#define zig_bitSizeOf(T) (CHAR_BIT * sizeof(T))
-typedef uintptr_t zig_usize;
-typedef intptr_t zig_isize;
-typedef signed short int zig_c_short;
-typedef unsigned short int zig_c_ushort;
-typedef signed int zig_c_int;
-typedef unsigned int zig_c_uint;
-typedef signed long int zig_c_long;
-typedef unsigned long int zig_c_ulong;
-typedef signed long long int zig_c_longlong;
-typedef unsigned long long int zig_c_ulonglong;
+#define zig_compiler_rt_abbrev_uint32_t si
+#define zig_compiler_rt_abbrev_int32_t si
+#define zig_compiler_rt_abbrev_uint64_t di
+#define zig_compiler_rt_abbrev_int64_t di
+#define zig_compiler_rt_abbrev_zig_u128 ti
+#define zig_compiler_rt_abbrev_zig_i128 ti
+#define zig_compiler_rt_abbrev_zig_f16 hf
+#define zig_compiler_rt_abbrev_zig_f32 sf
+#define zig_compiler_rt_abbrev_zig_f64 df
+#define zig_compiler_rt_abbrev_zig_f80 xf
+#define zig_compiler_rt_abbrev_zig_f128 tf
-typedef uint8_t zig_u8;
-typedef int8_t zig_i8;
-typedef uint16_t zig_u16;
-typedef int16_t zig_i16;
-typedef uint32_t zig_u32;
-typedef int32_t zig_i32;
-typedef uint64_t zig_u64;
-typedef int64_t zig_i64;
+zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, size_t);
+zig_extern void *memset (void *, int, size_t);
-#define zig_as_u8(val) UINT8_C(val)
-#define zig_as_i8(val) INT8_C(val)
-#define zig_as_u16(val) UINT16_C(val)
-#define zig_as_i16(val) INT16_C(val)
-#define zig_as_u32(val) UINT32_C(val)
-#define zig_as_i32(val) INT32_C(val)
-#define zig_as_u64(val) UINT64_C(val)
-#define zig_as_i64(val) INT64_C(val)
+/* ===================== 8/16/32/64-bit Integer Support ===================== */
+
+#if __STDC_VERSION__ >= 199901L || _MSC_VER
+#include <stdint.h>
+#else
+
+#if SCHAR_MIN == ~0x7F && SCHAR_MAX == 0x7F && UCHAR_MAX == 0xFF
+typedef unsigned char uint8_t;
+typedef signed char int8_t;
+#define INT8_C(c) c
+#define UINT8_C(c) c##U
+#elif SHRT_MIN == ~0x7F && SHRT_MAX == 0x7F && USHRT_MAX == 0xFF
+typedef unsigned short uint8_t;
+typedef signed short int8_t;
+#define INT8_C(c) c
+#define UINT8_C(c) c##U
+#elif INT_MIN == ~0x7F && INT_MAX == 0x7F && UINT_MAX == 0xFF
+typedef unsigned int uint8_t;
+typedef signed int int8_t;
+#define INT8_C(c) c
+#define UINT8_C(c) c##U
+#elif LONG_MIN == ~0x7F && LONG_MAX == 0x7F && ULONG_MAX == 0xFF
+typedef unsigned long uint8_t;
+typedef signed long int8_t;
+#define INT8_C(c) c##L
+#define UINT8_C(c) c##LU
+#elif LLONG_MIN == ~0x7F && LLONG_MAX == 0x7F && ULLONG_MAX == 0xFF
+typedef unsigned long long uint8_t;
+typedef signed long long int8_t;
+#define INT8_C(c) c##LL
+#define UINT8_C(c) c##LLU
+#endif
+#define INT8_MIN (~INT8_C(0x7F))
+#define INT8_MAX ( INT8_C(0x7F))
+#define UINT8_MAX (UINT8_C(0xFF))
+
+#if SCHAR_MIN == ~0x7FFF && SCHAR_MAX == 0x7FFF && UCHAR_MAX == 0xFFFF
+typedef unsigned char uint16_t;
+typedef signed char int16_t;
+#define INT16_C(c) c
+#define UINT16_C(c) c##U
+#elif SHRT_MIN == ~0x7FFF && SHRT_MAX == 0x7FFF && USHRT_MAX == 0xFFFF
+typedef unsigned short uint16_t;
+typedef signed short int16_t;
+#define INT16_C(c) c
+#define UINT16_C(c) c##U
+#elif INT_MIN == ~0x7FFF && INT_MAX == 0x7FFF && UINT_MAX == 0xFFFF
+typedef unsigned int uint16_t;
+typedef signed int int16_t;
+#define INT16_C(c) c
+#define UINT16_C(c) c##U
+#elif LONG_MIN == ~0x7FFF && LONG_MAX == 0x7FFF && ULONG_MAX == 0xFFFF
+typedef unsigned long uint16_t;
+typedef signed long int16_t;
+#define INT16_C(c) c##L
+#define UINT16_C(c) c##LU
+#elif LLONG_MIN == ~0x7FFF && LLONG_MAX == 0x7FFF && ULLONG_MAX == 0xFFFF
+typedef unsigned long long uint16_t;
+typedef signed long long int16_t;
+#define INT16_C(c) c##LL
+#define UINT16_C(c) c##LLU
+#endif
+#define INT16_MIN (~INT16_C(0x7FFF))
+#define INT16_MAX ( INT16_C(0x7FFF))
+#define UINT16_MAX (UINT16_C(0xFFFF))
+
+#if SCHAR_MIN == ~0x7FFFFFFF && SCHAR_MAX == 0x7FFFFFFF && UCHAR_MAX == 0xFFFFFFFF
+typedef unsigned char uint32_t;
+typedef signed char int32_t;
+#define INT32_C(c) c
+#define UINT32_C(c) c##U
+#elif SHRT_MIN == ~0x7FFFFFFF && SHRT_MAX == 0x7FFFFFFF && USHRT_MAX == 0xFFFFFFFF
+typedef unsigned short uint32_t;
+typedef signed short int32_t;
+#define INT32_C(c) c
+#define UINT32_C(c) c##U
+#elif INT_MIN == ~0x7FFFFFFF && INT_MAX == 0x7FFFFFFF && UINT_MAX == 0xFFFFFFFF
+typedef unsigned int uint32_t;
+typedef signed int int32_t;
+#define INT32_C(c) c
+#define UINT32_C(c) c##U
+#elif LONG_MIN == ~0x7FFFFFFF && LONG_MAX == 0x7FFFFFFF && ULONG_MAX == 0xFFFFFFFF
+typedef unsigned long uint32_t;
+typedef signed long int32_t;
+#define INT32_C(c) c##L
+#define UINT32_C(c) c##LU
+#elif LLONG_MIN == ~0x7FFFFFFF && LLONG_MAX == 0x7FFFFFFF && ULLONG_MAX == 0xFFFFFFFF
+typedef unsigned long long uint32_t;
+typedef signed long long int32_t;
+#define INT32_C(c) c##LL
+#define UINT32_C(c) c##LLU
+#endif
+#define INT32_MIN (~INT32_C(0x7FFFFFFF))
+#define INT32_MAX ( INT32_C(0x7FFFFFFF))
+#define UINT32_MAX (UINT32_C(0xFFFFFFFF))
+
+#if SCHAR_MIN == ~0x7FFFFFFFFFFFFFFF && SCHAR_MAX == 0x7FFFFFFFFFFFFFFF && UCHAR_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned char uint64_t;
+typedef signed char int64_t;
+#define INT64_C(c) c
+#define UINT64_C(c) c##U
+#elif SHRT_MIN == ~0x7FFFFFFFFFFFFFFF && SHRT_MAX == 0x7FFFFFFFFFFFFFFF && USHRT_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned short uint64_t;
+typedef signed short int64_t;
+#define INT64_C(c) c
+#define UINT64_C(c) c##U
+#elif INT_MIN == ~0x7FFFFFFFFFFFFFFF && INT_MAX == 0x7FFFFFFFFFFFFFFF && UINT_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned int uint64_t;
+typedef signed int int64_t;
+#define INT64_C(c) c
+#define UINT64_C(c) c##U
+#elif LONG_MIN == ~0x7FFFFFFFFFFFFFFF && LONG_MAX == 0x7FFFFFFFFFFFFFFF && ULONG_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned long uint64_t;
+typedef signed long int64_t;
+#define INT64_C(c) c##L
+#define UINT64_C(c) c##LU
+#elif LLONG_MIN == ~0x7FFFFFFFFFFFFFFF && LLONG_MAX == 0x7FFFFFFFFFFFFFFF && ULLONG_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned long long uint64_t;
+typedef signed long long int64_t;
+#define INT64_C(c) c##LL
+#define UINT64_C(c) c##LLU
+#endif
+#define INT64_MIN (~INT64_C(0x7FFFFFFFFFFFFFFF))
+#define INT64_MAX ( INT64_C(0x7FFFFFFFFFFFFFFF))
+#define UINT64_MAX (UINT64_C(0xFFFFFFFFFFFFFFFF))
+
+typedef size_t uintptr_t;
+typedef ptrdiff_t intptr_t;
+
+#endif
-#define zig_minInt_u8 zig_as_u8(0)
-#define zig_maxInt_u8 UINT8_MAX
#define zig_minInt_i8 INT8_MIN
#define zig_maxInt_i8 INT8_MAX
-#define zig_minInt_u16 zig_as_u16(0)
-#define zig_maxInt_u16 UINT16_MAX
+#define zig_minInt_u8 UINT8_C(0)
+#define zig_maxInt_u8 UINT8_MAX
#define zig_minInt_i16 INT16_MIN
#define zig_maxInt_i16 INT16_MAX
-#define zig_minInt_u32 zig_as_u32(0)
-#define zig_maxInt_u32 UINT32_MAX
+#define zig_minInt_u16 UINT16_C(0)
+#define zig_maxInt_u16 UINT16_MAX
#define zig_minInt_i32 INT32_MIN
#define zig_maxInt_i32 INT32_MAX
-#define zig_minInt_u64 zig_as_u64(0)
-#define zig_maxInt_u64 UINT64_MAX
+#define zig_minInt_u32 UINT32_C(0)
+#define zig_maxInt_u32 UINT32_MAX
#define zig_minInt_i64 INT64_MIN
#define zig_maxInt_i64 INT64_MAX
+#define zig_minInt_u64 UINT64_C(0)
+#define zig_maxInt_u64 UINT64_MAX
-#define zig_compiler_rt_abbrev_u32 si
-#define zig_compiler_rt_abbrev_i32 si
-#define zig_compiler_rt_abbrev_u64 di
-#define zig_compiler_rt_abbrev_i64 di
-#define zig_compiler_rt_abbrev_u128 ti
-#define zig_compiler_rt_abbrev_i128 ti
-#define zig_compiler_rt_abbrev_f16 hf
-#define zig_compiler_rt_abbrev_f32 sf
-#define zig_compiler_rt_abbrev_f64 df
-#define zig_compiler_rt_abbrev_f80 xf
-#define zig_compiler_rt_abbrev_f128 tf
-
-zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, zig_usize);
-zig_extern void *memset (void *, int, zig_usize);
-
-/* ==================== 8/16/32/64-bit Integer Routines ===================== */
-
-#define zig_maxInt(Type, bits) zig_shr_##Type(zig_maxInt_##Type, (zig_bitSizeOf(zig_##Type) - bits))
-#define zig_expand_maxInt(Type, bits) zig_maxInt(Type, bits)
-#define zig_minInt(Type, bits) zig_not_##Type(zig_maxInt(Type, bits), bits)
-#define zig_expand_minInt(Type, bits) zig_minInt(Type, bits)
+#define zig_intLimit(s, w, limit, bits) zig_shr_##s##w(zig_##limit##Int_##s##w, w - (bits))
+#define zig_minInt_i(w, bits) zig_intLimit(i, w, min, bits)
+#define zig_maxInt_i(w, bits) zig_intLimit(i, w, max, bits)
+#define zig_minInt_u(w, bits) zig_intLimit(u, w, min, bits)
+#define zig_maxInt_u(w, bits) zig_intLimit(u, w, max, bits)
#define zig_int_operator(Type, RhsType, operation, operator) \
- static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##RhsType rhs) { \
+ static inline Type zig_##operation(Type lhs, RhsType rhs) { \
return lhs operator rhs; \
}
#define zig_int_basic_operator(Type, operation, operator) \
- zig_int_operator(Type, Type, operation, operator)
+ zig_int_operator(Type, Type, operation, operator)
#define zig_int_shift_operator(Type, operation, operator) \
- zig_int_operator(Type, u8, operation, operator)
+ zig_int_operator(Type, uint8_t, operation, operator)
#define zig_int_helpers(w) \
- zig_int_basic_operator(u##w, and, &) \
- zig_int_basic_operator(i##w, and, &) \
- zig_int_basic_operator(u##w, or, |) \
- zig_int_basic_operator(i##w, or, |) \
- zig_int_basic_operator(u##w, xor, ^) \
- zig_int_basic_operator(i##w, xor, ^) \
- zig_int_shift_operator(u##w, shl, <<) \
- zig_int_shift_operator(i##w, shl, <<) \
- zig_int_shift_operator(u##w, shr, >>) \
+ zig_int_basic_operator(uint##w##_t, and_u##w, &) \
+ zig_int_basic_operator( int##w##_t, and_i##w, &) \
+ zig_int_basic_operator(uint##w##_t, or_u##w, |) \
+ zig_int_basic_operator( int##w##_t, or_i##w, |) \
+ zig_int_basic_operator(uint##w##_t, xor_u##w, ^) \
+ zig_int_basic_operator( int##w##_t, xor_i##w, ^) \
+ zig_int_shift_operator(uint##w##_t, shl_u##w, <<) \
+ zig_int_shift_operator( int##w##_t, shl_i##w, <<) \
+ zig_int_shift_operator(uint##w##_t, shr_u##w, >>) \
\
- static inline zig_i##w zig_shr_i##w(zig_i##w lhs, zig_u8 rhs) { \
- zig_i##w sign_mask = lhs < zig_as_i##w(0) ? -zig_as_i##w(1) : zig_as_i##w(0); \
+ static inline int##w##_t zig_shr_i##w(int##w##_t lhs, uint8_t rhs) { \
+ int##w##_t sign_mask = lhs < INT##w##_C(0) ? -INT##w##_C(1) : INT##w##_C(0); \
return ((lhs ^ sign_mask) >> rhs) ^ sign_mask; \
} \
\
- static inline zig_u##w zig_not_u##w(zig_u##w val, zig_u8 bits) { \
- return val ^ zig_maxInt(u##w, bits); \
+ static inline uint##w##_t zig_not_u##w(uint##w##_t val, uint8_t bits) { \
+ return val ^ zig_maxInt_u(w, bits); \
} \
\
- static inline zig_i##w zig_not_i##w(zig_i##w val, zig_u8 bits) { \
+ static inline int##w##_t zig_not_i##w(int##w##_t val, uint8_t bits) { \
(void)bits; \
return ~val; \
} \
\
- static inline zig_u##w zig_wrap_u##w(zig_u##w val, zig_u8 bits) { \
- return val & zig_maxInt(u##w, bits); \
+ static inline uint##w##_t zig_wrap_u##w(uint##w##_t val, uint8_t bits) { \
+ return val & zig_maxInt_u(w, bits); \
} \
\
- static inline zig_i##w zig_wrap_i##w(zig_i##w val, zig_u8 bits) { \
- return (val & zig_as_u##w(1) << (bits - zig_as_u8(1))) != 0 \
- ? val | zig_minInt(i##w, bits) : val & zig_maxInt(i##w, bits); \
+ static inline int##w##_t zig_wrap_i##w(int##w##_t val, uint8_t bits) { \
+ return (val & UINT##w##_C(1) << (bits - UINT8_C(1))) != 0 \
+ ? val | zig_minInt_i(w, bits) : val & zig_maxInt_i(w, bits); \
} \
\
- zig_int_basic_operator(u##w, div_floor, /) \
+ zig_int_basic_operator(uint##w##_t, div_floor_u##w, /) \
\
- static inline zig_i##w zig_div_floor_i##w(zig_i##w lhs, zig_i##w rhs) { \
- return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < zig_as_i##w(0)); \
+ static inline int##w##_t zig_div_floor_i##w(int##w##_t lhs, int##w##_t rhs) { \
+ return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < INT##w##_C(0)); \
} \
\
- zig_int_basic_operator(u##w, mod, %) \
+ zig_int_basic_operator(uint##w##_t, mod_u##w, %) \
\
- static inline zig_i##w zig_mod_i##w(zig_i##w lhs, zig_i##w rhs) { \
- zig_i##w rem = lhs % rhs; \
- return rem + (((lhs ^ rhs) & rem) < zig_as_i##w(0) ? rhs : zig_as_i##w(0)); \
+ static inline int##w##_t zig_mod_i##w(int##w##_t lhs, int##w##_t rhs) { \
+ int##w##_t rem = lhs % rhs; \
+ return rem + (((lhs ^ rhs) & rem) < INT##w##_C(0) ? rhs : INT##w##_C(0)); \
} \
\
- static inline zig_u##w zig_shlw_u##w(zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ static inline uint##w##_t zig_shlw_u##w(uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \
return zig_wrap_u##w(zig_shl_u##w(lhs, rhs), bits); \
} \
\
- static inline zig_i##w zig_shlw_i##w(zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \
- return zig_wrap_i##w((zig_i##w)zig_shl_u##w((zig_u##w)lhs, (zig_u##w)rhs), bits); \
+ static inline int##w##_t zig_shlw_i##w(int##w##_t lhs, uint8_t rhs, uint8_t bits) { \
+ return zig_wrap_i##w((int##w##_t)zig_shl_u##w((uint##w##_t)lhs, (uint##w##_t)rhs), bits); \
} \
\
- static inline zig_u##w zig_addw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ static inline uint##w##_t zig_addw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
return zig_wrap_u##w(lhs + rhs, bits); \
} \
\
- static inline zig_i##w zig_addw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs + (zig_u##w)rhs), bits); \
+ static inline int##w##_t zig_addw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs + (uint##w##_t)rhs), bits); \
} \
\
- static inline zig_u##w zig_subw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ static inline uint##w##_t zig_subw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
return zig_wrap_u##w(lhs - rhs, bits); \
} \
\
- static inline zig_i##w zig_subw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs - (zig_u##w)rhs), bits); \
+ static inline int##w##_t zig_subw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs - (uint##w##_t)rhs), bits); \
} \
\
- static inline zig_u##w zig_mulw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ static inline uint##w##_t zig_mulw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
return zig_wrap_u##w(lhs * rhs, bits); \
} \
\
- static inline zig_i##w zig_mulw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs * (zig_u##w)rhs), bits); \
+ static inline int##w##_t zig_mulw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs * (uint##w##_t)rhs), bits); \
}
zig_int_helpers(8)
zig_int_helpers(16)
zig_int_helpers(32)
zig_int_helpers(64)
-static inline bool zig_addo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+static inline bool zig_addo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
- return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
+ return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits);
#else
*res = zig_addw_u32(lhs, rhs, bits);
return *res < lhs;
#endif
}
-static inline void zig_vaddo_u32(zig_u8 *ov, zig_u32 *res, int n,
- const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i32 __addosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
-static inline bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+zig_extern int32_t __addosi4(int32_t lhs, int32_t rhs, int *overflow);
+static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i32 full_res = __addosi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int32_t full_res = __addosi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i32(full_res, bits);
- return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
+ return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vaddo_i32(zig_u8 *ov, zig_i32 *res, int n,
- const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_u64 full_res;
+ uint64_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
- return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
+ return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits);
#else
*res = zig_addw_u64(lhs, rhs, bits);
return *res < lhs;
#endif
}
-static inline void zig_vaddo_u64(zig_u8 *ov, zig_u64 *res, int n,
- const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i64 __addodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
-static inline bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+zig_extern int64_t __addodi4(int64_t lhs, int64_t rhs, int *overflow);
+static inline bool zig_addo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_i64 full_res;
+ int64_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i64 full_res = __addodi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int64_t full_res = __addodi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i64(full_res, bits);
- return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
+ return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vaddo_i64(zig_u8 *ov, zig_i64 *res, int n,
- const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_addo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_u8 full_res;
+ uint8_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
- return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
+ return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u8)full_res;
+ *res = (uint8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vaddo_u8(zig_u8 *ov, zig_u8 *res, int n,
- const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+static inline bool zig_addo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_i8 full_res;
+ int8_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
- return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
+ return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i8)full_res;
+ *res = (int8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vaddo_i8(zig_u8 *ov, zig_i8 *res, int n,
- const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+static inline bool zig_addo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_u16 full_res;
+ uint16_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
- return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
+ return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u16)full_res;
+ *res = (uint16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vaddo_u16(zig_u8 *ov, zig_u16 *res, int n,
- const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+static inline bool zig_addo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_i16 full_res;
+ int16_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
- return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
+ return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i16)full_res;
+ *res = (int16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vaddo_i16(zig_u8 *ov, zig_i16 *res, int n,
- const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
- return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
+ return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits);
#else
*res = zig_subw_u32(lhs, rhs, bits);
return *res > lhs;
#endif
}
-static inline void zig_vsubo_u32(zig_u8 *ov, zig_u32 *res, int n,
- const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i32 __subosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
-static inline bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+zig_extern int32_t __subosi4(int32_t lhs, int32_t rhs, int *overflow);
+static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i32 full_res = __subosi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int32_t full_res = __subosi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i32(full_res, bits);
- return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
+ return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vsubo_i32(zig_u8 *ov, zig_i32 *res, int n,
- const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_u64 full_res;
+ uint64_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
- return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
+ return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits);
#else
*res = zig_subw_u64(lhs, rhs, bits);
return *res > lhs;
#endif
}
-static inline void zig_vsubo_u64(zig_u8 *ov, zig_u64 *res, int n,
- const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i64 __subodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
-static inline bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+zig_extern int64_t __subodi4(int64_t lhs, int64_t rhs, int *overflow);
+static inline bool zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_i64 full_res;
+ int64_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i64 full_res = __subodi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int64_t full_res = __subodi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i64(full_res, bits);
- return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
+ return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vsubo_i64(zig_u8 *ov, zig_i64 *res, int n,
- const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_subo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_u8 full_res;
+ uint8_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
- return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
+ return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u8)full_res;
+ *res = (uint8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vsubo_u8(zig_u8 *ov, zig_u8 *res, int n,
- const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+static inline bool zig_subo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_i8 full_res;
+ int8_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
- return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
+ return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i8)full_res;
+ *res = (int8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vsubo_i8(zig_u8 *ov, zig_i8 *res, int n,
- const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
-
-static inline bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+static inline bool zig_subo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_u16 full_res;
+ uint16_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
- return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
+ return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u16)full_res;
+ *res = (uint16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vsubo_u16(zig_u8 *ov, zig_u16 *res, int n,
- const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
-
-static inline bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+static inline bool zig_subo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_i16 full_res;
+ int16_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
- return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
+ return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i16)full_res;
+ *res = (int16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vsubo_i16(zig_u8 *ov, zig_i16 *res, int n,
- const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
- return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
+ return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits);
#else
*res = zig_mulw_u32(lhs, rhs, bits);
- return rhs != zig_as_u32(0) && lhs > zig_maxInt(u32, bits) / rhs;
+ return rhs != UINT32_C(0) && lhs > zig_maxInt_u(32, bits) / rhs;
#endif
}
-static inline void zig_vmulo_u32(zig_u8 *ov, zig_u32 *res, int n,
- const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i32 __mulosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
-static inline bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+zig_extern int32_t __mulosi4(int32_t lhs, int32_t rhs, int *overflow);
+static inline bool zig_mulo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i32 full_res = __mulosi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int32_t full_res = __mulosi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i32(full_res, bits);
- return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
+ return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vmulo_i32(zig_u8 *ov, zig_i32 *res, int n,
- const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_u64 full_res;
+ uint64_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
- return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
+ return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits);
#else
*res = zig_mulw_u64(lhs, rhs, bits);
- return rhs != zig_as_u64(0) && lhs > zig_maxInt(u64, bits) / rhs;
+ return rhs != UINT64_C(0) && lhs > zig_maxInt_u(64, bits) / rhs;
#endif
}
-static inline void zig_vmulo_u64(zig_u8 *ov, zig_u64 *res, int n,
- const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i64 __mulodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
-static inline bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+zig_extern int64_t __mulodi4(int64_t lhs, int64_t rhs, int *overflow);
+static inline bool zig_mulo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_i64 full_res;
+ int64_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i64 full_res = __mulodi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int64_t full_res = __mulodi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i64(full_res, bits);
- return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
+ return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vmulo_i64(zig_u8 *ov, zig_i64 *res, int n,
- const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_u8 full_res;
+ uint8_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
- return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
+ return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u8)full_res;
+ *res = (uint8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vmulo_u8(zig_u8 *ov, zig_u8 *res, int n,
- const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+static inline bool zig_mulo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_i8 full_res;
+ int8_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
- return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
+ return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i8)full_res;
+ *res = (int8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vmulo_i8(zig_u8 *ov, zig_i8 *res, int n,
- const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_u16 full_res;
+ uint16_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
- return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
+ return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u16)full_res;
+ *res = (uint16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vmulo_u16(zig_u8 *ov, zig_u16 *res, int n,
- const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_i16 full_res;
+ int16_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
- return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
+ return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i16)full_res;
+ *res = (int16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vmulo_i16(zig_u8 *ov, zig_i16 *res, int n,
- const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
#define zig_int_builtins(w) \
- static inline bool zig_shlo_u##w(zig_u##w *res, zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ static inline bool zig_shlo_u##w(uint##w##_t *res, uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \
*res = zig_shlw_u##w(lhs, rhs, bits); \
- return lhs > zig_maxInt(u##w, bits) >> rhs; \
+ return lhs > zig_maxInt_u(w, bits) >> rhs; \
} \
\
- static inline bool zig_shlo_i##w(zig_i##w *res, zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ static inline bool zig_shlo_i##w(int##w##_t *res, int##w##_t lhs, uint8_t rhs, uint8_t bits) { \
*res = zig_shlw_i##w(lhs, rhs, bits); \
- zig_i##w mask = (zig_i##w)(zig_maxInt_u##w << (bits - rhs - 1)); \
- return (lhs & mask) != zig_as_i##w(0) && (lhs & mask) != mask; \
+ int##w##_t mask = (int##w##_t)(UINT##w##_MAX << (bits - rhs - 1)); \
+ return (lhs & mask) != INT##w##_C(0) && (lhs & mask) != mask; \
} \
\
- static inline zig_u##w zig_shls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
- zig_u##w res; \
- if (rhs >= bits) return lhs != zig_as_u##w(0) ? zig_maxInt(u##w, bits) : lhs; \
- return zig_shlo_u##w(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u##w, bits) : res; \
+ static inline uint##w##_t zig_shls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
+ uint##w##_t res; \
+ if (rhs >= bits) return lhs != UINT##w##_C(0) ? zig_maxInt_u(w, bits) : lhs; \
+ return zig_shlo_u##w(&res, lhs, (uint8_t)rhs, bits) ? zig_maxInt_u(w, bits) : res; \
} \
\
- static inline zig_i##w zig_shls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- zig_i##w res; \
- if ((zig_u##w)rhs < (zig_u##w)bits && !zig_shlo_i##w(&res, lhs, rhs, bits)) return res; \
- return lhs < zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ static inline int##w##_t zig_shls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ int##w##_t res; \
+ if ((uint##w##_t)rhs < (uint##w##_t)bits && !zig_shlo_i##w(&res, lhs, (uint8_t)rhs, bits)) return res; \
+ return lhs < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
} \
\
- static inline zig_u##w zig_adds_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
- zig_u##w res; \
- return zig_addo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \
+ static inline uint##w##_t zig_adds_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
+ uint##w##_t res; \
+ return zig_addo_u##w(&res, lhs, rhs, bits) ? zig_maxInt_u(w, bits) : res; \
} \
\
- static inline zig_i##w zig_adds_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- zig_i##w res; \
+ static inline int##w##_t zig_adds_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ int##w##_t res; \
if (!zig_addo_i##w(&res, lhs, rhs, bits)) return res; \
- return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ return res >= INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
} \
\
- static inline zig_u##w zig_subs_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
- zig_u##w res; \
- return zig_subo_u##w(&res, lhs, rhs, bits) ? zig_minInt(u##w, bits) : res; \
+ static inline uint##w##_t zig_subs_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
+ uint##w##_t res; \
+ return zig_subo_u##w(&res, lhs, rhs, bits) ? zig_minInt_u(w, bits) : res; \
} \
\
- static inline zig_i##w zig_subs_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- zig_i##w res; \
+ static inline int##w##_t zig_subs_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ int##w##_t res; \
if (!zig_subo_i##w(&res, lhs, rhs, bits)) return res; \
- return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ return res >= INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
} \
\
- static inline zig_u##w zig_muls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
- zig_u##w res; \
- return zig_mulo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \
+ static inline uint##w##_t zig_muls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
+ uint##w##_t res; \
+ return zig_mulo_u##w(&res, lhs, rhs, bits) ? zig_maxInt_u(w, bits) : res; \
} \
\
- static inline zig_i##w zig_muls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- zig_i##w res; \
+ static inline int##w##_t zig_muls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ int##w##_t res; \
if (!zig_mulo_i##w(&res, lhs, rhs, bits)) return res; \
- return (lhs ^ rhs) < zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ return (lhs ^ rhs) < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
}
zig_int_builtins(8)
zig_int_builtins(16)
@@ -988,89 +986,89 @@ zig_int_builtins(32)
zig_int_builtins(64)
#define zig_builtin8(name, val) __builtin_##name(val)
-typedef zig_c_uint zig_Builtin8;
+typedef unsigned int zig_Builtin8;
#define zig_builtin16(name, val) __builtin_##name(val)
-typedef zig_c_uint zig_Builtin16;
+typedef unsigned int zig_Builtin16;
#if INT_MIN <= INT32_MIN
#define zig_builtin32(name, val) __builtin_##name(val)
-typedef zig_c_uint zig_Builtin32;
+typedef unsigned int zig_Builtin32;
#elif LONG_MIN <= INT32_MIN
#define zig_builtin32(name, val) __builtin_##name##l(val)
-typedef zig_c_ulong zig_Builtin32;
+typedef unsigned long zig_Builtin32;
#endif
#if INT_MIN <= INT64_MIN
#define zig_builtin64(name, val) __builtin_##name(val)
-typedef zig_c_uint zig_Builtin64;
+typedef unsigned int zig_Builtin64;
#elif LONG_MIN <= INT64_MIN
#define zig_builtin64(name, val) __builtin_##name##l(val)
-typedef zig_c_ulong zig_Builtin64;
+typedef unsigned long zig_Builtin64;
#elif LLONG_MIN <= INT64_MIN
#define zig_builtin64(name, val) __builtin_##name##ll(val)
-typedef zig_c_ulonglong zig_Builtin64;
+typedef unsigned long long zig_Builtin64;
#endif
-static inline zig_u8 zig_byte_swap_u8(zig_u8 val, zig_u8 bits) {
+static inline uint8_t zig_byte_swap_u8(uint8_t val, uint8_t bits) {
return zig_wrap_u8(val >> (8 - bits), bits);
}
-static inline zig_i8 zig_byte_swap_i8(zig_i8 val, zig_u8 bits) {
- return zig_wrap_i8((zig_i8)zig_byte_swap_u8((zig_u8)val, bits), bits);
+static inline int8_t zig_byte_swap_i8(int8_t val, uint8_t bits) {
+ return zig_wrap_i8((int8_t)zig_byte_swap_u8((uint8_t)val, bits), bits);
}
-static inline zig_u16 zig_byte_swap_u16(zig_u16 val, zig_u8 bits) {
- zig_u16 full_res;
+static inline uint16_t zig_byte_swap_u16(uint16_t val, uint8_t bits) {
+ uint16_t full_res;
#if zig_has_builtin(bswap16) || defined(zig_gnuc)
full_res = __builtin_bswap16(val);
#else
- full_res = (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 0), 8) << 8 |
- (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 8), 8) >> 0;
+ full_res = (uint16_t)zig_byte_swap_u8((uint8_t)(val >> 0), 8) << 8 |
+ (uint16_t)zig_byte_swap_u8((uint8_t)(val >> 8), 8) >> 0;
#endif
return zig_wrap_u16(full_res >> (16 - bits), bits);
}
-static inline zig_i16 zig_byte_swap_i16(zig_i16 val, zig_u8 bits) {
- return zig_wrap_i16((zig_i16)zig_byte_swap_u16((zig_u16)val, bits), bits);
+static inline int16_t zig_byte_swap_i16(int16_t val, uint8_t bits) {
+ return zig_wrap_i16((int16_t)zig_byte_swap_u16((uint16_t)val, bits), bits);
}
-static inline zig_u32 zig_byte_swap_u32(zig_u32 val, zig_u8 bits) {
- zig_u32 full_res;
+static inline uint32_t zig_byte_swap_u32(uint32_t val, uint8_t bits) {
+ uint32_t full_res;
#if zig_has_builtin(bswap32) || defined(zig_gnuc)
full_res = __builtin_bswap32(val);
#else
- full_res = (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 0), 16) << 16 |
- (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 16), 16) >> 0;
+ full_res = (uint32_t)zig_byte_swap_u16((uint16_t)(val >> 0), 16) << 16 |
+ (uint32_t)zig_byte_swap_u16((uint16_t)(val >> 16), 16) >> 0;
#endif
return zig_wrap_u32(full_res >> (32 - bits), bits);
}
-static inline zig_i32 zig_byte_swap_i32(zig_i32 val, zig_u8 bits) {
- return zig_wrap_i32((zig_i32)zig_byte_swap_u32((zig_u32)val, bits), bits);
+static inline int32_t zig_byte_swap_i32(int32_t val, uint8_t bits) {
+ return zig_wrap_i32((int32_t)zig_byte_swap_u32((uint32_t)val, bits), bits);
}
-static inline zig_u64 zig_byte_swap_u64(zig_u64 val, zig_u8 bits) {
- zig_u64 full_res;
+static inline uint64_t zig_byte_swap_u64(uint64_t val, uint8_t bits) {
+ uint64_t full_res;
#if zig_has_builtin(bswap64) || defined(zig_gnuc)
full_res = __builtin_bswap64(val);
#else
- full_res = (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 0), 32) << 32 |
- (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 32), 32) >> 0;
+ full_res = (uint64_t)zig_byte_swap_u32((uint32_t)(val >> 0), 32) << 32 |
+ (uint64_t)zig_byte_swap_u32((uint32_t)(val >> 32), 32) >> 0;
#endif
return zig_wrap_u64(full_res >> (64 - bits), bits);
}
-static inline zig_i64 zig_byte_swap_i64(zig_i64 val, zig_u8 bits) {
- return zig_wrap_i64((zig_i64)zig_byte_swap_u64((zig_u64)val, bits), bits);
+static inline int64_t zig_byte_swap_i64(int64_t val, uint8_t bits) {
+ return zig_wrap_i64((int64_t)zig_byte_swap_u64((uint64_t)val, bits), bits);
}
-static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) {
- zig_u8 full_res;
+static inline uint8_t zig_bit_reverse_u8(uint8_t val, uint8_t bits) {
+ uint8_t full_res;
#if zig_has_builtin(bitreverse8)
full_res = __builtin_bitreverse8(val);
#else
- static zig_u8 const lut[0x10] = {
+ static uint8_t const lut[0x10] = {
0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf
};
@@ -1079,62 +1077,62 @@ static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) {
return zig_wrap_u8(full_res >> (8 - bits), bits);
}
-static inline zig_i8 zig_bit_reverse_i8(zig_i8 val, zig_u8 bits) {
- return zig_wrap_i8((zig_i8)zig_bit_reverse_u8((zig_u8)val, bits), bits);
+static inline int8_t zig_bit_reverse_i8(int8_t val, uint8_t bits) {
+ return zig_wrap_i8((int8_t)zig_bit_reverse_u8((uint8_t)val, bits), bits);
}
-static inline zig_u16 zig_bit_reverse_u16(zig_u16 val, zig_u8 bits) {
- zig_u16 full_res;
+static inline uint16_t zig_bit_reverse_u16(uint16_t val, uint8_t bits) {
+ uint16_t full_res;
#if zig_has_builtin(bitreverse16)
full_res = __builtin_bitreverse16(val);
#else
- full_res = (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 0), 8) << 8 |
- (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 8), 8) >> 0;
+ full_res = (uint16_t)zig_bit_reverse_u8((uint8_t)(val >> 0), 8) << 8 |
+ (uint16_t)zig_bit_reverse_u8((uint8_t)(val >> 8), 8) >> 0;
#endif
return zig_wrap_u16(full_res >> (16 - bits), bits);
}
-static inline zig_i16 zig_bit_reverse_i16(zig_i16 val, zig_u8 bits) {
- return zig_wrap_i16((zig_i16)zig_bit_reverse_u16((zig_u16)val, bits), bits);
+static inline int16_t zig_bit_reverse_i16(int16_t val, uint8_t bits) {
+ return zig_wrap_i16((int16_t)zig_bit_reverse_u16((uint16_t)val, bits), bits);
}
-static inline zig_u32 zig_bit_reverse_u32(zig_u32 val, zig_u8 bits) {
- zig_u32 full_res;
+static inline uint32_t zig_bit_reverse_u32(uint32_t val, uint8_t bits) {
+ uint32_t full_res;
#if zig_has_builtin(bitreverse32)
full_res = __builtin_bitreverse32(val);
#else
- full_res = (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 0), 16) << 16 |
- (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 16), 16) >> 0;
+ full_res = (uint32_t)zig_bit_reverse_u16((uint16_t)(val >> 0), 16) << 16 |
+ (uint32_t)zig_bit_reverse_u16((uint16_t)(val >> 16), 16) >> 0;
#endif
return zig_wrap_u32(full_res >> (32 - bits), bits);
}
-static inline zig_i32 zig_bit_reverse_i32(zig_i32 val, zig_u8 bits) {
- return zig_wrap_i32((zig_i32)zig_bit_reverse_u32((zig_u32)val, bits), bits);
+static inline int32_t zig_bit_reverse_i32(int32_t val, uint8_t bits) {
+ return zig_wrap_i32((int32_t)zig_bit_reverse_u32((uint32_t)val, bits), bits);
}
-static inline zig_u64 zig_bit_reverse_u64(zig_u64 val, zig_u8 bits) {
- zig_u64 full_res;
+static inline uint64_t zig_bit_reverse_u64(uint64_t val, uint8_t bits) {
+ uint64_t full_res;
#if zig_has_builtin(bitreverse64)
full_res = __builtin_bitreverse64(val);
#else
- full_res = (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 0), 32) << 32 |
- (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 32), 32) >> 0;
+ full_res = (uint64_t)zig_bit_reverse_u32((uint32_t)(val >> 0), 32) << 32 |
+ (uint64_t)zig_bit_reverse_u32((uint32_t)(val >> 32), 32) >> 0;
#endif
return zig_wrap_u64(full_res >> (64 - bits), bits);
}
-static inline zig_i64 zig_bit_reverse_i64(zig_i64 val, zig_u8 bits) {
- return zig_wrap_i64((zig_i64)zig_bit_reverse_u64((zig_u64)val, bits), bits);
+static inline int64_t zig_bit_reverse_i64(int64_t val, uint8_t bits) {
+ return zig_wrap_i64((int64_t)zig_bit_reverse_u64((uint64_t)val, bits), bits);
}
#define zig_builtin_popcount_common(w) \
- static inline zig_u8 zig_popcount_i##w(zig_i##w val, zig_u8 bits) { \
- return zig_popcount_u##w((zig_u##w)val, bits); \
+ static inline uint8_t zig_popcount_i##w(int##w##_t val, uint8_t bits) { \
+ return zig_popcount_u##w((uint##w##_t)val, bits); \
}
#if zig_has_builtin(popcount) || defined(zig_gnuc)
#define zig_builtin_popcount(w) \
- static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_popcount_u##w(uint##w##_t val, uint8_t bits) { \
(void)bits; \
return zig_builtin##w(popcount, val); \
} \
@@ -1142,12 +1140,12 @@ static inline zig_i64 zig_bit_reverse_i64(zig_i64 val, zig_u8 bits) {
zig_builtin_popcount_common(w)
#else
#define zig_builtin_popcount(w) \
- static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_popcount_u##w(uint##w##_t val, uint8_t bits) { \
(void)bits; \
- zig_u##w temp = val - ((val >> 1) & (zig_maxInt_u##w / 3)); \
- temp = (temp & (zig_maxInt_u##w / 5)) + ((temp >> 2) & (zig_maxInt_u##w / 5)); \
- temp = (temp + (temp >> 4)) & (zig_maxInt_u##w / 17); \
- return temp * (zig_maxInt_u##w / 255) >> (w - 8); \
+ uint##w##_t temp = val - ((val >> 1) & (UINT##w##_MAX / 3)); \
+ temp = (temp & (UINT##w##_MAX / 5)) + ((temp >> 2) & (UINT##w##_MAX / 5)); \
+ temp = (temp + (temp >> 4)) & (UINT##w##_MAX / 17); \
+ return temp * (UINT##w##_MAX / 255) >> (w - 8); \
} \
\
zig_builtin_popcount_common(w)
@@ -1158,12 +1156,12 @@ zig_builtin_popcount(32)
zig_builtin_popcount(64)
#define zig_builtin_ctz_common(w) \
- static inline zig_u8 zig_ctz_i##w(zig_i##w val, zig_u8 bits) { \
- return zig_ctz_u##w((zig_u##w)val, bits); \
+ static inline uint8_t zig_ctz_i##w(int##w##_t val, uint8_t bits) { \
+ return zig_ctz_u##w((uint##w##_t)val, bits); \
}
#if zig_has_builtin(ctz) || defined(zig_gnuc)
#define zig_builtin_ctz(w) \
- static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_ctz_u##w(uint##w##_t val, uint8_t bits) { \
if (val == 0) return bits; \
return zig_builtin##w(ctz, val); \
} \
@@ -1171,7 +1169,7 @@ zig_builtin_popcount(64)
zig_builtin_ctz_common(w)
#else
#define zig_builtin_ctz(w) \
- static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_ctz_u##w(uint##w##_t val, uint8_t bits) { \
return zig_popcount_u##w(zig_not_u##w(val, bits) & zig_subw_u##w(val, 1, bits), bits); \
} \
\
@@ -1183,12 +1181,12 @@ zig_builtin_ctz(32)
zig_builtin_ctz(64)
#define zig_builtin_clz_common(w) \
- static inline zig_u8 zig_clz_i##w(zig_i##w val, zig_u8 bits) { \
- return zig_clz_u##w((zig_u##w)val, bits); \
+ static inline uint8_t zig_clz_i##w(int##w##_t val, uint8_t bits) { \
+ return zig_clz_u##w((uint##w##_t)val, bits); \
}
#if zig_has_builtin(clz) || defined(zig_gnuc)
#define zig_builtin_clz(w) \
- static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_clz_u##w(uint##w##_t val, uint8_t bits) { \
if (val == 0) return bits; \
return zig_builtin##w(clz, val) - (zig_bitSizeOf(zig_Builtin##w) - bits); \
} \
@@ -1196,7 +1194,7 @@ zig_builtin_ctz(64)
zig_builtin_clz_common(w)
#else
#define zig_builtin_clz(w) \
- static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_clz_u##w(uint##w##_t val, uint8_t bits) { \
return zig_ctz_u##w(zig_bit_reverse_u##w(val, bits), bits); \
} \
\
@@ -1207,7 +1205,7 @@ zig_builtin_clz(16)
zig_builtin_clz(32)
zig_builtin_clz(64)
-/* ======================== 128-bit Integer Routines ======================== */
+/* ======================== 128-bit Integer Support ========================= */
#if !defined(zig_has_int128)
# if defined(__SIZEOF_INT128__)
@@ -1222,18 +1220,18 @@ zig_builtin_clz(64)
typedef unsigned __int128 zig_u128;
typedef signed __int128 zig_i128;
-#define zig_as_u128(hi, lo) ((zig_u128)(hi)<<64|(lo))
-#define zig_as_i128(hi, lo) ((zig_i128)zig_as_u128(hi, lo))
-#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo)
-#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo)
-#define zig_hi_u128(val) ((zig_u64)((val) >> 64))
-#define zig_lo_u128(val) ((zig_u64)((val) >> 0))
-#define zig_hi_i128(val) ((zig_i64)((val) >> 64))
-#define zig_lo_i128(val) ((zig_u64)((val) >> 0))
+#define zig_make_u128(hi, lo) ((zig_u128)(hi)<<64|(lo))
+#define zig_make_i128(hi, lo) ((zig_i128)zig_make_u128(hi, lo))
+#define zig_init_u128(hi, lo) zig_make_u128(hi, lo)
+#define zig_init_i128(hi, lo) zig_make_i128(hi, lo)
+#define zig_hi_u128(val) ((uint64_t)((val) >> 64))
+#define zig_lo_u128(val) ((uint64_t)((val) >> 0))
+#define zig_hi_i128(val) (( int64_t)((val) >> 64))
+#define zig_lo_i128(val) ((uint64_t)((val) >> 0))
#define zig_bitcast_u128(val) ((zig_u128)(val))
#define zig_bitcast_i128(val) ((zig_i128)(val))
#define zig_cmp_int128(Type) \
- static inline zig_i32 zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ static inline int32_t zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \
return (lhs > rhs) - (lhs < rhs); \
}
#define zig_bit_int128(Type, operation, operator) \
@@ -1243,32 +1241,32 @@ typedef signed __int128 zig_i128;
#else /* zig_has_int128 */
-#if __LITTLE_ENDIAN__ || _MSC_VER
-typedef struct { zig_align(16) zig_u64 lo; zig_u64 hi; } zig_u128;
-typedef struct { zig_align(16) zig_u64 lo; zig_i64 hi; } zig_i128;
+#if zig_little_endian
+typedef struct { zig_align(16) uint64_t lo; uint64_t hi; } zig_u128;
+typedef struct { zig_align(16) uint64_t lo; int64_t hi; } zig_i128;
#else
-typedef struct { zig_align(16) zig_u64 hi; zig_u64 lo; } zig_u128;
-typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128;
+typedef struct { zig_align(16) uint64_t hi; uint64_t lo; } zig_u128;
+typedef struct { zig_align(16) int64_t hi; uint64_t lo; } zig_i128;
#endif
-#define zig_as_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) })
-#define zig_as_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) })
+#define zig_make_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) })
+#define zig_make_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) })
-#if _MSC_VER
-#define zig_as_constant_u128(hi, lo) { .h##i = (hi), .l##o = (lo) }
-#define zig_as_constant_i128(hi, lo) { .h##i = (hi), .l##o = (lo) }
-#else
-#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo)
-#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo)
+#if _MSC_VER /* MSVC doesn't allow struct literals in constant expressions */
+#define zig_init_u128(hi, lo) { .h##i = (hi), .l##o = (lo) }
+#define zig_init_i128(hi, lo) { .h##i = (hi), .l##o = (lo) }
+#else /* But non-MSVC doesn't like the unprotected commas */
+#define zig_init_u128(hi, lo) zig_make_u128(hi, lo)
+#define zig_init_i128(hi, lo) zig_make_i128(hi, lo)
#endif
#define zig_hi_u128(val) ((val).hi)
#define zig_lo_u128(val) ((val).lo)
#define zig_hi_i128(val) ((val).hi)
#define zig_lo_i128(val) ((val).lo)
-#define zig_bitcast_u128(val) zig_as_u128((zig_u64)(val).hi, (val).lo)
-#define zig_bitcast_i128(val) zig_as_i128((zig_i64)(val).hi, (val).lo)
+#define zig_bitcast_u128(val) zig_make_u128((uint64_t)(val).hi, (val).lo)
+#define zig_bitcast_i128(val) zig_make_i128(( int64_t)(val).hi, (val).lo)
#define zig_cmp_int128(Type) \
- static inline zig_i32 zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ static inline int32_t zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \
return (lhs.hi == rhs.hi) \
? (lhs.lo > rhs.lo) - (lhs.lo < rhs.lo) \
: (lhs.hi > rhs.hi) - (lhs.hi < rhs.hi); \
@@ -1280,10 +1278,10 @@ typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128;
#endif /* zig_has_int128 */
-#define zig_minInt_u128 zig_as_u128(zig_minInt_u64, zig_minInt_u64)
-#define zig_maxInt_u128 zig_as_u128(zig_maxInt_u64, zig_maxInt_u64)
-#define zig_minInt_i128 zig_as_i128(zig_minInt_i64, zig_minInt_u64)
-#define zig_maxInt_i128 zig_as_i128(zig_maxInt_i64, zig_maxInt_u64)
+#define zig_minInt_u128 zig_make_u128(zig_minInt_u64, zig_minInt_u64)
+#define zig_maxInt_u128 zig_make_u128(zig_maxInt_u64, zig_maxInt_u64)
+#define zig_minInt_i128 zig_make_i128(zig_minInt_i64, zig_minInt_u64)
+#define zig_maxInt_i128 zig_make_i128(zig_maxInt_i64, zig_maxInt_u64)
zig_cmp_int128(u128)
zig_cmp_int128(i128)
@@ -1297,28 +1295,33 @@ zig_bit_int128(i128, or, |)
zig_bit_int128(u128, xor, ^)
zig_bit_int128(i128, xor, ^)
-static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs);
+static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs);
#if zig_has_int128
-static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) {
- return val ^ zig_maxInt(u128, bits);
+static inline zig_u128 zig_not_u128(zig_u128 val, uint8_t bits) {
+ return val ^ zig_maxInt_u(128, bits);
}
-static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) {
+static inline zig_i128 zig_not_i128(zig_i128 val, uint8_t bits) {
(void)bits;
return ~val;
}
-static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) {
+static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs) {
return lhs >> rhs;
}
-static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) {
+static inline zig_u128 zig_shl_u128(zig_u128 lhs, uint8_t rhs) {
return lhs << rhs;
}
-static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) {
+static inline zig_i128 zig_shr_i128(zig_i128 lhs, uint8_t rhs) {
+ zig_i128 sign_mask = lhs < zig_make_i128(0, 0) ? -zig_make_i128(0, 1) : zig_make_i128(0, 0);
+ return ((lhs ^ sign_mask) >> rhs) ^ sign_mask;
+}
+
+static inline zig_i128 zig_shl_i128(zig_i128 lhs, uint8_t rhs) {
return lhs << rhs;
}
@@ -1363,40 +1366,46 @@ static inline zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) {
}
static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_div_trunc_i128(lhs, rhs) - (((lhs ^ rhs) & zig_rem_i128(lhs, rhs)) < zig_as_i128(0, 0));
+ return zig_div_trunc_i128(lhs, rhs) - (((lhs ^ rhs) & zig_rem_i128(lhs, rhs)) < zig_make_i128(0, 0));
}
static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
zig_i128 rem = zig_rem_i128(lhs, rhs);
- return rem + (((lhs ^ rhs) & rem) < zig_as_i128(0, 0) ? rhs : zig_as_i128(0, 0));
+ return rem + (((lhs ^ rhs) & rem) < zig_make_i128(0, 0) ? rhs : zig_make_i128(0, 0));
}
#else /* zig_has_int128 */
-static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) {
- return (zig_u128){ .hi = zig_not_u64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) };
+static inline zig_u128 zig_not_u128(zig_u128 val, uint8_t bits) {
+ return (zig_u128){ .hi = zig_not_u64(val.hi, bits - UINT8_C(64)), .lo = zig_not_u64(val.lo, UINT8_C(64)) };
}
-static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) {
- return (zig_i128){ .hi = zig_not_i64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) };
+static inline zig_i128 zig_not_i128(zig_i128 val, uint8_t bits) {
+ return (zig_i128){ .hi = zig_not_i64(val.hi, bits - UINT8_C(64)), .lo = zig_not_u64(val.lo, UINT8_C(64)) };
}
-static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) {
- if (rhs == zig_as_u8(0)) return lhs;
- if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - zig_as_u8(64)) };
- return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (zig_as_u8(64) - rhs) | lhs.lo >> rhs };
+static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs) {
+ if (rhs == UINT8_C(0)) return lhs;
+ if (rhs >= UINT8_C(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - UINT8_C(64)) };
+ return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (UINT8_C(64) - rhs) | lhs.lo >> rhs };
}
-static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) {
- if (rhs == zig_as_u8(0)) return lhs;
- if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 };
- return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs };
+static inline zig_u128 zig_shl_u128(zig_u128 lhs, uint8_t rhs) {
+ if (rhs == UINT8_C(0)) return lhs;
+ if (rhs >= UINT8_C(64)) return (zig_u128){ .hi = lhs.lo << (rhs - UINT8_C(64)), .lo = zig_minInt_u64 };
+ return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (UINT8_C(64) - rhs), .lo = lhs.lo << rhs };
}
-static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) {
- if (rhs == zig_as_u8(0)) return lhs;
- if (rhs >= zig_as_u8(64)) return (zig_i128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 };
- return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs };
+static inline zig_i128 zig_shr_i128(zig_i128 lhs, uint8_t rhs) {
+ if (rhs == UINT8_C(0)) return lhs;
+ if (rhs >= UINT8_C(64)) return (zig_i128){ .hi = zig_shr_i64(lhs.hi, 63), .lo = zig_shr_i64(lhs.hi, (rhs - UINT8_C(64))) };
+ return (zig_i128){ .hi = zig_shr_i64(lhs.hi, rhs), .lo = lhs.lo >> rhs | (uint64_t)lhs.hi << (UINT8_C(64) - rhs) };
+}
+
+static inline zig_i128 zig_shl_i128(zig_i128 lhs, uint8_t rhs) {
+ if (rhs == UINT8_C(0)) return lhs;
+ if (rhs >= UINT8_C(64)) return (zig_i128){ .hi = lhs.lo << (rhs - UINT8_C(64)), .lo = zig_minInt_u64 };
+ return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (UINT8_C(64) - rhs), .lo = lhs.lo << rhs };
}
static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) {
@@ -1424,14 +1433,14 @@ static inline zig_i128 zig_sub_i128(zig_i128 lhs, zig_i128 rhs) {
}
zig_extern zig_i128 __multi3(zig_i128 lhs, zig_i128 rhs);
-static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) {
- return zig_bitcast_u128(__multi3(zig_bitcast_i128(lhs), zig_bitcast_i128(rhs)));
-}
-
static zig_i128 zig_mul_i128(zig_i128 lhs, zig_i128 rhs) {
return __multi3(lhs, rhs);
}
+static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) {
+ return zig_bitcast_u128(zig_mul_i128(zig_bitcast_i128(lhs), zig_bitcast_i128(rhs)));
+}
+
zig_extern zig_u128 __udivti3(zig_u128 lhs, zig_u128 rhs);
static zig_u128 zig_div_trunc_u128(zig_u128 lhs, zig_u128 rhs) {
return __udivti3(lhs, rhs);
@@ -1454,11 +1463,11 @@ static zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) {
static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
zig_i128 rem = zig_rem_i128(lhs, rhs);
- return zig_add_i128(rem, (((lhs.hi ^ rhs.hi) & rem.hi) < zig_as_i64(0) ? rhs : zig_as_i128(0, 0)));
+ return zig_add_i128(rem, ((lhs.hi ^ rhs.hi) & rem.hi) < INT64_C(0) ? rhs : zig_make_i128(0, 0));
}
static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_as_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_as_i128(0, 0)) < zig_as_i32(0)));
+ return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_make_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_make_i128(0, 0)) < INT32_C(0)));
}
#endif /* zig_has_int128 */
@@ -1471,326 +1480,1265 @@ static inline zig_u128 zig_nand_u128(zig_u128 lhs, zig_u128 rhs) {
}
static inline zig_u128 zig_min_u128(zig_u128 lhs, zig_u128 rhs) {
- return zig_cmp_u128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs;
+ return zig_cmp_u128(lhs, rhs) < INT32_C(0) ? lhs : rhs;
}
static inline zig_i128 zig_min_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_cmp_i128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs;
+ return zig_cmp_i128(lhs, rhs) < INT32_C(0) ? lhs : rhs;
}
static inline zig_u128 zig_max_u128(zig_u128 lhs, zig_u128 rhs) {
- return zig_cmp_u128(lhs, rhs) > zig_as_i32(0) ? lhs : rhs;
+ return zig_cmp_u128(lhs, rhs) > INT32_C(0) ? lhs : rhs;
}
static inline zig_i128 zig_max_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_cmp_i128(lhs, rhs) > zig_as_i32(0) ? lhs : rhs;
+ return zig_cmp_i128(lhs, rhs) > INT32_C(0) ? lhs : rhs;
}
-static inline zig_i128 zig_shr_i128(zig_i128 lhs, zig_u8 rhs) {
- zig_i128 sign_mask = zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_sub_i128(zig_as_i128(0, 0), zig_as_i128(0, 1)) : zig_as_i128(0, 0);
- return zig_xor_i128(zig_bitcast_i128(zig_shr_u128(zig_bitcast_u128(zig_xor_i128(lhs, sign_mask)), rhs)), sign_mask);
+static inline zig_u128 zig_wrap_u128(zig_u128 val, uint8_t bits) {
+ return zig_and_u128(val, zig_maxInt_u(128, bits));
}
-static inline zig_u128 zig_wrap_u128(zig_u128 val, zig_u8 bits) {
- return zig_and_u128(val, zig_maxInt(u128, bits));
+static inline zig_i128 zig_wrap_i128(zig_i128 val, uint8_t bits) {
+ if (bits > UINT8_C(64)) return zig_make_i128(zig_wrap_i64(zig_hi_i128(val), bits - UINT8_C(64)), zig_lo_i128(val));
+ int64_t lo = zig_wrap_i64((int64_t)zig_lo_i128(val), bits);
+ return zig_make_i128(zig_shr_i64(lo, 63), (uint64_t)lo);
}
-static inline zig_i128 zig_wrap_i128(zig_i128 val, zig_u8 bits) {
- return zig_as_i128(zig_wrap_i64(zig_hi_i128(val), bits - zig_as_u8(64)), zig_lo_i128(val));
-}
-
-static inline zig_u128 zig_shlw_u128(zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline zig_u128 zig_shlw_u128(zig_u128 lhs, uint8_t rhs, uint8_t bits) {
return zig_wrap_u128(zig_shl_u128(lhs, rhs), bits);
}
-static inline zig_i128 zig_shlw_i128(zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline zig_i128 zig_shlw_i128(zig_i128 lhs, uint8_t rhs, uint8_t bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_shl_u128(zig_bitcast_u128(lhs), rhs)), bits);
}
-static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
return zig_wrap_u128(zig_add_u128(lhs, rhs), bits);
}
-static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_add_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
-static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
return zig_wrap_u128(zig_sub_u128(lhs, rhs), bits);
}
-static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_sub_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
-static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
return zig_wrap_u128(zig_mul_u128(lhs, rhs), bits);
}
-static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_mul_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
#if zig_has_int128
-static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow)
zig_u128 full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u128(full_res, bits);
- return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
+ return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits);
#else
*res = zig_addw_u128(lhs, rhs, bits);
return *res < lhs;
#endif
}
-zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
+static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow)
zig_i128 full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
+ int overflow_int;
zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i128(full_res, bits);
- return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
+ return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits);
}
-static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow)
zig_u128 full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u128(full_res, bits);
- return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
+ return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits);
#else
*res = zig_subw_u128(lhs, rhs, bits);
return *res > lhs;
#endif
}
-zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
+static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow)
zig_i128 full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
+ int overflow_int;
zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i128(full_res, bits);
- return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
+ return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits);
}
-static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow)
zig_u128 full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u128(full_res, bits);
- return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
+ return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits);
#else
*res = zig_mulw_u128(lhs, rhs, bits);
- return rhs != zig_as_u128(0, 0) && lhs > zig_maxInt(u128, bits) / rhs;
+ return rhs != zig_make_u128(0, 0) && lhs > zig_maxInt_u(128, bits) / rhs;
#endif
}
-zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
+static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow)
zig_i128 full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
+ int overflow_int;
zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i128(full_res, bits);
- return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
+ return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits);
}
#else /* zig_has_int128 */
-static inline bool zig_overflow_u128(bool overflow, zig_u128 full_res, zig_u8 bits) {
- return overflow ||
- zig_cmp_u128(full_res, zig_minInt(u128, bits)) < zig_as_i32(0) ||
- zig_cmp_u128(full_res, zig_maxInt(u128, bits)) > zig_as_i32(0);
+static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
+ uint64_t hi;
+ bool overflow = zig_addo_u64(&hi, lhs.hi, rhs.hi, bits - 64);
+ return overflow ^ zig_addo_u64(&res->hi, hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64);
}
-static inline bool zig_overflow_i128(bool overflow, zig_i128 full_res, zig_u8 bits) {
- return overflow ||
- zig_cmp_i128(full_res, zig_minInt(i128, bits)) < zig_as_i32(0) ||
- zig_cmp_i128(full_res, zig_maxInt(i128, bits)) > zig_as_i32(0);
+static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
+ int64_t hi;
+ bool overflow = zig_addo_i64(&hi, lhs.hi, rhs.hi, bits - 64);
+ return overflow ^ zig_addo_i64(&res->hi, hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64);
}
-static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
- zig_u128 full_res;
- bool overflow =
- zig_addo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) |
- zig_addo_u64(&full_res.hi, full_res.hi, zig_addo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64);
- *res = zig_wrap_u128(full_res, bits);
- return zig_overflow_u128(overflow, full_res, bits);
+static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
+ uint64_t hi;
+ bool overflow = zig_subo_u64(&hi, lhs.hi, rhs.hi, bits - 64);
+ return overflow ^ zig_subo_u64(&res->hi, hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64);
}
-zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
- zig_c_int overflow_int;
- zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int);
- *res = zig_wrap_i128(full_res, bits);
- return zig_overflow_i128(overflow_int, full_res, bits);
+static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
+ int64_t hi;
+ bool overflow = zig_subo_i64(&hi, lhs.hi, rhs.hi, bits - 64);
+ return overflow ^ zig_subo_i64(&res->hi, hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64);
}
-static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
- zig_u128 full_res;
- bool overflow =
- zig_subo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) |
- zig_subo_u64(&full_res.hi, full_res.hi, zig_subo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64);
- *res = zig_wrap_u128(full_res, bits);
- return zig_overflow_u128(overflow, full_res, bits);
-}
-
-zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
- zig_c_int overflow_int;
- zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int);
- *res = zig_wrap_i128(full_res, bits);
- return zig_overflow_i128(overflow_int, full_res, bits);
-}
-
-static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
*res = zig_mulw_u128(lhs, rhs, bits);
- return zig_cmp_u128(*res, zig_as_u128(0, 0)) != zig_as_i32(0) &&
- zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0);
+ return zig_cmp_u128(*res, zig_make_u128(0, 0)) != INT32_C(0) &&
+ zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt_u(128, bits), rhs)) > INT32_C(0);
}
-zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
- zig_c_int overflow_int;
+zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
+static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
+ int overflow_int;
zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int);
+ bool overflow = overflow_int != 0 ||
+ zig_cmp_i128(full_res, zig_minInt_i(128, bits)) < INT32_C(0) ||
+ zig_cmp_i128(full_res, zig_maxInt_i(128, bits)) > INT32_C(0);
*res = zig_wrap_i128(full_res, bits);
- return zig_overflow_i128(overflow_int, full_res, bits);
+ return overflow;
}
#endif /* zig_has_int128 */
-static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, uint8_t rhs, uint8_t bits) {
*res = zig_shlw_u128(lhs, rhs, bits);
- return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0);
+ return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt_u(128, bits), rhs)) > INT32_C(0);
}
-static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, uint8_t rhs, uint8_t bits) {
*res = zig_shlw_i128(lhs, rhs, bits);
- zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - zig_as_u8(1)));
- return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != zig_as_i32(0) &&
- zig_cmp_i128(zig_and_i128(lhs, mask), mask) != zig_as_i32(0);
+ zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - UINT8_C(1)));
+ return zig_cmp_i128(zig_and_i128(lhs, mask), zig_make_i128(0, 0)) != INT32_C(0) &&
+ zig_cmp_i128(zig_and_i128(lhs, mask), mask) != INT32_C(0);
}
-static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
- if (zig_cmp_u128(rhs, zig_as_u128(0, bits)) >= zig_as_i32(0))
- return zig_cmp_u128(lhs, zig_as_u128(0, 0)) != zig_as_i32(0) ? zig_maxInt(u128, bits) : lhs;
-
-#if zig_has_int128
- return zig_shlo_u128(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u128, bits) : res;
-#else
- return zig_shlo_u128(&res, lhs, (zig_u8)rhs.lo, bits) ? zig_maxInt(u128, bits) : res;
-#endif
+ if (zig_cmp_u128(rhs, zig_make_u128(0, bits)) >= INT32_C(0))
+ return zig_cmp_u128(lhs, zig_make_u128(0, 0)) != INT32_C(0) ? zig_maxInt_u(128, bits) : lhs;
+ return zig_shlo_u128(&res, lhs, (uint8_t)zig_lo_u128(rhs), bits) ? zig_maxInt_u(128, bits) : res;
}
-static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
zig_i128 res;
- if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_as_u128(0, bits)) < zig_as_i32(0) && !zig_shlo_i128(&res, lhs, zig_lo_i128(rhs), bits)) return res;
- return zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+ if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_i128(&res, lhs, (uint8_t)zig_lo_i128(rhs), bits)) return res;
+ return zig_cmp_i128(lhs, zig_make_i128(0, 0)) < INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
}
-static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
- return zig_addo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res;
+ return zig_addo_u128(&res, lhs, rhs, bits) ? zig_maxInt_u(128, bits) : res;
}
-static inline zig_i128 zig_adds_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_adds_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
zig_i128 res;
if (!zig_addo_i128(&res, lhs, rhs, bits)) return res;
- return zig_cmp_i128(res, zig_as_i128(0, 0)) >= zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+ return zig_cmp_i128(res, zig_make_i128(0, 0)) >= INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
}
-static inline zig_u128 zig_subs_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_subs_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
- return zig_subo_u128(&res, lhs, rhs, bits) ? zig_minInt(u128, bits) : res;
+ return zig_subo_u128(&res, lhs, rhs, bits) ? zig_minInt_u(128, bits) : res;
}
-static inline zig_i128 zig_subs_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_subs_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
zig_i128 res;
if (!zig_subo_i128(&res, lhs, rhs, bits)) return res;
- return zig_cmp_i128(res, zig_as_i128(0, 0)) >= zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+ return zig_cmp_i128(res, zig_make_i128(0, 0)) >= INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
}
-static inline zig_u128 zig_muls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_muls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
- return zig_mulo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res;
+ return zig_mulo_u128(&res, lhs, rhs, bits) ? zig_maxInt_u(128, bits) : res;
}
-static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
zig_i128 res;
if (!zig_mulo_i128(&res, lhs, rhs, bits)) return res;
- return zig_cmp_i128(zig_xor_i128(lhs, rhs), zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+ return zig_cmp_i128(zig_xor_i128(lhs, rhs), zig_make_i128(0, 0)) < INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
}
-static inline zig_u8 zig_clz_u128(zig_u128 val, zig_u8 bits) {
- if (bits <= zig_as_u8(64)) return zig_clz_u64(zig_lo_u128(val), bits);
- if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - zig_as_u8(64));
- return zig_clz_u64(zig_lo_u128(val), zig_as_u8(64)) + (bits - zig_as_u8(64));
+static inline uint8_t zig_clz_u128(zig_u128 val, uint8_t bits) {
+ if (bits <= UINT8_C(64)) return zig_clz_u64(zig_lo_u128(val), bits);
+ if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - UINT8_C(64));
+ return zig_clz_u64(zig_lo_u128(val), UINT8_C(64)) + (bits - UINT8_C(64));
}
-static inline zig_u8 zig_clz_i128(zig_i128 val, zig_u8 bits) {
+static inline uint8_t zig_clz_i128(zig_i128 val, uint8_t bits) {
return zig_clz_u128(zig_bitcast_u128(val), bits);
}
-static inline zig_u8 zig_ctz_u128(zig_u128 val, zig_u8 bits) {
- if (zig_lo_u128(val) != 0) return zig_ctz_u64(zig_lo_u128(val), zig_as_u8(64));
- return zig_ctz_u64(zig_hi_u128(val), bits - zig_as_u8(64)) + zig_as_u8(64);
+static inline uint8_t zig_ctz_u128(zig_u128 val, uint8_t bits) {
+ if (zig_lo_u128(val) != 0) return zig_ctz_u64(zig_lo_u128(val), UINT8_C(64));
+ return zig_ctz_u64(zig_hi_u128(val), bits - UINT8_C(64)) + UINT8_C(64);
}
-static inline zig_u8 zig_ctz_i128(zig_i128 val, zig_u8 bits) {
+static inline uint8_t zig_ctz_i128(zig_i128 val, uint8_t bits) {
return zig_ctz_u128(zig_bitcast_u128(val), bits);
}
-static inline zig_u8 zig_popcount_u128(zig_u128 val, zig_u8 bits) {
- return zig_popcount_u64(zig_hi_u128(val), bits - zig_as_u8(64)) +
- zig_popcount_u64(zig_lo_u128(val), zig_as_u8(64));
+static inline uint8_t zig_popcount_u128(zig_u128 val, uint8_t bits) {
+ return zig_popcount_u64(zig_hi_u128(val), bits - UINT8_C(64)) +
+ zig_popcount_u64(zig_lo_u128(val), UINT8_C(64));
}
-static inline zig_u8 zig_popcount_i128(zig_i128 val, zig_u8 bits) {
+static inline uint8_t zig_popcount_i128(zig_i128 val, uint8_t bits) {
return zig_popcount_u128(zig_bitcast_u128(val), bits);
}
-static inline zig_u128 zig_byte_swap_u128(zig_u128 val, zig_u8 bits) {
+static inline zig_u128 zig_byte_swap_u128(zig_u128 val, uint8_t bits) {
zig_u128 full_res;
#if zig_has_builtin(bswap128)
full_res = __builtin_bswap128(val);
#else
- full_res = zig_as_u128(zig_byte_swap_u64(zig_lo_u128(val), zig_as_u8(64)),
- zig_byte_swap_u64(zig_hi_u128(val), zig_as_u8(64)));
+ full_res = zig_make_u128(zig_byte_swap_u64(zig_lo_u128(val), UINT8_C(64)),
+ zig_byte_swap_u64(zig_hi_u128(val), UINT8_C(64)));
#endif
- return zig_shr_u128(full_res, zig_as_u8(128) - bits);
+ return zig_shr_u128(full_res, UINT8_C(128) - bits);
}
-static inline zig_i128 zig_byte_swap_i128(zig_i128 val, zig_u8 bits) {
+static inline zig_i128 zig_byte_swap_i128(zig_i128 val, uint8_t bits) {
return zig_bitcast_i128(zig_byte_swap_u128(zig_bitcast_u128(val), bits));
}
-static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, zig_u8 bits) {
- return zig_shr_u128(zig_as_u128(zig_bit_reverse_u64(zig_lo_u128(val), zig_as_u8(64)),
- zig_bit_reverse_u64(zig_hi_u128(val), zig_as_u8(64))),
- zig_as_u8(128) - bits);
+static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, uint8_t bits) {
+ return zig_shr_u128(zig_make_u128(zig_bit_reverse_u64(zig_lo_u128(val), UINT8_C(64)),
+ zig_bit_reverse_u64(zig_hi_u128(val), UINT8_C(64))),
+ UINT8_C(128) - bits);
}
-static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) {
+static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
return zig_bitcast_i128(zig_bit_reverse_u128(zig_bitcast_u128(val), bits));
}
+/* ========================== Big Integer Support =========================== */
+
+static inline uint16_t zig_int_bytes(uint16_t bits) {
+ uint16_t bytes = (bits + CHAR_BIT - 1) / CHAR_BIT;
+ uint16_t alignment = ZIG_TARGET_MAX_INT_ALIGNMENT;
+ while (alignment / 2 >= bytes) alignment /= 2;
+ return (bytes + alignment - 1) / alignment * alignment;
+}
+
+static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ bool do_signed = is_signed;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+
+#if zig_little_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ int32_t limb_cmp;
+
+#if zig_little_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_cmp = zig_cmp_i128(lhs_limb, rhs_limb);
+ do_signed = false;
+ } else {
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_cmp = zig_cmp_u128(lhs_limb, rhs_limb);
+ }
+
+ if (limb_cmp != 0) return limb_cmp;
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return 0;
+}
+
+static inline bool zig_addo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ uint8_t *res_bytes = res;
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t top_bits = remaining_bytes * 8 - bits;
+ bool overflow = false;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ uint16_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 128 / CHAR_BIT && is_signed) {
+ zig_i128 res_limb;
+ zig_i128 tmp_limb;
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ zig_u128 res_limb;
+ zig_u128 tmp_limb;
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+ uint16_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 64 / CHAR_BIT && is_signed) {
+ int64_t res_limb;
+ int64_t tmp_limb;
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint64_t res_limb;
+ uint64_t tmp_limb;
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+ uint16_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 32 / CHAR_BIT && is_signed) {
+ int32_t res_limb;
+ int32_t tmp_limb;
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint32_t res_limb;
+ uint32_t tmp_limb;
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+ uint16_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 16 / CHAR_BIT && is_signed) {
+ int16_t res_limb;
+ int16_t tmp_limb;
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint16_t res_limb;
+ uint16_t tmp_limb;
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+ uint16_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 8 / CHAR_BIT && is_signed) {
+ int8_t res_limb;
+ int8_t tmp_limb;
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint8_t res_limb;
+ uint8_t tmp_limb;
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u8(&res_limb, tmp_limb, overflow ? UINT8_C(1) : UINT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return overflow;
+}
+
+/* Multi-limb subtraction with overflow detection for wide integers stored as
+ * byte arrays of zig_int_bytes(bits) bytes in native endianness.
+ * Limbs are processed from least to most significant, largest limb size first
+ * (128 -> 64 -> 32 -> 16 -> 8 bits), propagating the borrow into the next limb.
+ * Only the final (most significant) limb is narrowed by `top_bits` padding and,
+ * when `is_signed`, handled by the signed helper so signed overflow is detected.
+ * Returns true when the subtraction overflowed/borrowed out of `bits`. */
+static inline bool zig_subo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+    uint8_t *res_bytes = res;
+    const uint8_t *lhs_bytes = lhs;
+    const uint8_t *rhs_bytes = rhs;
+    uint16_t byte_offset = 0;
+    uint16_t remaining_bytes = zig_int_bytes(bits);
+    uint16_t top_bits = remaining_bytes * 8 - bits;
+    bool overflow = false;
+
+#if zig_big_endian
+    /* On big-endian the least-significant limb sits at the END of the buffer,
+     * so start past the end and decrement before each limb read. */
+    byte_offset = remaining_bytes;
+#endif
+
+    while (remaining_bytes >= 128 / CHAR_BIT) {
+        /* The most significant limb only carries `bits % limb` value bits. */
+        uint16_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+        byte_offset -= 128 / CHAR_BIT;
+#endif
+
+        if (remaining_bytes == 128 / CHAR_BIT && is_signed) {
+            zig_i128 res_limb;
+            zig_i128 tmp_limb;
+            zig_i128 lhs_limb;
+            zig_i128 rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            /* Subtract the operands, then subtract the incoming borrow;
+             * the two partial overflows cannot both fire, so XOR combines them. */
+            limb_overflow = zig_subo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        } else {
+            zig_u128 res_limb;
+            zig_u128 tmp_limb;
+            zig_u128 lhs_limb;
+            zig_u128 rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        }
+
+        remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+        /* Little-endian: advance toward the more significant limbs. */
+        byte_offset += 128 / CHAR_BIT;
+#endif
+    }
+
+    /* The remaining loops repeat the same pattern for narrower limb sizes. */
+    while (remaining_bytes >= 64 / CHAR_BIT) {
+        uint16_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+        byte_offset -= 64 / CHAR_BIT;
+#endif
+
+        if (remaining_bytes == 64 / CHAR_BIT && is_signed) {
+            int64_t res_limb;
+            int64_t tmp_limb;
+            int64_t lhs_limb;
+            int64_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        } else {
+            uint64_t res_limb;
+            uint64_t tmp_limb;
+            uint64_t lhs_limb;
+            uint64_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        }
+
+        remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 64 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 32 / CHAR_BIT) {
+        uint16_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+        byte_offset -= 32 / CHAR_BIT;
+#endif
+
+        if (remaining_bytes == 32 / CHAR_BIT && is_signed) {
+            int32_t res_limb;
+            int32_t tmp_limb;
+            int32_t lhs_limb;
+            int32_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        } else {
+            uint32_t res_limb;
+            uint32_t tmp_limb;
+            uint32_t lhs_limb;
+            uint32_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        }
+
+        remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 32 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 16 / CHAR_BIT) {
+        uint16_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+        byte_offset -= 16 / CHAR_BIT;
+#endif
+
+        if (remaining_bytes == 16 / CHAR_BIT && is_signed) {
+            int16_t res_limb;
+            int16_t tmp_limb;
+            int16_t lhs_limb;
+            int16_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        } else {
+            uint16_t res_limb;
+            uint16_t tmp_limb;
+            uint16_t lhs_limb;
+            uint16_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        }
+
+        remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 16 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 8 / CHAR_BIT) {
+        uint16_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+        byte_offset -= 8 / CHAR_BIT;
+#endif
+
+        if (remaining_bytes == 8 / CHAR_BIT && is_signed) {
+            int8_t res_limb;
+            int8_t tmp_limb;
+            int8_t lhs_limb;
+            int8_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        } else {
+            uint8_t res_limb;
+            uint8_t tmp_limb;
+            uint8_t lhs_limb;
+            uint8_t rhs_limb;
+            bool limb_overflow;
+
+            memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+            memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+            limb_overflow = zig_subo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+            overflow = limb_overflow ^ zig_subo_u8(&res_limb, tmp_limb, overflow ? UINT8_C(1) : UINT8_C(0), limb_bits);
+            memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+        }
+
+        remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 8 / CHAR_BIT;
+#endif
+    }
+
+    return overflow;
+}
+
+/* Wrapping big-int addition: same computation as zig_addo_big, with the
+ * overflow flag deliberately discarded. */
+static inline void zig_addw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+    bool discarded_overflow = zig_addo_big(res, lhs, rhs, is_signed, bits);
+    (void)discarded_overflow;
+}
+
+/* Wrapping big-int subtraction: same computation as zig_subo_big, with the
+ * overflow flag deliberately discarded. */
+static inline void zig_subw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+    bool discarded_overflow = zig_subo_big(res, lhs, rhs, is_signed, bits);
+    (void)discarded_overflow;
+}
+
+/* Count leading zero bits of a wide integer stored as a byte array of
+ * zig_int_bytes(bits) bytes.  Limbs are scanned from MOST significant to
+ * least (hence the endian offsets are mirrored relative to zig_ctz_big):
+ * on little-endian the top limb is at the end of the buffer, so byte_offset
+ * starts there and decrements.  `skip_bits` excludes the padding bits above
+ * `bits` in the top limb and is zeroed after the first limb. */
+static inline uint16_t zig_clz_big(const void *val, bool is_signed, uint16_t bits) {
+    const uint8_t *val_bytes = val;
+    uint16_t byte_offset = 0;
+    uint16_t remaining_bytes = zig_int_bytes(bits);
+    uint16_t skip_bits = remaining_bytes * 8 - bits;
+    uint16_t total_lz = 0;
+    uint16_t limb_lz;
+    (void)is_signed;
+
+#if zig_little_endian
+    byte_offset = remaining_bytes;
+#endif
+
+    while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_little_endian
+        byte_offset -= 128 / CHAR_BIT;
+#endif
+
+        {
+            zig_u128 val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_lz = zig_clz_u128(val_limb, 128 - skip_bits);
+        }
+
+        total_lz += limb_lz;
+        /* A set bit was found inside this limb: the count is final. */
+        if (limb_lz < 128 - skip_bits) return total_lz;
+        skip_bits = 0;
+        remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_big_endian
+        byte_offset += 128 / CHAR_BIT;
+#endif
+    }
+
+    /* Remaining loops: identical pattern for narrower limb sizes. */
+    while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_little_endian
+        byte_offset -= 64 / CHAR_BIT;
+#endif
+
+        {
+            uint64_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_lz = zig_clz_u64(val_limb, 64 - skip_bits);
+        }
+
+        total_lz += limb_lz;
+        if (limb_lz < 64 - skip_bits) return total_lz;
+        skip_bits = 0;
+        remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_big_endian
+        byte_offset += 64 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_little_endian
+        byte_offset -= 32 / CHAR_BIT;
+#endif
+
+        {
+            uint32_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_lz = zig_clz_u32(val_limb, 32 - skip_bits);
+        }
+
+        total_lz += limb_lz;
+        if (limb_lz < 32 - skip_bits) return total_lz;
+        skip_bits = 0;
+        remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_big_endian
+        byte_offset += 32 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_little_endian
+        byte_offset -= 16 / CHAR_BIT;
+#endif
+
+        {
+            uint16_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_lz = zig_clz_u16(val_limb, 16 - skip_bits);
+        }
+
+        total_lz += limb_lz;
+        if (limb_lz < 16 - skip_bits) return total_lz;
+        skip_bits = 0;
+        remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_big_endian
+        byte_offset += 16 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_little_endian
+        byte_offset -= 8 / CHAR_BIT;
+#endif
+
+        {
+            uint8_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_lz = zig_clz_u8(val_limb, 8 - skip_bits);
+        }
+
+        total_lz += limb_lz;
+        if (limb_lz < 8 - skip_bits) return total_lz;
+        skip_bits = 0;
+        remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_big_endian
+        byte_offset += 8 / CHAR_BIT;
+#endif
+    }
+
+    return total_lz;
+}
+
+/* Count trailing zero bits of a wide integer stored as a byte array of
+ * zig_int_bytes(bits) bytes.  Limbs are scanned from LEAST significant to
+ * most: on big-endian the bottom limb is at the end of the buffer, so
+ * byte_offset starts there and decrements.
+ * NOTE(review): every limb, including the most significant one, is counted
+ * at its full width (no `top_bits` adjustment as in zig_clz_big), so an
+ * all-zero value can report more trailing zeros than `bits` — confirm that
+ * callers clamp or that padding makes this unreachable. */
+static inline uint16_t zig_ctz_big(const void *val, bool is_signed, uint16_t bits) {
+    const uint8_t *val_bytes = val;
+    uint16_t byte_offset = 0;
+    uint16_t remaining_bytes = zig_int_bytes(bits);
+    uint16_t total_tz = 0;
+    uint16_t limb_tz;
+    (void)is_signed;
+
+#if zig_big_endian
+    byte_offset = remaining_bytes;
+#endif
+
+    while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 128 / CHAR_BIT;
+#endif
+
+        {
+            zig_u128 val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_tz = zig_ctz_u128(val_limb, 128);
+        }
+
+        total_tz += limb_tz;
+        /* A set bit was found inside this limb: the count is final. */
+        if (limb_tz < 128) return total_tz;
+        remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 128 / CHAR_BIT;
+#endif
+    }
+
+    /* Remaining loops: identical pattern for narrower limb sizes. */
+    while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 64 / CHAR_BIT;
+#endif
+
+        {
+            uint64_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_tz = zig_ctz_u64(val_limb, 64);
+        }
+
+        total_tz += limb_tz;
+        if (limb_tz < 64) return total_tz;
+        remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 64 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 32 / CHAR_BIT;
+#endif
+
+        {
+            uint32_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_tz = zig_ctz_u32(val_limb, 32);
+        }
+
+        total_tz += limb_tz;
+        if (limb_tz < 32) return total_tz;
+        remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 32 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 16 / CHAR_BIT;
+#endif
+
+        {
+            uint16_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_tz = zig_ctz_u16(val_limb, 16);
+        }
+
+        total_tz += limb_tz;
+        if (limb_tz < 16) return total_tz;
+        remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 16 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 8 / CHAR_BIT;
+#endif
+
+        {
+            uint8_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            limb_tz = zig_ctz_u8(val_limb, 8);
+        }
+
+        total_tz += limb_tz;
+        if (limb_tz < 8) return total_tz;
+        remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 8 / CHAR_BIT;
+#endif
+    }
+
+    return total_tz;
+}
+
+/* Count set bits (population count) of a wide integer stored as a byte array
+ * of zig_int_bytes(bits) bytes, by summing per-limb popcounts, largest limb
+ * size first.  Limb order does not matter for the sum; the endian offsets
+ * only keep byte_offset walking the buffer consistently.
+ * BUG FIX: the 16-bit and 8-bit loops previously used `total_pc =` instead
+ * of `total_pc +=`, discarding counts accumulated by the wider-limb loops
+ * for multi-limb values (the 128/64/32-bit loops all accumulate with `+=`).
+ * NOTE(review): limbs are counted at full width with `is_signed` ignored;
+ * this assumes the padding bits above `bits` are zero — sign-extended
+ * storage would overcount.  TODO confirm the stored representation. */
+static inline uint16_t zig_popcount_big(const void *val, bool is_signed, uint16_t bits) {
+    const uint8_t *val_bytes = val;
+    uint16_t byte_offset = 0;
+    uint16_t remaining_bytes = zig_int_bytes(bits);
+    uint16_t total_pc = 0;
+    (void)is_signed;
+
+#if zig_big_endian
+    byte_offset = remaining_bytes;
+#endif
+
+    while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 128 / CHAR_BIT;
+#endif
+
+        {
+            zig_u128 val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            total_pc += zig_popcount_u128(val_limb, 128);
+        }
+
+        remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 128 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 64 / CHAR_BIT;
+#endif
+
+        {
+            uint64_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            total_pc += zig_popcount_u64(val_limb, 64);
+        }
+
+        remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 64 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 32 / CHAR_BIT;
+#endif
+
+        {
+            uint32_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            total_pc += zig_popcount_u32(val_limb, 32);
+        }
+
+        remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 32 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 16 / CHAR_BIT;
+#endif
+
+        {
+            uint16_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            total_pc += zig_popcount_u16(val_limb, 16);
+        }
+
+        remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 16 / CHAR_BIT;
+#endif
+    }
+
+    while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_big_endian
+        byte_offset -= 8 / CHAR_BIT;
+#endif
+
+        {
+            uint8_t val_limb;
+
+            memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            total_pc += zig_popcount_u8(val_limb, 8);
+        }
+
+        remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+        byte_offset += 8 / CHAR_BIT;
+#endif
+    }
+
+    return total_pc;
+}
+
/* ========================= Floating Point Support ========================= */
#if _MSC_VER
@@ -1810,252 +2758,253 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) {
#if (zig_has_builtin(nan) && zig_has_builtin(nans) && zig_has_builtin(inf)) || defined(zig_gnuc)
#define zig_has_float_builtins 1
-#define zig_as_special_f16(sign, name, arg, repr) sign zig_as_f16(__builtin_##name, )(arg)
-#define zig_as_special_f32(sign, name, arg, repr) sign zig_as_f32(__builtin_##name, )(arg)
-#define zig_as_special_f64(sign, name, arg, repr) sign zig_as_f64(__builtin_##name, )(arg)
-#define zig_as_special_f80(sign, name, arg, repr) sign zig_as_f80(__builtin_##name, )(arg)
-#define zig_as_special_f128(sign, name, arg, repr) sign zig_as_f128(__builtin_##name, )(arg)
-#define zig_as_special_c_longdouble(sign, name, arg, repr) sign zig_as_c_longdouble(__builtin_##name, )(arg)
+#define zig_make_special_f16(sign, name, arg, repr) sign zig_make_f16(__builtin_##name, )(arg)
+#define zig_make_special_f32(sign, name, arg, repr) sign zig_make_f32(__builtin_##name, )(arg)
+#define zig_make_special_f64(sign, name, arg, repr) sign zig_make_f64(__builtin_##name, )(arg)
+#define zig_make_special_f80(sign, name, arg, repr) sign zig_make_f80(__builtin_##name, )(arg)
+#define zig_make_special_f128(sign, name, arg, repr) sign zig_make_f128(__builtin_##name, )(arg)
#else
#define zig_has_float_builtins 0
-#define zig_as_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr)
-#define zig_as_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr)
-#define zig_as_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr)
-#define zig_as_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr)
-#define zig_as_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr)
-#define zig_as_special_c_longdouble(sign, name, arg, repr) zig_float_from_repr_c_longdouble(repr)
+#define zig_make_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr)
+#define zig_make_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr)
+#define zig_make_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr)
+#define zig_make_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr)
+#define zig_make_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr)
#endif
#define zig_has_f16 1
#define zig_bitSizeOf_f16 16
+typedef int16_t zig_repr_f16;
#define zig_libc_name_f16(name) __##name##h
-#define zig_as_special_constant_f16(sign, name, arg, repr) zig_as_special_f16(sign, name, arg, repr)
+#define zig_init_special_f16(sign, name, arg, repr) zig_make_special_f16(sign, name, arg, repr)
#if FLT_MANT_DIG == 11
typedef float zig_f16;
-#define zig_as_f16(fp, repr) fp##f
+#define zig_make_f16(fp, repr) fp##f
#elif DBL_MANT_DIG == 11
typedef double zig_f16;
-#define zig_as_f16(fp, repr) fp
+#define zig_make_f16(fp, repr) fp
#elif LDBL_MANT_DIG == 11
#define zig_bitSizeOf_c_longdouble 16
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f16 zig_repr_c_longdouble;
+#endif
typedef long double zig_f16;
-#define zig_as_f16(fp, repr) fp##l
+#define zig_make_f16(fp, repr) fp##l
#elif FLT16_MANT_DIG == 11 && (zig_has_builtin(inff16) || defined(zig_gnuc))
typedef _Float16 zig_f16;
-#define zig_as_f16(fp, repr) fp##f16
+#define zig_make_f16(fp, repr) fp##f16
#elif defined(__SIZEOF_FP16__)
typedef __fp16 zig_f16;
-#define zig_as_f16(fp, repr) fp##f16
+#define zig_make_f16(fp, repr) fp##f16
#else
#undef zig_has_f16
#define zig_has_f16 0
-#define zig_repr_f16 i16
-typedef zig_i16 zig_f16;
-#define zig_as_f16(fp, repr) repr
-#undef zig_as_special_f16
-#define zig_as_special_f16(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f16
-#define zig_as_special_constant_f16(sign, name, arg, repr) repr
+#define zig_bitSizeOf_repr_f16 16
+typedef int16_t zig_f16;
+#define zig_make_f16(fp, repr) repr
+#undef zig_make_special_f16
+#define zig_make_special_f16(sign, name, arg, repr) repr
+#undef zig_init_special_f16
+#define zig_init_special_f16(sign, name, arg, repr) repr
#endif
#define zig_has_f32 1
#define zig_bitSizeOf_f32 32
+typedef int32_t zig_repr_f32;
#define zig_libc_name_f32(name) name##f
#if _MSC_VER
-#define zig_as_special_constant_f32(sign, name, arg, repr) sign zig_as_f32(zig_msvc_flt_##name, )
+#define zig_init_special_f32(sign, name, arg, repr) sign zig_make_f32(zig_msvc_flt_##name, )
#else
-#define zig_as_special_constant_f32(sign, name, arg, repr) zig_as_special_f32(sign, name, arg, repr)
+#define zig_init_special_f32(sign, name, arg, repr) zig_make_special_f32(sign, name, arg, repr)
#endif
#if FLT_MANT_DIG == 24
typedef float zig_f32;
-#define zig_as_f32(fp, repr) fp##f
+#define zig_make_f32(fp, repr) fp##f
#elif DBL_MANT_DIG == 24
typedef double zig_f32;
-#define zig_as_f32(fp, repr) fp
+#define zig_make_f32(fp, repr) fp
#elif LDBL_MANT_DIG == 24
#define zig_bitSizeOf_c_longdouble 32
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f32 zig_repr_c_longdouble;
+#endif
typedef long double zig_f32;
-#define zig_as_f32(fp, repr) fp##l
+#define zig_make_f32(fp, repr) fp##l
#elif FLT32_MANT_DIG == 24
typedef _Float32 zig_f32;
-#define zig_as_f32(fp, repr) fp##f32
+#define zig_make_f32(fp, repr) fp##f32
#else
#undef zig_has_f32
#define zig_has_f32 0
-#define zig_repr_f32 i32
-typedef zig_i32 zig_f32;
-#define zig_as_f32(fp, repr) repr
-#undef zig_as_special_f32
-#define zig_as_special_f32(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f32
-#define zig_as_special_constant_f32(sign, name, arg, repr) repr
+#define zig_bitSizeOf_repr_f32 32
+typedef int32_t zig_f32;
+#define zig_make_f32(fp, repr) repr
+#undef zig_make_special_f32
+#define zig_make_special_f32(sign, name, arg, repr) repr
+#undef zig_init_special_f32
+#define zig_init_special_f32(sign, name, arg, repr) repr
#endif
#define zig_has_f64 1
#define zig_bitSizeOf_f64 64
+typedef int64_t zig_repr_f64;
#define zig_libc_name_f64(name) name
#if _MSC_VER
#ifdef ZIG_TARGET_ABI_MSVC
#define zig_bitSizeOf_c_longdouble 64
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f64 zig_repr_c_longdouble;
#endif
-#define zig_as_special_constant_f64(sign, name, arg, repr) sign zig_as_f64(zig_msvc_flt_##name, )
+#endif
+#define zig_init_special_f64(sign, name, arg, repr) sign zig_make_f64(zig_msvc_flt_##name, )
#else /* _MSC_VER */
-#define zig_as_special_constant_f64(sign, name, arg, repr) zig_as_special_f64(sign, name, arg, repr)
+#define zig_init_special_f64(sign, name, arg, repr) zig_make_special_f64(sign, name, arg, repr)
#endif /* _MSC_VER */
#if FLT_MANT_DIG == 53
typedef float zig_f64;
-#define zig_as_f64(fp, repr) fp##f
+#define zig_make_f64(fp, repr) fp##f
#elif DBL_MANT_DIG == 53
typedef double zig_f64;
-#define zig_as_f64(fp, repr) fp
+#define zig_make_f64(fp, repr) fp
#elif LDBL_MANT_DIG == 53
#define zig_bitSizeOf_c_longdouble 64
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f64 zig_repr_c_longdouble;
+#endif
typedef long double zig_f64;
-#define zig_as_f64(fp, repr) fp##l
+#define zig_make_f64(fp, repr) fp##l
#elif FLT64_MANT_DIG == 53
typedef _Float64 zig_f64;
-#define zig_as_f64(fp, repr) fp##f64
+#define zig_make_f64(fp, repr) fp##f64
#elif FLT32X_MANT_DIG == 53
typedef _Float32x zig_f64;
-#define zig_as_f64(fp, repr) fp##f32x
+#define zig_make_f64(fp, repr) fp##f32x
#else
#undef zig_has_f64
#define zig_has_f64 0
-#define zig_repr_f64 i64
-typedef zig_i64 zig_f64;
-#define zig_as_f64(fp, repr) repr
-#undef zig_as_special_f64
-#define zig_as_special_f64(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f64
-#define zig_as_special_constant_f64(sign, name, arg, repr) repr
+#define zig_bitSizeOf_repr_f64 64
+typedef int64_t zig_f64;
+#define zig_make_f64(fp, repr) repr
+#undef zig_make_special_f64
+#define zig_make_special_f64(sign, name, arg, repr) repr
+#undef zig_init_special_f64
+#define zig_init_special_f64(sign, name, arg, repr) repr
#endif
#define zig_has_f80 1
#define zig_bitSizeOf_f80 80
+typedef zig_i128 zig_repr_f80;
#define zig_libc_name_f80(name) __##name##x
-#define zig_as_special_constant_f80(sign, name, arg, repr) zig_as_special_f80(sign, name, arg, repr)
+#define zig_init_special_f80(sign, name, arg, repr) zig_make_special_f80(sign, name, arg, repr)
#if FLT_MANT_DIG == 64
typedef float zig_f80;
-#define zig_as_f80(fp, repr) fp##f
+#define zig_make_f80(fp, repr) fp##f
#elif DBL_MANT_DIG == 64
typedef double zig_f80;
-#define zig_as_f80(fp, repr) fp
+#define zig_make_f80(fp, repr) fp
#elif LDBL_MANT_DIG == 64
#define zig_bitSizeOf_c_longdouble 80
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f80 zig_repr_c_longdouble;
+#endif
typedef long double zig_f80;
-#define zig_as_f80(fp, repr) fp##l
+#define zig_make_f80(fp, repr) fp##l
#elif FLT80_MANT_DIG == 64
typedef _Float80 zig_f80;
-#define zig_as_f80(fp, repr) fp##f80
+#define zig_make_f80(fp, repr) fp##f80
#elif FLT64X_MANT_DIG == 64
typedef _Float64x zig_f80;
-#define zig_as_f80(fp, repr) fp##f64x
+#define zig_make_f80(fp, repr) fp##f64x
#elif defined(__SIZEOF_FLOAT80__)
typedef __float80 zig_f80;
-#define zig_as_f80(fp, repr) fp##l
+#define zig_make_f80(fp, repr) fp##l
#else
#undef zig_has_f80
#define zig_has_f80 0
-#define zig_repr_f80 i128
+#define zig_bitSizeOf_repr_f80 128
typedef zig_i128 zig_f80;
-#define zig_as_f80(fp, repr) repr
-#undef zig_as_special_f80
-#define zig_as_special_f80(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f80
-#define zig_as_special_constant_f80(sign, name, arg, repr) repr
+#define zig_make_f80(fp, repr) repr
+#undef zig_make_special_f80
+#define zig_make_special_f80(sign, name, arg, repr) repr
+#undef zig_init_special_f80
+#define zig_init_special_f80(sign, name, arg, repr) repr
#endif
#define zig_has_f128 1
#define zig_bitSizeOf_f128 128
+typedef zig_i128 zig_repr_f128;
#define zig_libc_name_f128(name) name##q
-#define zig_as_special_constant_f128(sign, name, arg, repr) zig_as_special_f128(sign, name, arg, repr)
+#define zig_init_special_f128(sign, name, arg, repr) zig_make_special_f128(sign, name, arg, repr)
#if FLT_MANT_DIG == 113
typedef float zig_f128;
-#define zig_as_f128(fp, repr) fp##f
+#define zig_make_f128(fp, repr) fp##f
#elif DBL_MANT_DIG == 113
typedef double zig_f128;
-#define zig_as_f128(fp, repr) fp
+#define zig_make_f128(fp, repr) fp
#elif LDBL_MANT_DIG == 113
#define zig_bitSizeOf_c_longdouble 128
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f128 zig_repr_c_longdouble;
+#endif
typedef long double zig_f128;
-#define zig_as_f128(fp, repr) fp##l
+#define zig_make_f128(fp, repr) fp##l
#elif FLT128_MANT_DIG == 113
typedef _Float128 zig_f128;
-#define zig_as_f128(fp, repr) fp##f128
+#define zig_make_f128(fp, repr) fp##f128
#elif FLT64X_MANT_DIG == 113
typedef _Float64x zig_f128;
-#define zig_as_f128(fp, repr) fp##f64x
+#define zig_make_f128(fp, repr) fp##f64x
#elif defined(__SIZEOF_FLOAT128__)
typedef __float128 zig_f128;
-#define zig_as_f128(fp, repr) fp##q
-#undef zig_as_special_f128
-#define zig_as_special_f128(sign, name, arg, repr) sign __builtin_##name##f128(arg)
+#define zig_make_f128(fp, repr) fp##q
+#undef zig_make_special_f128
+#define zig_make_special_f128(sign, name, arg, repr) sign __builtin_##name##f128(arg)
#else
#undef zig_has_f128
#define zig_has_f128 0
-#define zig_repr_f128 i128
+#define zig_bitSizeOf_repr_f128 128
typedef zig_i128 zig_f128;
-#define zig_as_f128(fp, repr) repr
-#undef zig_as_special_f128
-#define zig_as_special_f128(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f128
-#define zig_as_special_constant_f128(sign, name, arg, repr) repr
+#define zig_make_f128(fp, repr) repr
+#undef zig_make_special_f128
+#define zig_make_special_f128(sign, name, arg, repr) repr
+#undef zig_init_special_f128
+#define zig_init_special_f128(sign, name, arg, repr) repr
#endif
-#define zig_has_c_longdouble 1
-
-#ifdef ZIG_TARGET_ABI_MSVC
-#define zig_libc_name_c_longdouble(name) name
-#else
-#define zig_libc_name_c_longdouble(name) name##l
-#endif
-
-#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) zig_as_special_c_longdouble(sign, name, arg, repr)
#ifdef zig_bitSizeOf_c_longdouble
+#define zig_has_c_longdouble 1
#ifdef ZIG_TARGET_ABI_MSVC
-typedef double zig_c_longdouble;
#undef zig_bitSizeOf_c_longdouble
#define zig_bitSizeOf_c_longdouble 64
-#define zig_as_c_longdouble(fp, repr) fp
+typedef zig_f64 zig_c_longdouble;
+typedef zig_repr_f64 zig_repr_c_longdouble;
#else
typedef long double zig_c_longdouble;
-#define zig_as_c_longdouble(fp, repr) fp##l
#endif
#else /* zig_bitSizeOf_c_longdouble */
-#undef zig_has_c_longdouble
#define zig_has_c_longdouble 0
-#define zig_bitSizeOf_c_longdouble 80
-#define zig_compiler_rt_abbrev_c_longdouble zig_compiler_rt_abbrev_f80
-#define zig_repr_c_longdouble i128
-typedef zig_i128 zig_c_longdouble;
-#define zig_as_c_longdouble(fp, repr) repr
-#undef zig_as_special_c_longdouble
-#define zig_as_special_c_longdouble(sign, name, arg, repr) repr
-#undef zig_as_special_constant_c_longdouble
-#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) repr
+#define zig_bitSizeOf_repr_c_longdouble 128
+typedef zig_f128 zig_c_longdouble;
+typedef zig_repr_f128 zig_repr_c_longdouble;
#endif /* zig_bitSizeOf_c_longdouble */
#if !zig_has_float_builtins
-#define zig_float_from_repr(Type, ReprType) \
- static inline zig_##Type zig_float_from_repr_##Type(zig_##ReprType repr) { \
- return *((zig_##Type*)&repr); \
+#define zig_float_from_repr(Type) \
+ static inline zig_##Type zig_float_from_repr_##Type(zig_repr_##Type repr) { \
+ zig_##Type result; \
+ memcpy(&result, &repr, sizeof(result)); \
+ return result; \
}
-zig_float_from_repr(f16, u16)
-zig_float_from_repr(f32, u32)
-zig_float_from_repr(f64, u64)
-zig_float_from_repr(f80, u128)
-zig_float_from_repr(f128, u128)
-#if zig_bitSizeOf_c_longdouble == 80
-zig_float_from_repr(c_longdouble, u128)
-#else
-#define zig_expand_float_from_repr(Type, ReprType) zig_float_from_repr(Type, ReprType)
-zig_expand_float_from_repr(c_longdouble, zig_expand_concat(u, zig_bitSizeOf_c_longdouble))
-#endif
+zig_float_from_repr(f16)
+zig_float_from_repr(f32)
+zig_float_from_repr(f64)
+zig_float_from_repr(f80)
+zig_float_from_repr(f128)
#endif
#define zig_cast_f16 (zig_f16)
@@ -2064,41 +3013,42 @@ zig_expand_float_from_repr(c_longdouble, zig_expand_concat(u, zig_bitSizeOf_c_lo
#if _MSC_VER && !zig_has_f128
#define zig_cast_f80
-#define zig_cast_c_longdouble
#define zig_cast_f128
#else
#define zig_cast_f80 (zig_f80)
-#define zig_cast_c_longdouble (zig_c_longdouble)
#define zig_cast_f128 (zig_f128)
#endif
#define zig_convert_builtin(ResType, operation, ArgType, version) \
- zig_extern zig_##ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \
- zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(zig_##ArgType);
-zig_convert_builtin(f16, trunc, f32, 2)
-zig_convert_builtin(f16, trunc, f64, 2)
-zig_convert_builtin(f16, trunc, f80, 2)
-zig_convert_builtin(f16, trunc, f128, 2)
-zig_convert_builtin(f32, extend, f16, 2)
-zig_convert_builtin(f32, trunc, f64, 2)
-zig_convert_builtin(f32, trunc, f80, 2)
-zig_convert_builtin(f32, trunc, f128, 2)
-zig_convert_builtin(f64, extend, f16, 2)
-zig_convert_builtin(f64, extend, f32, 2)
-zig_convert_builtin(f64, trunc, f80, 2)
-zig_convert_builtin(f64, trunc, f128, 2)
-zig_convert_builtin(f80, extend, f16, 2)
-zig_convert_builtin(f80, extend, f32, 2)
-zig_convert_builtin(f80, extend, f64, 2)
-zig_convert_builtin(f80, trunc, f128, 2)
-zig_convert_builtin(f128, extend, f16, 2)
-zig_convert_builtin(f128, extend, f32, 2)
-zig_convert_builtin(f128, extend, f64, 2)
-zig_convert_builtin(f128, extend, f80, 2)
+ zig_extern ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \
+ zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(ArgType);
+zig_convert_builtin(zig_f16, trunc, zig_f32, 2)
+zig_convert_builtin(zig_f16, trunc, zig_f64, 2)
+zig_convert_builtin(zig_f16, trunc, zig_f80, 2)
+zig_convert_builtin(zig_f16, trunc, zig_f128, 2)
+zig_convert_builtin(zig_f32, extend, zig_f16, 2)
+zig_convert_builtin(zig_f32, trunc, zig_f64, 2)
+zig_convert_builtin(zig_f32, trunc, zig_f80, 2)
+zig_convert_builtin(zig_f32, trunc, zig_f128, 2)
+zig_convert_builtin(zig_f64, extend, zig_f16, 2)
+zig_convert_builtin(zig_f64, extend, zig_f32, 2)
+zig_convert_builtin(zig_f64, trunc, zig_f80, 2)
+zig_convert_builtin(zig_f64, trunc, zig_f128, 2)
+zig_convert_builtin(zig_f80, extend, zig_f16, 2)
+zig_convert_builtin(zig_f80, extend, zig_f32, 2)
+zig_convert_builtin(zig_f80, extend, zig_f64, 2)
+zig_convert_builtin(zig_f80, trunc, zig_f128, 2)
+zig_convert_builtin(zig_f128, extend, zig_f16, 2)
+zig_convert_builtin(zig_f128, extend, zig_f32, 2)
+zig_convert_builtin(zig_f128, extend, zig_f64, 2)
+zig_convert_builtin(zig_f128, extend, zig_f80, 2)
#define zig_float_negate_builtin_0(Type) \
static inline zig_##Type zig_neg_##Type(zig_##Type arg) { \
- return zig_expand_concat(zig_xor_, zig_repr_##Type)(arg, zig_expand_minInt(zig_repr_##Type, zig_bitSizeOf_##Type)); \
+ return zig_expand_concat(zig_xor_i, zig_bitSizeOf_repr_##Type)( \
+ arg, \
+ zig_minInt_i(zig_bitSizeOf_repr_##Type, zig_bitSizeOf_##Type) \
+ ); \
}
#define zig_float_negate_builtin_1(Type) \
static inline zig_##Type zig_neg_##Type(zig_##Type arg) { \
@@ -2106,28 +3056,28 @@ zig_convert_builtin(f128, extend, f80, 2)
}
#define zig_float_less_builtin_0(Type, operation) \
- zig_extern zig_i32 zig_expand_concat(zig_expand_concat(__##operation, \
- zig_compiler_rt_abbrev_##Type), 2)(zig_##Type, zig_##Type); \
- static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
- return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_##Type), 2)(lhs, rhs); \
+ zig_extern int32_t zig_expand_concat(zig_expand_concat(__##operation, \
+ zig_compiler_rt_abbrev_zig_##Type), 2)(zig_##Type, zig_##Type); \
+ static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_zig_##Type), 2)(lhs, rhs); \
}
#define zig_float_less_builtin_1(Type, operation) \
- static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
return (!(lhs <= rhs) - (lhs < rhs)); \
}
#define zig_float_greater_builtin_0(Type, operation) \
zig_float_less_builtin_0(Type, operation)
#define zig_float_greater_builtin_1(Type, operation) \
- static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
return ((lhs > rhs) - !(lhs >= rhs)); \
}
#define zig_float_binary_builtin_0(Type, operation, operator) \
zig_extern zig_##Type zig_expand_concat(zig_expand_concat(__##operation, \
- zig_compiler_rt_abbrev_##Type), 3)(zig_##Type, zig_##Type); \
+ zig_compiler_rt_abbrev_zig_##Type), 3)(zig_##Type, zig_##Type); \
static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
- return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_##Type), 3)(lhs, rhs); \
+ return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_zig_##Type), 3)(lhs, rhs); \
}
#define zig_float_binary_builtin_1(Type, operation, operator) \
static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
@@ -2135,18 +3085,18 @@ zig_convert_builtin(f128, extend, f80, 2)
}
#define zig_float_builtins(Type) \
- zig_convert_builtin(i32, fix, Type, ) \
- zig_convert_builtin(u32, fixuns, Type, ) \
- zig_convert_builtin(i64, fix, Type, ) \
- zig_convert_builtin(u64, fixuns, Type, ) \
- zig_convert_builtin(i128, fix, Type, ) \
- zig_convert_builtin(u128, fixuns, Type, ) \
- zig_convert_builtin(Type, float, i32, ) \
- zig_convert_builtin(Type, floatun, u32, ) \
- zig_convert_builtin(Type, float, i64, ) \
- zig_convert_builtin(Type, floatun, u64, ) \
- zig_convert_builtin(Type, float, i128, ) \
- zig_convert_builtin(Type, floatun, u128, ) \
+ zig_convert_builtin( int32_t, fix, zig_##Type, ) \
+ zig_convert_builtin(uint32_t, fixuns, zig_##Type, ) \
+ zig_convert_builtin( int64_t, fix, zig_##Type, ) \
+ zig_convert_builtin(uint64_t, fixuns, zig_##Type, ) \
+ zig_convert_builtin(zig_i128, fix, zig_##Type, ) \
+ zig_convert_builtin(zig_u128, fixuns, zig_##Type, ) \
+ zig_convert_builtin(zig_##Type, float, int32_t, ) \
+ zig_convert_builtin(zig_##Type, floatun, uint32_t, ) \
+ zig_convert_builtin(zig_##Type, float, int64_t, ) \
+ zig_convert_builtin(zig_##Type, floatun, uint64_t, ) \
+ zig_convert_builtin(zig_##Type, float, zig_i128, ) \
+ zig_convert_builtin(zig_##Type, floatun, zig_u128, ) \
zig_expand_concat(zig_float_negate_builtin_, zig_has_##Type)(Type) \
zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, cmp) \
zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, ne) \
@@ -2194,155 +3144,162 @@ zig_float_builtins(f32)
zig_float_builtins(f64)
zig_float_builtins(f80)
zig_float_builtins(f128)
-zig_float_builtins(c_longdouble)
#if _MSC_VER && (_M_IX86 || _M_X64)
// TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64
-#define zig_msvc_atomics(Type, suffix) \
- static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \
- zig_##Type comparand = *expected; \
- zig_##Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \
+#define zig_msvc_atomics(ZigType, Type, suffix) \
+ static inline bool zig_msvc_cmpxchg_##ZigType(Type volatile* obj, Type* expected, Type desired) { \
+ Type comparand = *expected; \
+ Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \
bool exchanged = initial == comparand; \
if (!exchanged) { \
*expected = initial; \
} \
return exchanged; \
} \
- static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_xchg_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedExchange##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_add_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedExchangeAdd##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_sub_##ZigType(Type volatile* obj, Type value) { \
bool success = false; \
- zig_##Type new; \
- zig_##Type prev; \
+ Type new; \
+ Type prev; \
while (!success) { \
prev = *obj; \
new = prev - value; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \
} \
return prev; \
} \
- static inline zig_##Type zig_msvc_atomicrmw_or_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_or_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedOr##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_xor_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_xor_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedXor##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_and_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_and_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedAnd##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_nand_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_nand_##ZigType(Type volatile* obj, Type value) { \
bool success = false; \
- zig_##Type new; \
- zig_##Type prev; \
+ Type new; \
+ Type prev; \
while (!success) { \
prev = *obj; \
new = ~(prev & value); \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \
} \
return prev; \
} \
- static inline zig_##Type zig_msvc_atomicrmw_min_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_min_##ZigType(Type volatile* obj, Type value) { \
bool success = false; \
- zig_##Type new; \
- zig_##Type prev; \
+ Type new; \
+ Type prev; \
while (!success) { \
prev = *obj; \
new = value < prev ? value : prev; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \
} \
return prev; \
} \
- static inline zig_##Type zig_msvc_atomicrmw_max_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_max_##ZigType(Type volatile* obj, Type value) { \
bool success = false; \
- zig_##Type new; \
- zig_##Type prev; \
+ Type new; \
+ Type prev; \
while (!success) { \
prev = *obj; \
new = value > prev ? value : prev; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \
} \
return prev; \
} \
- static inline void zig_msvc_atomic_store_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline void zig_msvc_atomic_store_##ZigType(Type volatile* obj, Type value) { \
_InterlockedExchange##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomic_load_##Type(zig_##Type volatile* obj) { \
+ static inline Type zig_msvc_atomic_load_##ZigType(Type volatile* obj) { \
return _InterlockedOr##suffix(obj, 0); \
}
-zig_msvc_atomics(u8, 8)
-zig_msvc_atomics(i8, 8)
-zig_msvc_atomics(u16, 16)
-zig_msvc_atomics(i16, 16)
-zig_msvc_atomics(u32, )
-zig_msvc_atomics(i32, )
+zig_msvc_atomics( u8, uint8_t, 8)
+zig_msvc_atomics( i8, int8_t, 8)
+zig_msvc_atomics(u16, uint16_t, 16)
+zig_msvc_atomics(i16, int16_t, 16)
+zig_msvc_atomics(u32, uint32_t, )
+zig_msvc_atomics(i32, int32_t, )
#if _M_X64
-zig_msvc_atomics(u64, 64)
-zig_msvc_atomics(i64, 64)
+zig_msvc_atomics(u64, uint64_t, 64)
+zig_msvc_atomics(i64, int64_t, 64)
#endif
#define zig_msvc_flt_atomics(Type, ReprType, suffix) \
static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \
- zig_##ReprType comparand = *((zig_##ReprType*)expected); \
- zig_##ReprType initial = _InterlockedCompareExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&desired), comparand); \
- bool exchanged = initial == comparand; \
- if (!exchanged) { \
- *expected = *((zig_##Type*)&initial); \
- } \
- return exchanged; \
+ ReprType exchange; \
+ ReprType comparand; \
+ ReprType initial; \
+ bool success; \
+ memcpy(&comparand, expected, sizeof(comparand)); \
+ memcpy(&exchange, &desired, sizeof(exchange)); \
+ initial = _InterlockedCompareExchange##suffix((ReprType volatile*)obj, exchange, comparand); \
+ success = initial == comparand; \
+ if (!success) memcpy(expected, &initial, sizeof(*expected)); \
+ return success; \
} \
static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \
- zig_##ReprType initial = _InterlockedExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&value)); \
- return *((zig_##Type*)&initial); \
+ ReprType repr; \
+ ReprType initial; \
+ zig_##Type result; \
+ memcpy(&repr, &value, sizeof(repr)); \
+ initial = _InterlockedExchange##suffix((ReprType volatile*)obj, repr); \
+ memcpy(&result, &initial, sizeof(result)); \
+ return result; \
} \
static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \
- bool success = false; \
- zig_##ReprType new; \
- zig_##Type prev; \
- while (!success) { \
- prev = *obj; \
- new = prev + value; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \
- } \
- return prev; \
+ ReprType repr; \
+ zig_##Type expected; \
+ zig_##Type desired; \
+ repr = *(ReprType volatile*)obj; \
+ memcpy(&expected, &repr, sizeof(expected)); \
+ do { \
+ desired = expected + value; \
+ } while (!zig_msvc_cmpxchg_##Type(obj, &expected, desired)); \
+ return expected; \
} \
static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \
- bool success = false; \
- zig_##ReprType new; \
- zig_##Type prev; \
- while (!success) { \
- prev = *obj; \
- new = prev - value; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \
- } \
- return prev; \
+ ReprType repr; \
+ zig_##Type expected; \
+ zig_##Type desired; \
+ repr = *(ReprType volatile*)obj; \
+ memcpy(&expected, &repr, sizeof(expected)); \
+ do { \
+ desired = expected - value; \
+ } while (!zig_msvc_cmpxchg_##Type(obj, &expected, desired)); \
+ return expected; \
}
-zig_msvc_flt_atomics(f32, u32, )
+zig_msvc_flt_atomics(f32, uint32_t, )
#if _M_X64
-zig_msvc_flt_atomics(f64, u64, 64)
+zig_msvc_flt_atomics(f64, uint64_t, 64)
#endif
#if _M_IX86
static inline void zig_msvc_atomic_barrier() {
- zig_i32 barrier;
+ int32_t barrier;
__asm {
xchg barrier, eax
}
}
-static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, zig_u32* arg) {
+static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, void* arg) {
return _InterlockedExchangePointer(obj, arg);
}
-static inline void zig_msvc_atomic_store_p32(void** obj, zig_u32* arg) {
+static inline void zig_msvc_atomic_store_p32(void** obj, void* arg) {
_InterlockedExchangePointer(obj, arg);
}
@@ -2360,11 +3317,11 @@ static inline bool zig_msvc_cmpxchg_p32(void** obj, void** expected, void* desir
return exchanged;
}
#else /* _M_IX86 */
-static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, zig_u64* arg) {
+static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, void* arg) {
return _InterlockedExchangePointer(obj, arg);
}
-static inline void zig_msvc_atomic_store_p64(void** obj, zig_u64* arg) {
+static inline void zig_msvc_atomic_store_p64(void** obj, void* arg) {
_InterlockedExchangePointer(obj, arg);
}
@@ -2383,11 +3340,11 @@ static inline bool zig_msvc_cmpxchg_p64(void** obj, void** expected, void* desir
}
static inline bool zig_msvc_cmpxchg_u128(zig_u128 volatile* obj, zig_u128* expected, zig_u128 desired) {
- return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_i64*)expected);
+ return _InterlockedCompareExchange128((int64_t volatile*)obj, desired.hi, desired.lo, (int64_t*)expected);
}
static inline bool zig_msvc_cmpxchg_i128(zig_i128 volatile* obj, zig_i128* expected, zig_i128 desired) {
- return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_u64*)expected);
+ return _InterlockedCompareExchange128((int64_t volatile*)obj, desired.hi, desired.lo, (uint64_t*)expected);
}
#define zig_msvc_atomics_128xchg(Type) \
@@ -2429,7 +3386,7 @@ zig_msvc_atomics_128op(u128, max)
#endif /* _MSC_VER && (_M_IX86 || _M_X64) */
-/* ========================= Special Case Intrinsics ========================= */
+/* ======================== Special Case Intrinsics ========================= */
#if (_MSC_VER && _M_X64) || defined(__x86_64__)
@@ -2459,8 +3416,8 @@ static inline void* zig_x86_windows_teb(void) {
#if (_MSC_VER && (_M_IX86 || _M_X64)) || defined(__i386__) || defined(__x86_64__)
-static inline void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, zig_u32* ebx, zig_u32* ecx, zig_u32* edx) {
- zig_u32 cpu_info[4];
+static inline void zig_x86_cpuid(uint32_t leaf_id, uint32_t subid, uint32_t* eax, uint32_t* ebx, uint32_t* ecx, uint32_t* edx) {
+ uint32_t cpu_info[4];
#if _MSC_VER
__cpuidex(cpu_info, leaf_id, subid);
#else
@@ -2472,12 +3429,12 @@ static inline void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, z
*edx = cpu_info[3];
}
-static inline zig_u32 zig_x86_get_xcr0(void) {
+static inline uint32_t zig_x86_get_xcr0(void) {
#if _MSC_VER
- return (zig_u32)_xgetbv(0);
+ return (uint32_t)_xgetbv(0);
#else
- zig_u32 eax;
- zig_u32 edx;
+ uint32_t eax;
+ uint32_t edx;
__asm__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0));
return eax;
#endif
diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm
index d7bf519b41a74966a3bc46b133fd9b34aa200f75..fa2b2efa03b4ed138a7f8c52e72648d828f2116a 100644
GIT binary patch
delta 933731
zcmc${349dA_AfqFJzFv}NoOSsfu2c#1PEahP(WxD5fQ=Vx?C4{pndt=YeZTkl|NrkzV7jZ$sk7Co
zQ)~B({PQigtxRJ<+oS(_j@fKBHqUYIKu3^1wt<|!BynpG=dj>Fk-(KexG?&15(447
zj2qmUSpXUY1*&bJ+e8qwhYqz3j0y%P1ru{XW+<`5w<9$v1CzX7H@8VHDd=SqrA>frp|t8%QF@ExB-kq+Yw%=0!emT~U&a0}K{+<^Wi7vZyGx%kJVr
ze(qtA)8%qND+x3hJcSFz=u6y*8!usdQP}C^EQPY550qgyaBg!_CbQY?PVRQwxFow=
z5Z?{e;#@$4&6wRuqQM$2kIQb4cG{yo5W&Gv+6EKbUCimVyBuE07Gz!*kAksKoO2gd
zgUe%!lH4!?EWs#;K9cx`=wL-(QUfq(06F40i*D+KFm^jc0(<6>Jdy+d823VCpmgp+
zHN)+k*-#HKjRRJNeVEVZ3xY9sF$sPyA$eu8k%k4yU6HU7Z9~LJCKGqG50n*%k1=;E+7Zq6muWpkiHY}{=l
z7q!`=Tuz(A=aXaylM!xg?r2n)%Y&MTcDZ}l+)h{@N|R{ePf*-7NKQ_6_-u|Sd?Z(t
z5(VM$k)j<@vCJ(&F}Rf5hU_R*u-)N`MV1TbkPVi$#X>$G@=y%O__j%ua!KNYyHuCW
z=8znyeaU~5lzOvGf-V%wf^NIZ7aQwvB9b62;V%k4Y;&W4Tavsmv(3%j9<*nhgox&J
zM!_()SZEIGN{B%8?Q+2*9WWuoR6t;La#dtfcrPo5$VjRvU{VBiTLS
z#)(FYq&<+?q6J)5LQ66}P~1UPfPihcN5L!(=8%Gj@Gv6F)uwnOr0H*+iS2bel4%HAM07O@rNU5K5Wa=53r6a~|uZ-!69dEhY~
zw-50Nts$xb1u$4J=t6izmw?VH3hbR|lW;+s6on4XiK>7X*(v_mAsn?SyYOirq_oFN
z5?Z(u9kT=kIE({viQ*ud3E7#+h&wjfCc^=29y@{nLMqE}N8QFCyvq%zb-9B6Sd^tc
zC(6fL!d$Uvo>Xn5+K6J*S)>2QmF(_=dyotWK(HR1P4sLI4t)_U(OEfB8z6^P
zf~bhN;sXu^Yh-00Qql3Vg1_`y)Hs3Y1L
zbVO5R!zX~ExH#%ZQK2wc9i8tQAXZ#q3#LZ$gtMF}L@P$f|Lwq)ctMZGg{E{lY7?9M8O;hB2
zNUKQds!du{N`P+~spY2RaxGCW>EZ~m3N4tD##U<6Qd)XZ7%G4Om)9@-CgpaFMzqYd
zOg2;Nh3_@m;I#IX}0AP-Yg}sX4R0FiRJCjnc(lOX=^k9`cggfvYyHO?HCA(4|jyNv5Id
zF;m!+7Gcx1GxJBtz17%IkrG1^V)Awc#BC4)
zOpXA?Y=L_jdVp4G1Td;Xd$H{Ud_sw~t!-ZX;u5q=IovMQno><}m%-PT2zTw>u2qgr
zg*qyKQ7zkAB3fyH4-R+~kIMU`t1RCi4QKH_t1j89&1yH&dG!-iaeML?QRV#8rP|r&
z(zKV_H(>L$@$K6(U8`*0o*y}){oej*j;1o;x?aw!MGzuWI2nJ>XfF$>y)tgFDou##D+EDA*u@^BY=-A6yFcu728VnYX6%BIvSYpu1
zWHA2(tYx-5(|I<&gIY?c!}HsvuXY;7LT>3Z+R#Z~qz#4K(noHo^|_&|iCb;r)?ACb
z8@OM#(iYv}N;)!mRH*5OBErTWwBK%M%eU{-ayly}v7#8W@y1-kxG+XAp58gc_=3s!
z^0kc5MHtWj0oohh6=GcRgVwW4TfTFj_F9(^BP666l-deP!?le!zCm7bYqwtf
z{74a@XO1F=VDITed@Nm)yC>!BS`)5%Ut7fs7pLfalyVcT)ZWtW=q{*j?%vC}g`BvV
z@G*8?zdDz-<9`;Bk2P1BUs2vLsr>jh8f?Q56|cXcq7#@HT-v8qswvrf-v>s#iDCNZ
zsB`FI>vk5EZ}sTF7Hb`QbV!;p8p`d4){Ib>L{+qiu
zEhYl8!O|vV*?9;qVUR>uNzz@OwRH5&4>EFz!#zV3i(*Y*jE(rB*65ZSlTOb9MWYf!
zB5gdhbjU3bY4<+un_eLTr6z$gD*^e|8xvp+>KC=;2ndLTH%9*WiU94yX
zp59G4^(ei=JxcG_OgmJVg;!Y$Yv1?2F=@_LXzi|J$m83Wwz;h+)aa|tk~Nl+CfnU$
zYc%BSU)Q1rqwA0*7h!>p3dED6_B
z$j*Cz(l*@jDmnR0{dzgCjD}kV47Z$Pxa9}rmVOMF68g90U+&X-^>52gYya+_#rA76
z`v=f3ukD|SaQ##N03W?ki@viRzp!2Fc4u3@VY~LPJN=<jOeiiiP+m46W2yfz8xY|)5ctz6*XTk-9FdiOn~+meU<1QL
z1btcHU!YuSG)>qLUt5HYZp!O^19b2hf?|wS(0N~J-T&6nG=4^AO+VANmazJChxnqvlHf44F$1RA9^x;O2y^IKqmP%#)FQz>Hp
zTSVCt)LzX&4Ozgj|5?g48kDd=Yvnl;@||86zqO!pP@8sdM#`E)5IRIlwfS!nu&)nk
z+wN@`{Jac6b0|cNnh4+)$_)igNS36$U_xdbfl_97LikAne*xu&Dy&CgK|z7+Kf;tD
zCiucf7PIvL)BwX}Bz5d!?YaBXQpWE@jaaHMPS7pgsm;7ECFRg+ln?h6QT}8B{C%~y
z4ZyusFupOG5tZLY0Dr2|t^)Y>n7W=3FMxB$XnFUiG?=^^z;JCw`5Ot~``RP-r>2}6
z34@x=iu8;XuuCJgiT5|WZt*mWwPDdz0o+BomfGo0gOyC!g5TV+IWA;{_Kp7Hdwzlx^jS?o)kRhxH<*(BA
z|2>D#*sHl8^e0Tf#5Yn^1m3B2c`zsOZORY32IYTJtG)JM78|a;^I%)PvR2#qU?wly
zq@73J_gce;+VXjuwEhoe@)@6MFFlmS)@sv`xA0SK>qD7ruJ+4AP1rFl<>9t$sn+M=
zEWUlNHss+fwp^R~a9dt8SKIh-CND44&LD5DmiS0p{&k^t>m!+Lj`r*$P1ptPtw-9j
zkG1uWv~RFr9V$Ac5bEneq3lde9*~i;{1QerA=gDky9Dg3OIptX3jbog_Vj?P4!_b&
zGTf4F{yPND=o;jPUDoT*P`ditDLqZ-iMiUE0g35X<^m}k5}yT<*PkYe@6u`qBsDzp
z8AJ=YHtM3EGT4M~Kdt!(CN?}wqpoB#^flF9&ipt7f|k_Okw_qMai`!RJf+R{upH{!Kv);vmlQ-10uhV5xEm)BR4G+_Xw{<
zOMJB5pyRV5p3)9HZc`>Q|KAk1;!uBGh2Um3AOiJJN(fG%k}+U!H>kCPN^ye8dUjOyVW2XAY-_cq>p3-~K6^pAtrF)cd
z%>RHe&^t$92#c|AREh~smYQe=-TTx4iqcGrb*)Lm-}p6~`&k&}nr@fCj}JguRi`2HEl9j@*uGj|H*
zh8_P=Gk5&kR(E6mI|YT&Kf=c>>fNUd4}s><_yg{zb$Bv?A6~BYc`~)(7oWo5p}-15
z`IV8(f0v;2wpQ?DTEe7BKuEXx(?K&u2Xq8VgsLYg(<`@{G+2GMx6h{9)M%uz(owh{t6*4;L`J3u-dudg(D&3
znx!sVOI7`&kdOLAyJJXOEZo*Um#&o!X^vGKNy!nqZHuhpkZw{P-726j%fcg{%HmY6?`EXsA(^Xi?+&7YDKNTJ?)3_|Em(
z`j>9t)iX5q@>|Z+AAn0s?cl~F?W33b@s-20t1q|ZM=6@6X?*4Ij`slok?N|b84xMO}jKL;q_j8>Jsgt*SoS3z4hvhWNpdotyqb6;B{2t66l?z
z<-F03|6Hm)@CMm(+M?tY^StI^`#OvZX}_M0gzp$AhHg$-UbYF>b;
zZkX8JXX)KTU*vq&E^R}>b*80iO-r4#S}JAOqwtD<4|~o;DoHZTIXTHVXK``Q&OTQoIWWh~ejFoe3wi|3%3s*ne
zXv&fkpW3M45d)lzC>71(S-R#>hl6Ap
z4pLPUagb;wTFt8CAO-t{gG`y275vd^Lt&ehYb^o^+pM7>vR5)UrE_i5K-}GBMErY(h=cBpDCP5b|7-G-_*XjlIa
z=p6}ZA-zKpEts$VU(q|HcE38I{Qm==?mi_is9F77wA&Nb9MNviSaU?Xy=u*wghZs>
z{v1k)?&?%1CED$up_FL1MH}k0+hIu7YquJ*!tHhv5`s;n-A+SRz0fpslcmUvO02@dpC%2*5^w>C;;Aqa(CU)1Gz?r%}AvyEpb?-!(?@!>2zLUqsu!5t>x+
zkLj&=af#Nv>}lt$auiI84F4;Pxy_-n7R_mn^p4mLE-BOg%Ot6?OzSY?=HND~KB9i+
zZL?@7>Suv9N7TS^NY<;LZO97O&z4Z(
zNd0_+ta|mcZ-=GGUhTejZcD9i%5&psen}Sn?44$0(f%{jv_Id8SL!3~`x;9M9w1Jp
zmD9>@TAOzp8~81+!7U@ARPh3Ld!@GK9VJA&dgisXYiQZu5yG!nW5vH^R{S+Wf42e?
zY{J)u%nA#=>@N_nZp6BZ(C$D4F);gDiUk|6%EE|Z-`Qb`C4Yffy_iu0A=QnFV
ze|!sjTWkGE=j2HzQN$M}ffFN~@P>Vo;5DAhqWgXJ@5vl@I
zZ#~qBr?l?N8?Y_f{ma|?R((ENMz5i!__?}GdwY3fc1c^Y+%Hpg_;FAjXH)?BWT$p~
z`3=c4sm|;ngXe?{e#56tTfR>q76z$*kZQl+OYx%~hoBzSMAfC>g8C@_^e*g^9{4nc
zkFC}&etLa+!7i9Q8M2B|oTV=Ij9=Qfe1+8N;LH*_I2Q1!?w9=;>JMfzhLV};VKW&^
z$;RqWTKknJnteW>+OM(Ph;ET#!gGSQB+boDN#wjWM!RKI_w>udfoq;9u%#(6=pZu%
z{NiM7_NoS{^EaT$nwuwp2UMGYu0NpdSoIo~llpxAWoRM6Z}i&h{FYuj`-@rb;w>X(
ze=2-(`xniylyl*WM)4niOIaS?op)E#l;*;BS0`#+RwwY&BeXkLCz9_DTb<}6*~M)N
zMmwt5g?99@cJtb7Q_}4w`P~-s
z+NibjO>jx3A?f7Ih@{%BU%tj^WB*HYeP!vsFV`{b1kGBXC+#m7qwQRO8=tdPYxGq%
zmXLdWH7K#(0RfldQ}=4?zG@e{>U|BX{qpMyWkHEUn;Y|!?`zGzek0ignPJEl!dUTj
zGhi(J;p-NR|2#sA-O!fLE)xI%7$B=yXK+$yP(f@
zvSe-0tF4@Sr((>o!$>dO*otqRs?}_4#3pE$Hr~fipVRKy)Jmu}a?>4r{t0c*rfjxZ
zyRvC_>}k@VDKy|=4cz)X?pEu|8EsQF0
z^;UoE??!`tUGXVVWRo*nlVkq~<;7t62ziOyo)zfZGPIf7qLa-cChg2n$<^DMC7UI~
zAd|r5ZLRt6(^{+T{rTJTw1VwT`G@ngZr={2RQ8VRC^cZmOU^$w!=YldA=Qc6=^fX{
zE+oZ#P}XY%bgHgZRL-xxy7S>=6KwKo62dyMvl(F}?s_No3nCCzm%v8BIN1dKRbgPv
z-2HFoZ>P|8c!eN4=Yw>e$Y@Asny?JN2vijk~MGj0Dg9}_FVOyu|E(kM_nn88&aB}zmgIdpfhL9
zUgn~sHaLQ;P5UlGJMvw+YCS)Tfj+jBX+Pd*jRBkd4Z}I+#C+qLZC<;W*X=gkXkGU8
za?UA)k6O0Ib_u_4j`NBbVP)3a1*tL{?q+Ml{d3<#W(2A>!8I1J8G&|Z>Uq(umG)sx
z7c&SPGm%ePkl)(>l8c%(sd>l##@h7#p^#COWrU34EKA6E`1=i**@GcXE|}oUR`Ak81*Ta_o0w6&iAB-AKTJy!J&FT;hfS4l
zv3FN(ep2%C4^1-1!yl>Z7C9JFd2nRL3$&P9{)s&r?vJL}{9|Uy&!jJAKM}3y{5WI}
z{@5(#T4OZ8_6LAp{3yDi>K_|ZaJcwmOH&TecPu_w=6552KBoMFTM>247-ThNxvI#Mpge`=y`)D&DZ2YNj%TqKuixd?#
zQ;W4~`3=RccsuZdKA{2LuZOgoj=r937WfN_92kEd4ecqGG&MV?rj`omct?O7&x
zt`+>ju^!GtmC$9`(zD0z3mtu{YGzQ{+$=)LY!2(1>3`nH^0aBk%jwwJ-6wiE=Z!)`
z6gx;Q_)g=1Wwmj@GX11lx666_qewEd^@sRDB^rb&xyM`&?pU!-K^#b$hQt>!or^4_9sL+Ik{h|tewlgt+w8eB_g;y7Ahb|~uRc&*V4?pmMCZByN#0IG)XG5pSi*ighITkio
z&I&doei3Xw`lXk1^Ej~SYOpCb*t})1dE?g*8|S5OF<2ozE|0e`t(+j3el-D1Zwp!NheECQ#jw?o
z`uSo%)A@>8nk-vdSl;!=7^;>1fAn%KBI4^zjn0i1#QzvCyyE>!e*V*3ZNsG|gj{>6
zm-Em>AX~O9G4ZBt#L<_N31jBvUM`I=zBI}0ohZm1m?*HG`BPvO{MpM{PDjPXapD&I
zf^k&*%2+7WSnGSG)Km)RF!*cDN
z0P2;d4C4$LCQa1X$;SF(#%e-TTA5T@MX2cSarULzGnM2S@X2`*xY-no^F&kBKa__kEL0X&XbIvNjfdx_KFS
zx4zB`>AxF=&1bPg&qo<2){5!Gn&PNzldl%)Q+@1Rc0#{ChNU+;Z(Reb?Ut{a`;Wik
zcEnEopD`@6(a*r4Wh&d=ZfGpOg;GZ-HEgH8G=_CcoVe4nmNc#slaA>-b!RMVA&&<{
zl>U?O-`=UKvFvg7hyGqHt7aeTLuK|4{^wG?R%TB&Jz
z6RdLVB}!pAj!+76tOHJ6ImWFulPUG(n7OXL91AE1
za=emim17cZVu$Nx2Bjd!+0^=Sd`YDtMO(VFgSzs6ON8tA)sK_|`B!GJ2O{!sAXZ^J
z?x`dHAxe$@3dKzM&s-z_B~$*#8(HN)Wy-(15u21;U;c|rF_4Va-Ek9@hMh|ltDK(`KI|Oj{|hCeochySap{v=-;!fg03J|VW(J6DOhz=R((NFn1Wu;vNo>$rl9#v
z{z6bRdSBCeg5KV=zMwjtPN^g49Lj-3$27GHI);v_gaw^KDG2&U)B1w0q|&fxi)QS>
zNHkhR_^AG=&Dex%+;j{*sqq)p|5A2+VUJ!T?60P7=M5vBP
z8s$Kw=UZB<_hKE9M$&;NRPTY7^{aOQm4<2LT&rnj5I*F1J(q1@`}Ll!7@qCaclcSF
z{#GlNoKyd!b>V5(f#pu^<7Sr97ZE-TxTrl_#kC53@O7+h+&6zM?{^k?PhVGG
z-kEfotd6`3CNYz@
z?7B!l(2+G^yA20u+^N2Z@w0}&tUWEJs){#Fr}Qwkz(=~Q3DeN-AEUg=~N
zaSP$Y2L@fw(!xg*+Zsdv&2241{~4XxjeOfNeSc?m(wtLWF!3&1@XR^Yxwc}pPfzR0
zZsQ*m=}&cKWB516^s`;r&!IV1JCjR03zu^@vSlW`puGVvY;S?D?G}cYn(#6!yret(
zCNzOD>6cmPmlvpPho|mL?Eb!(%WN5`&+dyQjNilxO8J?-?8PWI9lY?nF_js7AItKY
z$1onnRvfa*?qk`kP+xx^yAP9Ez0v(FC&gS8e>xCtY;xL^CRkIt{Moz#_v1Y^Gp6Z}
z{+(^(>-OnwAH>_d*6-7we-P8d`5)`M9%OH^VdYOf#D+-x=P87jrvEyCeHBurt2r0z
z8Zkxru7S+OOoSp6q1b|;pL>*TG~pMz7z8eNi4f4MA7ihxDf&H+v(-$~6Q5uMdCkc3
zm!H6U%2LJ+E1)3oG>+q%Z=djrl|p^mQ<#BH)Ym=5o=a28fP`c!q(9d`i?=Y*^LY@
z>|k(MJQE$T<#4&9Bn}JbD>ix+2EN>C%CmAM7QVAN-mrkPFnG-a-ew`MrX4~3hJ4l=
zl~$O~?qP-Hd-K^t%(=J}%~G7GF7N*=d!O;qOY{rRu>owFe*g1qF)Pr&c$qcTTfM-N
z8y2mcOs5~P_F9NLu=Lv%Nr6c6dv1N;3+zr|1H~~o;J_NBqj$O=%eC~U7O&L%b&L(@
zr(R%bp7915#Ua0|>m6e=V!|0Jdl`9$I>sjG{r|z5#909O2F%+jHlq%KkN&}ivd{F~
zA*>@Cu0J`16=2r;6H>gUqP+1x+4U^=4Lv-^<>sR1pZOAN
zkXD2(yF7M}56Ai(
zDls+!3`?w9iTVrcrVdqu0{i%ch$Yhf*qP_*LH)It+0ASOwSm@bgMQ{^cFJ3gwnQ6C
zaQp50u~%3>_+6)0+4Ihy=^-m6vY=Ev9P3%B`hm;LrC)v(j=4#H^EGycjntp}H!ER#
z^(+5onMeg*XD=Z2*6Zv^UQ$zj@pbkNmfFfo3)n)3XUS&lnytc@yjJfwiv67}(mx!<
zI^c3>6sw}tXx0sv`u05>R9^L
z?;0yeE;3SAjnv=AiOl8Wu!OWoPc3E-|6fVV0HesTG{b{7GD-o%=C9FHj9YM-1=$F{@
z^;45sKrb23;sBXAN#I?XB=EYuB~l-~B~oXM)FT?=+#-FxCSVOF3)uaWMe6Ix0v0z#
zqza~p)J7w9{Zw`r$`wx)7Tr9RHDep~b5q$pu@`1+gGzS+#bCexJ^2*$VyEQq~3q
z{$0u*xCZ5iQr42K(_`OeQ`lzxy|>wi#$z6C@tB7jk9pvKpYq41u~)ggfhq2uJ`uO=
zey@5=KVHVZKqxN%a0Yvc$IivcdwOD)g1lO`Qg1bjr6SDTI*WDAs|O1q(%FA0o2TGe
zPYFE$zLb2qn|faVWESg$w@sXw#d<`Y{{shB9O=mIFdJ2JUVnZzdkhEm9EbpTsHy7R
z$ay_Gx*dmcfZmQ;$3soW-qp0(?b7w6ci8~Gd%6DdyBI)Rr6W(K&u@H}eZZp2U!23@
z8LgTY&1F6LqA~imxh#)Y882u-I>=u3F
z0(Pr&!f>?lX8O3T39WbqweJLd>(&HkV+51I4)<`H-ekgo1+^qRhCCv)rF7PFgzZ__av
z?!CdIqBl^#oQqRR+z1gmImRdLH%T
zeZ;}r-bl?fD7W)aZ>4UUkH^)yx2=&HTP-l#c!GlM>2)I6+C#xs{b8$MlxL)t7$UVY
z3Vwo!3o^Mz>eCO0QL3efdN>@w6|fdYYRVizrMZXtGxg|x0n0I9t9})*Y>yfwt*03J
zH1nwFe^
zYS=EJtJ`3)RTp+}8L1zC7PfO5G^ZJUgtx(wcbqpk*ga|tQXd-CBcUcKInAgaPN(xE
zb+17b8~&ulsP+1+Pgwe+qn0BoDC&>&Zl82@JudRjM7-Hj^zC+eYNDqUvydPkia#1?
zcm}Jl7>{tqBZX*J@tyg&kTf_z(Jnp|?IenJhriPsE@eNnca8CI%VjK=W7PZT1Xs2`
zVj0VcL7c#o&4U97LyEJv{@F5YS$*i%<0iUr3dpUuUe0c&YwZe_N!L})k}~v)1#wbSWK~A8a7I&P
z9gbu<%&emK!UbV>18rd>4?`ePa(g6;Iu#P^TqFylA(2)3zV0c9Zlw$L*5$0R_t+sL
z*g4eFh2;;HvmT5eJE+gBVC}vjU5i
z$P@N!`x#rsM(JlhV|&@B`i7OPr`tG0ERWWcSFt>I$xe&_8B%>$vHrnO%0UwW%pDnK
zFBGgR=p`)}N!QOWrgvw!uFYRb_!#Zi$DfD>MI+Pt`JenGCJb$^0gU<#0F0mO7pO%z
zB!Tq4pR-)|FZ7BbVClm@XLn)XyytUFT*v5let~{+jQ;Bv7=ae)H?78me60TLYW8e0
zToDsMyb6$B^M@}9(?F@WetI=ifD&7YaoShqZ7b0&cbGB(@BYJBSV&+alNkM1A?1f8
z!x2y;w+0PZNb>JBSh_i`&sxJC!#qp3uVuHe-^=@~g*CC;`0P41i0#zBS;yWm#yk!&
z=5e4K#{WL$Pp)TEIXk9b{+doB4MHf(PHPdAg{SrCt?X8|N56Y326uaO
zZ7cf}ql??Nv1h{WShI~~#MN!an8E6wFJswy+wCj`Z!6T}ma`NmjP#~yjjut~>*wl%k$V4~tTcE9
zukP|oUeOP7bu&ds$%}q43OHk_QgRe;DUdC?70YVuBzLN_QOHEjWUTFHxU
z(2dl30~5j1fz&DLIwUW;LBcGpMC_KR8zk7VpivP0pcBbYje_V0iN)teiuysMek&A$
zif)jLHx+^mx{}=yt)u6(G_Awby*0CuFww5qVI!3U-X4U
zW+Kk@0~P%t0lzaeM1M%BUulFOQFlm!j@vG@M|Vi&KA$FZLU%~1vi;~)B^*8%wpeNi
zgYJ;1OfUo$-62=sSuiX}sOS&L+%pde_UI3Z#kXkufT2Gm?XMgY>_vCT)kz0L3f&=L
zjxfrhJ0uoc4XdF$q|{`i9J<43q;^(`a_9~{NR?web{ANnKP1$*jJg;7Ay+RL`XVk9
z`|@9ehLXW>eu>qF+nT!%MHrs)fJvM7Dg9@aAW
z9vOp^E6_-)!03SC3T~uyqyL8|(3oo_3B$ec1j76dj~5^XPoS=Pj?w+Y6R6yIqpOA|
z5bV!tF{FVfd~HyG7f}8&q7dZd24WzK*H(cb+~6c4H16O5MC?nW5IleexQ7k4@Bk{a
z#9#{#_@1gJJQ%9R^Cd{3_KDHz--U{(eJZoo7{;LXNsZvH#bQ*0>L=7a1~YRsG~5`T
zirU9mnOu=m?UPtD@nT1$Q2QgX2!cCN^OE{0-Uft}IS`sXQ&`Cy2+hO`@&H54W2r^m
z_%1N-=U!C37s)B_h=S%w=mT;f@m^fgdWn%j%`Yex<7d=539;yiAdgxActK)&gGQSE{DQ>hbpNoUQDY0C*4GkRpx?EZeZ|J1q4e3uz{Pk(4jLV9d^B
zOw$B>&OY`4t+<{!%Tgt*@93Ui*z=U}<{^v=P9D{l)UbItBGmn7%)ZC#BlokmYy!!1
zhocl4PSic$V^QOh{=)YHUiLlnw=mfiNcn*%md)q>PhHu^l6CnIyF0`m;)kpi<}Zx$^C5i>@KBS&4D(@z
zN&0O+usIPS(tnJYsY;k>_K#sR1?hOLpGKB6sjh`2kfj!)Ms?uZ@#0=ojVV8tf&$T2
z`6(eOz$=hchIiZ|=D><2v}%Zh{5Yc-t3>aY>PYyxdCRAT?nE6<#{>5R~yiBh-
z%$^SETp6)By~9b^{NW?8d5J#!2#h^h|CBDJ`f<9H=|hgPB)#2FEGeS;|EB#%7%QHy
z|5y8mWx4$*WEnwh?+KZ|T9`le82cNV%L~Uaj1tY|4#z!%}N
zY0OuVmiq5AFv=Kx<{37fy`^`qMQ3H2U>4*WuP>@)og>;OpABh$hhqug6ZNvQ7P&7k
z49oTUFJO%H>|d;mt4HYUv=Jf-e+9yrYW>4s**Mzf=y(oqs4l3%MDa`gnR6_|NzY+o
z3C7>l+d#klnC#X+JjddL^w1@uEyB8lC`^x48eyF#pY+fqQZDL=(M{rUL!=PZ>6u5|
zp)bd7gGzG}q!7`G0zJKm6rwt%=n+MvL|=~1l=^ZJ*@>Q*^y_mzVR7ZR|Hl5wf(z()
ztE9e1mnxh39nK)rs4|J7x{@!pW^mUqU&b1>XCZ$7!+6(w0bWYv#MY5h?T(c!S!0+0
zJM-AB0T-6sorx9k(;2JExOSFFGRCot|KQ7K}esci#_t1SXn|BW&8ubV2L7>v&
z8SScac`t7B*fRl15M;DgDa|J^q75Phm)oPf6y|f87WoZ6_7412gWZd`fcQU%=M&gj
z!eyYJUKUmW8bqr$XyH}F>M$!Igc2Yk3J7NZD;auVhp3DSUBnt_2VRO>1+q-0D?}rD
zgt9DxvV;WzgH(kqfKFi{VIoz)0-g?hK9gWdtUnrJ$gdG7J0Q`^yPS$4%t4~%CJc(f
z-%$#|7T;+4gpNf@Y}?6?BBv8&Z)fyIDZ@<89I1*0J4&s?)Q(a4V<_oCoFwxaOlm2+
z_8LQ6pzI6C7L`GZ!{F7@AB9{jkqU{>^^i>2Jj?H+i-H%!In|VWY{m#}QUdtJlZfKDOLMySb=TGPe`+Bw%ZUt;T=yXYkZmVLC}=_LfI<=96X&Ug*e4?Z5BSpLct
zEID8g)@TS_i@m{SMb9d_32Lj6qZTwi(o@zmjG1+GxCJ_JN1b!p2lkz
z=nGtYFh5hJr?~l}{IeqcbvHkC4c%9wc%v5k=_lG0%;y-zXSj&kZHVgZN59q3P(Fao
zx*^z3cleXkqZN8(6z|6ut=HqCd8Vs^_M%Vj)H_G>*6gJIY&7qcT2FFQwpYEfJ}inS
z2Ei-VW22dm-B>h_H>M_bRV;AFQy=C4OsrbR)6~R?lvsC;r#_6zi7keB>cb?Y#HK_%
z^=IS`Iitm+oS4c)5dTI<
zdw>*vwlhJIjq$0jdHEn3&|LBIX2~YE@Y7HRQ-WpaAI0zNIoO-0)FQH5saUlq8zf0rXz6d|)r5!c><(DN;>XPe`Jw5<4gH0*0zOh}A}aJUJT;H^eT7k-|iB
zjea_o2SRd-DhkUTsiM*EhO0=(eWT3NnwC-~4d=tv5iS|gxD1aV@aMwTHwlFmi#O~L
zvA#((j8NbD7|c(Mc1rLjB6TtqeLDs5Po96>)A>CNP`R1vU@7&s_oW)qOHSylhEET
zlxHRLhd5tfs}E1%ZFtc;`tlUM=YdUEar7WnzQ~j)`45^9@m$+drz#D(!8WTuqP?Wf
z!Z-`R9Kr}kfHyM)5)*9ZM=3m;&t(BT-Y%YTx62pxGpRf|HB{zY{W5x^G~UyD32c>@
zdniToWK4|Hcst)^%8}H`U@BkM7pL(D6fd=ToUOb47Si>Phl|qYC3Pw(jcgoVd~mSz9LAf2eA^6XC|^aG>2UaIIP*DArc>UZ;VOMM>OQ^<3I;P>jMGQFxH&r7N~ijx}!Jm3)P7_`NoAXn?&4Bk2j
z!5m6UlFtEcMXHl=fqoNQ;oFI!+ibqufu5w=L7k_3m5M{K8p=Cp&C9jD6)KWz)6Cn>
zL{Cyn(A7KSVVH{JxdXJH@*}#d5%0mO_1hZpyKtG&h>yc1r!jBaa0;Cb^2ivsN_ng>
zH9#kHT(Tog=$_MpvnLHbr%QE@R?o(#w)GXf7Kups6W?_!CbG`mx7QZzOV>b!&
zr+hgNi(fIF@$;7SP896PFV#QK;=R-VBOE&VoA6sfG`|VYz@-QmeqyUWuL-{y$&*c>
zHnliQ;m-mHJmAhM^@>A*<(m{r{
zdxY~`pU2ao#ozMyCajP6Tk{OpaLh@lZS-u-+ad2YYu=33{I=F#BeIEBL<4tqKI#v1
z*+QcVgJ#ohV+?68ja{{E94(O*~j?Ks4>rU!4TpH}%uBdc4CU7=?a`HP~l3ixak
zdi*B%zbMpAueyo*%#!7oZ{o8V0PpmK%tv(3Eoed`^>(*#e>heC<}I)-lACVDuHMM<
zlehAp7r3NOC(r4d}S`|Vz)X(0|Q@}R5F9xGBe=rf`|?)*mBO69yhDh>?|pfq
zA$VE`w_5NKw6n{YuHaW0)eGCuuum}cRzJr~k7K?OU=(+;F;q>zR7T;lv|4}g4&E5=
z($?R&gLeU`6?edhC-f6{@IKBG;K|4JF8z2fe+?FE)cyEEDRT&)MQ>?Ru|t8k`^YEB
z%IW&*e!Qo%)F?-0*8~0eo3yn#yFWk2__`V8`dw(^4ZdE?xj*O-KfqT(uZpH$;FA~V
zSMKI%IZ3=b{n2u~96%qi-41u=@=0t`(?lPK5&)@T0UQ##ho|tWjrxQ4@bn;7XO_@`
zqJrTCZm}ZEXq^@z0fT@JJPVM@jU}Q!`6_{j!(>>#sK7lTZeNTW1*eIr5bgzcNuA4S
zzJfzl35OixO6GInsa1i44XB~Q1%oKDx%vD=2jH~dPvBrHD
zhb6Zek}FBne_(up1%GV!azWOLWx%PI03x-jY^>pn)fI=HaUX9MG-_(@c(N)Pjp|Zx
z$M*SP6u_~X(9#MMs~X~MEgt|in;&F;7hMlCzXPq<^fsd=iH?hei;T7mR+~iyhFIZs
zq^YPhII&O@M#3$5tzyr$rKmJEEfr2Fd`!hXj+kS3jf3)rsI(wwhm2&;kKql5)GpPI
zUpo{FLg7}a9+vWHxQGU-7TC?(AWmwA?rmyCo%@enN>YajXYC*eeE
zYhsimUtSgh<=Vaz_B2XiOk5`wtg$4iA}cWrR@WK+5#~jJDgtb-2gtQuwUp1b6$tMO
zu`--0l-geh^LrzOL)D05xwZp!fCuZO4%JEhP$%`HB^BT~QuiJ2bw)tD|X4H}*eYNl#dcU_u&W~-
z|Iwie;U0B8%4c<{B)!ky5vu>Bm3L}5m`~tWXM57C#6r$SZ*)u)&W5RklU9l3ULm;g
zW0@(yXc)qPLRg|jPL`Py^4n0mg`41HlhJMwA}AZD8BHt@L>#|BJ93x9DBNiCcJ(CY
zKY!713`r@(h^b%5{fV+VADPRk>CtMa$k~pZa$)vtj&UFwb4`#LPA=d@Ay=kUuI*>j
z)JEzzONv|+9T!)z+Xqyd_}B%DT-#B*{1LG*3LUo;qWuY@5ea6sA{7jyp0=VEU^QN}
zH$o+DCDb4$TN;F*QEI4Ya3i$2LMUrwGz?={llCNtxJU&Nv(&f#vlC6a#)+&x
zREO+B1Q9s3!348Bvu`jAG1-y=$8jN!L5lZv1L0VQ>ZyCDVMO*w|{X<(yfp*1R4KWFMk#}3VLb+8?S
z5ir^jZtG*{#~3|LypmK?hub@|3ZW!g#u9i_e+=V{bTHSp1Az$D@|i#_G5S3~X}n>w
z!ng@(`C~@Ip~=+qsm@*ymaD1a;={;I#Xx2!F~gNafoD&oO-baG3Yx^3Rx-k5)yj7?
zIcQQX6a8tFwLFT^6po4CH!Hz!oN;VRcwzmB5|xG$VI7VEtj;o204%}_*$2J(@()6T
zxk7`~TWGI{asBN9TnVC32fmD}-|WL=Ug)5xd7?5JJ
zT?GH^@!_5i_e8_NX^;xvR0|Y&F$VX>_)*kWp)cSTBPwCvA|DMwg=>TKF~rue%OU~I
z(5d(oDPQ41?BxWU@~6VGn-|jnOd>Fxp8|hA4uraK##tR!P>|XWYGVq8I}FCufY_LZ
zdy_9wlcmiIAs%`PM6$4e=(;?#F(oKtxk~amuxDVR1|_*1LE4}%`mlq>h#zVOxI7
zRrZtL9vhFhtFdBT(lpWsTOuO{KZ7lh$D(QC*T#TX$+6#w_DvZnxT!@Y`|;SvV>}R~jccInW_~+j
z0ruDW7#Zkzyzqc$^Ziak&I
z8c}~sDuSG-qb(aVNAXx~1^fg91`Z_Z9`aaaMFb1Lj*?ALs91_>c{~hm6?#?-b6B(q
z>THyB#4okQ;26o~WJWU~;`C$KibC7Q?N8G&P;npNJrTPJ6mU?f1GZaywA8EMoWu5UW`VRfD-7@ZFG<5
z9yS>d*g@k`m?%doVSy~<-$jpNX7;e%rDWkR^UXl!-~I*unf?H^Nle_tObE*;GzC&6
zzdq|p?h5`VpN#T|?!wY8V10|v;#652dOB)rG`d9evvjgd#o7e0Z?9_`gZ^<9bQMbPhJhnqS@k%bsLZ=@^rK9wZp5p!v5%ueIR^d289q@K8!4pD%
z1k$fQ#Z!ZB92%i&^*)mr*5_AZ7bUa6z74!u%J4&*VD)P}2Pz
zLp_j|Xv@VMj{2BhFo-uuiFh1!G+smtzt?N?*z}tQVSzCM#VPvG;0bLFB8j-8DDr9Q
z*z*`VLyW6PezlMm(mFG$U$|g_Pi;oiMbUE>_&Ojz4ND&e9$rl@5b2l=D4^NEF+kb~
zc2ZF26OO0=kmKM;40%(!2tVW}*o7N{FQpH5NTI0&c=y2pMXdPX)N4QCB*O4;R@5l9
z64{J%$o7G6u5IID3?WD{csXYBqA4YZunK%YLaWb3G*q}gJD+D*Y(t7hx=&-E6Yjt3
z0!_{0G3=qq0{tn`B#(&coCyO8e~zY@9*@`{*9s3tChfL2!!Zd|Chy)z0C6x3<08V1
z1bKRXEM9+bFjt;%BkEJYGSqTgqdOhJa{I+bicrh%f+%nfHbnHWU=mHC9%Y`zdpO)L
zQ#i-_ILOk218#BSrXN0Z>jwt&mT|(o(b$kRq^(-4H++^i|Bpt(*k5e7Sbe*$rR?wl
zRAfu42#AW2WUeh6uj^H0C6!KLDF`P{BaY!PcuTkdNm!5GGnRTm8^v+18*H@yf|k`1
zxV7eJk}3;iabgahe1R4k4V#54ks3InMm9eOAkG$M87Y*fp|qkn_1x!pli+p{Co8EK
z`W$MSX#3VEyUrZonRBdq(i+14i0Tz{tN&>rpm`X{R@oHPG487tQkcutNQA<5NeQGy
z5Y>kb!Wkbs1p})9<_JBkGk_NB!L}X*ZhwG+{jrJM--K?!#Ghq2G9|Y|fJ{1w<-QAE
zi34Cck~~rgDCsCUvJhhdL;J)=^d`s6TYXYG0h
z0#8Y&A%zbQtr;B^3SpA2+C2U^#jgM31>TUI(#OBRTeyc|jU3~318wNBCyNDJV>Kdu0$QN*s?mxK2^uWw5vf~d^X>XZ3IxnIwQJbLYxHA^VnfY
zv~QfmQ=B;cScj6ncL)!}qA$Ko)qtVT1uRqP(f{Pl{~zMs2HdKu+W+5c&Are2;Vj?)
z2M#ZLZ%`pY@g+4iV^OK3mG*Ya-@RpBe`R1@EqmAYQ9-eMAw?&e<Irgo|+FTy@M=A;7~>4g5y>#w$n%}yc4_Rh%e
zBpzj*y9K6&aaM>CWN1RT!
znHA8VPgWak3bZ4uq~v@R&Y^X;fU4!VAH&raKR#fbN2j(~=PRYTQCcp?O6`PRG;VyoyyQ
zj_fD|eKWQdn9$yBOc|2uJ0F!+_
zI|YW%TMcx1gD%cmTKf`O0v)e1-q$c#Y|B-!S3%BgjfkCxfq1tu4E*7rkV?86-ftJV
zNdRikID8@QFKE;rxW8STTjze|3E2axEaqPPAL^-*3uuH|_=2PQ2*_FsJXFFgNbd9x
ze*%lEshyX^^DuYH8Ua1smFLgMd46%`U}-S@QQgRdoPpy8o3`T0;8lm1enQPy-avg(
zo;StLT5L3?wutL>9}cOWf0=G!@K3XzM_<@{n2EtLIXXU4DD
z8>yB&<)Ae}eQ~!aZMyLefna)~4BzegRrS}M8PAHQ_y^96-%txKU>e#Yy)oTz@!54>
z@RZC0EKO*zqpq##tLs&G?#SE?{>bOgs=L9d75v|He9(#!#<$dNx>$Fxul!&@!J3P?
z-#02#VWFN1Qv-H`liK*1WsB^07}m^b{@%~V`_}pcPZKZ5T+$)}VcXqcmMkvrL%GEw
z=tmHcLn9Pn?X_tg`ATzK$Fkn8l+j}=jBvPeh?rsLLTTebWn9CMu>;~12?C`1+sv07ld4pQtJtyOb?%o6LNh1=y&g+414
zB_xXyI%+|j_%SlD7@c-ZW%4&D8~&+<+LmtSej{z@x%Hkqx49ZS}DButFBNgBO_^i0U#I%z<5WVW1McdH|(8*3T#;+w3G9pjL+O?e20|7mtr%j+)FSE)!ABP5@DC#W@aItC2(#)
zg+Asv=jP1NINDuj>y=T;hpB9aB;0+xRxzPY7?ULICC|bjvkDhERRzU0@I0v^llF9ElC*F6a&HHRs*dlFGVI1;2N>i~Z
z7d0-H-J|i;a*TiL!uWvcCgD)EaTGHd)4?6ILCb(BM^t~hh4{+kAI{&jFrG5aG>OnI
z8%xoy*+zp+?Yd+96SUhk%Ix@b6~5a(KYLL;X=+p5o<`kM8z52j)J5?=QIEfR5f)Q}
zSBEg@!+$@Et>==@q>&Iglw&~ROW_z+2q^g?JOe74hDG+(I;|lVz5&o=6x9ST{)bBc
zh5>pdeb3C}?T91^?c!l}6Y4z&m7E%OpELc^-d&NjpR`
zv`fA!oh-~1YRhDLU%4k4;M3wC6WxNP+=EEAud$ep&ww_wd8G%(WV&!&76q?t(C5CH
z{g?=VZ_DR5=|u&NNQ*qO`EYEo)Ub`qW2He?`;sLww~xv*ub!P8A{tMzHE1ntRoE@J
z(vkYlKHoR4hK|)S=dJe;{2mOnTq>$lEc<+mYl@WP(epMH0>MH$dQmtH}rV4niInF*<8D$FkIle2Ug2
zoBQDw3-<02Hn;$$N-5@fW*llVvA$%e!eDv5oPRrm7j`~*J>GG4S)%=w8a(|?_>)E>
zmzwZr=+mtJ0_Y>+JD`u2c1B+fh#~6k@b^36ujfVZ*Ni?z=wwX#!sr9H$Rv;hZLThw24eKnoyf%WI
zV2y4{?3R1v_cpG@Y#++s4_-kQRUh|ry
z*?2ciGQ-awp)|f?c0X4H9BblTvIurYD=u)w3fkI3`t2*(Ia>PM?lGu%jiDH)}g2YZ{n|Ml!f8?xG;bAg3K}jONLgd
zL=($hFyJVbzZVv>`Lnyowys(wgdOE$QchRk#&nC;AoQ6q9>5qY5K8&Lw5dz_lHFlN
z?26}Hq&Q7SQUP|)!?%lvdm9Q1Y|_wi;me`uk87Iqc18coVBk6^i?R*=$Lx-QGq5u1
zP$XU6tucHeJc^Uo;wpp>ajMg!CH6>2%kd?OTxDx7nVp6D!B8<-MB-%jgZ{Gh2tR@y
zIM(ks&^xBK$fn7SnV1g>jpVyl{9!dhQ&g=S?$O{jBT?)(e
z99p>6P(H>Ysd|$02nN&I8$)Cl%}+|1sR7~#NgLQf7aJ8)31AASK#oFC4})^J11*q{
z7aL|FM)94BPn#b9Yo#Zx5!({afkLh2o)8Q`V_NWT&Go}>#o0lIWO02cb6&ReW3bG1
zRmh%knpnbeqLy8fdzut1qpTvi3YF1JQOKIL5!dCkTcbzJUUFhqx&UKzxlds9BS1#y
z5R6amf$g--IJ%JHsFVg&NLUaoj}3<42xa+?D
zMSXc;#wF*&_Oj?|sUl=QtshIet3{ox!tFqJ_Lv9A#xR>)0OVyJw5-Uq)|Ml?aU>aA
zmy4E0VeqUG4a|MPCj+RTQ~HN=_iG}({5jb(QOO@OKaX=0Ph
zlPOju(xa@Us^kKoo>f@bt@SJe6XB}q9Z?$#TZGK8H%_tA0VcCY;A0eHtX7^3CQeUl
zA3cpaGD_JxrVuj5>4r9viEHw=5pz~1iMGInP>2PGMM~mjgm!44N|OkRazNt?!FpjG
z)(oZ1utujVbR&lqBnsLZPj?G0QO=4cv(6ZMg)ot%Xp4)ebrV*h34^PQYrOX(t{<
zW!m5en`7vXSuJe{ijl;*$oJe{9(mNejD_p`LF^r$1y@5j!WgArY*lH-DmE4iH=sT?
z)Z-REEfv_EfK3F*t1crb2v7t^OQqfBzi}+B!V8#@K=?dCPA>t({|$tbb_1dqp%X@E
z0#R`61fmqwVF^wggqX7m?KShHH|JdH|}
zHP;5nuZh}>c0`Y;ZIN)g#L!S0%TU%7wcAp{oS8FrCbnh;n5EJJeOUbqvaU{RSe?)Ihl81{;INsy&;PkG8(x
z`{WcIgd=@Z09pC9c&53fGl*;lXmRq@+yMgq>09VPc|vkIcE34BaN*QftwkjNre#Ds
z{ntV*Hh=^S5-kJht~@_E8x^pS-bW}b4lCTZ3WSFsXby666_X{#L&WBTQMkw+Ctt|1
z#k|<~mhzU1YV$_d8N!n+V#`?@2hS=Hs-^O3MK79LL074RJfxY(f9OggcP{atyE2|M
z?h=SrFDHns3pe`Yi^%GYYV$@%>x1>Xfp7f-R^)bA4nm&0)3NdhN;fD&nP(k`*fxDUWweF)kj=0Dv*juMyuT=iBYA(5`
zCLc~O=x0_s+W~k_ve4075uB*Efi;BuWqc5En^1yjJPYq+;3caKc?M*1lkNvX&bWX#
zt;QCJKoF>=Um-k52`?)tfd?5wiaV{(tW_21KkKdIA_K+w0Y=kDLqgxzZJ9-88*5Y8
zZH4j#X%vPC)~Jw0!YDUdG|p0p2Gs&i5iZG9l-xJEiw~+UyYx|=111Y8?4Q}SCn(jd~D1z>6$tpq_a2R)5%
zzpm2B;OvC+BW)I#lt+uLJ7`gQn-nn7DdhG;Cv+Zt!l?MDK}`SG$XefKU=Iem@B^$)
zj}OtG!->y)uK-_JkE~RcWZx{}&>DMkNMqai)yFQ8eL>ZuRT0lpWw`LxT4$lM>Q*P4
z45QBpE7px&8(%1$nTi3@0sX=UpAl%Wlwta$3tq|O2i6?9fcP?YZcp_nkBwz5Laimb
z30ZKx+{N#3n-E!dcj`2@`CeQNBwUxqLF}c@@S+ZIvwvu9q<#cVpg%xY9txgnmw{H{@
z)M!sFQskhRL?wRF{q@#;91-~-q7k0WPkv}ME#8$+@PnbGnnF*Ok6>Tv$bfl7q4O%4
zevgS!%PtZ0r=(PXVD59VDvljU!YHzEd@7lP29JQ6XN#-_xF<(zdK-;?%O%Xw%GBB@
z>0QS(gS?jw5Qm`CxZQd;m0cn+l&BTh{Sn-Z{NNFOrp!Af-vJabew6bl6CqE^YeY{a
zzqYbMyV6#{u&fPB5*NRtUGlwTYpWpnUEI3#e)?Vr*xKHOlB2F~4MR$coLxDxGVw+|
z-Wug>Vx%yodS4ko8fT;iuL22`gwQ
zWRw{8``ulnRiHCr$szx#h~8%UT1fX3cL267%*^0X6MViY$!3j$Ugh4{L(1v(+v;3o
zJ{`l-(juvq5N}DomUKZk`{|jLDtz%q53I=I?oKk!dKC~`;XIBCQ=FT_?2A1pz1dXK
zKD17qEn_Nm5k~+BDA=mdNKiMj+^?{C=0TIUSj4bernXNYYbskKs-S^mUvnu`y5l(n
zb^Gd`lSliR%j3@41!7cTc2=Q4g>(rUM#(-Z7mo>X++aftWGwEI?@~Hf9E0+NTqRZl
zmPeGsR{226t6=e7dtsbtQJ)1p#U`r=lPGeDX$nbDhajtBCsB?D9J(jY%Ei-}V@zzwSY*4=BXRupz&i2iHM0dv
zHaD6TNJuTCuEpj~EDlWX4zs(3%xWU5+ajDpx5-c@&o*A>{~iC^IM?VK@mSV6{y*$%DeLP??y7sY`U-UPwU+f%(uf%lKWKkqM)c{rM^%y-
zmSEUk2v6i8@%+e{1g2}aiSV(>PtQ3{s-BT<3RyYGW=yvYuAxtNWH)W%WV8$3`_pzeqvcRhVvZ~=t&1b9Vg$3eD+`+A`ruj7x5$G5Oyp8Bb~gD#X}bB
znTbq)^%}O3?z<@-QL}IwYqQZvYw$KVwFNIElBP;{;G50;kewe(vO0=rJd$dJFE(_d
zN1d>&&a4Xzb+JN^HG@)Kqpp%a9hLy+&3?Q}mQq15TQN?rl13C34&a*wU{WK1Z)GFj
z{I=y0^D`avWz&E~tl@|tor^$jBLcHeP(6o|K^hojkOnp^6-vWWfrYBre`O6Gkzt6S
z=65B6Xsu7aBqFF2J?aERpcDF|E`VP(4jE{utHy$DP!}^gBu#WsxtBqQy2HONhj$%7
zojVP>S};y>;th0HOgb;bM(Nqj$RR{V8rh194IwAiw1W~l%B6e_piK_a>I!IPDx`Vn
z#N_PtuQdQOE|r!pT(%#yw~1Di^&nc|B?eAALq`Kg;yN2P;4AiLEpwdy9uRh&@
z<~&U&10g`HPh^eShwOWi<-eQMHehX;!eD47A-P(0AwUp65p}J3Z#uKyw1?gRm%1@d
ztL!Z*iC!$RAK>An`}0b@3Px8U@^}cEU=Xozfur75;KB$p^K%3dV}iGnW!*}Oc%CVf
zcu8)^VXxUg41_kurQR)jL5poH;?|5cw%FNLx7HAcVQ$W?H706IF?$?lT&sh%Mz;JA
zYt00ytTi0ijc;ckFZmVHxHEcDUoMV$OnVq(ivYOdxObol&Y3=21`E?}Goa{6f
z)e$#=1(HQ_ROuqX#e}}xqtZ|^iO)&2niIeu)+6qPJBk^WSwQ5
z>wGO+7TNQc-m5z6G;t1V${c0)KkM)*7~VS*RFmb)C8*}&5n?jvX|pgr))YpMHTz~g
zuDmXuQfq$x(mPJ*9g#K`61-8SSOtgJkDWXw9DE%dC;01RX);P|)yIr7nPb9?tg;YP
zR+Hn};^c79;=h}md6X=OM@o$XaF&^WiJRa&?goX1k6HEDMtt@a!?j-
zBceYGL6yK(Iz0m!7&5iFX8Ly?f{HE-Fic=C5~RH|P$qHEM!~#C?DxSXO*G8M{~z^F(W*dLjrxyxrr0BSdMkWM>~2Jkh;W
z$pa&)2Ug^CKop8b!DE->w}v2w95gSYq-$?u@Ar|(XVjq=z+!OqK`((7q#-{Vp}a2@
zj|auIT|b`TjO7-?h&i;u5ReCfOz&7Vq>E)CNTChLnj{2vVK!;$9B(7ikc{?Dp~|GY
zBCH5P<|q4kQ3^Db`8AoJCfS?9ywaI=#;*dIeaEfjlM+~&V?GYl*}{{II$H7-2@1eL
zvX8BW2f05^j+hKVl6)qD`Q8xQtSz(68rBz2tj{vcEM9y(%~WjOvN89Tz@p=CPd`GeGR~QnJ}+yJ=|US~jzHnb?M)Wd~4ow1gnVi6y~+y(_?QTF@mM
z3`L+ZQ`^8J#54dy)Mdu5Y+Q+rdW`rktIbDnrGO9kuZ`g4o%Qdkj2I?UHCxWiqH>R9
z4Et3=$ZVYHgVG8q)RD<6Dfv2$R)zhdtOW!NW8nm`Wl^7qI3J4H=SCyFAo)#+gD`uA$xVJ^MHTU-K&?4GVG#
zntG1+rZQUUL{MYk3x?UC@@+`RZP}%4#hx+7ougrrIs`
zXi5HvCmZa^GIRcirqNjjnE}#Y-=O#@3+m?7ghyP!sFLaiiU-HbRb;bOULKm$%4534
zp0I0k9g3~?Rv)@bnN{5=YVm>!
zC)S5J?$ohSBv_UC;s@`vs7AvfYgN*d~?l3m3uCBMD(3CR0bxmBU0x#W1
zfuJ&RLa1omyZ}IX!s*BcR9S#Prl%52gHIZTq}HK3S+`<;#Nc{CWkF$F_<>K4@!qtX
zq7;0kFZ0h-d<$u^N}1-kl1l8NC@iy!3wlhedLlj6Mv|&6UIL?-rbwEGz?iDuG(6`l
zHYg^B67!SGpu)bbsolXn;56}TEBXo~;jM3Cy`oP(oMmsf`ZGr-Hz(5(4$$t4W7-*Mgp`~EpfS&X@
zwG%hmA?@V!H2E0P5TT&_=>4U}B9QXz=wVtE1DU3uo^_7@{$25Y!@zycIYZ%w7lvuE
z0pks-(*)ygbGUZ~aWa&yV@V)qmBN(Iw2|?6#
z6CgWS*@4Z(ZPARv+D?MIKPguOV|;}4K(RBh5y?Wmw3J+FEX`st<}Cpr67&SthmcXm
z7Xt?ifk4Z2$HGXPI%m#CF2)r3b*&U2JAPF_Y?
zSxJ3%tiXMU71Oexxs`&<)UcUVVEd2I3#$0B_T>(X7IQ>oY9ehlI10K#D6LhNF*ANa7bS+Jw@Pz_;2+IWE)cCPlA~x}r#5vw?%>6QDvpI!e5)Jnj4|2ZI++6*4?SJ
zusR&w?V&B6iCb$gon1#5w}Pve$K_pCtr;zBDynlo%4McfIV-nRvrG18O_*=OG
z@HzPAHI^D%JYHohY3gfcEaqJ_d>zSVvCW`~&sh4^hcy6WeKWQJh(|XJ!1m5U4d@J|
za#v*4WlYGUWM7v)Y%%~FLkwI16p0l@G-9V*r7IyX%eac?35ifE3`?Tv8mN-75cp1E
zE}~;5DL8q=79W}*-C0y%HB#LSm;&h7jI4el1V$H7+bmvP0rB|kkd&nJBV*5_EBi*P
zh+5=@Y<2dX=eip~{}yoD9IrRJs~6zX^)xjBVP_J6ijtuBp>j)tZqjvvX>)6J8x*^Z
z+_GU-SEa>&BcQ6a4@*P
z`bY7^8WJ4cS}WI_?tokmMb@$(!qErEVdE!sM5R+j=@SSUg*Ie|gj7rlvg0^hiXCKo
z1}kBVRVK}#EoiIS+q~-VtyZS8kC1Ca!6P%Ij7a7(4H;E)Y`Du@!po#G4W{msKd`a~
zElLcV4{65ybgyiCtF1tw{aZzSHf|*ZgB!vLw%g4}0#EIRg|=;WgMtxsb$O?GqJ*Ek
zEi9Df7qX3`DcejXprKqpjD-=F>BWKsiN-HkHUnYGp?Lk!m2UC{c?0e!Bxzjb7uWg8
z)mgr|Nsz2Sn7K4%rGMp2g<2XV9|*K%57QT_*bvd_->KMCZ!Y`Yo>`t&-eYdztqMG>
z@?ekhGC5!%CkQk<;>vwSf$!EPug8-bU60XDkk>J-8d{o84toX*p$TXqo5em5HzwhEkS{n8JM0p6WQp$xF4CY@j~D7K)i>pvmS`|
zsSOq3(llOnp`%Pe6_QPrnhf+k>BcyDxt+F75+*hs*5R4Sas!
zLH6_NVa+4G<=ahr`mD--usevYfYHmaC($ZWxj+OH9EvDbtTX`$DWey(gQG#xPG+gC
z?0~DfU0RKEuBCi!cdLAc?aWiMMx2+q7tBpica&Ugz`V%vUI0p;&zv?Ib(xQ(IH#*~
z&!B{K%et@yk=F7^X)sFgWX>vbQ7EsrLF?73ApgjRG2G|dEZMk+nIQ+qz_=iI7V{a@
z>85QU`b<{{<1y=xwDl}Xps{8I7z*@r{b5UdffZJn+-TO9kf@9k#c*4=X^(&RPf_2Q
z)8rZ?Vq9FBMT~2yYj+@sN*wsZ=v
zWzLeY;~#S$rVU`T39w{?YmyIAO^(cL_U{7s`IZ7V1>0D564Z^l+
zYrcai0pHq*Q~(EjEboA5IH_
z%t$#BtH3o1H?O8@5elI+bwy>QNqaF=Nb1+|>HM08Y!P>b6$1=cI>@JCqXrrHk%z_L
zipoU+smD-U+}@Zq^DTe?gVBKbsHC;@!X<>nB!t8eRG80P+8Zd_GLxd_9KkS}oX1u8IJcg01$Y8@pZ)v~smx;x{@
zCH2~p_RTxuHElUZI-fSA3+t9$*_tKZ4a5l&!E{|#t7D2-1Q6V(;b!|gbehFePve23
zsTR_BbPsnB5rY&}2oSDng#dNtaVgj@8cik^4RFRwu5N`rQcai)8jJ5rqj%MrlCYWJ
zCz52KO*3oi`z1<<@Jx)8aEhy(DBbY>u02NDhsypHzmEIIj8C6qcI3#Jbin$Htcm9P
zwtofZ1?NFz3wMjETvRj=uPj
zN8(@Xt0y5!acU*0>?~wGC{TcF5z-2jCSIC(Rm#W6
zkY-0YT-<;yXz*{qXmnQpFIP~ei>X@lmy}cwaAk0pu1c@DD%8!!bQD@
zSBUJAzNy$BWxk3kaFMbnfi#PkF78gb-#83R3=KFuXfYZy&5TUCNVZ)R>T%HS#Xw?P
zoY7)ZSW#w$Y4_SaDrm{Tx2tlGk}Tyu@ZV0Fvh^g(mk0{8aa#@CXRP)=0lx`)ojh+w
ziGfWha06@y*sWkzhfA~~G8%i`;Ek1bk76(FzI7gVP5Lh|bJ9v+5CgK#1*5gUc#t(3
z9csTp$Be?IZ2PSS>3I@D{}8ZFlrGMxp*IOAFf1N-8IzRyc?PjNEYw;jNCqZA#nHYs
z<0ZZZ@{quRpng{sDjPDkO47PzfXR5$W{hEX_1*Am?S+`53A-S>z#6hL
zv@KV#p^$%Ka!bKdDoqn>G&sXF36nx~GXvE)bXCY+Z&rpPhYH!Q#Oku90ELC}IB@^&
zZ)L{Av6Ax;pXq&PuKD8d^*M1q5)Ai+cOSzVenY7lS)v!IOz
z0X{^U?JUg3D;I5X+-c{Q<*o`#xy3PH32A=PZA`*1Fu(QHqeNYp&RE_yr^v{Ld=Nl&
z`Jk+T7eF}p-lfU+zi68bmI(}B(oaI{WqUn|s~!WNj|t`Sp|*>~6lWR2Lq`#i*5bGP
zKAv0~rks5tM1d+L5b;s(m279@OYICH6j#}BI)}vO=2BA>XphC>Yh}@x9WpVuEBez}
z^k-LiUv_%z&)OD`ecKSKHTP7`lXh@h%@$^DaZh4mcYPp?62B`VJoNQX+u~V`$*mzW
zn$bg*tlNCavi4t6w*{6>%yb(?Lw~~jtlSZ~U&92zq1x7UVm(9G>A7M(t>=}tToO}Vg0d^<5IN#B=8Q>7W!7s7
z(A7c+rC}Fgm67uA$k!&5QuwqCD$C|>M0{c&%U?ZAlR|8*snuqPHH~UMf}7ZT4N#ON
zW&l)sGI+C87UCV7!DEV2@tuLJ$9-a*WDJT8pGGHyz|X7^eT!LL=x+%ppUUrTI)y@=
zi2DR9I$XFZq_a;GNr?NL5RecycS!ud{&~{)3HCaR_|$tuT}Y@6@AnDSX|!m&6w41J
z&uK2EB`4(e%q)7p?N9NH*M&9LG-3j?HA4$0#jrCAe&?5F^Y8l7zx~m8M$Ji5u`;Mc
zZ>=ITWou3N19N_v%;F^PHN2bpc)b6p^}qOBf|p3y2xO+tPdEO;f8gEH=Nu(E9qU}S#BUvQbFms$Wmw7(D};jA
zJV9r#_uqMfl@?%s?1}iJ(Hj5Wzr?TOGuQtmel3?j{v|$Y#uc~dY@AJ+$#r@Nc;(oj
z<~Ey~?DZf1Ydj%Z>CgXbd`j=1iwf>XcA#6=OSL~4@7a5s!Tu!PwT@{+=h}w?yCflb(vFN00j>pMrppi>!$38$4BGnM2MrvLK1S+
zy!q+)2^;r~Kk);{*7$A^iINvmp3ye(&e(-8-I(*YpBS9&Xq`*7$$KQz`fAe^|Mb
z{}I2gxt#beP!ndGGjU}xaqd1cQkO>D%Z#T_uT9He{?qu-cw*NHDKEYv`vVd7P%ARpZ(4c
zMDBzAKl|OnBpsExXLkBB)Eet2#O{dUzC6~S9lL*npj%_N|3vJxl4ML(k>f$P+2h
zaueh8*7_fo+%wS^{iYT-W$aaL)q;&S^zJca!L;IjPQK(%Yb9$qX?5*wkNtD8+=XBE
z%t~XTtL@ERn`QcJX1V=|>}^vk>>;32?`XyfPi%WiWnx;mtkq4~r}={_Wnm~^ZmO?`
z={e75b$-+8_KdFb?QL!fml6^peobq4GbwU>JD9n~
zf4Uv?Ugf{o?hfVMPurOL4_Tw_WgCf`Z<2zir)&PetiU0BV4r(hCj^j)Ji9(WG
zTZgStVx>xOoLBm*LNd6U_iXyys?DTnfzdUxJYoS@w&(y_oY&uI({Jx^V*v?RM*9h!
zq&{5br+2zn$6K!UALw)k^7E=r>&fk%5bwJy{4YA)C*!-8__uet{o;F<_{Cjr-+1#9
ze`}ZPpZJwJEZB{5Z`29(;1a)`646)uP2FyMyk&{stJ}RYKF9lace}nrhOhWJ&*^Qm
zlG!<%V1{WCtTiR(Cdpd6&HEp86QKHl_m6eEL+H<*BMj)HM!46}g$wlSTEBXPJ3Kz;
zdH<&oZV#T0>~SBBuJns~tmySU?iJAO$33pXyTA6hx!_>k!HJ8bvENx@;Uu#w#>|N(
z*QoOj|K^cy@A#qh{*00CgzimDe?YYnq~G@o|J#x74SKcjD0f}F<$nKD!cU`z{J7D;
zbD96w(e71%V
zvXDN=arYV%5lTR7x^*h3?;If=t(dO;;i4q@YHw#RKfxJIe{$MWK5#dg!y!ooNWt&TFbApV<7&PGM9u89ymW)
zf!3!Tb#)W7O=mUYH)jJ;SgT7hF+0p*B&Tp(P&h<^hYGZ-ouoymn7v-{gJ?@C3Wd{$
z6OuytaY2+fqWYZEIJEv5s$Q?jp=lEQp(Z=KXP;NitEd^};ONUsBURPmj^V3F6|0Mq
zA2CVJampK3Ab_)jEfE&{*nk+AV4*6yS2#7^s|D2Sv_Q7gPtg(*CQQYt0Xo3;>=BpZ
z{Fw#h&6_}EDeYQ7DTgA+JaYoef%7X7#Fzcm#nBOhhR6$I;PH#;hYJhZBBDWfV7at_
z5^eL7m5`>Xy40RP6~n0i;<)mJmbCYj`N>BAt_g0hypsGax5U}8^&Dx6OEfg+2PZf}
zBSOUr4S`4M6AK?&QqzT?ISXu&o@LAYhyZ$hNHtP+GIs~Ul-x&D
zP~!@F2<>w~n7BoHjKB=+wG)`D1X=hv3>yGlZ?IH%g0#R2vwv!$>#4~kV^vtjv^Eti
z;!zN?DjjmfwjnNHVfJzdj+`=qj_ME!zKyh~&c*?SfJh4gMAd|V?sV%|Rn}jJ5Xm6z
z=Xs6gZl^VF$U+BWN@Y}O=6gnt5Fytv6mHtxr+^d+A1F3`0E$h|Kkx$W7m^#}BShYZ
z2xvjj!J>t9Q-g2sGPcd&=nJ%LJJ^-!Ir`SVTsHf3vU|GsQj1AB9CIFe9a5ny$x{EO
zDQ;}>vQm1lKY5Cq(e(ucxUB)D&;H(jX^LCba?4$))+_v*%kH>|7k_~{_bDAcUk-R8elc6*c;UP=iR@kR;I1#$U{>ci}L%?y;9Q(ocP*WQ1xN=t(cD8rm|)(hcqQl$q52
z{bhdS)Ow4Zi>2t3Cz+mmxj(k?+|Sy>wYPlba$0ToY1Q@mtEW)js7)z)0s<3_)BFEp#{10q}h
z&s1|xP;p={!D^;CVoc`CNzJ}(6Kg0$HeGY{*D=tAcOJo8Nm3gD)l#4GS<%PjP6`1
z6ry0J%U1jSUcq-)u2!!abm`xy2SdJVW2oMQ7qL)3-UN#E}CMx^U+3Pk$aO;jBdh_p%fyAY|V86(oxn?$5uzj<$0%&}mUdRoN&iGH}oA2Q3mDtKEO{#t5IZ>}j@NET4`O%c6+vr6sLTuK9^QL4cW17Go#{Tt9p
z#>Urt#lLg^99u!^#rJ)I_|`$q-*V}8!yzhX
znBKBBE?llQ4vBwwh5zIMEOS5QS03P|Px_{KXj(D_r9w8H<;DOl2NV>NEBs>zxPRN@
zTWu9vN01G+td!f>QcR`Bn7)?Y;Q!-5H)i7QUSl7j*QBfcASI4iR-!S~%%P2;t!0hn
zr7uCwYhuqFKPdnDy8r8e?n79U?|r2^zV_VB=Oz_4F<=+p^nu^QoN-zr7Cyc0XJ_kP
z`{|_Lxcxlcx3E`uy5c|4B~+%nGyT$?S>3MA^uM-dWjGRR@F}}55saSx@w;~4&Qe(V
z*e|pDw$AkNjduU$b~0PjZ8v2v*u~}ME3^AjXS(E<_JMYdtQ&uA&pR}(p8bIV(Wb#U
z_&o!<4Sb}JKIR7wa^w4cxhU&+@GoOmN#94Ex%nVB^{^{1#^tmZXvyA6^#V`nqbn{Y
z?jzaI)_4KFj5>W#6KB%Q_T_p7M0F!%a_fk;=0lA07&LgAwoSIzMn`ZFi@oN{PcwWVK=j`f4bOr99s>2E(~@?rk!*SaHK{Z4A*
zz|6p+Q&r*GV2ey4xBV<^p-4Gdeb`dkn}daXmu~e_UgxI!d9QO*#?<#_kLJyX(@RjZ
zbd3MZ>)b+
z^bm^pTPIBJk6QeD4=j%ExHH>imUQ52Du6AEAkn30
zwrS2$!_ZFscp=(Z`tR0)+#m~Oh)pKzkPGS|CbvQgbU?9!Ufv6S|lH1SOS6A
z*7?)ctv|_K5mhV>7sU!!U^_dma9-U(@yWo87cb>d@znc|yx;vtd%c+7{y|jHf4<+f
zp$WBqz+F5edxg)i%NigFeKwV6&rff-A3JyiOS7c(Yx?;12VCDCx9MRBtJZq$QsM&g
zYA0vkyyJfV+y~sh$5eUm$*wXsYo{JRoHfV036oEx4#1~bv-L8H{|xmf7_@D7=)0xl
zL%#e0__z0iZq{ou$nl2OyPh|<{92~KQ5O82L0K=7LwTzo_@FykK>qQAt~xfWu`>X?
z%K*4lO%`(i-ap^X?yqB2r-BF6XDGPS-#j0eI*pHC;HDw?k67SNoZJ{p2teApcx9e>yTEPUevjyK7m+sy7)
z_wmFgXvSW;Ie)!Xmf|*cpxx5^`l0;wqk8=$uOIOzpXy#ccI5-)xRGEw>fm^(te~q1
zuUPwl|JJFlyvGuxldZL{(wkd&vn+q2Pj2AB;60b0=8j3Whi@rrUvIYg|8trahkibCeI0K0qYlAOi^G!X$8rYz@iqL@D}3bE^1{eyVYfx{9)phLOR_tGZey*
z*IE|mMbmQqZV9a}2Hfd!q1qv^ikfiQ5jNbz;CW~4bBCxep}YF6Z_Wu9wnMX@eY7wT
zid07*>=px2nd@;p&JiX5IioXO8@?O9v@&nDjRV2_n?C9$)fxcnJNke;O6gS(ph>@E
zw4~}`z*~+XT?Ln6>Z%86ZMsPv>w1o@O@-t0dJJ<_5pE8t-!)K3Pa5shPnFG(LPzxr
zoaoZ#4?o=<&FQqHu8TW2v2<2Um-(fqyS+s5yi+c{uSIX5IAjUDa+f`U@L5y`d}U9d
zfBnfVdNKnhkV_d}4?h><-qp`7Rvv8B+kD8Lh+U>XSL-OK&;>-j8XD?_7V60icU)+5
zN%%DZku$G7SaOel;^S`8$Xlon#8sOo
zT)OHfe#$41z)fR(s~X-$ea84ZKH(1NU7vq)p&Td|v=dSD6aS@8xcz$X%HLe2H_Lf*
z-%tE+KHng1ft2I2$AbNLe^KMxlE)Gz;(du?s$Pn$&I1^h6HxmER+<-eZO
zuZ3*pJ=k1$dH#B-Ozo?9y{h^3N*=6ley}z#KS+7TK)jx-fB&al^2WQG%WcTZtx~(2
z@?Ur9*L^g)xw-Iz`RmPky@l5gt^4k$-I8eZ*8I)ZlI9qs|2F^rGu)BMlc9bfc`1F=
zZ#ctMkK2(KU)V}{Q~V##B=+HH-~L&5Y&6@N+*=}6lSmO`
zEo*Q?U8QH=GjfglD;=u7-Y>2ZEb+Ae?q}UmTsmvaqyO_(zEQ*QdD=hwvRWZG2F*~g
zd~(25sD9;uwX$`W8f@yw%1D8ScD|5Sd}lDeq1qT6$-^O^vZvc(=j+u5bD_NQ~LhnJo(Z&1Yec-|7GJ
zEO+1`4^yzrlJj86BtPcwE}Wo<5G-_MX9`@lsYPb`IcK{u^P1Qrhgxc`K646MFc;&7
zcm?Y?{5Uzl-{6aCd+6Uu{_3+0m3N-)=68z~as+Jhhn(Z4^yH6NbEch5{{82;%E*lo
zrxx&YLsY@FAc7pT2-WOqf9fK9ygxa|y_@?#EOJ%#&NQgoiH
z?A`PwL;d;)7ixKg))2c1fz?j27AfADV&0tRK1FNaKhLd>aSng%bM8Qk?Jy$;Q-i}7
zg>(G5>$w`W8C>eJ865O9`3*1jjZPve!*F;ad+fBG?w-k
zD^{GRd}DXcmfv9xk$_vb?&+*tEYc!!7mJGETxs^Jv<1HkY{OkCdn1_GUxq)i(pz#Xee=EewH`uI#!Bz35c8o9Ccj70geVrQK-762
zZS$=cxs&!swI3+kpLr~`3VUqUll&pvDgLl*2bKdO%d@D%$shceE@GYH4}RT680mlT
z4`1ZAL>>ONi`|d;b-@z%zEONFtWZiM^R?tpmw&;Xu-9Wz=62<2QD6~e@@F_=R>5pt
zttjKsdfcD&1=mZYCw1Pwdl$nKKDxxcIr@t~{t`m9p71AM;@%QJ9{G14HR@Xb?n~Vp
z6n%OraYax1KVRw=;{4@O^_MMmlRCGo0zbiv;cs6`4<29l`=yxi(c_n2?*7I89=mS&
z@PJHqkDJ$(~iKjXz%E&!OZ+%O#HkN9%yOyYCD{U!HkxE$h_T-~}l
zzrW{d_d9U7cF>IiyPoUY{J3lJ*gd}P!E4>OqgS5KU@eOkxpBe!w^tsdRE=QjXf8^+
zMyNss(zfL3s2ush^gT{^f}*u
zz5CbbI{(S*-7#G5y51ehrE`Uw$K~xS+*{|3_;6(mGBsLI9)DV;&nbptyiyTLa|g~|
zylms(S?9D8zdoDCW9H+)=p#kEJmSsT$Etk+v~597us?Ts#617R3iqno^~#Hrf0P)l
za9hONklnVVt@&*Mi#>bF*v)T?X-j@vYLtx{WlN0$1YqQ4UA^qlIN;AeH2L|HMi+I`54=3B4HtK)nl1wVS$tUn_6E2?_QzM
zHSk#PO%s}$;0}nE$E^kNJ%D}_gMvC>P2X+8coVoyFgEov`)p05Y=A?
zlC_m4XomYjQ&V;NH+?{#&GFv^deg^3H-pPC?|Re6npp4!>nq4@yq*8!=EQ7@oj&iB
z`#Xj}sZx3TG3=6Ey3y@XgZo>={U{)CfA{L@Y$7NZ;w8YzSDsz&*6?7f2?hft=JhXV
zly11Aj&EW>!ehIt`((DFxm+-
zMW-WX`?ys5U=KXnJo<RY^P>OUO>RP+G8~izYG?-TxFJJ&t1ty;H_;oF)iRt_jP8h2)>~gU8mD}<
zWGQz5-Ru8xlj}QdG}!=Sk{d(DN44$4)v?5YRuj_Quw+lKPU7{Xd9sDt(zA;5sudln
z+LjI!kEvGuaccAb2Rp*2m{C^h8hpDp=PhJ
z*P5SK`qK!bjZ`xIKSn9kTy@NOYo*5qJCoRvP?BBkYvO#Q$bR$bt1)G@n|WPIEgA;9
zQ_&sLU-#p-+g3w&h!|7Y;dnno&JIJ)Vybs0t4h`G@)Wj*Wn&Hd90||bZ?e16BW{Qh
zGmPH$^4MdE0j;kpow#p_7Qn~<8X?ctteVkvgK)FTKU8ucJ+sl)VV);e9kbvXW
zTqn)1_J;Rv`)s;Lo+0ZntbIq7ap19B9<9xBZO6QEJS)=%Ir$6TXwC4IP5$Av%-)m0
zM!kw!X=jR0sxc{kpr;aOV$JhH%jtRZ9B*#&m*_7cp=t?(~W$)(&m5KD7
z^8}Bw5j2r4E5T{H%k57C<%ySn&5e75It8-8U^!U;x)>kesHHq<0Ud0q^ct$jGU_T@NqC4CS
zYAo*VEANffYGbvvK2{r|seeIb@3OWTVMOzu!3`A5qjo#4GTKH(YODm;sU5J)ll65S3$3txoi}f5Q>>?6-cyjX{19
zdcaOSCNKKSv*d?{Y60n8+H->ej}(vuK)J@(zhyrloCvO^Zj|`&M9nvhGw?3q0c_k(
zOm2ep$aNJLce0NGgc=Ksoq_^5j_K#n;mU-59*t#iK?u;z{_u=hio=AJet+>dU4Lu8
zI?6nE^Ech3vi)F7)Ka=-zxm0U5{Id=W0@U_D}g)K|CuUdt>f(FVK8?91YJmO@>ABi
z_e7I@ZJpaAVIl|+hw(hw-?GllXkTkjOXbP_zt_2`wVUZdcX={Uop9=qoPi!^HHX}vv$;S2pvYtjU{QQDjiIu(1Uwx}P
zaY{BCVGJnOi=6CDW9@DjriNZW`8GE&QX0jAa`NG78{gvpF@DW$ZZDen*=_Eqo;4+W
zL){AqgJ;Ji_;%K}+^5)1sSJx#B?r&1`xcSG*ZFPVa<7fP=4WnjQ
zU-HrUjHuR1XOIoOLTs-;c>_Ukqy3@{yAph~zioq?(YhAhhb@ZyA2+zEM?mfzKWt0X
zkW{EuG}n`UHWOB@N<(34CGAXez$Tjmdi*iByTeW%t=x3Yq##E}thFydovI5nn-(R?
zG%{MX=}@-sfyja^
zp)sp)NN^1y!;`1|@priXk(l&_vhp|eryJ@a=8akiX%A>l~}p)A|nktJNy@IcE-E=9OmC-r6;Wv815
z1mzlWX;_(t*?KSj|pZDx?D;rzEBo
zHBCy3vR}a6LGelIr{#3vEIv{6Q2+>k@nt5Tqx7u8slq4Db{?{_soRr7iWKf}(mOy|
z?+|ol!1#IJaVORY=Zq5&SV&i~Y7{4z3jh+Vh3bj8eA4^BcTR#s!`r78M&Vln!eaKZ
zPwIEh(?RY=Eo`()HpPC}$OxMoPe@#*#=taVk&%nKdXLUJ%ia`K2dn
z&JiqmoAQq5ZGQd70}9#*FfBKj@^%O)pq;B|hd3K{I%Ke_mPp!Yt=o1zY$Zf<%qF~_G6Ne-WeE0X<
zxHnpr#^4V-fNdW-_*ok>yx8F9M>BS;yNqqak^l7f+=N5xa6_j@h4&|IG~hxH30FIY
z861H0V?a9ke+yDhFn`APT;EB}&^#w-IQK?4%%NG_w54CD8HQ?E!tw&vLIZ1*0}7@M
z>HLr{O>F+s1>(D