wasm linker: aggressive DODification

The goals of this branch are to:
* compile faster when using the wasm linker and backend
* enable saving compiler state by directly copying in-memory linker
  state to disk
* use compiler memory more efficiently
* introduce integer type safety to wasm linker code
* generate better WebAssembly code
* fully participate in incremental compilation
* do as much work as possible outside of flush(), while continuing to
  perform linker garbage collection
* avoid unnecessary heap allocations
* avoid unnecessary indirect function calls

In order to accomplish these goals, this branch removes the ZigObject
abstraction, as well as Symbol and Atom. These abstractions resulted
in overly generic code, unnecessary work, and needless complications
that simply go away by creating a better in-memory data model and
emitting more things lazily.

For example, wasm codegen now emits MIR, which is lowered to wasm code
during linking with optimal function indexes and so on; relocations are
emitted only when outputting an object file. Previously, relocations
were always emitted, which is entirely unnecessary when emitting an
executable, and forced all function calls to use the maximum-size LEB
encoding.
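
To make the cost concrete, here is a minimal sketch (the helper is
hypothetical, not the compiler's actual API): a relocatable call target
must reserve the maximum-width form so a later fixup can overwrite it in
place, while a known-final index can use the shortest encoding.

const std = @import("std");

// Sketch only: a hypothetical helper, not the compiler's real API.
// A relocatable call index is padded to the full 5-byte ULEB128 form so
// that a relocation can patch it in place; a known-final index uses the
// minimal form, often a single byte.
fn writeCallIndex(writer: anytype, func_index: u32, relocatable: bool) !void {
    if (relocatable) {
        var buf: [5]u8 = undefined;
        std.leb.writeUnsignedFixed(5, &buf, func_index);
        try writer.writeAll(&buf);
    } else {
        try std.leb.writeUleb128(writer, func_index);
    }
}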

This branch introduces the concept of a "prelink" phase, which occurs
after all object files have been parsed but before any Zcu updates are
sent to the linker. This allows the linker to fully parse all objects
into a compact memory model that is guaranteed to be complete by the
time Zcu code is generated.
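
Schematically, the ordering looks like this (illustrative pseudocode;
the names are made up, not the real linker interface):

// Illustrative only: shows the phase ordering, not actual API names.
fn link(wasm: *Wasm) !void {
    for (wasm.input_object_paths) |path| try wasm.parseObject(path);
    try wasm.prelink(); // object data now complete and compact
    // ... incremental Zcu updates arrive here, against known state ...
    try wasm.flush(); // only the remaining per-update work
}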

This commit is not a complete implementation of all these goals; it
does not yet pass semantic analysis.
Andrew Kelley 2024-11-04 17:26:17 -08:00
parent 77273103a8
commit 795e7c64d5
34 changed files with 4322 additions and 7204 deletions

@ -643,9 +643,9 @@ set(ZIG_STAGE2_SOURCES
src/link/StringTable.zig
src/link/Wasm.zig
src/link/Wasm/Archive.zig
src/link/Wasm/Flush.zig
src/link/Wasm/Object.zig
src/link/Wasm/Symbol.zig
src/link/Wasm/ZigObject.zig
src/link/aarch64.zig
src/link/riscv.zig
src/link/table_section.zig

@ -2682,7 +2682,7 @@ const WasmDumper = struct {
else => unreachable,
}
const end_opcode = try std.leb.readUleb128(u8, reader);
if (end_opcode != std.wasm.opcode(.end)) {
if (end_opcode != @intFromEnum(std.wasm.Opcode.end)) {
return step.fail("expected 'end' opcode in init expression", .{});
}
}
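
This is the pattern used wherever the std.wasm wrapper functions
(removed in the next file) were called: the builtin conversions go in
both directions. A sketch:

// Sketch: direct builtin conversions replace the removed wrappers.
const end_byte: u8 = @intFromEnum(std.wasm.Opcode.end); // was std.wasm.opcode(.end)
const vt_byte: u8 = @intFromEnum(std.wasm.Valtype.i32); // was std.wasm.valtype(.i32)
const op: std.wasm.Opcode = @enumFromInt(end_byte); // non-exhaustive enum, so any u8 is valid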

@ -4,8 +4,6 @@
const std = @import("std.zig");
const testing = std.testing;
// TODO: Add support for multi-byte ops (e.g. table operations)
/// Wasm instruction opcodes
///
/// All instructions are defined as per spec:
@ -195,27 +193,6 @@ pub const Opcode = enum(u8) {
_,
};
/// Returns the integer value of an `Opcode`. Used by the Zig compiler
/// to write instructions to the wasm binary file
pub fn opcode(op: Opcode) u8 {
return @intFromEnum(op);
}
test "opcodes" {
// Ensure our opcodes values remain intact as certain values are skipped due to them being reserved
const i32_const = opcode(.i32_const);
const end = opcode(.end);
const drop = opcode(.drop);
const local_get = opcode(.local_get);
const i64_extend32_s = opcode(.i64_extend32_s);
try testing.expectEqual(@as(u16, 0x41), i32_const);
try testing.expectEqual(@as(u16, 0x0B), end);
try testing.expectEqual(@as(u16, 0x1A), drop);
try testing.expectEqual(@as(u16, 0x20), local_get);
try testing.expectEqual(@as(u16, 0xC4), i64_extend32_s);
}
/// Opcodes that require a prefix `0xFC`.
/// Each opcode represents a varuint32, meaning
/// they are encoded as leb128 in binary.
@ -241,12 +218,6 @@ pub const MiscOpcode = enum(u32) {
_,
};
/// Returns the integer value of an `MiscOpcode`. Used by the Zig compiler
/// to write instructions to the wasm binary file
pub fn miscOpcode(op: MiscOpcode) u32 {
return @intFromEnum(op);
}
/// Simd opcodes that require a prefix `0xFD`.
/// Each opcode represents a varuint32, meaning
/// they are encoded as leb128 in binary.
@ -512,12 +483,6 @@ pub const SimdOpcode = enum(u32) {
f32x4_relaxed_dot_bf16x8_add_f32x4 = 0x114,
};
/// Returns the integer value of an `SimdOpcode`. Used by the Zig compiler
/// to write instructions to the wasm binary file
pub fn simdOpcode(op: SimdOpcode) u32 {
return @intFromEnum(op);
}
/// Atomic opcodes that require a prefix `0xFE`.
/// Each opcode represents a varuint32, meaning
/// they are encoded as leb128 in binary.
@ -592,12 +557,6 @@ pub const AtomicsOpcode = enum(u32) {
i64_atomic_rmw32_cmpxchg_u = 0x4E,
};
/// Returns the integer value of an `AtomicsOpcode`. Used by the Zig compiler
/// to write instructions to the wasm binary file
pub fn atomicsOpcode(op: AtomicsOpcode) u32 {
return @intFromEnum(op);
}
/// Enum representing all Wasm value types as per spec:
/// https://webassembly.github.io/spec/core/binary/types.html
pub const Valtype = enum(u8) {
@ -608,11 +567,6 @@ pub const Valtype = enum(u8) {
v128 = 0x7B,
};
/// Returns the integer value of a `Valtype`
pub fn valtype(value: Valtype) u8 {
return @intFromEnum(value);
}
/// Reference types, where the funcref references to a function regardless of its type
/// and ref references an object from the embedder.
pub const RefType = enum(u8) {
@ -620,41 +574,17 @@ pub const RefType = enum(u8) {
externref = 0x6F,
};
/// Returns the integer value of a `Reftype`
pub fn reftype(value: RefType) u8 {
return @intFromEnum(value);
}
test "valtypes" {
const _i32 = valtype(.i32);
const _i64 = valtype(.i64);
const _f32 = valtype(.f32);
const _f64 = valtype(.f64);
try testing.expectEqual(@as(u8, 0x7F), _i32);
try testing.expectEqual(@as(u8, 0x7E), _i64);
try testing.expectEqual(@as(u8, 0x7D), _f32);
try testing.expectEqual(@as(u8, 0x7C), _f64);
}
/// Limits classify the size range of resizeable storage associated with memory types and table types.
pub const Limits = struct {
flags: u8,
flags: Flags,
min: u32,
max: u32,
pub const Flags = enum(u8) {
WASM_LIMITS_FLAG_HAS_MAX = 0x1,
WASM_LIMITS_FLAG_IS_SHARED = 0x2,
pub const Flags = packed struct(u8) {
has_max: bool,
is_shared: bool,
reserved: u6 = 0,
};
pub fn hasFlag(limits: Limits, flag: Flags) bool {
return limits.flags & @intFromEnum(flag) != 0;
}
pub fn setFlag(limits: *Limits, flag: Flags) void {
limits.flags |= @intFromEnum(flag);
}
};
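
A usage sketch for the new packed representation (assuming the
definition above): flag tests become plain field accesses, and the set
still occupies a single byte with the same bit layout.

// Sketch: bit tests become field accesses; @bitCast recovers the raw
// byte, so the binary encoding is unchanged.
var limits: std.wasm.Limits = .{
    .flags = .{ .has_max = true, .is_shared = false },
    .min = 1,
    .max = 16,
};
if (limits.flags.has_max) limits.max = 32; // was limits.hasFlag(.WASM_LIMITS_FLAG_HAS_MAX)
const raw: u8 = @bitCast(limits.flags); // 0x01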
/// Initialization expressions are used to set the initial value on an object
@ -667,18 +597,6 @@ pub const InitExpression = union(enum) {
global_get: u32,
};
/// Represents a function entry, holding the index to its type
pub const Func = struct {
type_index: u32,
};
/// Tables are used to hold pointers to opaque objects.
/// This can either by any function, or an object from the host.
pub const Table = struct {
limits: Limits,
reftype: RefType,
};
/// Describes the layout of the memory where `min` represents
/// the minimal amount of pages, and the optional `max` represents
/// the max pages. When `null` will allow the host to determine the
@ -687,88 +605,6 @@ pub const Memory = struct {
limits: Limits,
};
/// Represents the type of a `Global` or an imported global.
pub const GlobalType = struct {
valtype: Valtype,
mutable: bool,
};
pub const Global = struct {
global_type: GlobalType,
init: InitExpression,
};
/// Notates an object to be exported from wasm
/// to the host.
pub const Export = struct {
name: []const u8,
kind: ExternalKind,
index: u32,
};
/// Element describes the layout of the table that can
/// be found at `table_index`
pub const Element = struct {
table_index: u32,
offset: InitExpression,
func_indexes: []const u32,
};
/// Imports are used to import objects from the host
pub const Import = struct {
module_name: []const u8,
name: []const u8,
kind: Kind,
pub const Kind = union(ExternalKind) {
function: u32,
table: Table,
memory: Limits,
global: GlobalType,
};
};
/// `Type` represents a function signature type containing both
/// a slice of parameters as well as a slice of return values.
pub const Type = struct {
params: []const Valtype,
returns: []const Valtype,
pub fn format(self: Type, comptime fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void {
if (fmt.len != 0) std.fmt.invalidFmtError(fmt, self);
_ = opt;
try writer.writeByte('(');
for (self.params, 0..) |param, i| {
try writer.print("{s}", .{@tagName(param)});
if (i + 1 != self.params.len) {
try writer.writeAll(", ");
}
}
try writer.writeAll(") -> ");
if (self.returns.len == 0) {
try writer.writeAll("nil");
} else {
for (self.returns, 0..) |return_ty, i| {
try writer.print("{s}", .{@tagName(return_ty)});
if (i + 1 != self.returns.len) {
try writer.writeAll(", ");
}
}
}
}
pub fn eql(self: Type, other: Type) bool {
return std.mem.eql(Valtype, self.params, other.params) and
std.mem.eql(Valtype, self.returns, other.returns);
}
pub fn deinit(self: *Type, gpa: std.mem.Allocator) void {
gpa.free(self.params);
gpa.free(self.returns);
self.* = undefined;
}
};
/// Wasm module sections as per spec:
/// https://webassembly.github.io/spec/core/binary/modules.html
pub const Section = enum(u8) {
@ -788,11 +624,6 @@ pub const Section = enum(u8) {
_,
};
/// Returns the integer value of a given `Section`
pub fn section(val: Section) u8 {
return @intFromEnum(val);
}
/// The kind of the type when importing or exporting to/from the host environment.
/// https://webassembly.github.io/spec/core/syntax/modules.html
pub const ExternalKind = enum(u8) {
@ -802,11 +633,6 @@ pub const ExternalKind = enum(u8) {
global,
};
/// Returns the integer value of a given `ExternalKind`
pub fn externalKind(val: ExternalKind) u8 {
return @intFromEnum(val);
}
/// Defines the enum values for each subsection id for the "Names" custom section
/// as described by:
/// https://webassembly.github.io/spec/core/appendix/custom.html?highlight=name#name-section

@ -2438,9 +2438,8 @@ fn flush(
if (comp.bin_file) |lf| {
// This is needed before reading the error flags.
lf.flush(arena, tid, prog_node) catch |err| switch (err) {
error.FlushFailure, error.LinkFailure => {}, // error reported through link_diags.flags
error.LLDReportedFailure => {}, // error reported via lockAndParseLldStderr
else => |e| return e,
error.LinkFailure => {}, // Already reported.
error.OutOfMemory => return error.OutOfMemory,
};
}

@ -524,6 +524,15 @@ pub const Export = struct {
section: InternPool.OptionalNullTerminatedString = .none,
visibility: std.builtin.SymbolVisibility = .default,
};
/// Index into `all_exports`.
pub const Index = enum(u32) {
_,
pub fn ptr(i: Index, zcu: *const Zcu) *Export {
return &zcu.all_exports.items[@intFromEnum(i)];
}
};
};
pub const Reference = struct {

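This is the typed-index pattern behind the integer-type-safety goal: a
distinct enum(u32) cannot be passed where some other u32 index is
expected, and every dereference funnels through one method. A sketch
(the surrounding function is hypothetical):

// Sketch: the index can only refer into zcu.all_exports; no raw
// pointer is held, so the backing array may be resized freely.
fn firstExport(zcu: *const Zcu) ?*Zcu.Export {
    if (zcu.all_exports.items.len == 0) return null;
    const first: Zcu.Export.Index = @enumFromInt(0);
    return first.ptr(zcu);
}
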
@ -1722,22 +1722,19 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
// Correcting this failure will involve changing a type this function
// depends on, hence triggering re-analysis of this function, so this
// interacts correctly with incremental compilation.
// TODO: do we need to mark this failure anywhere? I don't think so, since compilation
// will fail due to the type error anyway.
} else if (comp.bin_file) |lf| {
lf.updateFunc(pt, func_index, air, liveness) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
assert(zcu.failed_codegen.contains(nav_index));
},
else => {
try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create(
error.CodegenFail => assert(zcu.failed_codegen.contains(nav_index)),
error.LinkFailure => assert(comp.link_diags.hasErrors()),
error.Overflow => {
try zcu.failed_codegen.putNoClobber(nav_index, try Zcu.ErrorMsg.create(
gpa,
zcu.navSrcLoc(nav_index),
"unable to codegen: {s}",
.{@errorName(err)},
));
try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .func = func_index }));
// Not a retryable failure.
},
};
} else if (zcu.llvm_object) |llvm_object| {
@ -3100,6 +3097,7 @@ pub fn populateTestFunctions(
pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error{OutOfMemory}!void {
const zcu = pt.zcu;
const comp = zcu.comp;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const nav = zcu.intern_pool.getNav(nav_index);
@ -3113,26 +3111,16 @@ pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error
} else if (comp.bin_file) |lf| {
lf.updateNav(pt, nav_index) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
assert(zcu.failed_codegen.contains(nav_index));
},
else => {
const gpa = zcu.gpa;
try zcu.failed_codegen.ensureUnusedCapacity(gpa, 1);
zcu.failed_codegen.putAssumeCapacityNoClobber(nav_index, try Zcu.ErrorMsg.create(
error.CodegenFail => assert(zcu.failed_codegen.contains(nav_index)),
error.LinkFailure => assert(comp.link_diags.hasErrors()),
error.Overflow => {
try zcu.failed_codegen.putNoClobber(nav_index, try Zcu.ErrorMsg.create(
gpa,
zcu.navSrcLoc(nav_index),
"unable to codegen: {s}",
.{@errorName(err)},
));
if (nav.analysis != null) {
try zcu.retryable_failures.append(zcu.gpa, .wrap(.{ .nav_val = nav_index }));
} else {
// TODO: we don't have a way to indicate that this failure is retryable!
// Since these are really rare, we could as a cop-out retry the whole build next update.
// But perhaps we can do better...
@panic("TODO: retryable failure codegenning non-declaration Nav");
}
// Not a retryable failure.
},
};
} else if (zcu.llvm_object) |llvm_object| {

@ -167,7 +167,7 @@ const DbgInfoReloc = struct {
name: [:0]const u8,
mcv: MCValue,
fn genDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
fn genDbgInfo(reloc: DbgInfoReloc, function: Self) CodeGenError!void {
switch (reloc.tag) {
.arg,
.dbg_arg_inline,
@ -181,7 +181,7 @@ const DbgInfoReloc = struct {
}
}
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) CodeGenError!void {
switch (function.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.Loc = switch (reloc.mcv) {
@ -209,7 +209,7 @@ const DbgInfoReloc = struct {
}
}
fn genVarDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
fn genVarDbgInfo(reloc: DbgInfoReloc, function: Self) CodeGenError!void {
switch (function.debug_output) {
.dwarf => |dwarf| {
const loc: link.File.Dwarf.Loc = switch (reloc.mcv) {
@ -395,13 +395,13 @@ pub fn generate(
try reloc.genDbgInfo(function);
}
var mir = Mir{
var mir: Mir = .{
.instructions = function.mir_instructions.toOwnedSlice(),
.extra = try function.mir_extra.toOwnedSlice(gpa),
};
defer mir.deinit(gpa);
var emit = Emit{
var emit: Emit = .{
.mir = mir,
.bin_file = lf,
.debug_output = debug_output,
@ -1107,7 +1107,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void {
/// Copies a value to a register without tracking the register. The register is not considered
/// allocated. A second call to `copyToTmpRegister` may return the same register.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) InnerError!Register {
const raw_reg = try self.register_manager.allocReg(null, gp);
const reg = self.registerAlias(raw_reg, ty);
try self.genSetReg(ty, reg, mcv);
@ -1125,12 +1125,12 @@ fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCVa
return MCValue{ .register = reg };
}
fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
fn airAlloc(self: *Self, inst: Air.Inst.Index) InnerError!void {
const stack_offset = try self.allocMemPtr(inst);
return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none });
}
fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airRetPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const result: MCValue = switch (self.ret_mcv) {
@ -1152,19 +1152,19 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void {
fn airFptrunc(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFptrunc for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
fn airFpext(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFpext for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
fn airIntCast(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
@ -1293,7 +1293,7 @@ fn trunc(
}
}
fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
fn airTrunc(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
@ -1306,14 +1306,14 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void {
fn airIntFromBool(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else operand;
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airNot(self: *Self, inst: Air.Inst.Index) !void {
fn airNot(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const pt = self.pt;
const zcu = pt.zcu;
@ -1484,7 +1484,7 @@ fn minMax(
}
}
fn airMinMax(self: *Self, inst: Air.Inst.Index) !void {
fn airMinMax(self: *Self, inst: Air.Inst.Index) InnerError!void {
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs_ty = self.typeOf(bin_op.lhs);
@ -1502,7 +1502,7 @@ fn airMinMax(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
fn airSlice(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
@ -2440,7 +2440,7 @@ fn ptrArithmetic(
}
}
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) InnerError!void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs_ty = self.typeOf(bin_op.lhs);
const rhs_ty = self.typeOf(bin_op.rhs);
@ -2490,7 +2490,7 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const lhs_ty = self.typeOf(bin_op.lhs);
@ -2505,25 +2505,25 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
fn airAddSat(self: *Self, inst: Air.Inst.Index) InnerError!void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement add_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
fn airSubSat(self: *Self, inst: Air.Inst.Index) InnerError!void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement sub_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
fn airMulSat(self: *Self, inst: Air.Inst.Index) InnerError!void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement mul_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
fn airOverflow(self: *Self, inst: Air.Inst.Index) InnerError!void {
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@ -2536,9 +2536,9 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
const tuple_ty = self.typeOfIndex(inst);
const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(zcu)));
const tuple_size: u32 = @intCast(tuple_ty.abiSize(zcu));
const tuple_align = tuple_ty.abiAlignment(zcu);
const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, zcu)));
const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, zcu));
switch (lhs_ty.zigTypeTag(zcu)) {
.vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
@ -2652,7 +2652,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
@ -2876,7 +2876,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
@ -3012,13 +3012,13 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const optional_ty = self.typeOf(ty_op.operand);
@ -3055,13 +3055,13 @@ fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty:
}
}
fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .optional_payload_ptr_set for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@ -3137,7 +3137,7 @@ fn errUnionErr(
}
}
fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
@ -3218,7 +3218,7 @@ fn errUnionPayload(
}
}
fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
@ -3230,26 +3230,26 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
}
// *(E!T) -> E
fn airUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
// *(E!T) -> *T
fn airUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .errunion_payload_ptr_set for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void {
fn airErrReturnTrace(self: *Self, inst: Air.Inst.Index) InnerError!void {
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else
@ -3257,17 +3257,17 @@ fn airErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn airSetErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void {
fn airSetErrReturnTrace(self: *Self, inst: Air.Inst.Index) InnerError!void {
_ = inst;
return self.fail("TODO implement airSetErrReturnTrace for {}", .{self.target.cpu.arch});
}
fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) InnerError!void {
_ = inst;
return self.fail("TODO implement airSaveErrReturnTraceIndex for {}", .{self.target.cpu.arch});
}
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@ -3313,7 +3313,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
}
/// T to E!T
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@ -3338,7 +3338,7 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
}
/// E to E!T
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const pt = self.pt;
@ -3379,7 +3379,7 @@ fn slicePtr(mcv: MCValue) MCValue {
}
}
fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
fn airSlicePtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(ty_op.operand);
@ -3388,7 +3388,7 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
fn airSliceLen(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_bits = 64;
@ -3412,7 +3412,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_bits = 64;
@ -3429,7 +3429,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(ty_op.operand);
@ -3444,7 +3444,7 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@ -3487,7 +3487,7 @@ fn ptrElemVal(
}
}
fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
@ -3506,13 +3506,13 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) InnerError!void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@ -3526,7 +3526,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
@ -3542,55 +3542,55 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) InnerError!void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
_ = bin_op;
return self.fail("TODO implement airSetUnionTag for {}", .{self.target.cpu.arch});
}
fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airGetUnionTag for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airClz(self: *Self, inst: Air.Inst.Index) !void {
fn airClz(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airClz for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
fn airCtz(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airCtz for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
fn airPopcount(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airPopcount for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
fn airAbs(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airAbs for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
fn airByteSwap(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airByteSwap for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
fn airBitReverse(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airBitReverse for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void {
fn airUnaryMath(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
@ -3885,7 +3885,7 @@ fn genInlineMemsetCode(
// end:
}
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
fn airLoad(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@ -4086,7 +4086,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
}
}
fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) InnerError!void {
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
} else {
@ -4103,14 +4103,14 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
const result = try self.structFieldPtr(inst, extra.struct_operand, extra.field_index);
return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none });
}
fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result = try self.structFieldPtr(inst, ty_op.operand, index);
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@ -4138,7 +4138,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
};
}
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
const operand = extra.struct_operand;
@ -4194,7 +4194,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none });
}
fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@ -4218,7 +4218,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.field_ptr, .none, .none });
}
fn airArg(self: *Self, inst: Air.Inst.Index) !void {
fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!void {
// skip zero-bit arguments as they don't have a corresponding arg instruction
var arg_index = self.arg_index;
while (self.args[arg_index] == .none) arg_index += 1;
@ -4238,7 +4238,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn airTrap(self: *Self) !void {
fn airTrap(self: *Self) InnerError!void {
_ = try self.addInst(.{
.tag = .brk,
.data = .{ .imm16 = 0x0001 },
@ -4246,7 +4246,7 @@ fn airTrap(self: *Self) !void {
return self.finishAirBookkeeping();
}
fn airBreakpoint(self: *Self) !void {
fn airBreakpoint(self: *Self) InnerError!void {
_ = try self.addInst(.{
.tag = .brk,
.data = .{ .imm16 = 0xf000 },
@ -4254,17 +4254,17 @@ fn airBreakpoint(self: *Self) !void {
return self.finishAirBookkeeping();
}
fn airRetAddr(self: *Self, inst: Air.Inst.Index) !void {
fn airRetAddr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airRetAddr for aarch64", .{});
return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void {
fn airFrameAddress(self: *Self, inst: Air.Inst.Index) InnerError!void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFrameAddress for aarch64", .{});
return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void {
fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) InnerError!void {
if (modifier == .always_tail) return self.fail("TODO implement tail calls for aarch64", .{});
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const callee = pl_op.operand;
@ -4422,7 +4422,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
return bt.finishAir(result);
}
fn airRet(self: *Self, inst: Air.Inst.Index) !void {
fn airRet(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@ -4455,7 +4455,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}
fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
fn airRetLoad(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@ -4499,7 +4499,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) InnerError!void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs_ty = self.typeOf(bin_op.lhs);
@ -4597,12 +4597,12 @@ fn cmp(
}
}
fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void {
fn airCmpVector(self: *Self, inst: Air.Inst.Index) InnerError!void {
_ = inst;
return self.fail("TODO implement airCmpVector for {}", .{self.target.cpu.arch});
}
fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
_ = operand;
@ -4610,7 +4610,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
fn airDbgStmt(self: *Self, inst: Air.Inst.Index) InnerError!void {
const dbg_stmt = self.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
_ = try self.addInst(.{
@ -4624,7 +4624,7 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAirBookkeeping();
}
fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void {
fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@ -4635,7 +4635,7 @@ fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void {
try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
}
fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
fn airDbgVar(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const operand = pl_op.operand;
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
@ -4686,7 +4686,7 @@ fn condBr(self: *Self, condition: MCValue) !Mir.Inst.Index {
}
}
fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
fn airCondBr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const cond = try self.resolveInst(pl_op.operand);
const extra = self.air.extraData(Air.CondBr, pl_op.payload);
@ -4919,7 +4919,7 @@ fn isNonErr(
}
}
fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
fn airIsNull(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(un_op);
@ -4930,7 +4930,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@ -4947,7 +4947,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
fn airIsNonNull(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(un_op);
@ -4958,7 +4958,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@ -4975,7 +4975,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
fn airIsErr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
@ -4986,7 +4986,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@ -5003,7 +5003,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
fn airIsNonErr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
@ -5014,7 +5014,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@ -5031,7 +5031,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
fn airLoop(self: *Self, inst: Air.Inst.Index) InnerError!void {
// A loop is a setup to be able to jump back to the beginning.
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
@ -5052,7 +5052,7 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void {
});
}
fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
fn airBlock(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
@ -5090,7 +5090,7 @@ fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) !
return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
fn airSwitch(self: *Self, inst: Air.Inst.Index) InnerError!void {
const switch_br = self.air.unwrapSwitch(inst);
const condition_ty = self.typeOf(switch_br.operand);
const liveness = try self.liveness.getSwitchBr(
@ -5224,7 +5224,7 @@ fn performReloc(self: *Self, inst: Mir.Inst.Index) !void {
}
}
fn airBr(self: *Self, inst: Air.Inst.Index) !void {
fn airBr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const branch = self.air.instructions.items(.data)[@intFromEnum(inst)].br;
try self.br(branch.block_inst, branch.operand);
return self.finishAir(inst, .dead, .{ branch.operand, .none, .none });
@ -5268,7 +5268,7 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
}));
}
fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
fn airAsm(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
@ -5601,7 +5601,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.tag = .ldr_ptr_stack,
.data = .{ .load_store_stack = .{
.rt = reg,
.offset = @as(u32, @intCast(off)),
.offset = @intCast(off),
} },
});
},
@ -5617,13 +5617,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.immediate => |x| {
_ = try self.addInst(.{
.tag = .movz,
.data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x)) } },
.data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(x) } },
});
if (x & 0x0000_0000_ffff_0000 != 0) {
_ = try self.addInst(.{
.tag = .movk,
.data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 16)), .hw = 1 } },
.data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(x >> 16), .hw = 1 } },
});
}
@ -5631,13 +5631,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
if (x & 0x0000_ffff_0000_0000 != 0) {
_ = try self.addInst(.{
.tag = .movk,
.data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 32)), .hw = 2 } },
.data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(x >> 32), .hw = 2 } },
});
}
if (x & 0xffff_0000_0000_0000 != 0) {
_ = try self.addInst(.{
.tag = .movk,
.data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 48)), .hw = 3 } },
.data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(x >> 48), .hw = 3 } },
});
}
}
@ -5709,7 +5709,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.tag = tag,
.data = .{ .load_store_stack = .{
.rt = reg,
.offset = @as(u32, @intCast(off)),
.offset = @intCast(off),
} },
});
},
@ -5733,7 +5733,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.tag = tag,
.data = .{ .load_store_stack = .{
.rt = reg,
.offset = @as(u32, @intCast(off)),
.offset = @intCast(off),
} },
});
},
@ -5918,13 +5918,13 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
}
}
fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result = try self.resolveInst(un_op);
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
fn airBitCast(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand);
@ -5945,7 +5945,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@ -5963,7 +5963,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFloatFromInt for {}", .{
self.target.cpu.arch,
@ -5971,7 +5971,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void {
fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airIntFromFloat for {}", .{
self.target.cpu.arch,
@ -5979,7 +5979,7 @@ fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
fn airCmpxchg(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
_ = extra;
@ -5989,23 +5989,23 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
});
}
fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void {
fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) InnerError!void {
_ = inst;
return self.fail("TODO implement airCmpxchg for {}", .{self.target.cpu.arch});
}
fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void {
fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) InnerError!void {
_ = inst;
return self.fail("TODO implement airAtomicLoad for {}", .{self.target.cpu.arch});
}
fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void {
fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) InnerError!void {
_ = inst;
_ = order;
return self.fail("TODO implement airAtomicStore for {}", .{self.target.cpu.arch});
}
fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) InnerError!void {
_ = inst;
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
@ -6015,12 +6015,12 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
return self.fail("TODO implement airMemset for {}", .{self.target.cpu.arch});
}
fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
fn airMemcpy(self: *Self, inst: Air.Inst.Index) InnerError!void {
_ = inst;
return self.fail("TODO implement airMemcpy for {}", .{self.target.cpu.arch});
}
fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
fn airTagName(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
@ -6030,7 +6030,7 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
fn airErrorName(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
@ -6040,33 +6040,33 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
fn airSplat(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSplat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
fn airSelect(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSelect for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs });
}
fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
fn airShuffle(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airShuffle for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ extra.a, extra.b, .none });
}
fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
fn airReduce(self: *Self, inst: Air.Inst.Index) InnerError!void {
const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airReduce for aarch64", .{});
return self.finishAir(inst, result, .{ reduce.operand, .none, .none });
}
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const vector_ty = self.typeOfIndex(inst);
@ -6090,19 +6090,19 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
return bt.finishAir(result);
}
fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
fn airUnionInit(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
_ = extra;
return self.fail("TODO implement airUnionInit for aarch64", .{});
}
fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void {
fn airPrefetch(self: *Self, inst: Air.Inst.Index) InnerError!void {
const prefetch = self.air.instructions.items(.data)[@intFromEnum(inst)].prefetch;
return self.finishAir(inst, MCValue.dead, .{ prefetch.ptr, .none, .none });
}
fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
fn airMulAdd(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
@ -6111,7 +6111,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand });
}
fn airTry(self: *Self, inst: Air.Inst.Index) !void {
fn airTry(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Try, pl_op.payload);
@ -6139,7 +6139,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ pl_op.operand, .none, .none });
}
fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airTryPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];

View File

@ -245,7 +245,7 @@ const DbgInfoReloc = struct {
name: [:0]const u8,
mcv: MCValue,
fn genDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
fn genDbgInfo(reloc: DbgInfoReloc, function: Self) CodeGenError!void {
switch (reloc.tag) {
.arg,
.dbg_arg_inline,
@ -259,7 +259,7 @@ const DbgInfoReloc = struct {
}
}
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) CodeGenError!void {
switch (function.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.Loc = switch (reloc.mcv) {
@ -287,7 +287,7 @@ const DbgInfoReloc = struct {
}
}
fn genVarDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
fn genVarDbgInfo(reloc: DbgInfoReloc, function: Self) CodeGenError!void {
switch (function.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.Loc = switch (reloc.mcv) {

View File

@ -6,7 +6,6 @@ const assert = std.debug.assert;
const testing = std.testing;
const leb = std.leb;
const mem = std.mem;
const wasm = std.wasm;
const log = std.log.scoped(.codegen);
const codegen = @import("../../codegen.zig");
@ -55,22 +54,19 @@ const WValue = union(enum) {
float32: f32,
/// A constant 64bit float value
float64: f64,
/// A value that represents a pointer to the data section
/// Note: The value contains the symbol index, rather than the actual address
/// as we use this to perform the relocation.
memory: u32,
/// A value that represents a pointer to the data section.
memory: InternPool.Index,
/// A value that represents a parent pointer and an offset
/// from that pointer. i.e. when slicing with constant values.
memory_offset: struct {
/// The symbol of the parent pointer
pointer: u32,
pointer: InternPool.Index,
/// Offset will be set as addend when relocating
offset: u32,
},
/// Represents a function pointer
/// In wasm function pointers are indexes into a function table,
/// rather than an address in the data section.
function_index: u32,
function_index: InternPool.Index,
/// Offset from the bottom of the virtual stack, with the offset
/// pointing to where the value lives.
stack_offset: struct {
@ -119,7 +115,7 @@ const WValue = union(enum) {
if (local_value < reserved + 2) return; // reserved locals may never be re-used. Also accounts for 2 stack locals.
const index = local_value - reserved;
const valtype = @as(wasm.Valtype, @enumFromInt(gen.locals.items[index]));
const valtype: std.wasm.Valtype = @enumFromInt(gen.locals.items[index]);
switch (valtype) {
.i32 => gen.free_locals_i32.append(gen.gpa, local_value) catch return, // It's ok to fail any of those, a new local can be allocated instead
.i64 => gen.free_locals_i64.append(gen.gpa, local_value) catch return,
@ -132,8 +128,6 @@ const WValue = union(enum) {
}
};
/// Wasm ops, but without input/output/signedness information
/// Used for `buildOpcode`
const Op = enum {
@"unreachable",
nop,
@ -200,70 +194,42 @@ const Op = enum {
extend,
};
/// Contains the settings needed to create an `Opcode` using `buildOpcode`.
///
/// The fields correspond to the opcode name. Here is an example
/// i32_trunc_f32_s
/// ^ ^ ^ ^
/// | | | |
/// valtype1 | | |
/// = .i32 | | |
/// | | |
/// op | |
/// = .trunc | |
/// | |
/// valtype2 |
/// = .f32 |
/// |
/// width |
/// = null |
/// |
/// signed
/// = true
///
/// There can be missing fields, here are some more examples:
/// i64_load8_u
/// --> .{ .valtype1 = .i64, .op = .load, .width = 8, signed = false }
/// i32_mul
/// --> .{ .valtype1 = .i32, .op = .mul }
/// nop
/// --> .{ .op = .nop }
const OpcodeBuildArguments = struct {
/// First valtype in the opcode (usually represents the type of the output)
valtype1: ?wasm.Valtype = null,
valtype1: ?std.wasm.Valtype = null,
/// The operation (e.g. call, unreachable, div, min, sqrt, etc.)
op: Op,
/// Width of the operation (e.g. 8 for i32_load8_s, 16 for i64_extend16_i32_s)
width: ?u8 = null,
/// Second valtype in the opcode name (usually represents the type of the input)
valtype2: ?wasm.Valtype = null,
valtype2: ?std.wasm.Valtype = null,
/// Signedness of the op
signedness: ?std.builtin.Signedness = null,
};
/// Helper function that builds an Opcode given the arguments needed
fn buildOpcode(args: OpcodeBuildArguments) wasm.Opcode {
/// TODO: deprecated, should be split up per tag.
fn buildOpcode(args: OpcodeBuildArguments) std.wasm.Opcode {
switch (args.op) {
.@"unreachable" => return .@"unreachable",
.nop => return .nop,
.block => return .block,
.loop => return .loop,
.@"if" => return .@"if",
.@"else" => return .@"else",
.end => return .end,
.br => return .br,
.br_if => return .br_if,
.br_table => return .br_table,
.@"return" => return .@"return",
.call => return .call,
.call_indirect => return .call_indirect,
.drop => return .drop,
.select => return .select,
.local_get => return .local_get,
.local_set => return .local_set,
.local_tee => return .local_tee,
.global_get => return .global_get,
.global_set => return .global_set,
.@"unreachable" => unreachable,
.nop => unreachable,
.block => unreachable,
.loop => unreachable,
.@"if" => unreachable,
.@"else" => unreachable,
.end => unreachable,
.br => unreachable,
.br_if => unreachable,
.br_table => unreachable,
.@"return" => unreachable,
.call => unreachable,
.call_indirect => unreachable,
.drop => unreachable,
.select => unreachable,
.local_get => unreachable,
.local_set => unreachable,
.local_tee => unreachable,
.global_get => unreachable,
.global_set => unreachable,
.load => if (args.width) |width| switch (width) {
8 => switch (args.valtype1.?) {
@ -626,11 +592,11 @@ test "Wasm - buildOpcode" {
const i64_extend32_s = buildOpcode(.{ .op = .extend, .valtype1 = .i64, .width = 32, .signedness = .signed });
const f64_reinterpret_i64 = buildOpcode(.{ .op = .reinterpret, .valtype1 = .f64, .valtype2 = .i64 });
try testing.expectEqual(@as(wasm.Opcode, .i32_const), i32_const);
try testing.expectEqual(@as(wasm.Opcode, .end), end);
try testing.expectEqual(@as(wasm.Opcode, .local_get), local_get);
try testing.expectEqual(@as(wasm.Opcode, .i64_extend32_s), i64_extend32_s);
try testing.expectEqual(@as(wasm.Opcode, .f64_reinterpret_i64), f64_reinterpret_i64);
try testing.expectEqual(@as(std.wasm.Opcode, .i32_const), i32_const);
try testing.expectEqual(@as(std.wasm.Opcode, .end), end);
try testing.expectEqual(@as(std.wasm.Opcode, .local_get), local_get);
try testing.expectEqual(@as(std.wasm.Opcode, .i64_extend32_s), i64_extend32_s);
try testing.expectEqual(@as(std.wasm.Opcode, .f64_reinterpret_i64), f64_reinterpret_i64);
}
/// Hashmap to store generated `WValue` for each `Air.Inst.Ref`
@ -806,13 +772,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
// In the other cases, we will simply lower the constant to a value that fits
// into a single local (such as a pointer, integer, bool, etc).
const result: WValue = if (isByRef(ty, pt, func.target.*))
switch (try func.bin_file.lowerUav(pt, val.toIntern(), .none, func.src_loc)) {
.mcv => |mcv| .{ .memory = mcv.load_symbol },
.fail => |err_msg| {
func.err_msg = err_msg;
return error.CodegenFail;
},
}
.{ .memory = val.toIntern() }
else
try func.lowerConstant(val, ty);
@ -919,7 +879,7 @@ fn addTag(func: *CodeGen, tag: Mir.Inst.Tag) error{OutOfMemory}!void {
try func.addInst(.{ .tag = tag, .data = .{ .tag = {} } });
}
fn addExtended(func: *CodeGen, opcode: wasm.MiscOpcode) error{OutOfMemory}!void {
fn addExtended(func: *CodeGen, opcode: std.wasm.MiscOpcode) error{OutOfMemory}!void {
const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
try func.mir_extra.append(func.gpa, @intFromEnum(opcode));
try func.addInst(.{ .tag = .misc_prefix, .data = .{ .payload = extra_index } });
@ -929,6 +889,10 @@ fn addLabel(func: *CodeGen, tag: Mir.Inst.Tag, label: u32) error{OutOfMemory}!vo
try func.addInst(.{ .tag = tag, .data = .{ .label = label } });
}
fn addCallTagName(func: *CodeGen, ip_index: InternPool.Index) error{OutOfMemory}!void {
try func.addInst(.{ .tag = .call_tag_name, .data = .{ .ip_index = ip_index } });
}
/// Accepts an unsigned 32bit integer rather than a signed integer to
/// prevent us from having to bitcast multiple times as most values
/// within codegen are represented as unsigned rather than signed.
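// For example (a sketch, assuming `addImm32` takes a `u32` as documented
// above): lowering the signed constant -1 needs a single bitcast at the
// call site rather than one per producer:
//     try func.addImm32(@bitCast(@as(i32, -1)));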
@ -950,7 +914,7 @@ fn addImm128(func: *CodeGen, index: u32) error{OutOfMemory}!void {
const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
// tag + 128bit value
try func.mir_extra.ensureUnusedCapacity(func.gpa, 5);
func.mir_extra.appendAssumeCapacity(std.wasm.simdOpcode(.v128_const));
func.mir_extra.appendAssumeCapacity(@intFromEnum(std.wasm.SimdOpcode.v128_const));
func.mir_extra.appendSliceAssumeCapacity(@alignCast(mem.bytesAsSlice(u32, &simd_values)));
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
}
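// The resulting `mir_extra` layout is five u32s: the SIMD opcode followed by
// the 128-bit value reinterpreted as four words, all referenced through a
// single `.simd_prefix` instruction payload.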
@ -968,15 +932,15 @@ fn addMemArg(func: *CodeGen, tag: Mir.Inst.Tag, mem_arg: Mir.MemArg) error{OutOf
/// Inserts an instruction from the 'atomics' feature which accesses wasm's linear memory dependent on the
/// given `tag`.
fn addAtomicMemArg(func: *CodeGen, tag: wasm.AtomicsOpcode, mem_arg: Mir.MemArg) error{OutOfMemory}!void {
const extra_index = try func.addExtra(@as(struct { val: u32 }, .{ .val = wasm.atomicsOpcode(tag) }));
fn addAtomicMemArg(func: *CodeGen, tag: std.wasm.AtomicsOpcode, mem_arg: Mir.MemArg) error{OutOfMemory}!void {
const extra_index = try func.addExtra(@as(struct { val: u32 }, .{ .val = @intFromEnum(tag) }));
_ = try func.addExtra(mem_arg);
try func.addInst(.{ .tag = .atomics_prefix, .data = .{ .payload = extra_index } });
}
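// `mir_extra` now holds the atomics opcode followed by the flattened
// `Mir.MemArg` fields; `emitAtomic` is expected to decode them back in the
// same order.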
/// Helper function to emit atomic mir opcodes.
fn addAtomicTag(func: *CodeGen, tag: wasm.AtomicsOpcode) error{OutOfMemory}!void {
const extra_index = try func.addExtra(@as(struct { val: u32 }, .{ .val = wasm.atomicsOpcode(tag) }));
fn addAtomicTag(func: *CodeGen, tag: std.wasm.AtomicsOpcode) error{OutOfMemory}!void {
const extra_index = try func.addExtra(@as(struct { val: u32 }, .{ .val = @intFromEnum(tag) }));
try func.addInst(.{ .tag = .atomics_prefix, .data = .{ .payload = extra_index } });
}
@ -1003,7 +967,7 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32
}
/// Using a given `Type`, returns the corresponding valtype for .auto callconv
fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) wasm.Valtype {
fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) std.wasm.Valtype {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
return switch (ty.zigTypeTag(zcu)) {
@ -1044,7 +1008,7 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) wasm.Valtype {
/// Using a given `Type`, returns the byte representation of its wasm value type
fn genValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) u8 {
return wasm.valtype(typeToValtype(ty, pt, target));
return @intFromEnum(typeToValtype(ty, pt, target));
}
/// Using a given `Type`, returns the corresponding wasm value type
@ -1052,7 +1016,7 @@ fn genValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) u8 {
/// with no return type
fn genBlockType(ty: Type, pt: Zcu.PerThread, target: std.Target) u8 {
return switch (ty.ip_index) {
.void_type, .noreturn_type => wasm.block_empty,
.void_type, .noreturn_type => std.wasm.block_empty,
else => genValtype(ty, pt, target),
};
}
@ -1141,35 +1105,34 @@ fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
return .{ .local = .{ .value = initial_index, .references = 1 } };
}
/// Generates a `wasm.Type` from a given function type.
/// Memory is owned by the caller.
fn genFunctype(
gpa: Allocator,
wasm: *link.File.Wasm,
cc: std.builtin.CallingConvention,
params: []const InternPool.Index,
return_type: Type,
pt: Zcu.PerThread,
target: std.Target,
) !wasm.Type {
) !link.File.Wasm.FunctionType.Index {
const zcu = pt.zcu;
var temp_params = std.ArrayList(wasm.Valtype).init(gpa);
defer temp_params.deinit();
var returns = std.ArrayList(wasm.Valtype).init(gpa);
defer returns.deinit();
const gpa = zcu.gpa;
var temp_params: std.ArrayListUnmanaged(std.wasm.Valtype) = .empty;
defer temp_params.deinit(gpa);
var returns: std.ArrayListUnmanaged(std.wasm.Valtype) = .empty;
defer returns.deinit(gpa);
if (firstParamSRet(cc, return_type, pt, target)) {
try temp_params.append(.i32); // memory address is always a 32-bit handle
try temp_params.append(gpa, .i32); // memory address is always a 32-bit handle
} else if (return_type.hasRuntimeBitsIgnoreComptime(zcu)) {
if (cc == .wasm_watc) {
const res_classes = abi.classifyType(return_type, zcu);
assert(res_classes[0] == .direct and res_classes[1] == .none);
const scalar_type = abi.scalarType(return_type, zcu);
try returns.append(typeToValtype(scalar_type, pt, target));
try returns.append(gpa, typeToValtype(scalar_type, pt, target));
} else {
try returns.append(typeToValtype(return_type, pt, target));
try returns.append(gpa, typeToValtype(return_type, pt, target));
}
} else if (return_type.isError(zcu)) {
try returns.append(.i32);
try returns.append(gpa, .i32);
}
// param types
@ -1183,24 +1146,24 @@ fn genFunctype(
if (param_classes[1] == .none) {
if (param_classes[0] == .direct) {
const scalar_type = abi.scalarType(param_type, zcu);
try temp_params.append(typeToValtype(scalar_type, pt, target));
try temp_params.append(gpa, typeToValtype(scalar_type, pt, target));
} else {
try temp_params.append(typeToValtype(param_type, pt, target));
try temp_params.append(gpa, typeToValtype(param_type, pt, target));
}
} else {
// i128/f128
try temp_params.append(.i64);
try temp_params.append(.i64);
try temp_params.append(gpa, .i64);
try temp_params.append(gpa, .i64);
}
},
else => try temp_params.append(typeToValtype(param_type, pt, target)),
else => try temp_params.append(gpa, typeToValtype(param_type, pt, target)),
}
}
return wasm.Type{
.params = try temp_params.toOwnedSlice(),
.returns = try returns.toOwnedSlice(),
};
return wasm.addFuncType(.{
.params = try wasm.internValtypeList(temp_params.items),
.returns = try wasm.internValtypeList(returns.items),
});
}
pub fn generate(
@ -1244,14 +1207,13 @@ pub fn generate(
}
fn genFunc(func: *CodeGen) InnerError!void {
const wasm = func.bin_file;
const pt = func.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const fn_ty = zcu.navValue(func.owner_nav).typeOf(zcu);
const fn_info = zcu.typeToFunc(fn_ty).?;
var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target.*);
defer func_type.deinit(func.gpa);
_ = try func.bin_file.storeNavType(func.owner_nav, func_type);
const fn_ty_index = try genFunctype(wasm, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target.*);
var cc_result = try func.resolveCallingConventionValues(fn_ty);
defer cc_result.deinit(func.gpa);
@ -1273,7 +1235,8 @@ fn genFunc(func: *CodeGen) InnerError!void {
// In case we have a return value, but the last instruction is a noreturn (such as a while loop)
// we emit an unreachable instruction to tell the stack validator that part will never be reached.
if (func_type.returns.len != 0 and func.air.instructions.len > 0) {
const returns = fn_ty_index.ptr(wasm).returns.slice(wasm);
if (returns.len != 0 and func.air.instructions.len > 0) {
const inst: Air.Inst.Index = @enumFromInt(func.air.instructions.len - 1);
const last_inst_ty = func.typeOfIndex(inst);
if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(zcu) or last_inst_ty.isNoReturn(zcu)) {
@ -1291,7 +1254,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
var prologue = std.ArrayList(Mir.Inst).init(func.gpa);
defer prologue.deinit();
const sp = @intFromEnum(func.bin_file.zig_object.?.stack_pointer_sym);
const sp = @intFromEnum(wasm.zig_object.?.stack_pointer_sym);
// load stack pointer
try prologue.append(.{ .tag = .global_get, .data = .{ .label = sp } });
// store stack pointer so we can restore it when we return from the function
@ -1328,7 +1291,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
var emit: Emit = .{
.mir = mir,
.bin_file = func.bin_file,
.bin_file = wasm,
.code = func.code,
.locals = func.locals.items,
.owner_nav = func.owner_nav,
@ -1643,8 +1606,8 @@ fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
try func.addLabel(.local_set, offset.local.value);
// outer block to jump to when loop is done
try func.startBlock(.block, wasm.block_empty);
try func.startBlock(.loop, wasm.block_empty);
try func.startBlock(.block, std.wasm.block_empty);
try func.startBlock(.loop, std.wasm.block_empty);
// loop condition (offset == length -> break)
{
@ -1792,7 +1755,7 @@ const SimdStoreStrategy = enum {
/// features are enabled, the function will return `.direct`. This allows storing
/// it with a single instruction, rather than an unrolled version.
fn determineSimdStoreStrategy(ty: Type, zcu: *Zcu, target: std.Target) SimdStoreStrategy {
std.debug.assert(ty.zigTypeTag(zcu) == .vector);
assert(ty.zigTypeTag(zcu) == .vector);
if (ty.bitSize(zcu) != 128) return .unrolled;
const hasFeature = std.Target.wasm.featureSetHas;
const features = target.cpu.features;
@ -2186,10 +2149,11 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) InnerError!void {
const wasm = func.bin_file;
if (modifier == .always_tail) return func.fail("TODO implement tail calls for wasm", .{});
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = func.air.extraData(Air.Call, pl_op.payload);
const args = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[extra.end..][0..extra.data.args_len]));
const args: []const Air.Inst.Ref = @ptrCast(func.air.extra[extra.end..][0..extra.data.args_len]);
const ty = func.typeOf(pl_op.operand);
const pt = func.pt;
@ -2208,43 +2172,14 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const func_val = (try func.air.value(pl_op.operand, pt)) orelse break :blk null;
switch (ip.indexToKey(func_val.toIntern())) {
.func => |function| {
_ = try func.bin_file.getOrCreateAtomForNav(pt, function.owner_nav);
break :blk function.owner_nav;
},
.@"extern" => |@"extern"| {
const ext_nav = ip.getNav(@"extern".owner_nav);
const ext_info = zcu.typeToFunc(Type.fromInterned(@"extern".ty)).?;
var func_type = try genFunctype(
func.gpa,
ext_info.cc,
ext_info.param_types.get(ip),
Type.fromInterned(ext_info.return_type),
pt,
func.target.*,
);
defer func_type.deinit(func.gpa);
const atom_index = try func.bin_file.getOrCreateAtomForNav(pt, @"extern".owner_nav);
const atom = func.bin_file.getAtomPtr(atom_index);
const type_index = try func.bin_file.storeNavType(@"extern".owner_nav, func_type);
try func.bin_file.addOrUpdateImport(
ext_nav.name.toSlice(ip),
atom.sym_index,
@"extern".lib_name.toSlice(ip),
type_index,
);
break :blk @"extern".owner_nav;
},
inline .func, .@"extern" => |x| break :blk x.owner_nav,
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.nav => |nav| {
_ = try func.bin_file.getOrCreateAtomForNav(pt, nav);
break :blk nav;
},
.nav => |nav| break :blk nav,
else => {},
},
else => {},
}
return func.fail("Expected a function, but instead found '{s}'", .{@tagName(ip.indexToKey(func_val.toIntern()))});
return func.fail("unable to lower callee to a function index", .{});
};
const sret: WValue = if (first_param_sret) blk: {
@ -2262,21 +2197,17 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
try func.lowerArg(zcu.typeToFunc(fn_ty).?.cc, arg_ty, arg_val);
}
if (callee) |direct| {
const atom_index = func.bin_file.zig_object.?.navs.get(direct).?.atom;
try func.addLabel(.call, @intFromEnum(func.bin_file.getAtom(atom_index).sym_index));
if (callee) |nav_index| {
try func.addNav(.call_nav, nav_index);
} else {
// in this case we call a function pointer
// so load its value onto the stack
std.debug.assert(ty.zigTypeTag(zcu) == .pointer);
assert(ty.zigTypeTag(zcu) == .pointer);
const operand = try func.resolveInst(pl_op.operand);
try func.emitWValue(operand);
var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target.*);
defer fn_type.deinit(func.gpa);
const fn_type_index = try func.bin_file.zig_object.?.putOrGetFuncType(func.gpa, fn_type);
try func.addLabel(.call_indirect, fn_type_index);
const fn_type_index = try genFunctype(wasm, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target.*);
try func.addLabel(.call_indirect, @intFromEnum(fn_type_index));
}
const result_value = result_value: {
@ -2418,7 +2349,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
const extra_index: u32 = @intCast(func.mir_extra.items.len);
// stores as := opcode, offset, alignment (opcode::memarg)
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_store),
@intFromEnum(std.wasm.SimdOpcode.v128_store),
offset + lhs.offset(),
@intCast(ty.abiAlignment(zcu).toByteUnits() orelse 0),
});
@ -2533,7 +2464,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
// stores as := opcode, offset, alignment (opcode::memarg)
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_load),
@intFromEnum(std.wasm.SimdOpcode.v128_load),
offset + operand.offset(),
@intCast(ty.abiAlignment(zcu).toByteUnits().?),
});
@ -2664,7 +2595,7 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
}
}
const opcode: wasm.Opcode = buildOpcode(.{
const opcode: std.wasm.Opcode = buildOpcode(.{
.op = op,
.valtype1 = typeToValtype(ty, pt, func.target.*),
.signedness = if (ty.isSignedInt(zcu)) .signed else .unsigned,
@ -2988,7 +2919,7 @@ fn floatNeg(func: *CodeGen, ty: Type, arg: WValue) InnerError!WValue {
},
32, 64 => {
try func.emitWValue(arg);
const val_type: wasm.Valtype = if (float_bits == 32) .f32 else .f64;
const val_type: std.wasm.Valtype = if (float_bits == 32) .f32 else .f64;
const opcode = buildOpcode(.{ .op = .neg, .valtype1 = val_type });
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return .stack;
@ -3197,20 +3128,14 @@ fn lowerUavRef(
return .{ .imm32 = 0xaaaaaaaa };
}
const decl_align = zcu.intern_pool.indexToKey(uav.orig_ty).ptr_type.flags.alignment;
const res = try func.bin_file.lowerUav(pt, uav.val, decl_align, func.src_loc);
const target_sym_index = switch (res) {
.mcv => |mcv| mcv.load_symbol,
.fail => |err_msg| {
func.err_msg = err_msg;
return error.CodegenFail;
},
};
if (is_fn_body) {
return .{ .function_index = target_sym_index };
} else if (offset == 0) {
return .{ .memory = target_sym_index };
} else return .{ .memory_offset = .{ .pointer = target_sym_index, .offset = offset } };
return if (is_fn_body) .{
.function_index = uav.val,
} else if (offset == 0) .{
.memory = uav.val,
} else .{ .memory_offset = .{
.pointer = uav.val,
.offset = offset,
} };
}
fn lowerNavRef(func: *CodeGen, nav_index: InternPool.Nav.Index, offset: u32) InnerError!WValue {
@ -3334,13 +3259,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
.f64 => |f64_val| return .{ .float64 = f64_val },
else => unreachable,
},
.slice => switch (try func.bin_file.lowerUav(pt, val.toIntern(), .none, func.src_loc)) {
.mcv => |mcv| return .{ .memory = mcv.load_symbol },
.fail => |err_msg| {
func.err_msg = err_msg;
return error.CodegenFail;
},
},
.slice => return .{ .memory = val.toIntern() },
.ptr => return func.lowerPtr(val.toIntern(), 0),
.opt => if (ty.optionalReprIsPayload(zcu)) {
const pl_ty = ty.optionalChild(zcu);
@ -3489,12 +3408,12 @@ fn lowerBlock(func: *CodeGen, inst: Air.Inst.Index, block_ty: Type, body: []cons
const wasm_block_ty = genBlockType(block_ty, pt, func.target.*);
// if wasm_block_ty is non-empty, we create a register to store the temporary value
const block_result: WValue = if (wasm_block_ty != wasm.block_empty) blk: {
const block_result: WValue = if (wasm_block_ty != std.wasm.block_empty) blk: {
const ty: Type = if (isByRef(block_ty, pt, func.target.*)) Type.u32 else block_ty;
break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten
} else .none;
try func.startBlock(.block, wasm.block_empty);
try func.startBlock(.block, std.wasm.block_empty);
// Here we set the current block idx, so breaks know the depth to jump
// to when breaking out.
try func.blocks.putNoClobber(func.gpa, inst, .{
@ -3512,7 +3431,7 @@ fn lowerBlock(func: *CodeGen, inst: Air.Inst.Index, block_ty: Type, body: []cons
}
/// appends a new wasm block to the code section and increases the `block_depth` by 1
fn startBlock(func: *CodeGen, block_tag: wasm.Opcode, valtype: u8) !void {
fn startBlock(func: *CodeGen, block_tag: std.wasm.Opcode, valtype: u8) !void {
func.block_depth += 1;
try func.addInst(.{
.tag = Mir.Inst.Tag.fromOpcode(block_tag),
@ -3533,7 +3452,7 @@ fn airLoop(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// result type of loop is always 'noreturn', meaning we can always
// emit the wasm type 'block_empty'.
try func.startBlock(.loop, wasm.block_empty);
try func.startBlock(.loop, std.wasm.block_empty);
try func.loops.putNoClobber(func.gpa, inst, func.block_depth);
defer assert(func.loops.remove(inst));
@ -3553,7 +3472,7 @@ fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const liveness_condbr = func.liveness.getCondBr(inst);
// result type is always noreturn, so use `block_empty` as type.
try func.startBlock(.block, wasm.block_empty);
try func.startBlock(.block, std.wasm.block_empty);
// emit the conditional value
try func.emitWValue(condition);
@ -3632,7 +3551,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
try func.lowerToStack(lhs);
try func.lowerToStack(rhs);
const opcode: wasm.Opcode = buildOpcode(.{
const opcode: std.wasm.Opcode = buildOpcode(.{
.valtype1 = typeToValtype(ty, pt, func.target.*),
.op = switch (op) {
.lt => .lt,
@ -3674,7 +3593,7 @@ fn cmpFloat(func: *CodeGen, ty: Type, lhs: WValue, rhs: WValue, cmp_op: std.math
32, 64 => {
try func.emitWValue(lhs);
try func.emitWValue(rhs);
const val_type: wasm.Valtype = if (float_bits == 32) .f32 else .f64;
const val_type: std.wasm.Valtype = if (float_bits == 32) .f32 else .f64;
const opcode = buildOpcode(.{ .op = op, .valtype1 = val_type });
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return .stack;
@ -4053,7 +3972,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
const zcu = pt.zcu;
// result type is always 'noreturn'
const blocktype = wasm.block_empty;
const blocktype = std.wasm.block_empty;
const switch_br = func.air.unwrapSwitch(inst);
const target = try func.resolveInst(switch_br.operand);
const target_ty = func.typeOf(switch_br.operand);
@ -4245,7 +4164,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
return func.finishAir(inst, .none, &.{});
}
fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void {
fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode) InnerError!void {
const pt = func.pt;
const zcu = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@ -4467,7 +4386,7 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
} else return func.load(operand, wanted, 0);
}
fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
const pt = func.pt;
const zcu = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@ -4481,7 +4400,7 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind:
/// For a given type and operand, checks if it's considered `null`.
/// NOTE: Leaves the result on the stack
fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: std.wasm.Opcode) InnerError!WValue {
const pt = func.pt;
const zcu = pt.zcu;
try func.emitWValue(operand);
@ -4967,8 +4886,8 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue
try func.addLabel(.local_set, end_ptr.local.value);
// outer block to jump to when loop is done
try func.startBlock(.block, wasm.block_empty);
try func.startBlock(.loop, wasm.block_empty);
try func.startBlock(.block, std.wasm.block_empty);
try func.startBlock(.loop, std.wasm.block_empty);
// check for condition for loop end
try func.emitWValue(new_ptr);
@ -5022,11 +4941,11 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
} else {
std.debug.assert(array_ty.zigTypeTag(zcu) == .vector);
assert(array_ty.zigTypeTag(zcu) == .vector);
switch (index) {
inline .imm32, .imm64 => |lane| {
const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(zcu)) {
const opcode: std.wasm.SimdOpcode = switch (elem_ty.bitSize(zcu)) {
8 => if (elem_ty.isSignedInt(zcu)) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
16 => if (elem_ty.isSignedInt(zcu)) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
32 => if (elem_ty.isInt(zcu)) .i32x4_extract_lane else .f32x4_extract_lane,
@ -5034,7 +4953,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => unreachable,
};
var operands = [_]u32{ std.wasm.simdOpcode(opcode), @as(u8, @intCast(lane)) };
var operands = [_]u32{ @intFromEnum(opcode), @as(u8, @intCast(lane)) };
try func.emitWValue(array);
@ -5171,10 +5090,10 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// the scalar value onto the stack.
.stack_offset, .memory, .memory_offset => {
const opcode = switch (elem_ty.bitSize(zcu)) {
8 => std.wasm.simdOpcode(.v128_load8_splat),
16 => std.wasm.simdOpcode(.v128_load16_splat),
32 => std.wasm.simdOpcode(.v128_load32_splat),
64 => std.wasm.simdOpcode(.v128_load64_splat),
8 => @intFromEnum(std.wasm.SimdOpcode.v128_load8_splat),
16 => @intFromEnum(std.wasm.SimdOpcode.v128_load16_splat),
32 => @intFromEnum(std.wasm.SimdOpcode.v128_load32_splat),
64 => @intFromEnum(std.wasm.SimdOpcode.v128_load64_splat),
else => break :blk, // Cannot make use of simd-instructions
};
try func.emitWValue(operand);
@ -5191,10 +5110,10 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
.local => {
const opcode = switch (elem_ty.bitSize(zcu)) {
8 => std.wasm.simdOpcode(.i8x16_splat),
16 => std.wasm.simdOpcode(.i16x8_splat),
32 => if (elem_ty.isInt(zcu)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
64 => if (elem_ty.isInt(zcu)) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat),
8 => @intFromEnum(std.wasm.SimdOpcode.i8x16_splat),
16 => @intFromEnum(std.wasm.SimdOpcode.i16x8_splat),
32 => if (elem_ty.isInt(zcu)) @intFromEnum(std.wasm.SimdOpcode.i32x4_splat) else @intFromEnum(std.wasm.SimdOpcode.f32x4_splat),
64 => if (elem_ty.isInt(zcu)) @intFromEnum(std.wasm.SimdOpcode.i64x2_splat) else @intFromEnum(std.wasm.SimdOpcode.f64x2_splat),
else => break :blk, // Cannot make use of simd-instructions
};
try func.emitWValue(operand);
@ -5267,7 +5186,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
return func.finishAir(inst, result, &.{ extra.a, extra.b });
} else {
var operands = [_]u32{
std.wasm.simdOpcode(.i8x16_shuffle),
@intFromEnum(std.wasm.SimdOpcode.i8x16_shuffle),
} ++ [1]u32{undefined} ** 4;
var lanes = mem.asBytes(operands[1..]);
@ -5538,7 +5457,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
var result = try func.ensureAllocLocal(Type.i32);
defer result.free(func);
try func.startBlock(.block, wasm.block_empty);
try func.startBlock(.block, std.wasm.block_empty);
_ = try func.isNull(lhs, operand_ty, .i32_eq);
_ = try func.isNull(rhs, operand_ty, .i32_eq);
try func.addTag(.i32_ne); // inverse so we can exit early
@ -5678,7 +5597,7 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!
Type.f32,
&.{operand},
);
std.debug.assert(f32_result == .stack);
assert(f32_result == .stack);
if (wanted_bits == 64) {
try func.addTag(.f64_promote_f32);
@ -6557,7 +6476,7 @@ fn lowerTry(
if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
// Block we can jump out of when error is not set
try func.startBlock(.block, wasm.block_empty);
try func.startBlock(.block, std.wasm.block_empty);
// check if the error tag is set for the error union.
try func.emitWValue(err_union);
@ -7162,17 +7081,13 @@ fn callIntrinsic(
args: []const WValue,
) InnerError!WValue {
assert(param_types.len == args.len);
const symbol_index = func.bin_file.getGlobalSymbol(name, null) catch |err| {
return func.fail("Could not find or create global symbol '{s}'", .{@errorName(err)});
};
// Always pass via the C ABI
const wasm = func.bin_file;
const pt = func.pt;
const zcu = pt.zcu;
var func_type = try genFunctype(func.gpa, .{ .wasm_watc = .{} }, param_types, return_type, pt, func.target.*);
defer func_type.deinit(func.gpa);
const func_type_index = try func.bin_file.zig_object.?.putOrGetFuncType(func.gpa, func_type);
try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index);
const func_type_index = try genFunctype(wasm, .{ .wasm_watc = .{} }, param_types, return_type, pt, func.target.*);
const func_index = wasm.getOutputFunction(try wasm.internString(name), func_type_index);
// Always pass via the C ABI
const want_sret_param = firstParamSRet(.{ .wasm_watc = .{} }, return_type, pt, func.target.*);
// if we want return as first param, we allocate a pointer to stack,
@ -7191,7 +7106,7 @@ fn callIntrinsic(
}
// Actually call our intrinsic
try func.addLabel(.call, @intFromEnum(symbol_index));
try func.addLabel(.call_func, func_index);
if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) {
return .none;
@ -7210,177 +7125,14 @@ fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(un_op);
const enum_ty = func.typeOf(un_op);
const func_sym_index = try func.getTagNameFunction(enum_ty);
const result_ptr = try func.allocStack(func.typeOfIndex(inst));
try func.lowerToStack(result_ptr);
try func.emitWValue(operand);
try func.addLabel(.call, func_sym_index);
try func.addCallTagName(enum_ty.toIntern());
return func.finishAir(inst, result_ptr, &.{un_op});
}
fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
const pt = func.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
var arena_allocator = std.heap.ArenaAllocator.init(func.gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{ip.loadEnumType(enum_ty.toIntern()).name.fmt(ip)});
// check if we already generated code for this.
if (func.bin_file.findGlobalSymbol(func_name)) |loc| {
return @intFromEnum(loc.index);
}
const int_tag_ty = enum_ty.intTagType(zcu);
if (int_tag_ty.bitSize(zcu) > 64) {
return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{});
}
var relocs = std.ArrayList(link.File.Wasm.Relocation).init(func.gpa);
defer relocs.deinit();
var body_list = std.ArrayList(u8).init(func.gpa);
defer body_list.deinit();
var writer = body_list.writer();
// The locals of the function body (always 0)
try leb.writeUleb128(writer, @as(u32, 0));
// outer block
try writer.writeByte(std.wasm.opcode(.block));
try writer.writeByte(std.wasm.block_empty);
// TODO: Make switch implementation generic so we can use a jump table for this when the tags are not sparse.
// generate an if-else chain for each tag value as well as constant.
const tag_names = enum_ty.enumFields(zcu);
for (0..tag_names.len) |tag_index| {
const tag_name = tag_names.get(ip)[tag_index];
const tag_name_len = tag_name.length(ip);
// for each tag name, create an unnamed const,
// and then get a pointer to its value.
const name_ty = try pt.arrayType(.{
.len = tag_name_len,
.child = .u8_type,
.sentinel = .zero_u8,
});
const name_val = try pt.intern(.{ .aggregate = .{
.ty = name_ty.toIntern(),
.storage = .{ .bytes = tag_name.toString() },
} });
const tag_sym_index = switch (try func.bin_file.lowerUav(pt, name_val, .none, func.src_loc)) {
.mcv => |mcv| mcv.load_symbol,
.fail => |err_msg| {
func.err_msg = err_msg;
return error.CodegenFail;
},
};
// block for this if case
try writer.writeByte(std.wasm.opcode(.block));
try writer.writeByte(std.wasm.block_empty);
// get actual tag value (stored in 2nd parameter);
try writer.writeByte(std.wasm.opcode(.local_get));
try leb.writeUleb128(writer, @as(u32, 1));
const tag_val = try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index));
const tag_value = try func.lowerConstant(tag_val, enum_ty);
switch (tag_value) {
.imm32 => |value| {
try writer.writeByte(std.wasm.opcode(.i32_const));
try leb.writeIleb128(writer, @as(i32, @bitCast(value)));
try writer.writeByte(std.wasm.opcode(.i32_ne));
},
.imm64 => |value| {
try writer.writeByte(std.wasm.opcode(.i64_const));
try leb.writeIleb128(writer, @as(i64, @bitCast(value)));
try writer.writeByte(std.wasm.opcode(.i64_ne));
},
else => unreachable,
}
// if they're not equal, break out of current branch
try writer.writeByte(std.wasm.opcode(.br_if));
try leb.writeUleb128(writer, @as(u32, 0));
// store the address of the tagname in the pointer field of the slice
// get the address twice so we can also store the length.
try writer.writeByte(std.wasm.opcode(.local_get));
try leb.writeUleb128(writer, @as(u32, 0));
try writer.writeByte(std.wasm.opcode(.local_get));
try leb.writeUleb128(writer, @as(u32, 0));
// get address of tagname and emit a relocation to it
if (func.arch() == .wasm32) {
const encoded_alignment = @ctz(@as(u32, 4));
try writer.writeByte(std.wasm.opcode(.i32_const));
try relocs.append(.{
.relocation_type = .R_WASM_MEMORY_ADDR_LEB,
.offset = @as(u32, @intCast(body_list.items.len)),
.index = tag_sym_index,
});
try writer.writeAll(&[_]u8{0} ** 5); // will be relocated
// store pointer
try writer.writeByte(std.wasm.opcode(.i32_store));
try leb.writeUleb128(writer, encoded_alignment);
try leb.writeUleb128(writer, @as(u32, 0));
// store length
try writer.writeByte(std.wasm.opcode(.i32_const));
try leb.writeUleb128(writer, @as(u32, @intCast(tag_name_len)));
try writer.writeByte(std.wasm.opcode(.i32_store));
try leb.writeUleb128(writer, encoded_alignment);
try leb.writeUleb128(writer, @as(u32, 4));
} else {
const encoded_alignment = @ctz(@as(u32, 8));
try writer.writeByte(std.wasm.opcode(.i64_const));
try relocs.append(.{
.relocation_type = .R_WASM_MEMORY_ADDR_LEB64,
.offset = @as(u32, @intCast(body_list.items.len)),
.index = tag_sym_index,
});
try writer.writeAll(&[_]u8{0} ** 10); // will be relocated
// store pointer
try writer.writeByte(std.wasm.opcode(.i64_store));
try leb.writeUleb128(writer, encoded_alignment);
try leb.writeUleb128(writer, @as(u32, 0));
// store length
try writer.writeByte(std.wasm.opcode(.i64_const));
try leb.writeUleb128(writer, @as(u64, @intCast(tag_name_len)));
try writer.writeByte(std.wasm.opcode(.i64_store));
try leb.writeUleb128(writer, encoded_alignment);
try leb.writeUleb128(writer, @as(u32, 8));
}
// break outside blocks
try writer.writeByte(std.wasm.opcode(.br));
try leb.writeUleb128(writer, @as(u32, 1));
// end the block for this case
try writer.writeByte(std.wasm.opcode(.end));
}
try writer.writeByte(std.wasm.opcode(.@"unreachable")); // tag value does not have a name
// finish outer block
try writer.writeByte(std.wasm.opcode(.end));
// finish function body
try writer.writeByte(std.wasm.opcode(.end));
const slice_ty = Type.slice_const_u8_sentinel_0;
const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, pt, func.target.*);
const sym_index = try func.bin_file.createFunction(func_name, func_type, &body_list, &relocs);
return @intFromEnum(sym_index);
}
fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
const zcu = pt.zcu;
@ -7418,11 +7170,11 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
// start block for 'true' branch
try func.startBlock(.block, wasm.block_empty);
try func.startBlock(.block, std.wasm.block_empty);
// start block for 'false' branch
try func.startBlock(.block, wasm.block_empty);
try func.startBlock(.block, std.wasm.block_empty);
// block for the jump table itself
try func.startBlock(.block, wasm.block_empty);
try func.startBlock(.block, std.wasm.block_empty);
// lower operand to determine jump table target
try func.emitWValue(operand);
@ -7549,7 +7301,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.typeOfIndex(inst);
if (func.useAtomicFeature()) {
const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt.zcu)) {
const tag: std.wasm.AtomicsOpcode = switch (ty.abiSize(pt.zcu)) {
1 => .i32_atomic_load8_u,
2 => .i32_atomic_load16_u,
4 => .i32_atomic_load,
@ -7589,7 +7341,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const value = try tmp.toLocal(func, ty);
// create a loop to cmpxchg the new value
try func.startBlock(.loop, wasm.block_empty);
try func.startBlock(.loop, std.wasm.block_empty);
try func.emitWValue(ptr);
try func.emitWValue(value);
@ -7639,7 +7391,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => {
try func.emitWValue(ptr);
try func.emitWValue(operand);
const tag: wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) {
const tag: std.wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) {
1 => switch (op) {
.Xchg => .i32_atomic_rmw8_xchg_u,
.Add => .i32_atomic_rmw8_add_u,
@ -7754,7 +7506,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = ptr_ty.childType(zcu);
if (func.useAtomicFeature()) {
const tag: wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) {
const tag: std.wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) {
1 => .i32_atomic_store8,
2 => .i32_atomic_store16,
4 => .i32_atomic_store,

View File

@ -3,12 +3,13 @@
const Emit = @This();
const std = @import("std");
const leb128 = std.leb;
const Mir = @import("Mir.zig");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
const InternPool = @import("../../InternPool.zig");
const codegen = @import("../../codegen.zig");
const leb128 = std.leb;
/// Contains our list of instructions
mir: Mir,
@ -254,7 +255,8 @@ fn offset(self: Emit) u32 {
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
@branchHint(.cold);
std.debug.assert(emit.error_msg == null);
const comp = emit.bin_file.base.comp;
const wasm = emit.bin_file;
const comp = wasm.base.comp;
const zcu = comp.zcu.?;
const gpa = comp.gpa;
emit.error_msg = try Zcu.ErrorMsg.create(gpa, zcu.navSrcLoc(emit.owner_nav), format, args);
@ -287,7 +289,7 @@ fn emitBrTable(emit: *Emit, inst: Mir.Inst.Index) !void {
const labels = emit.mir.extra[extra.end..][0..extra.data.length];
const writer = emit.code.writer();
try emit.code.append(std.wasm.opcode(.br_table));
try emit.code.append(@intFromEnum(std.wasm.Opcode.br_table));
try leb128.writeUleb128(writer, extra.data.length - 1); // Default label is not part of length/depth
for (labels) |label| {
try leb128.writeUleb128(writer, label);
@ -301,7 +303,8 @@ fn emitLabel(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
}
fn emitGlobal(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
const comp = emit.bin_file.base.comp;
const wasm = emit.bin_file;
const comp = wasm.base.comp;
const gpa = comp.gpa;
const label = emit.mir.instructions.items(.data)[inst].label;
try emit.code.append(@intFromEnum(tag));
@ -310,38 +313,38 @@ fn emitGlobal(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
const global_offset = emit.offset();
try emit.code.appendSlice(&buf);
const atom_index = emit.bin_file.zig_object.?.navs.get(emit.owner_nav).?.atom;
const atom = emit.bin_file.getAtomPtr(atom_index);
try atom.relocs.append(gpa, .{
const zo = wasm.zig_object.?;
try zo.relocs.append(gpa, .{
.nav_index = emit.nav_index,
.index = label,
.offset = global_offset,
.relocation_type = .R_WASM_GLOBAL_INDEX_LEB,
.tag = .GLOBAL_INDEX_LEB,
});
}
fn emitImm32(emit: *Emit, inst: Mir.Inst.Index) !void {
const value: i32 = emit.mir.instructions.items(.data)[inst].imm32;
try emit.code.append(std.wasm.opcode(.i32_const));
try emit.code.append(@intFromEnum(std.wasm.Opcode.i32_const));
try leb128.writeIleb128(emit.code.writer(), value);
}
fn emitImm64(emit: *Emit, inst: Mir.Inst.Index) !void {
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const value = emit.mir.extraData(Mir.Imm64, extra_index);
try emit.code.append(std.wasm.opcode(.i64_const));
try emit.code.append(@intFromEnum(std.wasm.Opcode.i64_const));
try leb128.writeIleb128(emit.code.writer(), @as(i64, @bitCast(value.data.toU64())));
}
fn emitFloat32(emit: *Emit, inst: Mir.Inst.Index) !void {
const value: f32 = emit.mir.instructions.items(.data)[inst].float32;
try emit.code.append(std.wasm.opcode(.f32_const));
try emit.code.append(@intFromEnum(std.wasm.Opcode.f32_const));
try emit.code.writer().writeInt(u32, @bitCast(value), .little);
}
fn emitFloat64(emit: *Emit, inst: Mir.Inst.Index) !void {
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const value = emit.mir.extraData(Mir.Float64, extra_index);
try emit.code.append(std.wasm.opcode(.f64_const));
try emit.code.append(@intFromEnum(std.wasm.Opcode.f64_const));
try emit.code.writer().writeInt(u64, value.data.toU64(), .little);
}
@ -360,105 +363,99 @@ fn encodeMemArg(mem_arg: Mir.MemArg, writer: anytype) !void {
}
fn emitCall(emit: *Emit, inst: Mir.Inst.Index) !void {
const comp = emit.bin_file.base.comp;
const wasm = emit.bin_file;
const comp = wasm.base.comp;
const gpa = comp.gpa;
const label = emit.mir.instructions.items(.data)[inst].label;
try emit.code.append(std.wasm.opcode(.call));
try emit.code.append(@intFromEnum(std.wasm.Opcode.call));
const call_offset = emit.offset();
var buf: [5]u8 = undefined;
leb128.writeUnsignedFixed(5, &buf, label);
try emit.code.appendSlice(&buf);
if (label != 0) {
const atom_index = emit.bin_file.zig_object.?.navs.get(emit.owner_nav).?.atom;
const atom = emit.bin_file.getAtomPtr(atom_index);
try atom.relocs.append(gpa, .{
const zo = wasm.zig_object.?;
try zo.relocs.append(gpa, .{
.offset = call_offset,
.index = label,
.relocation_type = .R_WASM_FUNCTION_INDEX_LEB,
.tag = .FUNCTION_INDEX_LEB,
});
}
}
fn emitCallIndirect(emit: *Emit, inst: Mir.Inst.Index) !void {
const wasm = emit.bin_file;
const type_index = emit.mir.instructions.items(.data)[inst].label;
try emit.code.append(std.wasm.opcode(.call_indirect));
try emit.code.append(@intFromEnum(std.wasm.Opcode.call_indirect));
// NOTE: If we remove unused function types in the future for incremental
// linking, we must also emit a relocation for this `type_index`
const call_offset = emit.offset();
var buf: [5]u8 = undefined;
leb128.writeUnsignedFixed(5, &buf, type_index);
try emit.code.appendSlice(&buf);
if (type_index != 0) {
const atom_index = emit.bin_file.zig_object.?.navs.get(emit.owner_nav).?.atom;
const atom = emit.bin_file.getAtomPtr(atom_index);
try atom.relocs.append(emit.bin_file.base.comp.gpa, .{
const zo = wasm.zig_object.?;
try zo.relocs.append(wasm.base.comp.gpa, .{
.offset = call_offset,
.index = type_index,
.relocation_type = .R_WASM_TYPE_INDEX_LEB,
.tag = .TYPE_INDEX_LEB,
});
}
try leb128.writeUleb128(emit.code.writer(), @as(u32, 0)); // TODO: Emit relocation for table index
}
fn emitFunctionIndex(emit: *Emit, inst: Mir.Inst.Index) !void {
const comp = emit.bin_file.base.comp;
const wasm = emit.bin_file;
const comp = wasm.base.comp;
const gpa = comp.gpa;
const symbol_index = emit.mir.instructions.items(.data)[inst].label;
try emit.code.append(std.wasm.opcode(.i32_const));
try emit.code.append(@intFromEnum(std.wasm.Opcode.i32_const));
const index_offset = emit.offset();
var buf: [5]u8 = undefined;
leb128.writeUnsignedFixed(5, &buf, symbol_index);
try emit.code.appendSlice(&buf);
if (symbol_index != 0) {
const atom_index = emit.bin_file.zig_object.?.navs.get(emit.owner_nav).?.atom;
const atom = emit.bin_file.getAtomPtr(atom_index);
try atom.relocs.append(gpa, .{
const zo = wasm.zig_object.?;
try zo.relocs.append(gpa, .{
.offset = index_offset,
.index = symbol_index,
.relocation_type = .R_WASM_TABLE_INDEX_SLEB,
.tag = .TABLE_INDEX_SLEB,
});
}
}
fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
const wasm = emit.bin_file;
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const mem = emit.mir.extraData(Mir.Memory, extra_index).data;
const mem_offset = emit.offset() + 1;
const comp = emit.bin_file.base.comp;
const comp = wasm.base.comp;
const gpa = comp.gpa;
const target = comp.root_mod.resolved_target.result;
const is_wasm32 = target.cpu.arch == .wasm32;
if (is_wasm32) {
try emit.code.append(std.wasm.opcode(.i32_const));
try emit.code.append(@intFromEnum(std.wasm.Opcode.i32_const));
var buf: [5]u8 = undefined;
leb128.writeUnsignedFixed(5, &buf, mem.pointer);
try emit.code.appendSlice(&buf);
} else {
try emit.code.append(std.wasm.opcode(.i64_const));
try emit.code.append(@intFromEnum(std.wasm.Opcode.i64_const));
var buf: [10]u8 = undefined;
leb128.writeUnsignedFixed(10, &buf, mem.pointer);
try emit.code.appendSlice(&buf);
}
if (mem.pointer != 0) {
const atom_index = emit.bin_file.zig_object.?.navs.get(emit.owner_nav).?.atom;
const atom = emit.bin_file.getAtomPtr(atom_index);
try atom.relocs.append(gpa, .{
const zo = wasm.zig_object.?;
try zo.relocs.append(gpa, .{
.offset = mem_offset,
.index = mem.pointer,
.relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64,
.tag = if (is_wasm32) .MEMORY_ADDR_LEB else .MEMORY_ADDR_LEB64,
.addend = @as(i32, @intCast(mem.offset)),
});
}
}
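// Note the fixed-width `writeUnsignedFixed` forms used here and in `emitCall`
// reserve the maximum LEB128 size (5 bytes on wasm32, 10 on wasm64) so the
// linker can patch the relocated value in place without shifting any
// following bytes.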
fn emitExtended(emit: *Emit, inst: Mir.Inst.Index) !void {
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const opcode = emit.mir.extra[extra_index];
const writer = emit.code.writer();
try emit.code.append(std.wasm.opcode(.misc_prefix));
try emit.code.append(@intFromEnum(std.wasm.Opcode.misc_prefix));
try leb128.writeUleb128(writer, opcode);
switch (@as(std.wasm.MiscOpcode, @enumFromInt(opcode))) {
// bulk-memory opcodes
@ -497,7 +494,7 @@ fn emitSimd(emit: *Emit, inst: Mir.Inst.Index) !void {
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const opcode = emit.mir.extra[extra_index];
const writer = emit.code.writer();
try emit.code.append(std.wasm.opcode(.simd_prefix));
try emit.code.append(@intFromEnum(std.wasm.Opcode.simd_prefix));
try leb128.writeUleb128(writer, opcode);
switch (@as(std.wasm.SimdOpcode, @enumFromInt(opcode))) {
.v128_store,
@ -548,7 +545,7 @@ fn emitAtomic(emit: *Emit, inst: Mir.Inst.Index) !void {
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const opcode = emit.mir.extra[extra_index];
const writer = emit.code.writer();
try emit.code.append(std.wasm.opcode(.atomics_prefix));
try emit.code.append(@intFromEnum(std.wasm.Opcode.atomics_prefix));
try leb128.writeUleb128(writer, opcode);
switch (@as(std.wasm.AtomicsOpcode, @enumFromInt(opcode))) {
.i32_atomic_load,

View File

@ -7,6 +7,8 @@
//! and known jump labels for blocks.
const Mir = @This();
const InternPool = @import("../../InternPool.zig");
const Wasm = @import("../../link/Wasm.zig");
const std = @import("std");
@ -78,22 +80,23 @@ pub const Inst = struct {
///
/// Uses `nop`
@"return" = 0x0F,
/// Calls a function by its index
///
/// Uses `label`
call = 0x10,
/// Calls a function using `nav_index`.
call_nav,
/// Calls a function using `func_index`.
call_func,
/// Calls a function pointer by its function signature
/// and index into the function table.
///
/// Uses `label`
call_indirect = 0x11,
/// Calls a function by its index.
///
/// The function is the auto-generated tag name function for the type
/// provided in `ip_index`.
call_tag_name,
/// Contains a symbol to a function pointer
/// uses `label`
///
/// Note: This uses `0x16` as value which is reserved by the WebAssembly
/// specification but unused, meaning we must update this if the specification were to
/// use this value.
function_index = 0x16,
function_index,
/// Pops three values from the stack and pushes
/// the first or second value dependent on the third value.
/// Uses `tag`
@ -580,6 +583,10 @@ pub const Inst = struct {
///
/// Used by e.g. `br_table`
payload: u32,
ip_index: InternPool.Index,
nav_index: InternPool.Nav.Index,
func_index: Wasm.FunctionIndex,
};
};
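
Splitting `call` into `call_nav`, `call_func`, and `call_tag_name` is what lets lowering pick the encoding late: when linking an executable the callee's final index is known and can be written in its minimal form, while object output still gets a placeholder plus relocation. A hypothetical sketch of the executable path (the union field names follow `Inst.Data` above; everything else is illustrative):

    .call_func => {
        try code.append(@intFromEnum(std.wasm.Opcode.call));
        // Index is final here, so the shortest ULEB128 form is fine; emitting an
        // object file would instead write a max-width placeholder and a relocation.
        try std.leb.writeUleb128(code.writer(), @intFromEnum(data.func_index));
    },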

View File

@ -25,18 +25,19 @@ const Alignment = InternPool.Alignment;
const dev = @import("dev.zig");
pub const Result = union(enum) {
/// The `code` parameter passed to `generateSymbol` has the value ok.
/// The `code` parameter passed to `generateSymbol` has the value.
ok,
/// There was a codegen error.
fail: *ErrorMsg,
};
pub const CodeGenError = error{
OutOfMemory,
/// Compiler was asked to operate on a number larger than supported.
Overflow,
/// Indicates the error is already stored in Zcu `failed_codegen`.
CodegenFail,
} || link.File.UpdateDebugInfoError;
};
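
`CodegenFail` carries no payload by design: the message is recorded out of band before the error is returned, so every caller can treat it uniformly. A sketch of the convention, mirroring how the COFF backend uses it later in this commit:

    try zcu.failed_codegen.put(gpa, nav_index, try Zcu.ErrorMsg.create(
        gpa,
        zcu.navSrcLoc(nav_index),
        "unable to codegen: {s}",
        .{@errorName(err)},
    ));
    return error.CodegenFail;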
fn devFeatureForBackend(comptime backend: std.builtin.CompilerBackend) dev.Feature {
comptime assert(mem.startsWith(u8, @tagName(backend), "stage2_"));
@ -49,7 +50,6 @@ fn importBackend(comptime backend: std.builtin.CompilerBackend) type {
.stage2_arm => @import("arch/arm/CodeGen.zig"),
.stage2_riscv64 => @import("arch/riscv64/CodeGen.zig"),
.stage2_sparc64 => @import("arch/sparc64/CodeGen.zig"),
.stage2_wasm => @import("arch/wasm/CodeGen.zig"),
.stage2_x86_64 => @import("arch/x86_64/CodeGen.zig"),
else => unreachable,
};
@ -74,7 +74,6 @@ pub fn generateFunction(
.stage2_arm,
.stage2_riscv64,
.stage2_sparc64,
.stage2_wasm,
.stage2_x86_64,
=> |backend| {
dev.check(devFeatureForBackend(backend));
@ -96,9 +95,7 @@ pub fn generateLazyFunction(
const target = zcu.fileByIndex(file).mod.resolved_target.result;
switch (target_util.zigBackend(target, false)) {
else => unreachable,
inline .stage2_x86_64,
.stage2_riscv64,
=> |backend| {
inline .stage2_x86_64, .stage2_riscv64 => |backend| {
dev.check(devFeatureForBackend(backend));
return importBackend(backend).generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output);
},
@ -694,6 +691,7 @@ fn lowerUavRef(
offset: u64,
) CodeGenError!Result {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const target = lf.comp.root_mod.resolved_target.result;
@ -704,7 +702,7 @@ fn lowerUavRef(
const is_fn_body = uav_ty.zigTypeTag(zcu) == .@"fn";
if (!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) {
try code.appendNTimes(0xaa, ptr_width_bytes);
return Result.ok;
return .ok;
}
const uav_align = ip.indexToKey(uav.orig_ty).ptr_type.flags.alignment;
@ -714,6 +712,26 @@ fn lowerUavRef(
.fail => |em| return .{ .fail = em },
}
switch (lf.tag) {
.c => unreachable,
.spirv => unreachable,
.nvptx => unreachable,
.wasm => {
dev.check(link.File.Tag.wasm.devFeature());
const wasm = lf.cast(.wasm).?;
assert(reloc_parent == .none);
try wasm.relocations.append(gpa, .{
.tag = .uav_index,
.addend = @intCast(offset),
.offset = @intCast(code.items.len),
.pointee = .{ .uav_index = uav.val },
});
try code.appendNTimes(0, ptr_width_bytes);
return .ok;
},
else => {},
}
const vaddr = try lf.getUavVAddr(uav_val, .{
.parent = reloc_parent,
.offset = code.items.len,
@ -741,31 +759,52 @@ fn lowerNavRef(
) CodeGenError!Result {
_ = src_loc;
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const target = zcu.navFileScope(nav_index).mod.resolved_target.result;
const ptr_width = target.ptrBitWidth();
const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip));
const is_fn_body = nav_ty.zigTypeTag(zcu) == .@"fn";
if (!is_fn_body and !nav_ty.hasRuntimeBits(zcu)) {
try code.appendNTimes(0xaa, @divExact(ptr_width, 8));
try code.appendNTimes(0xaa, ptr_width_bytes);
return Result.ok;
}
switch (lf.tag) {
.c => unreachable,
.spirv => unreachable,
.nvptx => unreachable,
.wasm => {
dev.check(link.File.Tag.wasm.devFeature());
const wasm = lf.cast(.wasm).?;
assert(reloc_parent == .none);
try wasm.relocations.append(gpa, .{
.tag = .nav_index,
.addend = @intCast(offset),
.offset = @intCast(code.items.len),
.pointee = .{ .nav_index = nav_index },
});
try code.appendNTimes(0, ptr_width_bytes);
return .ok;
},
else => {},
}
const vaddr = try lf.getNavVAddr(pt, nav_index, .{
.parent = reloc_parent,
.offset = code.items.len,
.addend = @intCast(offset),
});
const endian = target.cpu.arch.endian();
switch (ptr_width) {
16 => mem.writeInt(u16, try code.addManyAsArray(2), @intCast(vaddr), endian),
32 => mem.writeInt(u32, try code.addManyAsArray(4), @intCast(vaddr), endian),
64 => mem.writeInt(u64, try code.addManyAsArray(8), vaddr, endian),
switch (ptr_width_bytes) {
2 => mem.writeInt(u16, try code.addManyAsArray(2), @intCast(vaddr), endian),
4 => mem.writeInt(u32, try code.addManyAsArray(4), @intCast(vaddr), endian),
8 => mem.writeInt(u64, try code.addManyAsArray(8), vaddr, endian),
else => unreachable,
}
return Result.ok;
return .ok;
}
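
On wasm, both `lowerUavRef` and `lowerNavRef` now append a `Wasm.Relocation` and zero-fill the pointer slot rather than asking the linker for a virtual address in the middle of codegen. A hypothetical sketch of the flush-time fixup this defers to (field and helper names are illustrative, not the commit's actual API):

    // binary: []u8, the in-memory output being flushed; addend assumed u32 here.
    for (wasm.relocations.items) |reloc| {
        const addr: u32 = switch (reloc.pointee) {
            .nav_index => |nav| wasm.navAddr(nav), // hypothetical address lookup
            .uav_index => |uav| wasm.uavAddr(uav),
        };
        std.mem.writeInt(u32, binary[reloc.offset..][0..4], addr +% reloc.addend, .little);
    }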
/// Helper struct to denote that the value is in memory but requires a linker relocation fixup:

View File

@ -1059,9 +1059,10 @@ pub const Object = struct {
lto: Compilation.Config.LtoMode,
};
pub fn emit(o: *Object, options: EmitOptions) !void {
pub fn emit(o: *Object, options: EmitOptions) error{ LinkFailure, OutOfMemory }!void {
const zcu = o.pt.zcu;
const comp = zcu.comp;
const diags = &comp.link_diags;
{
try o.genErrorNameTable();
@ -1223,27 +1224,30 @@ pub const Object = struct {
o.builder.clearAndFree();
if (options.pre_bc_path) |path| {
var file = try std.fs.cwd().createFile(path, .{});
var file = std.fs.cwd().createFile(path, .{}) catch |err|
return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) });
defer file.close();
const ptr: [*]const u8 = @ptrCast(bitcode.ptr);
try file.writeAll(ptr[0..(bitcode.len * 4)]);
file.writeAll(ptr[0..(bitcode.len * 4)]) catch |err|
return diags.fail("failed to write to '{s}': {s}", .{ path, @errorName(err) });
}
if (options.asm_path == null and options.bin_path == null and
options.post_ir_path == null and options.post_bc_path == null) return;
if (options.post_bc_path) |path| {
var file = try std.fs.cwd().createFileZ(path, .{});
var file = std.fs.cwd().createFileZ(path, .{}) catch |err|
return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) });
defer file.close();
const ptr: [*]const u8 = @ptrCast(bitcode.ptr);
try file.writeAll(ptr[0..(bitcode.len * 4)]);
file.writeAll(ptr[0..(bitcode.len * 4)]) catch |err|
return diags.fail("failed to write to '{s}': {s}", .{ path, @errorName(err) });
}
if (!build_options.have_llvm or !comp.config.use_lib_llvm) {
log.err("emitting without libllvm not implemented", .{});
return error.FailedToEmit;
return diags.fail("emitting without libllvm not implemented", .{});
}
initializeLLVMTarget(comp.root_mod.resolved_target.result.cpu.arch);
@ -1263,8 +1267,7 @@ pub const Object = struct {
var module: *llvm.Module = undefined;
if (context.parseBitcodeInContext2(bitcode_memory_buffer, &module).toBool() or context.getBrokenDebugInfo()) {
log.err("Failed to parse bitcode", .{});
return error.FailedToEmit;
return diags.fail("Failed to parse bitcode", .{});
}
break :emit .{ context, module };
};
@ -1274,12 +1277,7 @@ pub const Object = struct {
var error_message: [*:0]const u8 = undefined;
if (llvm.Target.getFromTriple(target_triple_sentinel, &target, &error_message).toBool()) {
defer llvm.disposeMessage(error_message);
log.err("LLVM failed to parse '{s}': {s}", .{
target_triple_sentinel,
error_message,
});
@panic("Invalid LLVM triple");
return diags.fail("LLVM failed to parse '{s}': {s}", .{ target_triple_sentinel, error_message });
}
const optimize_mode = comp.root_mod.optimize_mode;
@ -1374,10 +1372,9 @@ pub const Object = struct {
if (options.asm_path != null and options.bin_path != null) {
if (target_machine.emitToFile(module, &error_message, &lowered_options)) {
defer llvm.disposeMessage(error_message);
log.err("LLVM failed to emit bin={s} ir={s}: {s}", .{
return diags.fail("LLVM failed to emit bin={s} ir={s}: {s}", .{
emit_bin_msg, post_llvm_ir_msg, error_message,
});
return error.FailedToEmit;
}
lowered_options.bin_filename = null;
lowered_options.llvm_ir_filename = null;
@ -1386,11 +1383,9 @@ pub const Object = struct {
lowered_options.asm_filename = options.asm_path;
if (target_machine.emitToFile(module, &error_message, &lowered_options)) {
defer llvm.disposeMessage(error_message);
log.err("LLVM failed to emit asm={s} bin={s} ir={s} bc={s}: {s}", .{
emit_asm_msg, emit_bin_msg, post_llvm_ir_msg, post_llvm_bc_msg,
error_message,
return diags.fail("LLVM failed to emit asm={s} bin={s} ir={s} bc={s}: {s}", .{
emit_asm_msg, emit_bin_msg, post_llvm_ir_msg, post_llvm_bc_msg, error_message,
});
return error.FailedToEmit;
}
}
@ -1967,11 +1962,6 @@ pub const Object = struct {
}
}
pub fn freeDecl(self: *Object, decl_index: InternPool.DeclIndex) void {
const global = self.decl_map.get(decl_index) orelse return;
global.delete(&self.builder);
}
fn getDebugFile(o: *Object, file_index: Zcu.File.Index) Allocator.Error!Builder.Metadata {
const gpa = o.gpa;
const gop = try o.debug_file_map.getOrPut(gpa, file_index);

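Throughout `emit`, fallible I/O and LLVM calls now funnel into `diags.fail` instead of `log.err` plus ad-hoc error values, which is what allows the return type to shrink to `error{ LinkFailure, OutOfMemory }`. A sketch of the helper's assumed shape (the recording call is an assumption, not quoted API):

    pub fn fail(diags: *Diags, comptime format: []const u8, args: anytype) error{LinkFailure} {
        // Record the formatted message for the compilation's error report, then
        // collapse every link-time failure into the single LinkFailure value.
        diags.addError(format, args);
        return error.LinkFailure;
    }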
View File

@ -633,42 +633,15 @@ pub const File = struct {
pub const FlushDebugInfoError = Dwarf.FlushError;
pub const UpdateNavError = error{
OutOfMemory,
Overflow,
Underflow,
FileTooBig,
InputOutput,
FilesOpenedWithWrongFlags,
IsDir,
NoSpaceLeft,
Unseekable,
PermissionDenied,
SwapFile,
CorruptedData,
SystemResources,
OperationAborted,
BrokenPipe,
ConnectionResetByPeer,
ConnectionTimedOut,
SocketNotConnected,
NotOpenForReading,
WouldBlock,
Canceled,
AccessDenied,
Unexpected,
DiskQuota,
NotOpenForWriting,
AnalysisFail,
OutOfMemory,
/// Indicates the error is already reported and stored in
/// `failed_codegen` on the Zcu.
CodegenFail,
EmitFail,
NameTooLong,
CurrentWorkingDirectoryUnlinked,
LockViolation,
NetNameDeleted,
DeviceBusy,
InvalidArgument,
HotSwapUnavailableOnHostOperatingSystem,
} || UpdateDebugInfoError;
/// Indicates the error is already reported and stored in `link_diags`
/// on the Compilation.
LinkFailure,
};
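
With `UpdateNavError` collapsed to three members, operating-system errors must be converted where they occur instead of being forwarded through every signature. A representative conversion, in the style the rest of this commit uses:

    file.pwriteAll(code, file_offset) catch |err|
        return diags.fail("failed to write output: {s}", .{@errorName(err)});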
/// Called from within CodeGen to retrieve the symbol index of a global symbol.
/// If no symbol exists yet with this name, a new undefined global symbol will
@ -771,83 +744,11 @@ pub const File = struct {
}
}
/// TODO audit this error set. most of these should be collapsed into one error,
/// and Diags.Flags should be updated to convey the meaning to the user.
pub const FlushError = error{
CacheCheckFailed,
CurrentWorkingDirectoryUnlinked,
DivisionByZero,
DllImportLibraryNotFound,
ExpectedFuncType,
FailedToEmit,
FileSystem,
FilesOpenedWithWrongFlags,
/// Deprecated. Use `LinkFailure` instead.
/// Formerly used to indicate an error will be present in `Compilation.link_errors`.
FlushFailure,
/// Indicates an error will be present in `Compilation.link_errors`.
LinkFailure,
FunctionSignatureMismatch,
GlobalTypeMismatch,
HotSwapUnavailableOnHostOperatingSystem,
InvalidCharacter,
InvalidEntryKind,
InvalidFeatureSet,
InvalidFormat,
InvalidIndex,
InvalidInitFunc,
InvalidMagicByte,
InvalidWasmVersion,
LLDCrashed,
LLDReportedFailure,
LLD_LinkingIsTODO_ForSpirV,
LibCInstallationMissingCrtDir,
LibCInstallationNotAvailable,
LinkingWithoutZigSourceUnimplemented,
MalformedArchive,
MalformedDwarf,
MalformedSection,
MemoryTooBig,
MemoryTooSmall,
MissAlignment,
MissingEndForBody,
MissingEndForExpression,
MissingSymbol,
MissingTableSymbols,
ModuleNameMismatch,
NoObjectsToLink,
NotObjectFile,
NotSupported,
OutOfMemory,
Overflow,
PermissionDenied,
StreamTooLong,
SwapFile,
SymbolCollision,
SymbolMismatchingType,
TODOImplementPlan9Objs,
TODOImplementWritingLibFiles,
UnableToSpawnSelf,
UnableToSpawnWasm,
UnableToWriteArchive,
UndefinedLocal,
UndefinedSymbol,
Underflow,
UnexpectedRemainder,
UnexpectedTable,
UnexpectedValue,
UnknownFeature,
UnrecognizedVolume,
Unseekable,
UnsupportedCpuArchitecture,
UnsupportedVersion,
UnexpectedEndOfFile,
} ||
fs.File.WriteFileError ||
fs.File.OpenError ||
std.process.Child.SpawnError ||
fs.Dir.CopyFileError ||
FlushDebugInfoError;
};
/// Commit pending changes and write headers. Takes into account final output mode
/// and `use_lld`, not only `effectiveOutputMode`.
@ -864,7 +765,12 @@ pub const File = struct {
assert(comp.c_object_table.count() == 1);
const the_key = comp.c_object_table.keys()[0];
const cached_pp_file_path = the_key.status.success.object_path;
try cached_pp_file_path.root_dir.handle.copyFile(cached_pp_file_path.sub_path, emit.root_dir.handle, emit.sub_path, .{});
cached_pp_file_path.root_dir.handle.copyFile(cached_pp_file_path.sub_path, emit.root_dir.handle, emit.sub_path, .{}) catch |err| {
const diags = &base.comp.link_diags;
return diags.fail("failed to copy '{'}' to '{'}': {s}", .{
@as(Path, cached_pp_file_path), @as(Path, emit), @errorName(err),
});
};
return;
}
@ -893,16 +799,6 @@ pub const File = struct {
}
}
/// Called when a Decl is deleted from the Zcu.
pub fn freeDecl(base: *File, decl_index: InternPool.DeclIndex) void {
switch (base.tag) {
inline else => |tag| {
dev.check(tag.devFeature());
@as(*tag.Type(), @fieldParentPtr("base", base)).freeDecl(decl_index);
},
}
}
pub const UpdateExportsError = error{
OutOfMemory,
AnalysisFail,
@ -932,6 +828,7 @@ pub const File = struct {
addend: u32,
pub const Parent = union(enum) {
none,
atom_index: u32,
debug_output: DebugInfoOutput,
};
@ -948,6 +845,7 @@ pub const File = struct {
.c => unreachable,
.spirv => unreachable,
.nvptx => unreachable,
.wasm => unreachable,
inline else => |tag| {
dev.check(tag.devFeature());
return @as(*tag.Type(), @fieldParentPtr("base", base)).getNavVAddr(pt, nav_index, reloc_info);
@ -966,6 +864,7 @@ pub const File = struct {
.c => unreachable,
.spirv => unreachable,
.nvptx => unreachable,
.wasm => unreachable,
inline else => |tag| {
dev.check(tag.devFeature());
return @as(*tag.Type(), @fieldParentPtr("base", base)).lowerUav(pt, decl_val, decl_align, src_loc);
@ -978,6 +877,7 @@ pub const File = struct {
.c => unreachable,
.spirv => unreachable,
.nvptx => unreachable,
.wasm => unreachable,
inline else => |tag| {
dev.check(tag.devFeature());
return @as(*tag.Type(), @fieldParentPtr("base", base)).getUavVAddr(decl_val, reloc_info);
@ -1099,6 +999,26 @@ pub const File = struct {
}
}
/// Called when all linker inputs have been sent via `loadInput`. After
/// this, `loadInput` will not be called anymore.
pub fn prelink(base: *File) FlushError!void {
const use_lld = build_options.have_llvm and base.comp.config.use_lld;
if (use_lld) return;
// In this case, an object file is created by the LLVM backend, so
// there is no prelink phase. The Zig code is linked as a standard
// object along with the others.
if (base.zcu_object_sub_path != null) return;
switch (base.tag) {
inline .wasm => |tag| {
dev.check(tag.devFeature());
return @as(*tag.Type(), @fieldParentPtr("base", base)).prelink();
},
else => {},
}
}
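
A hedged sketch of where `prelink` sits in the driver's sequencing (caller names illustrative):

    for (link_inputs) |input| try base.loadInput(input); // objects, archives, shared objects
    try base.prelink(); // all inputs parsed into the compact in-memory model
    // Only now are Zcu updates (updateFunc/updateNav) and the final flush sent.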
pub fn linkAsArchive(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void {
dev.check(.lld_linker);

View File

@ -175,21 +175,13 @@ pub fn deinit(self: *C) void {
self.lazy_code_buf.deinit(gpa);
}
pub fn freeDecl(self: *C, decl_index: InternPool.DeclIndex) void {
const gpa = self.base.comp.gpa;
if (self.decl_table.fetchSwapRemove(decl_index)) |kv| {
var decl_block = kv.value;
decl_block.deinit(gpa);
}
}
pub fn updateFunc(
self: *C,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) !void {
) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
@ -313,7 +305,7 @@ fn updateUav(self: *C, pt: Zcu.PerThread, i: usize) !void {
};
}
pub fn updateNav(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
pub fn updateNav(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) link.File.UpdateNavError!void {
const tracy = trace(@src());
defer tracy.end();
@ -390,7 +382,7 @@ pub fn updateLineNumber(self: *C, pt: Zcu.PerThread, ti_id: InternPool.TrackedIn
_ = ti_id;
}
pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
return self.flushModule(arena, tid, prog_node);
}
@ -409,7 +401,7 @@ fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) {
return defines;
}
pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
_ = arena; // Has the same lifetime as the call to Compilation.update.
const tracy = trace(@src());
@ -419,6 +411,7 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
defer sub_prog_node.end();
const comp = self.base.comp;
const diags = &comp.link_diags;
const gpa = comp.gpa;
const zcu = self.base.comp.zcu.?;
const ip = &zcu.intern_pool;
@ -554,8 +547,10 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
}, self.getString(av_block.code));
const file = self.base.file.?;
try file.setEndPos(f.file_size);
try file.pwritevAll(f.all_buffers.items, 0);
file.setEndPos(f.file_size) catch |err| return diags.fail("failed to allocate file: {s}", .{@errorName(err)});
file.pwritevAll(f.all_buffers.items, 0) catch |err| return diags.fail("failed to write to '{'}': {s}", .{
self.base.emit, @errorName(err),
});
}
const Flush = struct {

View File

@ -408,7 +408,7 @@ pub fn createEmpty(
max_file_offset = header.pointer_to_raw_data + header.size_of_raw_data;
}
}
try coff.base.file.?.pwriteAll(&[_]u8{0}, max_file_offset);
try coff.pwriteAll(&[_]u8{0}, max_file_offset);
}
return coff;
@ -858,7 +858,7 @@ fn writeAtom(coff: *Coff, atom_index: Atom.Index, code: []u8) !void {
}
coff.resolveRelocs(atom_index, relocs.items, code, coff.image_base);
try coff.base.file.?.pwriteAll(code, file_offset);
try coff.pwriteAll(code, file_offset);
// Now we can mark the relocs as resolved.
while (relocs.popOrNull()) |reloc| {
@ -891,7 +891,7 @@ fn writeOffsetTableEntry(coff: *Coff, index: usize) !void {
const sect_id = coff.got_section_index.?;
if (coff.got_table_count_dirty) {
const needed_size = @as(u32, @intCast(coff.got_table.entries.items.len * coff.ptr_width.size()));
const needed_size: u32 = @intCast(coff.got_table.entries.items.len * coff.ptr_width.size());
try coff.growSection(sect_id, needed_size);
coff.got_table_count_dirty = false;
}
@ -908,13 +908,13 @@ fn writeOffsetTableEntry(coff: *Coff, index: usize) !void {
switch (coff.ptr_width) {
.p32 => {
var buf: [4]u8 = undefined;
mem.writeInt(u32, &buf, @as(u32, @intCast(entry_value + coff.image_base)), .little);
try coff.base.file.?.pwriteAll(&buf, file_offset);
mem.writeInt(u32, &buf, @intCast(entry_value + coff.image_base), .little);
try coff.pwriteAll(&buf, file_offset);
},
.p64 => {
var buf: [8]u8 = undefined;
mem.writeInt(u64, &buf, entry_value + coff.image_base, .little);
try coff.base.file.?.pwriteAll(&buf, file_offset);
try coff.pwriteAll(&buf, file_offset);
},
}
@ -1093,7 +1093,13 @@ fn freeAtom(coff: *Coff, atom_index: Atom.Index) void {
coff.getAtomPtr(atom_index).sym_index = 0;
}
pub fn updateFunc(coff: *Coff, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(
coff: *Coff,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .coff) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@ -1106,8 +1112,9 @@ pub fn updateFunc(coff: *Coff, pt: Zcu.PerThread, func_index: InternPool.Index,
const zcu = pt.zcu;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
const nav_index = func.owner_nav;
const atom_index = try coff.getOrCreateAtomForNav(func.owner_nav);
const atom_index = try coff.getOrCreateAtomForNav(nav_index);
coff.freeRelocations(atom_index);
coff.navs.getPtr(func.owner_nav).?.section = coff.text_section_index.?;
@ -1115,25 +1122,38 @@ pub fn updateFunc(coff: *Coff, pt: Zcu.PerThread, func_index: InternPool.Index,
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
const res = try codegen.generateFunction(
const res = codegen.generateFunction(
&coff.base,
pt,
zcu.navSrcLoc(func.owner_nav),
zcu.navSrcLoc(nav_index),
func_index,
air,
liveness,
&code_buffer,
.none,
);
) catch |err| switch (err) {
error.CodegenFail => return error.CodegenFail,
error.OutOfMemory => return error.OutOfMemory,
else => |e| {
try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create(
gpa,
zcu.navSrcLoc(nav_index),
"unable to codegen: {s}",
.{@errorName(e)},
));
try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .func = func_index }));
return error.CodegenFail;
},
};
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(zcu.gpa, func.owner_nav, em);
try zcu.failed_codegen.put(zcu.gpa, nav_index, em);
return;
},
};
try coff.updateNavCode(pt, func.owner_nav, code, .FUNCTION);
try coff.updateNavCode(pt, nav_index, code, .FUNCTION);
// Exports will be updated by `Zcu.processExports` after the update.
}
@ -1258,9 +1278,11 @@ fn updateLazySymbolAtom(
sym: link.File.LazySymbol,
atom_index: Atom.Index,
section_index: u16,
) !void {
) link.File.FlushError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const comp = coff.base.comp;
const gpa = comp.gpa;
const diags = &comp.link_diags;
var required_alignment: InternPool.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
@ -1276,7 +1298,7 @@ fn updateLazySymbolAtom(
const local_sym_index = atom.getSymbolIndex().?;
const src = Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded;
const res = try codegen.generateLazySymbol(
const res = codegen.generateLazySymbol(
&coff.base,
pt,
src,
@ -1285,7 +1307,10 @@ fn updateLazySymbolAtom(
&code_buffer,
.none,
.{ .atom_index = local_sym_index },
);
) catch |err| switch (err) {
error.CodegenFail => return error.LinkFailure,
else => |e| return diags.fail("failed to generate lazy symbol: {s}", .{@errorName(e)}),
};
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
@ -1387,7 +1412,7 @@ fn updateNavCode(
nav_index: InternPool.Nav.Index,
code: []u8,
complex_type: coff_util.ComplexType,
) !void {
) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
@ -1405,12 +1430,12 @@ fn updateNavCode(
const atom = coff.getAtom(atom_index);
const sym_index = atom.getSymbolIndex().?;
const sect_index = nav_metadata.section;
const code_len = @as(u32, @intCast(code.len));
const code_len: u32 = @intCast(code.len);
if (atom.size != 0) {
const sym = atom.getSymbolPtr(coff);
try coff.setSymbolName(sym, nav.fqn.toSlice(ip));
sym.section_number = @as(coff_util.SectionNumber, @enumFromInt(sect_index + 1));
sym.section_number = @enumFromInt(sect_index + 1);
sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
const capacity = atom.capacity(coff);
@ -1434,7 +1459,7 @@ fn updateNavCode(
} else {
const sym = atom.getSymbolPtr(coff);
try coff.setSymbolName(sym, nav.fqn.toSlice(ip));
sym.section_number = @as(coff_util.SectionNumber, @enumFromInt(sect_index + 1));
sym.section_number = @enumFromInt(sect_index + 1);
sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
const vaddr = try coff.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0));
@ -1453,7 +1478,6 @@ pub fn freeNav(coff: *Coff, nav_index: InternPool.NavIndex) void {
if (coff.llvm_object) |llvm_object| return llvm_object.freeNav(nav_index);
const gpa = coff.base.comp.gpa;
log.debug("freeDecl 0x{x}", .{nav_index});
if (coff.decls.fetchOrderedRemove(nav_index)) |const_kv| {
var kv = const_kv;
@ -1674,9 +1698,10 @@ pub fn flush(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: st
if (use_lld) {
return coff.linkWithLLD(arena, tid, prog_node);
}
const diags = &comp.link_diags;
switch (comp.config.output_mode) {
.Exe, .Obj => return coff.flushModule(arena, tid, prog_node),
.Lib => return error.TODOImplementWritingLibFiles,
.Lib => return diags.fail("writing lib files not yet implemented for COFF", .{}),
}
}
@ -2224,7 +2249,7 @@ pub fn flushModule(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
defer sub_prog_node.end();
const pt: Zcu.PerThread = .activate(
comp.zcu orelse return error.LinkingWithoutZigSourceUnimplemented,
comp.zcu orelse return diags.fail("linking without zig source is not yet implemented", .{}),
tid,
);
defer pt.deactivate();
@ -2232,24 +2257,18 @@ pub fn flushModule(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
if (coff.lazy_syms.getPtr(.anyerror_type)) |metadata| {
// Most lazy symbols can be updated on first use, but
// anyerror needs to wait for everything to be flushed.
if (metadata.text_state != .unused) coff.updateLazySymbolAtom(
if (metadata.text_state != .unused) try coff.updateLazySymbolAtom(
pt,
.{ .kind = .code, .ty = .anyerror_type },
metadata.text_atom,
coff.text_section_index.?,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
else => |e| e,
};
if (metadata.rdata_state != .unused) coff.updateLazySymbolAtom(
);
if (metadata.rdata_state != .unused) try coff.updateLazySymbolAtom(
pt,
.{ .kind = .const_data, .ty = .anyerror_type },
metadata.rdata_atom,
coff.rdata_section_index.?,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
else => |e| e,
};
);
}
for (coff.lazy_syms.values()) |*metadata| {
if (metadata.text_state != .unused) metadata.text_state = .flushed;
@ -2594,7 +2613,7 @@ fn writeBaseRelocations(coff: *Coff) !void {
const needed_size = @as(u32, @intCast(buffer.items.len));
try coff.growSection(coff.reloc_section_index.?, needed_size);
try coff.base.file.?.pwriteAll(buffer.items, header.pointer_to_raw_data);
try coff.pwriteAll(buffer.items, header.pointer_to_raw_data);
coff.data_directories[@intFromEnum(coff_util.DirectoryEntry.BASERELOC)] = .{
.virtual_address = header.virtual_address,
@ -2727,7 +2746,7 @@ fn writeImportTables(coff: *Coff) !void {
assert(dll_names_offset == needed_size);
try coff.base.file.?.pwriteAll(buffer.items, header.pointer_to_raw_data);
try coff.pwriteAll(buffer.items, header.pointer_to_raw_data);
coff.data_directories[@intFromEnum(coff_util.DirectoryEntry.IMPORT)] = .{
.virtual_address = header.virtual_address + iat_size,
@ -2741,20 +2760,22 @@ fn writeImportTables(coff: *Coff) !void {
coff.imports_count_dirty = false;
}
fn writeStrtab(coff: *Coff) !void {
fn writeStrtab(coff: *Coff) link.File.FlushError!void {
if (coff.strtab_offset == null) return;
const comp = coff.base.comp;
const gpa = comp.gpa;
const diags = &comp.link_diags;
const allocated_size = coff.allocatedSize(coff.strtab_offset.?);
const needed_size = @as(u32, @intCast(coff.strtab.buffer.items.len));
const needed_size: u32 = @intCast(coff.strtab.buffer.items.len);
if (needed_size > allocated_size) {
coff.strtab_offset = null;
coff.strtab_offset = @as(u32, @intCast(coff.findFreeSpace(needed_size, @alignOf(u32))));
coff.strtab_offset = @intCast(coff.findFreeSpace(needed_size, @alignOf(u32)));
}
log.debug("writing strtab from 0x{x} to 0x{x}", .{ coff.strtab_offset.?, coff.strtab_offset.? + needed_size });
const gpa = coff.base.comp.gpa;
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
try buffer.ensureTotalCapacityPrecise(needed_size);
@ -2763,17 +2784,19 @@ fn writeStrtab(coff: *Coff) !void {
// we write the length of the strtab to a temporary buffer that goes to file.
mem.writeInt(u32, buffer.items[0..4], @as(u32, @intCast(coff.strtab.buffer.items.len)), .little);
try coff.base.file.?.pwriteAll(buffer.items, coff.strtab_offset.?);
coff.pwriteAll(buffer.items, coff.strtab_offset.?) catch |err| {
return diags.fail("failed to write: {s}", .{@errorName(err)});
};
}
fn writeSectionHeaders(coff: *Coff) !void {
const offset = coff.getSectionHeadersOffset();
try coff.base.file.?.pwriteAll(mem.sliceAsBytes(coff.sections.items(.header)), offset);
try coff.pwriteAll(mem.sliceAsBytes(coff.sections.items(.header)), offset);
}
fn writeDataDirectoriesHeaders(coff: *Coff) !void {
const offset = coff.getDataDirectoryHeadersOffset();
try coff.base.file.?.pwriteAll(mem.sliceAsBytes(&coff.data_directories), offset);
try coff.pwriteAll(mem.sliceAsBytes(&coff.data_directories), offset);
}
fn writeHeader(coff: *Coff) !void {
@ -2913,7 +2936,7 @@ fn writeHeader(coff: *Coff) !void {
},
}
try coff.base.file.?.pwriteAll(buffer.items, 0);
try coff.pwriteAll(buffer.items, 0);
}
pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
@ -3710,6 +3733,14 @@ const ImportTable = struct {
const ImportIndex = u32;
};
fn pwriteAll(coff: *Coff, bytes: []const u8, offset: u64) error{LinkFailure}!void {
const comp = coff.base.comp;
const diags = &comp.link_diags;
coff.base.file.?.pwriteAll(bytes, offset) catch |err| {
return diags.fail("failed to write: {s}", .{@errorName(err)});
};
}
const Coff = @This();
const std = @import("std");

View File

@ -21,20 +21,10 @@ debug_rnglists: DebugRngLists,
debug_str: StringSection,
pub const UpdateError = error{
/// Indicates the error is already reported on `failed_codegen` in the Zcu.
CodegenFail,
ReinterpretDeclRef,
Unimplemented,
OutOfMemory,
EndOfStream,
Overflow,
Underflow,
UnexpectedEndOfFile,
} ||
std.fs.File.OpenError ||
std.fs.File.SetEndPosError ||
std.fs.File.CopyRangeError ||
std.fs.File.PReadError ||
std.fs.File.PWriteError;
};
pub const FlushError =
UpdateError ||

View File

@ -842,12 +842,12 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
.Exe => {},
}
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
// If we haven't already, create a linker-generated input file comprising of
// linker-defined synthetic symbols only such as `_DYNAMIC`, etc.
if (self.linker_defined_index == null) {
const index = @as(File.Index, @intCast(try self.files.addOne(gpa)));
const index: File.Index = @intCast(try self.files.addOne(gpa));
self.files.set(index, .{ .linker_defined = .{ .index = index } });
self.linker_defined_index = index;
const object = self.linkerDefinedPtr().?;
@ -878,7 +878,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
}
self.checkDuplicates() catch |err| switch (err) {
error.HasDuplicates => return error.FlushFailure,
error.HasDuplicates => return error.LinkFailure,
else => |e| return e,
};
@ -956,14 +956,14 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
return error.LinkFailure;
},
else => |e| return e,
};
try self.base.file.?.pwriteAll(code, file_offset);
}
if (has_reloc_errors) return error.FlushFailure;
if (has_reloc_errors) return error.LinkFailure;
}
try self.writePhdrTable();
@ -972,10 +972,10 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
try self.writeMergeSections();
self.writeSyntheticSections() catch |err| switch (err) {
error.RelocFailure => return error.FlushFailure,
error.RelocFailure => return error.LinkFailure,
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
return error.LinkFailure;
},
else => |e| return e,
};
@ -989,7 +989,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
try self.writeElfHeader();
}
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
}
fn dumpArgvInit(self: *Elf, arena: Allocator) !void {
@ -1389,7 +1389,7 @@ fn scanRelocs(self: *Elf) !void {
error.RelaxFailure => unreachable,
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
return error.LinkFailure;
},
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
@ -1400,7 +1400,7 @@ fn scanRelocs(self: *Elf) !void {
error.RelaxFailure => unreachable,
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
return error.LinkFailure;
},
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
@ -1409,7 +1409,7 @@ fn scanRelocs(self: *Elf) !void {
try self.reportUndefinedSymbols(&undefs);
if (has_reloc_errors) return error.FlushFailure;
if (has_reloc_errors) return error.LinkFailure;
if (self.zigObjectPtr()) |zo| {
try zo.asFile().createSymbolIndirection(self);
@ -2327,7 +2327,13 @@ pub fn freeNav(self: *Elf, nav: InternPool.Nav.Index) void {
return self.zigObjectPtr().?.freeNav(self, nav);
}
pub fn updateFunc(self: *Elf, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(
self: *Elf,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@ -2426,7 +2432,7 @@ pub fn addCommentString(self: *Elf) !void {
self.comment_merge_section_index = msec_index;
}
pub fn resolveMergeSections(self: *Elf) !void {
pub fn resolveMergeSections(self: *Elf) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();
@ -2441,7 +2447,7 @@ pub fn resolveMergeSections(self: *Elf) !void {
};
}
if (has_errors) return error.FlushFailure;
if (has_errors) return error.LinkFailure;
for (self.objects.items) |index| {
const object = self.file(index).?.object;
@ -3658,7 +3664,7 @@ fn writeAtoms(self: *Elf) !void {
atom_list.write(&buffer, &undefs, self) catch |err| switch (err) {
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
return error.LinkFailure;
},
error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
else => |e| return e,
@ -3666,7 +3672,7 @@ fn writeAtoms(self: *Elf) !void {
}
try self.reportUndefinedSymbols(&undefs);
if (has_reloc_errors) return error.FlushFailure;
if (has_reloc_errors) return error.LinkFailure;
if (self.requiresThunks()) {
for (self.thunks.items) |th| {

View File

@ -264,7 +264,7 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void {
}
}
pub fn flush(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void {
pub fn flush(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) link.File.FlushError!void {
// Handle any lazy symbols that were emitted by incremental compilation.
if (self.lazy_syms.getPtr(.anyerror_type)) |metadata| {
const pt: Zcu.PerThread = .activate(elf_file.base.comp.zcu.?, tid);
@ -278,7 +278,7 @@ pub fn flush(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void {
.{ .kind = .code, .ty = .anyerror_type },
metadata.text_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
error.CodegenFail => error.LinkFailure,
else => |e| e,
};
if (metadata.rodata_state != .unused) self.updateLazySymbol(
@ -287,7 +287,7 @@ pub fn flush(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void {
.{ .kind = .const_data, .ty = .anyerror_type },
metadata.rodata_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
error.CodegenFail => error.LinkFailure,
else => |e| e,
};
}
@ -933,6 +933,7 @@ pub fn getNavVAddr(
const this_sym = self.symbol(this_sym_index);
const vaddr = this_sym.address(.{}, elf_file);
switch (reloc_info.parent) {
.none => unreachable,
.atom_index => |atom_index| {
const parent_atom = self.symbol(atom_index).atom(elf_file).?;
const r_type = relocation.encode(.abs, elf_file.getTarget().cpu.arch);
@ -965,6 +966,7 @@ pub fn getUavVAddr(
const sym = self.symbol(sym_index);
const vaddr = sym.address(.{}, elf_file);
switch (reloc_info.parent) {
.none => unreachable,
.atom_index => |atom_index| {
const parent_atom = self.symbol(atom_index).atom(elf_file).?;
const r_type = relocation.encode(.abs, elf_file.getTarget().cpu.arch);
@ -1408,7 +1410,7 @@ pub fn updateFunc(
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) !void {
) link.File.UpdateNavError!void {
const tracy = trace(@src());
defer tracy.end();
@ -1615,7 +1617,7 @@ fn updateLazySymbol(
pt: Zcu.PerThread,
sym: link.File.LazySymbol,
symbol_index: Symbol.Index,
) !void {
) link.File.FlushError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;

View File

@ -2,7 +2,7 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) link.File.FlushError!v
const gpa = comp.gpa;
const diags = &comp.link_diags;
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
// First, we flush relocatable object file generated with our backends.
if (elf_file.zigObjectPtr()) |zig_object| {
@ -127,13 +127,13 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) link.File.FlushError!v
try elf_file.base.file.?.setEndPos(total_size);
try elf_file.base.file.?.pwriteAll(buffer.items, 0);
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
}
pub fn flushObject(elf_file: *Elf, comp: *Compilation) link.File.FlushError!void {
const diags = &comp.link_diags;
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
// Now, we are ready to resolve the symbols across all input files.
// We will first resolve the files in the ZigObject, next in the parsed
@ -179,7 +179,7 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation) link.File.FlushError!void
try elf_file.writeShdrTable();
try elf_file.writeElfHeader();
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
}
fn claimUnresolved(elf_file: *Elf) void {
@ -259,7 +259,7 @@ fn initComdatGroups(elf_file: *Elf) !void {
}
}
fn updateSectionSizes(elf_file: *Elf) !void {
fn updateSectionSizes(elf_file: *Elf) link.File.FlushError!void {
const slice = elf_file.sections.slice();
for (slice.items(.atom_list_2)) |*atom_list| {
if (atom_list.atoms.keys().len == 0) continue;

View File

@ -481,7 +481,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
}
};
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
{
const index = @as(File.Index, @intCast(try self.files.addOne(gpa)));
@ -501,7 +501,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
}
self.checkDuplicates() catch |err| switch (err) {
error.HasDuplicates => return error.FlushFailure,
error.HasDuplicates => return error.LinkFailure,
else => |e| return diags.fail("failed to check for duplicate symbol definitions: {s}", .{@errorName(e)}),
};
@ -516,7 +516,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
self.claimUnresolved();
self.scanRelocs() catch |err| switch (err) {
error.HasUndefinedSymbols => return error.FlushFailure,
error.HasUndefinedSymbols => return error.LinkFailure,
else => |e| return diags.fail("failed to scan relocations: {s}", .{@errorName(e)}),
};
@ -543,7 +543,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
if (self.getZigObject()) |zo| {
zo.resolveRelocs(self) catch |err| switch (err) {
error.ResolveFailed => return error.FlushFailure,
error.ResolveFailed => return error.LinkFailure,
else => |e| return e,
};
}
@ -2998,7 +2998,13 @@ pub fn writeCodeSignature(self: *MachO, code_sig: *CodeSignature) !void {
try self.base.file.?.pwriteAll(buffer.items, offset);
}
pub fn updateFunc(self: *MachO, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(
self: *MachO,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@ -3006,7 +3012,7 @@ pub fn updateFunc(self: *MachO, pt: Zcu.PerThread, func_index: InternPool.Index,
return self.getZigObject().?.updateFunc(self, pt, func_index, air, liveness);
}
pub fn updateNav(self: *MachO, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !void {
pub fn updateNav(self: *MachO, pt: Zcu.PerThread, nav: InternPool.Nav.Index) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}

View File

@ -560,7 +560,7 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id)
.{ .kind = .code, .ty = .anyerror_type },
metadata.text_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
error.CodegenFail => error.LinkFailure,
else => |e| e,
};
if (metadata.const_state != .unused) self.updateLazySymbol(
@ -569,7 +569,7 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id)
.{ .kind = .const_data, .ty = .anyerror_type },
metadata.const_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
error.CodegenFail => error.LinkFailure,
else => |e| e,
};
}

View File

@ -33,11 +33,11 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)});
}
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
try macho_file.parseInputFiles();
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
try macho_file.resolveSymbols();
try macho_file.dedupLiterals();
@ -93,11 +93,11 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)});
}
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
try parseInputFilesAr(macho_file);
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
// First, we flush relocatable object file generated with our backends.
if (macho_file.getZigObject()) |zo| {
@ -218,7 +218,7 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
try macho_file.base.file.?.setEndPos(total_size);
try macho_file.base.file.?.pwriteAll(buffer.items, 0);
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
}
fn parseInputFilesAr(macho_file: *MachO) !void {

View File

@ -82,11 +82,17 @@ pub fn deinit(self: *NvPtx) void {
self.llvm_object.deinit();
}
pub fn updateFunc(self: *NvPtx, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(
self: *NvPtx,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) link.File.UpdateNavError!void {
try self.llvm_object.updateFunc(pt, func_index, air, liveness);
}
pub fn updateNav(self: *NvPtx, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !void {
pub fn updateNav(self: *NvPtx, pt: Zcu.PerThread, nav: InternPool.Nav.Index) link.File.UpdateNavError!void {
return self.llvm_object.updateNav(pt, nav);
}
@ -102,10 +108,6 @@ pub fn updateExports(
return self.llvm_object.updateExports(pt, exported, export_indices);
}
pub fn freeDecl(self: *NvPtx, decl_index: InternPool.DeclIndex) void {
return self.llvm_object.freeDecl(decl_index);
}
pub fn flush(self: *NvPtx, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
return self.flushModule(arena, tid, prog_node);
}

View File

@ -385,7 +385,13 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi
}
}
pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(
self: *Plan9,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .plan9) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@ -437,7 +443,7 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index,
return self.updateFinish(pt, func.owner_nav);
}
pub fn updateNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
pub fn updateNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
@ -619,7 +625,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
.{ .kind = .code, .ty = .anyerror_type },
metadata.text_atom,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
error.CodegenFail => error.LinkFailure,
else => |e| e,
};
if (metadata.rodata_state != .unused) self.updateLazySymbolAtom(
@ -627,7 +633,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
.{ .kind = .const_data, .ty = .anyerror_type },
metadata.rodata_atom,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
error.CodegenFail => error.LinkFailure,
else => |e| e,
};
}
@ -947,50 +953,6 @@ fn addNavExports(
}
}
pub fn freeDecl(self: *Plan9, decl_index: InternPool.DeclIndex) void {
const gpa = self.base.comp.gpa;
// TODO audit the lifetimes of decls table entries. It's possible to get
// freeDecl without any updateDecl in between.
const zcu = self.base.comp.zcu.?;
const decl = zcu.declPtr(decl_index);
const is_fn = decl.val.isFuncBody(zcu);
if (is_fn) {
const symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(zcu)).?;
var submap = symidx_and_submap.functions;
if (submap.fetchSwapRemove(decl_index)) |removed_entry| {
gpa.free(removed_entry.value.code);
gpa.free(removed_entry.value.lineinfo);
}
if (submap.count() == 0) {
self.syms.items[symidx_and_submap.sym_index] = aout.Sym.undefined_symbol;
self.syms_index_free_list.append(gpa, symidx_and_submap.sym_index) catch {};
submap.deinit(gpa);
}
} else {
if (self.data_decl_table.fetchSwapRemove(decl_index)) |removed_entry| {
gpa.free(removed_entry.value);
}
}
if (self.decls.fetchRemove(decl_index)) |const_kv| {
var kv = const_kv;
const atom = self.getAtom(kv.value.index);
if (atom.got_index) |i| {
// TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
self.got_index_free_list.append(gpa, i) catch {};
}
if (atom.sym_index) |i| {
self.syms_index_free_list.append(gpa, i) catch {};
self.syms.items[i] = aout.Sym.undefined_symbol;
}
kv.value.exports.deinit(gpa);
}
{
const atom_index = self.decls.get(decl_index).?.index;
const relocs = self.relocs.getPtr(atom_index) orelse return;
relocs.clearAndFree(gpa);
assert(self.relocs.remove(atom_index));
}
}
fn createAtom(self: *Plan9) !Atom.Index {
const gpa = self.base.comp.gpa;
const index = @as(Atom.Index, @intCast(self.atoms.items.len));

View File

@ -122,7 +122,13 @@ pub fn deinit(self: *SpirV) void {
self.object.deinit();
}
pub fn updateFunc(self: *SpirV, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(
self: *SpirV,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@ -134,7 +140,7 @@ pub fn updateFunc(self: *SpirV, pt: Zcu.PerThread, func_index: InternPool.Index,
try self.object.updateFunc(pt, func_index, air, liveness);
}
pub fn updateNav(self: *SpirV, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !void {
pub fn updateNav(self: *SpirV, pt: Zcu.PerThread, nav: InternPool.Nav.Index) link.File.UpdateNavError!void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@ -196,11 +202,6 @@ pub fn updateExports(
// TODO: Export regular functions, variables, etc using Linkage attributes.
}
pub fn freeDecl(self: *SpirV, decl_index: InternPool.DeclIndex) void {
_ = self;
_ = decl_index;
}
pub fn flush(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
return self.flushModule(arena, tid, prog_node);
}
@ -266,7 +267,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
error.OutOfMemory => return error.OutOfMemory,
else => |other| {
log.err("error while linking: {s}", .{@errorName(other)});
return error.FlushFailure;
return error.LinkFailure;
},
};

File diff suppressed because it is too large

View File

@ -142,7 +142,16 @@ pub fn parse(gpa: Allocator, file_contents: []const u8) !Archive {
/// From a given file offset, starts reading for a file header.
/// When found, parses the object file into an `Object` and returns it.
pub fn parseObject(archive: Archive, wasm: *Wasm, file_contents: []const u8, path: Path) !Object {
pub fn parseObject(
archive: Archive,
wasm: *Wasm,
file_contents: []const u8,
path: Path,
host_name: Wasm.String,
scratch_space: *Object.ScratchSpace,
must_link: bool,
gc_sections: bool,
) !Object {
const header = mem.bytesAsValue(Header, file_contents[0..@sizeOf(Header)]);
if (!mem.eql(u8, &header.fmag, ARFMAG)) return error.BadHeaderDelimiter;
@ -157,8 +166,9 @@ pub fn parseObject(archive: Archive, wasm: *Wasm, file_contents: []const u8, pat
};
const object_file_size = try header.parsedSize();
const contents = file_contents[@sizeOf(Header)..][0..object_file_size];
return Object.create(wasm, file_contents[@sizeOf(Header)..][0..object_file_size], path, object_name);
return Object.parse(wasm, contents, path, object_name, host_name, scratch_space, must_link, gc_sections);
}
const Archive = @This();
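
`parseObject` now threads through everything the eager parse needs: an interned `host_name` string, reusable scratch buffers, and the `must_link`/`gc_sections` linking policy, so archive members are parsed fully up front. A hypothetical call site during prelink (assumes `ScratchSpace` is default-initializable):

    var scratch: Object.ScratchSpace = .{};
    defer scratch.deinit(gpa);
    const object = try archive.parseObject(
        wasm, file_contents, path, host_name, &scratch, must_link, gc_sections,
    );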

src/link/Wasm/Flush.zig (new file, 1448 lines)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,210 +0,0 @@
//! Represents a WebAssembly symbol, containing all of its properties
//! as well as helper methods to determine its functionality
//! and how it will/must be linked.
//! The name of the symbol can be found by providing the offset, found
//! on the `name` field, to a string table in the wasm binary or object file.
/// Bitfield containing flags for a symbol
/// Can contain any of the flags defined in `Flag`
flags: u32,
/// Symbol name, when the symbol is undefined the name will be taken from the import.
/// Note: This is an index into the wasm string table.
name: wasm.String,
/// Index into the list of objects based on set `tag`
/// NOTE: This will be set to `undefined` when `tag` is `data`
/// and the symbol is undefined.
index: u32,
/// Represents the kind of the symbol, such as a function or global.
tag: Tag,
/// Contains the virtual address of the symbol, relative to the start of its section.
/// This differs from the offset of an `Atom` which is relative to the start of a segment.
virtual_address: u32,
/// Represents a symbol index where `null` represents an invalid index.
pub const Index = enum(u32) {
null,
_,
};
pub const Tag = enum {
function,
data,
global,
section,
event,
table,
/// synthetic kind used by the wasm linker during incremental compilation
/// to note that a symbol has been freed, but still lives in the symbol list.
dead,
undefined,
/// From a given symbol tag, returns the `ExternalType`
/// Asserts the given tag can be represented as an external type.
pub fn externalType(tag: Tag) std.wasm.ExternalKind {
return switch (tag) {
.function => .function,
.global => .global,
.data => unreachable, // Data symbols will generate a global
.section => unreachable, // Not an external type
.event => unreachable, // Not an external type
.dead => unreachable, // Dead symbols should not be referenced
.undefined => unreachable,
.table => .table,
};
}
};
pub const Flag = enum(u32) {
/// Indicates a weak symbol.
/// When linking multiple modules defining the same symbol, all weak definitions are discarded
/// in favour of the strong definition. When no strong definition exists, all but one of the weak definitions are discarded.
/// If multiple definitions remain, we get an error: symbol collision.
WASM_SYM_BINDING_WEAK = 0x1,
/// Indicates a local, non-exported, non-module-linked symbol.
/// The names of local symbols are not required to be unique, unlike non-local symbols.
WASM_SYM_BINDING_LOCAL = 0x2,
/// Represents the binding of a symbol, indicating if it's local or not, and weak or not.
WASM_SYM_BINDING_MASK = 0x3,
/// Indicates a hidden symbol. Hidden symbols will not be exported to the link result, but may
/// link to other modules.
WASM_SYM_VISIBILITY_HIDDEN = 0x4,
/// Indicates an undefined symbol. For non-data symbols, this must match whether the symbol is
/// an import or is defined. For data symbols however, determines whether a segment is specified.
WASM_SYM_UNDEFINED = 0x10,
/// Indicates a symbol of which its intention is to be exported from the wasm module to the host environment.
/// This differs from the visibility flag as this flag affects the static linker.
WASM_SYM_EXPORTED = 0x20,
/// Indicates the symbol uses an explicit symbol name, rather than reusing the name from a wasm import.
/// Allows remapping imports from foreign WASM modules into local symbols with a different name.
WASM_SYM_EXPLICIT_NAME = 0x40,
/// Indicates the symbol is to be included in the linker output, regardless of whether it is used or has any references to it.
WASM_SYM_NO_STRIP = 0x80,
/// Indicates a symbol is TLS
WASM_SYM_TLS = 0x100,
/// Zig specific flag. Uses the most significant bit of the flag to annotate whether a symbol is
/// alive or not. Dead symbols are allowed to be garbage collected.
alive = 0x80000000,
};
/// Verifies if the given symbol should be imported from the
/// host environment or not
pub fn requiresImport(symbol: Symbol) bool {
if (symbol.tag == .data) return false;
if (!symbol.isUndefined()) return false;
if (symbol.isWeak()) return false;
// if (symbol.isDefined() and symbol.isWeak()) return true; //TODO: Only when building shared lib
return true;
}
/// Marks a symbol as 'alive', ensuring the garbage collector will not collect the trash.
pub fn mark(symbol: *Symbol) void {
symbol.flags |= @intFromEnum(Flag.alive);
}
pub fn unmark(symbol: *Symbol) void {
symbol.flags &= ~@intFromEnum(Flag.alive);
}
pub fn isAlive(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.alive) != 0;
}
pub fn isDead(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.alive) == 0;
}
pub fn isTLS(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.WASM_SYM_TLS) != 0;
}
pub fn hasFlag(symbol: Symbol, flag: Flag) bool {
return symbol.flags & @intFromEnum(flag) != 0;
}
pub fn setFlag(symbol: *Symbol, flag: Flag) void {
symbol.flags |= @intFromEnum(flag);
}
pub fn isUndefined(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.WASM_SYM_UNDEFINED) != 0;
}
pub fn setUndefined(symbol: *Symbol, is_undefined: bool) void {
if (is_undefined) {
symbol.setFlag(.WASM_SYM_UNDEFINED);
} else {
symbol.flags &= ~@intFromEnum(Flag.WASM_SYM_UNDEFINED);
}
}
pub fn setGlobal(symbol: *Symbol, is_global: bool) void {
if (is_global) {
symbol.flags &= ~@intFromEnum(Flag.WASM_SYM_BINDING_LOCAL);
} else {
symbol.setFlag(.WASM_SYM_BINDING_LOCAL);
}
}
pub fn isDefined(symbol: Symbol) bool {
return !symbol.isUndefined();
}
pub fn isVisible(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.WASM_SYM_VISIBILITY_HIDDEN) == 0;
}
pub fn isLocal(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.WASM_SYM_BINDING_LOCAL) != 0;
}
pub fn isGlobal(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.WASM_SYM_BINDING_LOCAL) == 0;
}
pub fn isHidden(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.WASM_SYM_VISIBILITY_HIDDEN) != 0;
}
pub fn isNoStrip(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.WASM_SYM_NO_STRIP) != 0;
}
pub fn isExported(symbol: Symbol, is_dynamic: bool) bool {
if (symbol.isUndefined() or symbol.isLocal()) return false;
if (is_dynamic and symbol.isVisible()) return true;
return symbol.hasFlag(.WASM_SYM_EXPORTED);
}
pub fn isWeak(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.WASM_SYM_BINDING_WEAK) != 0;
}
/// Formats the symbol into human-readable text
pub fn format(symbol: Symbol, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = fmt;
_ = options;
const kind_fmt: u8 = switch (symbol.tag) {
.function => 'F',
.data => 'D',
.global => 'G',
.section => 'S',
.event => 'E',
.table => 'T',
.dead => '-',
.undefined => unreachable,
};
const visible: []const u8 = if (symbol.isVisible()) "yes" else "no";
const binding: []const u8 = if (symbol.isLocal()) "local" else "global";
const undef: []const u8 = if (symbol.isUndefined()) "undefined" else "";
try writer.print(
"{c} binding={s} visible={s} id={d} name_offset={d} {s}",
.{ kind_fmt, binding, visible, symbol.index, symbol.name, undef },
);
}
const std = @import("std");
const Symbol = @This();
const wasm = @import("../Wasm.zig");

File diff suppressed because it is too large

View File

@ -75,6 +75,10 @@ pub fn fatal(comptime format: []const u8, args: anytype) noreturn {
process.exit(1);
}
/// Shaming all the locations that inappropriately use an O(N) search algorithm.
/// Please delete this and fix the compilation errors!
pub const @"bad O(N)" = void;
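
The shaming decl only has teeth if every offending site references it, so that deleting the decl turns each remaining O(N) search into a compile error. One plausible marking at a culprit site, purely illustrative:

    // At a lookup that still scans linearly:
    _ = @import("main.zig").@"bad O(N)";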
const normal_usage =
\\Usage: zig [command] [options]
\\

View File

@ -14,19 +14,14 @@ const link = @import("link.zig");
const log = std.log.scoped(.register_manager);
pub const AllocateRegistersError = error{
/// No registers are available anymore
pub const AllocationError = error{
OutOfRegisters,
/// Can happen when spilling an instruction in codegen runs out of
/// memory, so we propagate that error
OutOfMemory,
/// Can happen when spilling an instruction in codegen triggers integer
/// overflow, so we propagate that error
/// Compiler was asked to operate on a number larger than supported.
Overflow,
/// Can happen when spilling an instruction triggers a codegen
/// error, so we propagate that error
/// Indicates the error is already stored in `failed_codegen` on the Zcu.
CodegenFail,
} || link.File.UpdateDebugInfoError;
};
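
A sketch of how a backend consumes the renamed set, spilling once when `OutOfRegisters` comes back (the spill helper and register class are illustrative):

    const reg = self.register_manager.allocReg(inst, gp) catch |err| switch (err) {
        error.OutOfRegisters => blk: {
            try self.spillAnyRegister(); // hypothetical: evicts one tracked register
            break :blk try self.register_manager.allocReg(inst, gp);
        },
        else => |e| return e,
    };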
pub fn RegisterManager(
comptime Function: type,
@ -281,7 +276,7 @@ pub fn RegisterManager(
comptime count: comptime_int,
insts: [count]?Air.Inst.Index,
register_class: RegisterBitSet,
) AllocateRegistersError![count]Register {
) AllocationError![count]Register {
comptime assert(count > 0 and count <= tracked_registers.len);
var locked_registers = self.locked_registers;
@ -338,7 +333,7 @@ pub fn RegisterManager(
self: *Self,
inst: ?Air.Inst.Index,
register_class: RegisterBitSet,
) AllocateRegistersError!Register {
) AllocationError!Register {
return (try self.allocRegs(1, .{inst}, register_class))[0];
}
@ -349,7 +344,7 @@ pub fn RegisterManager(
self: *Self,
tracked_index: TrackedIndex,
inst: ?Air.Inst.Index,
) AllocateRegistersError!void {
) AllocationError!void {
log.debug("getReg {} for inst {?}", .{ regAtTrackedIndex(tracked_index), inst });
if (!self.isRegIndexFree(tracked_index)) {
// Move the instruction that was previously there to a
@ -362,7 +357,7 @@ pub fn RegisterManager(
}
self.getRegIndexAssumeFree(tracked_index, inst);
}
pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) AllocateRegistersError!void {
pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) AllocationError!void {
log.debug("getting reg: {}", .{reg});
return self.getRegIndex(indexOfRegIntoTracked(reg) orelse return, inst);
}
@ -370,7 +365,7 @@ pub fn RegisterManager(
self: *Self,
comptime reg: Register,
inst: ?Air.Inst.Index,
) AllocateRegistersError!void {
) AllocationError!void {
return self.getRegIndex((comptime indexOfRegIntoTracked(reg)) orelse return, inst);
}