Merge pull request #22220 from ziglang/wasm-linker

wasm linker: aggressive rewrite towards Data-Oriented Design
Commit d4fe4698d9 by Andrew Kelley, 2025-01-16 04:20:41 -05:00 (committed via GitHub)
104 changed files with 13816 additions and 12348 deletions
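The "Data-Oriented Design" in the commit title refers to replacing per-symbol heap objects (note the deleted src/link/Wasm/Symbol.zig and src/link/Wasm/ZigObject.zig below) with flat, index-addressed arrays of plain data. A minimal sketch of that storage style using `std.MultiArrayList`; the `FuncImport` record here is hypothetical, not the linker's actual type:

```zig
const std = @import("std");

// Hypothetical linker record illustrating the struct-of-arrays layout.
const FuncImport = struct {
    name: u32, // offset into a shared string_bytes buffer, not a slice
    type_index: u32,
    resolution: u32,
};

test "struct-of-arrays storage" {
    const gpa = std.testing.allocator;
    var funcs: std.MultiArrayList(FuncImport) = .{};
    defer funcs.deinit(gpa);

    try funcs.append(gpa, .{ .name = 0, .type_index = 1, .resolution = 0 });
    try funcs.append(gpa, .{ .name = 5, .type_index = 1, .resolution = 2 });

    // Each field lives in its own contiguous column, so a pass that only
    // touches `type_index` never pulls the other fields into cache.
    for (funcs.items(.type_index)) |ti| try std.testing.expectEqual(@as(u32, 1), ti);
}
```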

View File

@ -643,9 +643,8 @@ set(ZIG_STAGE2_SOURCES
src/link/StringTable.zig
src/link/Wasm.zig
src/link/Wasm/Archive.zig
src/link/Wasm/Flush.zig
src/link/Wasm/Object.zig
src/link/Wasm/Symbol.zig
src/link/Wasm/ZigObject.zig
src/link/aarch64.zig
src/link/riscv.zig
src/link/table_section.zig

View File

@ -447,6 +447,7 @@ pub fn build(b: *std.Build) !void {
.skip_single_threaded = skip_single_threaded,
.skip_non_native = skip_non_native,
.skip_libc = skip_libc,
.use_llvm = use_llvm,
.max_rss = 1 * 1024 * 1024 * 1024,
}));
@ -462,6 +463,7 @@ pub fn build(b: *std.Build) !void {
.skip_single_threaded = true,
.skip_non_native = skip_non_native,
.skip_libc = skip_libc,
.use_llvm = use_llvm,
}));
test_modules_step.dependOn(tests.addModuleTests(b, .{
@ -476,6 +478,7 @@ pub fn build(b: *std.Build) !void {
.skip_single_threaded = true,
.skip_non_native = skip_non_native,
.skip_libc = true,
.use_llvm = use_llvm,
.no_builtin = true,
}));
@ -491,6 +494,7 @@ pub fn build(b: *std.Build) !void {
.skip_single_threaded = true,
.skip_non_native = skip_non_native,
.skip_libc = true,
.use_llvm = use_llvm,
.no_builtin = true,
}));
@ -506,6 +510,7 @@ pub fn build(b: *std.Build) !void {
.skip_single_threaded = skip_single_threaded,
.skip_non_native = skip_non_native,
.skip_libc = skip_libc,
.use_llvm = use_llvm,
// I observed a value of 4572626944 on the M2 CI.
.max_rss = 5029889638,
}));
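These hunks thread a `use_llvm` flag into each test group's options. A minimal sketch of how such a boolean flag is typically declared in a build.zig; the option name and help text here are illustrative assumptions, not the exact declaration in the Zig repository's build script:

```zig
const std = @import("std");

pub fn build(b: *std.Build) void {
    // Exposed on the command line as `-Duse-llvm=[bool]`.
    const use_llvm = b.option(bool, "use-llvm", "Build test binaries with the LLVM backend");
    _ = use_llvm; // then threaded into each `tests.addModuleTests(...)` options struct, as above
}
```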

View File

@ -2424,7 +2424,22 @@ const WasmDumper = struct {
}
var output = std.ArrayList(u8).init(gpa);
errdefer output.deinit();
defer output.deinit();
parseAndDumpInner(step, check, bytes, &fbs, &output) catch |err| switch (err) {
error.EndOfStream => try output.appendSlice("\n<UnexpectedEndOfStream>"),
else => |e| return e,
};
return output.toOwnedSlice();
}
fn parseAndDumpInner(
step: *Step,
check: Check,
bytes: []const u8,
fbs: *std.io.FixedBufferStream([]const u8),
output: *std.ArrayList(u8),
) !void {
const reader = fbs.reader();
const writer = output.writer();
switch (check.kind) {
@ -2442,8 +2457,6 @@ const WasmDumper = struct {
else => return step.fail("invalid check kind for Wasm file format: {s}", .{@tagName(check.kind)}),
}
return output.toOwnedSlice();
}
fn parseAndDumpSection(
@ -2682,7 +2695,7 @@ const WasmDumper = struct {
else => unreachable,
}
const end_opcode = try std.leb.readUleb128(u8, reader);
if (end_opcode != std.wasm.opcode(.end)) {
if (end_opcode != @intFromEnum(std.wasm.Opcode.end)) {
return step.fail("expected 'end' opcode in init expression", .{});
}
}
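The refactor above moves the parse loop into `parseAndDumpInner` so the outer function can own the output buffer and translate `error.EndOfStream` into visible text that test checks can match on. It also turns the `errdefer` into a plain `defer`, which is safe because `toOwnedSlice` leaves the list empty. A standalone sketch of the same pattern, with a trivial stand-in parser:

```zig
const std = @import("std");

fn dump(gpa: std.mem.Allocator, bytes: []const u8) ![]u8 {
    var output = std.ArrayList(u8).init(gpa);
    // Safe even on the success path: toOwnedSlice resets the list to empty.
    defer output.deinit();
    dumpInner(bytes, &output) catch |err| switch (err) {
        error.EndOfStream => try output.appendSlice("\n<UnexpectedEndOfStream>"),
        else => |e| return e,
    };
    return output.toOwnedSlice();
}

fn dumpInner(bytes: []const u8, output: *std.ArrayList(u8)) !void {
    var fbs = std.io.fixedBufferStream(bytes);
    const byte = try fbs.reader().readByte(); // error.EndOfStream on empty input
    try output.writer().print("first byte: 0x{x}", .{byte});
}

test dump {
    const gpa = std.testing.allocator;
    const text = try dump(gpa, "");
    defer gpa.free(text);
    try std.testing.expectEqualStrings("\n<UnexpectedEndOfStream>", text);
}
```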

View File

@ -1219,6 +1219,12 @@ pub const Cpu = struct {
} else true;
}
pub fn count(set: Set) std.math.IntFittingRange(0, needed_bit_count) {
var sum: usize = 0;
for (set.ints) |x| sum += @popCount(x);
return @intCast(sum);
}
pub fn isEnabled(set: Set, arch_feature_index: Index) bool {
const usize_index = arch_feature_index / @bitSizeOf(usize);
const bit_index: ShiftInt = @intCast(arch_feature_index % @bitSizeOf(usize));
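The new `count` method sums the set bits across the backing words of the feature set. The same idea as a standalone function, assuming only that the set is stored as an array of `usize` words:

```zig
const std = @import("std");

// Population count over a word-array bitset, as in `Set.count` above.
fn countBits(words: []const usize) usize {
    var sum: usize = 0;
    for (words) |x| sum += @popCount(x);
    return sum;
}

test countBits {
    const words = [_]usize{ 0b1011, 0, 1 };
    try std.testing.expectEqual(@as(usize, 4), countBits(&words));
}
```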

View File

@ -1018,12 +1018,15 @@ const WasiThreadImpl = struct {
return .{ .thread = &instance.thread };
}
/// Bootstrap procedure, called by the host environment after thread creation.
export fn wasi_thread_start(tid: i32, arg: *Instance) void {
if (builtin.single_threaded) {
// ensure function is not analyzed in single-threaded mode
return;
comptime {
if (!builtin.single_threaded) {
@export(wasi_thread_start, .{ .name = "wasi_thread_start" });
}
}
/// Called by the host environment after thread creation.
fn wasi_thread_start(tid: i32, arg: *Instance) callconv(.c) void {
comptime assert(!builtin.single_threaded);
__set_stack_pointer(arg.thread.memory.ptr + arg.stack_offset);
__wasm_init_tls(arg.thread.memory.ptr + arg.tls_offset);
@atomicStore(u32, &WasiThreadImpl.tls_thread_id, @intCast(tid), .seq_cst);
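Rather than exporting the function unconditionally and bailing out at runtime, the entry point is now exported from a `comptime` block, so in single-threaded builds the symbol is never emitted and the body is never analyzed. A self-contained sketch of the pattern, mirroring the `@export` signature and `callconv(.c)` style used in this commit; `Instance` is a hypothetical stand-in:

```zig
const builtin = @import("builtin");

const Instance = struct {}; // stand-in for the real thread bootstrap data

// The export decision happens at comptime: if the condition is false, no
// symbol is emitted and `thread_start` is never semantically analyzed.
comptime {
    if (!builtin.single_threaded) {
        @export(thread_start, .{ .name = "thread_start" });
    }
}

fn thread_start(tid: i32, arg: *Instance) callconv(.c) void {
    _ = tid;
    _ = arg;
}
```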

View File

@ -641,10 +641,13 @@ pub fn ArrayHashMapUnmanaged(
return self;
}
/// An empty `value_list` may be passed, in which case the values array becomes `undefined`.
pub fn reinit(self: *Self, gpa: Allocator, key_list: []const K, value_list: []const V) Oom!void {
try self.entries.resize(gpa, key_list.len);
@memcpy(self.keys(), key_list);
if (@sizeOf(V) != 0) {
if (value_list.len == 0) {
@memset(self.values(), undefined);
} else {
assert(key_list.len == value_list.len);
@memcpy(self.values(), value_list);
}
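A hedged usage sketch of the new contract: passing an empty `value_list` leaves the values column `undefined`, to be filled in by the caller afterwards (the keys and values are read directly here rather than via lookup, to keep the sketch minimal):

```zig
const std = @import("std");

test "reinit with empty value_list" {
    const gpa = std.testing.allocator;
    var map: std.AutoArrayHashMapUnmanaged(u32, u64) = .empty;
    defer map.deinit(gpa);

    // Keys are copied in; values are left undefined for the caller.
    try map.reinit(gpa, &.{ 1, 2, 3 }, &.{});
    for (map.values(), 0..) |*v, i| v.* = @intCast(i * 10);

    try std.testing.expectEqual(@as(u32, 3), map.keys()[2]);
    try std.testing.expectEqual(@as(u64, 20), map.values()[2]);
}
```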

View File

@ -267,8 +267,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// Never invalidates element pointers.
/// Asserts that the list can hold one additional item.
pub fn appendAssumeCapacity(self: *Self, item: T) void {
const new_item_ptr = self.addOneAssumeCapacity();
new_item_ptr.* = item;
self.addOneAssumeCapacity().* = item;
}
/// Remove the element at index `i`, shift elements after index
@ -879,8 +878,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Never invalidates element pointers.
/// Asserts that the list can hold one additional item.
pub fn appendAssumeCapacity(self: *Self, item: T) void {
const new_item_ptr = self.addOneAssumeCapacity();
new_item_ptr.* = item;
self.addOneAssumeCapacity().* = item;
}
/// Remove the element at index `i` from the list and return its value.

View File

@ -16,10 +16,6 @@ const Allocator = std.mem.Allocator;
fn getStdOutHandle() posix.fd_t {
if (is_windows) {
if (builtin.zig_backend == .stage2_aarch64) {
// TODO: this is just a temporary workaround until we advance aarch64 backend further along.
return windows.GetStdHandle(windows.STD_OUTPUT_HANDLE) catch windows.INVALID_HANDLE_VALUE;
}
return windows.peb().ProcessParameters.hStdOutput;
}
@ -36,10 +32,6 @@ pub fn getStdOut() File {
fn getStdErrHandle() posix.fd_t {
if (is_windows) {
if (builtin.zig_backend == .stage2_aarch64) {
// TODO: this is just a temporary workaround until we advance aarch64 backend further along.
return windows.GetStdHandle(windows.STD_ERROR_HANDLE) catch windows.INVALID_HANDLE_VALUE;
}
return windows.peb().ProcessParameters.hStdError;
}
@ -56,10 +48,6 @@ pub fn getStdErr() File {
fn getStdInHandle() posix.fd_t {
if (is_windows) {
if (builtin.zig_backend == .stage2_aarch64) {
// TODO: this is just a temporary workaround until we advance aarch64 backend further along.
return windows.GetStdHandle(windows.STD_INPUT_HANDLE) catch windows.INVALID_HANDLE_VALUE;
}
return windows.peb().ProcessParameters.hStdInput;
}

View File

@ -4,8 +4,6 @@
const std = @import("std.zig");
const testing = std.testing;
// TODO: Add support for multi-byte ops (e.g. table operations)
/// Wasm instruction opcodes
///
/// All instructions are defined as per spec:
@ -195,27 +193,6 @@ pub const Opcode = enum(u8) {
_,
};
/// Returns the integer value of an `Opcode`. Used by the Zig compiler
/// to write instructions to the wasm binary file
pub fn opcode(op: Opcode) u8 {
return @intFromEnum(op);
}
test "opcodes" {
// Ensure our opcodes values remain intact as certain values are skipped due to them being reserved
const i32_const = opcode(.i32_const);
const end = opcode(.end);
const drop = opcode(.drop);
const local_get = opcode(.local_get);
const i64_extend32_s = opcode(.i64_extend32_s);
try testing.expectEqual(@as(u16, 0x41), i32_const);
try testing.expectEqual(@as(u16, 0x0B), end);
try testing.expectEqual(@as(u16, 0x1A), drop);
try testing.expectEqual(@as(u16, 0x20), local_get);
try testing.expectEqual(@as(u16, 0xC4), i64_extend32_s);
}
/// Opcodes that require a prefix `0xFC`.
/// Each opcode represents a varuint32, meaning
/// they are encoded as leb128 in binary.
@ -241,12 +218,6 @@ pub const MiscOpcode = enum(u32) {
_,
};
/// Returns the integer value of an `MiscOpcode`. Used by the Zig compiler
/// to write instructions to the wasm binary file
pub fn miscOpcode(op: MiscOpcode) u32 {
return @intFromEnum(op);
}
/// Simd opcodes that require a prefix `0xFD`.
/// Each opcode represents a varuint32, meaning
/// they are encoded as leb128 in binary.
@ -512,12 +483,6 @@ pub const SimdOpcode = enum(u32) {
f32x4_relaxed_dot_bf16x8_add_f32x4 = 0x114,
};
/// Returns the integer value of an `SimdOpcode`. Used by the Zig compiler
/// to write instructions to the wasm binary file
pub fn simdOpcode(op: SimdOpcode) u32 {
return @intFromEnum(op);
}
/// Atomic opcodes that require a prefix `0xFE`.
/// Each opcode represents a varuint32, meaning
/// they are encoded as leb128 in binary.
@ -592,12 +557,6 @@ pub const AtomicsOpcode = enum(u32) {
i64_atomic_rmw32_cmpxchg_u = 0x4E,
};
/// Returns the integer value of an `AtomicsOpcode`. Used by the Zig compiler
/// to write instructions to the wasm binary file
pub fn atomicsOpcode(op: AtomicsOpcode) u32 {
return @intFromEnum(op);
}
/// Enum representing all Wasm value types as per spec:
/// https://webassembly.github.io/spec/core/binary/types.html
pub const Valtype = enum(u8) {
@ -608,11 +567,6 @@ pub const Valtype = enum(u8) {
v128 = 0x7B,
};
/// Returns the integer value of a `Valtype`
pub fn valtype(value: Valtype) u8 {
return @intFromEnum(value);
}
/// Reference types, where the funcref references to a function regardless of its type
/// and ref references an object from the embedder.
pub const RefType = enum(u8) {
@ -620,41 +574,17 @@ pub const RefType = enum(u8) {
externref = 0x6F,
};
/// Returns the integer value of a `Reftype`
pub fn reftype(value: RefType) u8 {
return @intFromEnum(value);
}
test "valtypes" {
const _i32 = valtype(.i32);
const _i64 = valtype(.i64);
const _f32 = valtype(.f32);
const _f64 = valtype(.f64);
try testing.expectEqual(@as(u8, 0x7F), _i32);
try testing.expectEqual(@as(u8, 0x7E), _i64);
try testing.expectEqual(@as(u8, 0x7D), _f32);
try testing.expectEqual(@as(u8, 0x7C), _f64);
}
/// Limits classify the size range of resizeable storage associated with memory types and table types.
pub const Limits = struct {
flags: u8,
flags: Flags,
min: u32,
max: u32,
pub const Flags = enum(u8) {
WASM_LIMITS_FLAG_HAS_MAX = 0x1,
WASM_LIMITS_FLAG_IS_SHARED = 0x2,
pub const Flags = packed struct(u8) {
has_max: bool,
is_shared: bool,
reserved: u6 = 0,
};
pub fn hasFlag(limits: Limits, flag: Flags) bool {
return limits.flags & @intFromEnum(flag) != 0;
}
pub fn setFlag(limits: *Limits, flag: Flags) void {
limits.flags |= @intFromEnum(flag);
}
};
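Replacing the raw flag byte and its `hasFlag`/`setFlag` helpers with a `packed struct(u8)` keeps the same in-memory representation while giving named boolean fields. A standalone sketch; the bit positions match the old `WASM_LIMITS_FLAG_*` constants:

```zig
const std = @import("std");

// Same layout as the raw byte: has_max is bit 0 (0x1), is_shared is bit 1 (0x2).
const Flags = packed struct(u8) {
    has_max: bool,
    is_shared: bool,
    reserved: u6 = 0,
};

test Flags {
    const raw: u8 = 0x3; // HAS_MAX | IS_SHARED
    const flags: Flags = @bitCast(raw);
    try std.testing.expect(flags.has_max and flags.is_shared);
    // Round-trips losslessly back to the flag byte.
    try std.testing.expectEqual(raw, @as(u8, @bitCast(flags)));
}
```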
/// Initialization expressions are used to set the initial value on an object
@ -667,18 +597,6 @@ pub const InitExpression = union(enum) {
global_get: u32,
};
/// Represents a function entry, holding the index to its type
pub const Func = struct {
type_index: u32,
};
/// Tables are used to hold pointers to opaque objects.
/// This can either by any function, or an object from the host.
pub const Table = struct {
limits: Limits,
reftype: RefType,
};
/// Describes the layout of the memory where `min` represents
/// the minimal amount of pages, and the optional `max` represents
/// the max pages. When `null` will allow the host to determine the
@ -687,88 +605,6 @@ pub const Memory = struct {
limits: Limits,
};
/// Represents the type of a `Global` or an imported global.
pub const GlobalType = struct {
valtype: Valtype,
mutable: bool,
};
pub const Global = struct {
global_type: GlobalType,
init: InitExpression,
};
/// Notates an object to be exported from wasm
/// to the host.
pub const Export = struct {
name: []const u8,
kind: ExternalKind,
index: u32,
};
/// Element describes the layout of the table that can
/// be found at `table_index`
pub const Element = struct {
table_index: u32,
offset: InitExpression,
func_indexes: []const u32,
};
/// Imports are used to import objects from the host
pub const Import = struct {
module_name: []const u8,
name: []const u8,
kind: Kind,
pub const Kind = union(ExternalKind) {
function: u32,
table: Table,
memory: Limits,
global: GlobalType,
};
};
/// `Type` represents a function signature type containing both
/// a slice of parameters as well as a slice of return values.
pub const Type = struct {
params: []const Valtype,
returns: []const Valtype,
pub fn format(self: Type, comptime fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void {
if (fmt.len != 0) std.fmt.invalidFmtError(fmt, self);
_ = opt;
try writer.writeByte('(');
for (self.params, 0..) |param, i| {
try writer.print("{s}", .{@tagName(param)});
if (i + 1 != self.params.len) {
try writer.writeAll(", ");
}
}
try writer.writeAll(") -> ");
if (self.returns.len == 0) {
try writer.writeAll("nil");
} else {
for (self.returns, 0..) |return_ty, i| {
try writer.print("{s}", .{@tagName(return_ty)});
if (i + 1 != self.returns.len) {
try writer.writeAll(", ");
}
}
}
}
pub fn eql(self: Type, other: Type) bool {
return std.mem.eql(Valtype, self.params, other.params) and
std.mem.eql(Valtype, self.returns, other.returns);
}
pub fn deinit(self: *Type, gpa: std.mem.Allocator) void {
gpa.free(self.params);
gpa.free(self.returns);
self.* = undefined;
}
};
/// Wasm module sections as per spec:
/// https://webassembly.github.io/spec/core/binary/modules.html
pub const Section = enum(u8) {
@ -788,11 +624,6 @@ pub const Section = enum(u8) {
_,
};
/// Returns the integer value of a given `Section`
pub fn section(val: Section) u8 {
return @intFromEnum(val);
}
/// The kind of the type when importing or exporting to/from the host environment.
/// https://webassembly.github.io/spec/core/syntax/modules.html
pub const ExternalKind = enum(u8) {
@ -802,11 +633,6 @@ pub const ExternalKind = enum(u8) {
global,
};
/// Returns the integer value of a given `ExternalKind`
pub fn externalKind(val: ExternalKind) u8 {
return @intFromEnum(val);
}
/// Defines the enum values for each subsection id for the "Names" custom section
/// as described by:
/// https://webassembly.github.io/spec/core/appendix/custom.html?highlight=name#name-section
@ -829,7 +655,18 @@ pub const function_type: u8 = 0x60;
pub const result_type: u8 = 0x40;
/// Represents a block which will not return a value
pub const block_empty: u8 = 0x40;
pub const BlockType = enum(u8) {
empty = 0x40,
i32 = 0x7F,
i64 = 0x7E,
f32 = 0x7D,
f64 = 0x7C,
v128 = 0x7B,
pub fn fromValtype(valtype: Valtype) BlockType {
return @enumFromInt(@intFromEnum(valtype));
}
};
// binary constants
pub const magic = [_]u8{ 0x00, 0x61, 0x73, 0x6D }; // \0asm
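With the wrapper functions (`opcode`, `valtype`, `section`, and friends) deleted, callers use `@intFromEnum` directly, since the enum tag values already are the binary encoding; the new `BlockType` shares its encoding with `Valtype`, so `fromValtype` is a tag-preserving cast. A short sketch, assuming the `std.wasm` API as of this commit (the opcode values match the test removed above):

```zig
const std = @import("std");

test "opcode encoding without helpers" {
    try std.testing.expectEqual(@as(u8, 0x0B), @intFromEnum(std.wasm.Opcode.end));
    try std.testing.expectEqual(@as(u8, 0x41), @intFromEnum(std.wasm.Opcode.i32_const));
    // BlockType reuses Valtype's encoding, so the conversion is a cast.
    const bt = std.wasm.BlockType.fromValtype(.i32);
    try std.testing.expectEqual(std.wasm.BlockType.i32, bt);
}
```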

View File

@ -11,6 +11,11 @@ string_bytes: []const u8,
/// The first thing in this array is an `ErrorMessageList`.
extra: []const u32,
/// Index into `string_bytes`.
pub const String = u32;
/// Index into `string_bytes`, or null.
pub const OptionalString = u32;
/// Special encoding when there are no errors.
pub const empty: ErrorBundle = .{
.string_bytes = &.{},
@ -33,14 +38,13 @@ pub const ErrorMessageList = struct {
len: u32,
start: u32,
/// null-terminated string index. 0 means no compile log text.
compile_log_text: u32,
compile_log_text: OptionalString,
};
/// Trailing:
/// * ReferenceTrace for each reference_trace_len
pub const SourceLocation = struct {
/// null terminated string index
src_path: u32,
src_path: String,
line: u32,
column: u32,
/// byte offset of starting token
@ -49,17 +53,15 @@ pub const SourceLocation = struct {
span_main: u32,
/// byte offset of end of last token
span_end: u32,
/// null terminated string index, possibly null.
/// Does not include the trailing newline.
source_line: u32 = 0,
source_line: OptionalString = 0,
reference_trace_len: u32 = 0,
};
/// Trailing:
/// * MessageIndex for each notes_len.
pub const ErrorMessage = struct {
/// null terminated string index
msg: u32,
msg: String,
/// Usually one, but incremented for redundant messages.
count: u32 = 1,
src_loc: SourceLocationIndex = .none,
@ -71,7 +73,7 @@ pub const ReferenceTrace = struct {
/// Except for the sentinel ReferenceTrace element, in which case:
/// * 0 means remaining references hidden
/// * >0 means N references hidden
decl_name: u32,
decl_name: String,
/// Index into extra of a SourceLocation
/// If this is 0, this is the sentinel ReferenceTrace element.
src_loc: SourceLocationIndex,
@ -138,7 +140,7 @@ fn extraData(eb: ErrorBundle, comptime T: type, index: usize) struct { data: T,
}
/// Given an index into `string_bytes` returns the null-terminated string found there.
pub fn nullTerminatedString(eb: ErrorBundle, index: usize) [:0]const u8 {
pub fn nullTerminatedString(eb: ErrorBundle, index: String) [:0]const u8 {
const string_bytes = eb.string_bytes;
var end: usize = index;
while (string_bytes[end] != 0) {
@ -384,18 +386,18 @@ pub const Wip = struct {
};
}
pub fn addString(wip: *Wip, s: []const u8) Allocator.Error!u32 {
pub fn addString(wip: *Wip, s: []const u8) Allocator.Error!String {
const gpa = wip.gpa;
const index: u32 = @intCast(wip.string_bytes.items.len);
const index: String = @intCast(wip.string_bytes.items.len);
try wip.string_bytes.ensureUnusedCapacity(gpa, s.len + 1);
wip.string_bytes.appendSliceAssumeCapacity(s);
wip.string_bytes.appendAssumeCapacity(0);
return index;
}
pub fn printString(wip: *Wip, comptime fmt: []const u8, args: anytype) Allocator.Error!u32 {
pub fn printString(wip: *Wip, comptime fmt: []const u8, args: anytype) Allocator.Error!String {
const gpa = wip.gpa;
const index: u32 = @intCast(wip.string_bytes.items.len);
const index: String = @intCast(wip.string_bytes.items.len);
try wip.string_bytes.writer(gpa).print(fmt, args);
try wip.string_bytes.append(gpa, 0);
return index;
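`String` and `OptionalString` are documentation-only aliases for `u32` offsets into `string_bytes`. A condensed, self-contained sketch of the underlying string-table pattern, simplified from the `Wip` methods above:

```zig
const std = @import("std");

const String = u32; // index into string_bytes

// Append a string plus a 0 terminator; the returned String is its offset.
fn addString(gpa: std.mem.Allocator, string_bytes: *std.ArrayListUnmanaged(u8), s: []const u8) !String {
    const index: String = @intCast(string_bytes.items.len);
    try string_bytes.ensureUnusedCapacity(gpa, s.len + 1);
    string_bytes.appendSliceAssumeCapacity(s);
    string_bytes.appendAssumeCapacity(0);
    return index;
}

fn nullTerminatedString(string_bytes: []const u8, index: String) [:0]const u8 {
    const len = std.mem.indexOfScalar(u8, string_bytes[index..], 0).?;
    return string_bytes[index..][0..len :0];
}

test addString {
    const gpa = std.testing.allocator;
    var bytes: std.ArrayListUnmanaged(u8) = .empty;
    defer bytes.deinit(gpa);
    const a = try addString(gpa, &bytes, "hello");
    const b = try addString(gpa, &bytes, "world");
    try std.testing.expectEqualStrings("hello", nullTerminatedString(bytes.items, a));
    try std.testing.expectEqualStrings("world", nullTerminatedString(bytes.items, b));
}
```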

View File

@ -113,6 +113,14 @@ link_diags: link.Diags,
link_task_queue: ThreadSafeQueue(link.Task) = .empty,
/// Ensure only 1 simultaneous call to `flushTaskQueue`.
link_task_queue_safety: std.debug.SafetyLock = .{},
/// If any tasks are queued up that depend on prelink being finished, they are moved
/// here until prelink finishes.
link_task_queue_postponed: std.ArrayListUnmanaged(link.Task) = .empty,
/// Initialized with how many link input tasks are expected. After this reaches zero
/// the linker will begin the prelink phase.
/// Initialized in the Compilation main thread before the pipeline; modified only in
/// the linker task thread.
remaining_prelink_tasks: u32,
work_queues: [
len: {
@ -1515,6 +1523,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.file_system_inputs = options.file_system_inputs,
.parent_whole_cache = options.parent_whole_cache,
.link_diags = .init(gpa),
.remaining_prelink_tasks = 0,
};
// Prevent some footguns by making the "any" fields of config reflect
@ -1587,6 +1596,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.pdb_source_path = options.pdb_source_path,
.pdb_out_path = options.pdb_out_path,
.entry_addr = null, // CLI does not expose this option (yet?)
.object_host_name = "env",
};
switch (options.cache_mode) {
@ -1715,6 +1725,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
};
comp.c_object_table.putAssumeCapacityNoClobber(c_object, {});
}
comp.remaining_prelink_tasks += @intCast(comp.c_object_table.count());
// Add a `Win32Resource` for each `rc_source_files` and one for `manifest_file`.
const win32_resource_count =
@ -1722,6 +1733,10 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
if (win32_resource_count > 0) {
dev.check(.win32_resource);
try comp.win32_resource_table.ensureTotalCapacity(gpa, win32_resource_count);
// Add this after adding logic to updateWin32Resource to pass the
// result into link.loadInput. loadInput integration is not implemented
// for Windows linking logic yet.
//comp.remaining_prelink_tasks += @intCast(win32_resource_count);
for (options.rc_source_files) |rc_source_file| {
const win32_resource = try gpa.create(Win32Resource);
errdefer gpa.destroy(win32_resource);
@ -1732,6 +1747,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
};
comp.win32_resource_table.putAssumeCapacityNoClobber(win32_resource, {});
}
if (options.manifest_file) |manifest_path| {
const win32_resource = try gpa.create(Win32Resource);
errdefer gpa.destroy(win32_resource);
@ -1779,10 +1795,12 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
inline for (fields) |field| {
if (@field(paths, field.name)) |path| {
comp.link_task_queue.shared.appendAssumeCapacity(.{ .load_object = path });
comp.remaining_prelink_tasks += 1;
}
}
// Loads the libraries provided by `target_util.libcFullLinkFlags(target)`.
comp.link_task_queue.shared.appendAssumeCapacity(.load_host_libc);
comp.remaining_prelink_tasks += 1;
} else if (target.isMusl() and !target.isWasm()) {
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
@ -1791,14 +1809,17 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.{ .musl_crt_file = .crti_o },
.{ .musl_crt_file = .crtn_o },
});
comp.remaining_prelink_tasks += 2;
}
if (musl.needsCrt0(comp.config.output_mode, comp.config.link_mode, comp.config.pie)) |f| {
try comp.queueJobs(&.{.{ .musl_crt_file = f }});
comp.remaining_prelink_tasks += 1;
}
try comp.queueJobs(&.{.{ .musl_crt_file = switch (comp.config.link_mode) {
.static => .libc_a,
.dynamic => .libc_so,
} }});
comp.remaining_prelink_tasks += 1;
} else if (target.isGnuLibC()) {
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
@ -1807,14 +1828,18 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.{ .glibc_crt_file = .crti_o },
.{ .glibc_crt_file = .crtn_o },
});
comp.remaining_prelink_tasks += 2;
}
if (glibc.needsCrt0(comp.config.output_mode)) |f| {
try comp.queueJobs(&.{.{ .glibc_crt_file = f }});
comp.remaining_prelink_tasks += 1;
}
try comp.queueJobs(&[_]Job{
.{ .glibc_shared_objects = {} },
.{ .glibc_crt_file = .libc_nonshared_a },
});
comp.remaining_prelink_tasks += 1;
comp.remaining_prelink_tasks += glibc.sharedObjectsCount(&target);
} else if (target.isWasm() and target.os.tag == .wasi) {
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
@ -1822,11 +1847,13 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
try comp.queueJob(.{
.wasi_libc_crt_file = crt_file,
});
comp.remaining_prelink_tasks += 1;
}
try comp.queueJobs(&[_]Job{
.{ .wasi_libc_crt_file = wasi_libc.execModelCrtFile(comp.config.wasi_exec_model) },
.{ .wasi_libc_crt_file = .libc_a },
});
comp.remaining_prelink_tasks += 2;
} else if (target.isMinGW()) {
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
@ -1835,6 +1862,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.{ .mingw_crt_file = .mingw32_lib },
crt_job,
});
comp.remaining_prelink_tasks += 2;
// When linking mingw-w64 there are some import libs we always need.
try comp.windows_libs.ensureUnusedCapacity(gpa, mingw.always_link_libs.len);
@ -1846,6 +1874,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
}
} else if (target.os.tag == .freestanding and capable_of_building_zig_libc) {
try comp.queueJob(.{ .zig_libc = {} });
comp.remaining_prelink_tasks += 1;
} else {
return error.LibCUnavailable;
}
@ -1860,13 +1889,16 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
}
if (comp.wantBuildLibUnwindFromSource()) {
try comp.queueJob(.{ .libunwind = {} });
comp.remaining_prelink_tasks += 1;
}
if (build_options.have_llvm and is_exe_or_dyn_lib and comp.config.link_libcpp) {
try comp.queueJob(.libcxx);
try comp.queueJob(.libcxxabi);
comp.remaining_prelink_tasks += 2;
}
if (build_options.have_llvm and is_exe_or_dyn_lib and comp.config.any_sanitize_thread) {
try comp.queueJob(.libtsan);
comp.remaining_prelink_tasks += 1;
}
if (target.isMinGW() and comp.config.any_non_single_threaded) {
@ -1885,22 +1917,27 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
if (is_exe_or_dyn_lib) {
log.debug("queuing a job to build compiler_rt_lib", .{});
comp.job_queued_compiler_rt_lib = true;
comp.remaining_prelink_tasks += 1;
} else if (output_mode != .Obj) {
log.debug("queuing a job to build compiler_rt_obj", .{});
// In this case we are making a static library, so we ask
// for a compiler-rt object to put in it.
comp.job_queued_compiler_rt_obj = true;
comp.remaining_prelink_tasks += 1;
}
}
if (is_exe_or_dyn_lib and comp.config.any_fuzz and capable_of_building_compiler_rt) {
log.debug("queuing a job to build libfuzzer", .{});
comp.job_queued_fuzzer_lib = true;
comp.remaining_prelink_tasks += 1;
}
}
try comp.link_task_queue.shared.append(gpa, .load_explicitly_provided);
comp.remaining_prelink_tasks += 1;
}
log.debug("total prelink tasks: {d}", .{comp.remaining_prelink_tasks});
return comp;
}
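All of the scattered increments above initialize `remaining_prelink_tasks` before the pipeline starts; the single linker task thread then counts completed inputs down and begins prelink when the counter reaches zero. An illustrative, self-contained model of the gate; `Linker` and its methods are hypothetical stand-ins, not the real compiler types:

```zig
const std = @import("std");

const Linker = struct {
    remaining_prelink_tasks: u32,
    prelink_done: bool = false,

    fn finishInputTask(l: *Linker) void {
        // Runs only on the linker task thread, so a plain decrement suffices
        // (matching the doc comment: "modified only in the linker task thread").
        l.remaining_prelink_tasks -= 1;
        if (l.remaining_prelink_tasks == 0) l.prelink();
    }

    fn prelink(l: *Linker) void {
        // All object/archive/libc inputs are loaded; safe to run prelink.
        l.prelink_done = true;
    }
};

test Linker {
    var l: Linker = .{ .remaining_prelink_tasks = 3 };
    l.finishInputTask();
    l.finishInputTask();
    try std.testing.expect(!l.prelink_done);
    l.finishInputTask();
    try std.testing.expect(l.prelink_done);
}
```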
@ -1976,6 +2013,7 @@ pub fn destroy(comp: *Compilation) void {
comp.link_diags.deinit();
comp.link_task_queue.deinit(gpa);
comp.link_task_queue_postponed.deinit(gpa);
comp.clearMiscFailures();
@ -2438,9 +2476,8 @@ fn flush(
if (comp.bin_file) |lf| {
// This is needed before reading the error flags.
lf.flush(arena, tid, prog_node) catch |err| switch (err) {
error.FlushFailure, error.LinkFailure => {}, // error reported through link_diags.flags
error.LLDReportedFailure => {}, // error reported via lockAndParseLldStderr
else => |e| return e,
error.LinkFailure => {}, // Already reported.
error.OutOfMemory => return error.OutOfMemory,
};
}
@ -3025,8 +3062,120 @@ pub fn saveState(comp: *Compilation) !void {
//// TODO: compilation errors
//// TODO: namespaces
//// TODO: decls
//// TODO: linker state
}
// linker state
switch (lf.tag) {
.wasm => {
const wasm = lf.cast(.wasm).?;
const is_obj = comp.config.output_mode == .Obj;
try bufs.ensureUnusedCapacity(85);
addBuf(&bufs, wasm.string_bytes.items);
// TODO make it well-defined memory layout
//addBuf(&bufs, mem.sliceAsBytes(wasm.objects.items));
addBuf(&bufs, mem.sliceAsBytes(wasm.func_types.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_function_imports.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_function_imports.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_functions.items));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_global_imports.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_global_imports.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_globals.items));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_table_imports.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_table_imports.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_tables.items));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_memory_imports.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_memory_imports.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_memories.items));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations.items(.tag)));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations.items(.offset)));
// TODO handle the union safety field
//addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations.items(.pointee)));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations.items(.addend)));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_init_funcs.items));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_data_segments.items));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_datas.items));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_data_imports.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_data_imports.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_custom_segments.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_custom_segments.values()));
// TODO make it well-defined memory layout
// addBuf(&bufs, mem.sliceAsBytes(wasm.object_comdats.items));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations_table.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_relocations_table.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_comdat_symbols.items(.kind)));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_comdat_symbols.items(.index)));
addBuf(&bufs, mem.sliceAsBytes(wasm.out_relocs.items(.tag)));
addBuf(&bufs, mem.sliceAsBytes(wasm.out_relocs.items(.offset)));
// TODO handle the union safety field
//addBuf(&bufs, mem.sliceAsBytes(wasm.out_relocs.items(.pointee)));
addBuf(&bufs, mem.sliceAsBytes(wasm.out_relocs.items(.addend)));
addBuf(&bufs, mem.sliceAsBytes(wasm.uav_fixups.items));
addBuf(&bufs, mem.sliceAsBytes(wasm.nav_fixups.items));
addBuf(&bufs, mem.sliceAsBytes(wasm.func_table_fixups.items));
if (is_obj) {
addBuf(&bufs, mem.sliceAsBytes(wasm.navs_obj.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.navs_obj.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.uavs_obj.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.uavs_obj.values()));
} else {
addBuf(&bufs, mem.sliceAsBytes(wasm.navs_exe.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.navs_exe.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.uavs_exe.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.uavs_exe.values()));
}
addBuf(&bufs, mem.sliceAsBytes(wasm.overaligned_uavs.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.overaligned_uavs.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.zcu_funcs.keys()));
// TODO handle the union safety field
// addBuf(&bufs, mem.sliceAsBytes(wasm.zcu_funcs.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.nav_exports.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.nav_exports.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.uav_exports.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.uav_exports.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.imports.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.missing_exports.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.function_exports.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.function_exports.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.hidden_function_exports.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.hidden_function_exports.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.global_exports.items));
addBuf(&bufs, mem.sliceAsBytes(wasm.functions.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.function_imports.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.function_imports.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.data_imports.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.data_imports.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.data_segments.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.globals.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.global_imports.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.global_imports.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.tables.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.table_imports.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.table_imports.values()));
addBuf(&bufs, mem.sliceAsBytes(wasm.zcu_indirect_function_set.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_indirect_function_import_set.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.object_indirect_function_set.keys()));
addBuf(&bufs, mem.sliceAsBytes(wasm.mir_instructions.items(.tag)));
// TODO handle the union safety field
//addBuf(&bufs, mem.sliceAsBytes(wasm.mir_instructions.items(.data)));
addBuf(&bufs, mem.sliceAsBytes(wasm.mir_extra.items));
addBuf(&bufs, mem.sliceAsBytes(wasm.all_zcu_locals.items));
addBuf(&bufs, mem.sliceAsBytes(wasm.tag_name_bytes.items));
addBuf(&bufs, mem.sliceAsBytes(wasm.tag_name_offs.items));
// TODO add as header fields
// entry_resolution: FunctionImport.Resolution
// function_exports_len: u32
// global_exports_len: u32
// functions_end_prelink: u32
// globals_end_prelink: u32
// error_name_table_ref_count: u32
// tag_name_table_ref_count: u32
// any_tls_relocs: bool
// any_passive_inits: bool
},
else => log.err("TODO implement saving linker state for {s}", .{@tagName(lf.tag)}),
}
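This save-state code works because the rewritten linker keeps its state in flat arrays of plain data, so each column can be dumped (and later restored) as raw bytes via `mem.sliceAsBytes`. A standalone sketch with a hypothetical record type:

```zig
const std = @import("std");

// Hypothetical fixed-layout record, as a flat-array element.
const FuncType = extern struct {
    params_off: u32,
    params_len: u32,
    returns_off: u32,
    returns_len: u32,
};

test "flat arrays serialize as bytes" {
    const funcs = [_]FuncType{
        .{ .params_off = 0, .params_len = 2, .returns_off = 2, .returns_len = 1 },
    };
    // The whole column serializes as one contiguous byte slice.
    const bytes = std.mem.sliceAsBytes(&funcs);
    try std.testing.expect(bytes.len == @sizeOf(FuncType) * funcs.len);
    // Round-trip: reinterpret the bytes as the original records.
    const back = std.mem.bytesAsSlice(FuncType, bytes);
    try std.testing.expectEqual(@as(u32, 2), back[0].params_len);
}
```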
var basename_buf: [255]u8 = undefined;
const basename = std.fmt.bufPrint(&basename_buf, "{s}.zcs", .{
comp.root_name,
@ -3209,6 +3358,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
if (!zcu.navFileScope(nav).okToReportErrors()) continue;
try addModuleErrorMsg(zcu, &bundle, error_msg.*);
}
for (zcu.failed_types.keys(), zcu.failed_types.values()) |ty_index, error_msg| {
if (!zcu.typeFileScope(ty_index).okToReportErrors()) continue;
try addModuleErrorMsg(zcu, &bundle, error_msg.*);
}
for (zcu.failed_exports.values()) |value| {
try addModuleErrorMsg(zcu, &bundle, value.*);
}
@ -3252,7 +3405,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
}));
}
try comp.link_diags.addMessagesToBundle(&bundle);
try comp.link_diags.addMessagesToBundle(&bundle, comp.bin_file);
if (comp.zcu) |zcu| {
if (bundle.root_list.items.len == 0 and zcu.compile_log_sources.count() != 0) {
@ -3524,9 +3677,9 @@ pub fn performAllTheWork(
defer if (comp.zcu) |zcu| {
zcu.sema_prog_node.end();
zcu.sema_prog_node = std.Progress.Node.none;
zcu.sema_prog_node = .none;
zcu.codegen_prog_node.end();
zcu.codegen_prog_node = std.Progress.Node.none;
zcu.codegen_prog_node = .none;
zcu.generation += 1;
};
@ -3659,7 +3812,7 @@ fn performAllTheWorkInner(
try zcu.flushRetryableFailures();
zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0);
zcu.codegen_prog_node = if (comp.bin_file != null) main_progress_node.start("Code Generation", 0) else .none;
}
if (!comp.separateCodegenThreadOk()) {
@ -3689,6 +3842,8 @@ fn performAllTheWorkInner(
});
continue;
}
zcu.sema_prog_node.end();
zcu.sema_prog_node = .none;
}
break;
}
@ -3962,6 +4117,7 @@ fn dispatchCodegenTask(comp: *Compilation, tid: usize, link_task: link.Task) voi
if (comp.separateCodegenThreadOk()) {
comp.queueLinkTasks(&.{link_task});
} else {
assert(comp.remaining_prelink_tasks == 0);
link.doTask(comp, tid, link_task);
}
}

View File

@ -552,6 +552,15 @@ pub const Nav = struct {
};
}
/// This function is intended to be used by code generation, since semantic
/// analysis will ensure that any `Nav` which is potentially `extern` is
/// fully resolved.
/// Asserts that `status == .fully_resolved`.
pub fn getResolvedExtern(nav: Nav, ip: *const InternPool) ?Key.Extern {
assert(nav.status == .fully_resolved);
return nav.getExtern(ip);
}
/// Always returns `null` for `status == .type_resolved`. This function is intended

/// to be used by code generation, since semantic analysis will ensure that any `Nav`
/// which is potentially `extern` is fully resolved.
@ -585,6 +594,15 @@ pub const Nav = struct {
};
}
/// Asserts that `status != .unresolved`.
pub fn getLinkSection(nav: Nav) OptionalNullTerminatedString {
return switch (nav.status) {
.unresolved => unreachable,
.type_resolved => |r| r.@"linksection",
.fully_resolved => |r| r.@"linksection",
};
}
/// Asserts that `status != .unresolved`.
pub fn isThreadlocal(nav: Nav, ip: *const InternPool) bool {
return switch (nav.status) {
@ -598,6 +616,20 @@ pub const Nav = struct {
};
}
pub fn isFn(nav: Nav, ip: *const InternPool) bool {
return switch (nav.status) {
.unresolved => unreachable,
.type_resolved => |r| {
const tag = ip.zigTypeTagOrPoison(r.type) catch unreachable;
return tag == .@"fn";
},
.fully_resolved => |r| {
const tag = ip.zigTypeTagOrPoison(ip.typeOf(r.val)) catch unreachable;
return tag == .@"fn";
},
};
}
/// If this returns `true`, then a pointer to this `Nav` might actually be encoded as a pointer
/// to some other `Nav` due to an extern definition or extern alias (see #21027).
/// This query is valid on `Nav`s for whom only the type is resolved.
@ -3360,6 +3392,10 @@ pub const LoadedUnionType = struct {
return flags.status == .field_types_wip;
}
pub fn requiresComptime(u: LoadedUnionType, ip: *const InternPool) RequiresComptime {
return u.flagsUnordered(ip).requires_comptime;
}
pub fn setRequiresComptimeWip(u: LoadedUnionType, ip: *InternPool) RequiresComptime {
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
extra_mutex.lock();
@ -4014,7 +4050,7 @@ pub const LoadedStructType = struct {
}
}
pub fn haveLayout(s: LoadedStructType, ip: *InternPool) bool {
pub fn haveLayout(s: LoadedStructType, ip: *const InternPool) bool {
return switch (s.layout) {
.@"packed" => s.backingIntTypeUnordered(ip) != .none,
.auto, .@"extern" => s.flagsUnordered(ip).layout_resolved,
@ -11797,6 +11833,10 @@ pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E {
return @enumFromInt(ip.indexToKey(int).int.storage.u64);
}
pub fn toFunc(ip: *const InternPool, i: Index) Key.Func {
return ip.indexToKey(i).func;
}
pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 {
return switch (ip.indexToKey(ty)) {
.struct_type => ip.loadStructType(ty).field_types.len,

View File

@ -38298,7 +38298,7 @@ pub fn flushExports(sema: *Sema) !void {
// So, pick up and delete any existing exports. This strategy performs
// redundant work, but that's okay, because this case is exceedingly rare.
if (zcu.single_exports.get(sema.owner)) |export_idx| {
try sema.exports.append(gpa, zcu.all_exports.items[export_idx]);
try sema.exports.append(gpa, export_idx.ptr(zcu).*);
} else if (zcu.multi_exports.get(sema.owner)) |info| {
try sema.exports.appendSlice(gpa, zcu.all_exports.items[info.index..][0..info.len]);
}
@ -38307,12 +38307,12 @@ pub fn flushExports(sema: *Sema) !void {
// `sema.exports` is completed; store the data into the `Zcu`.
if (sema.exports.items.len == 1) {
try zcu.single_exports.ensureUnusedCapacity(gpa, 1);
const export_idx = zcu.free_exports.popOrNull() orelse idx: {
const export_idx: Zcu.Export.Index = zcu.free_exports.popOrNull() orelse idx: {
_ = try zcu.all_exports.addOne(gpa);
break :idx zcu.all_exports.items.len - 1;
break :idx @enumFromInt(zcu.all_exports.items.len - 1);
};
zcu.all_exports.items[export_idx] = sema.exports.items[0];
zcu.single_exports.putAssumeCapacityNoClobber(sema.owner, @intCast(export_idx));
export_idx.ptr(zcu).* = sema.exports.items[0];
zcu.single_exports.putAssumeCapacityNoClobber(sema.owner, export_idx);
} else {
try zcu.multi_exports.ensureUnusedCapacity(gpa, 1);
const exports_base = zcu.all_exports.items.len;

View File

@ -441,7 +441,7 @@ pub fn toValue(self: Type) Value {
const RuntimeBitsError = SemaError || error{NeedLazy};
pub fn hasRuntimeBits(ty: Type, zcu: *Zcu) bool {
pub fn hasRuntimeBits(ty: Type, zcu: *const Zcu) bool {
return hasRuntimeBitsInner(ty, false, .eager, zcu, {}) catch unreachable;
}
@ -452,7 +452,7 @@ pub fn hasRuntimeBitsSema(ty: Type, pt: Zcu.PerThread) SemaError!bool {
};
}
pub fn hasRuntimeBitsIgnoreComptime(ty: Type, zcu: *Zcu) bool {
pub fn hasRuntimeBitsIgnoreComptime(ty: Type, zcu: *const Zcu) bool {
return hasRuntimeBitsInner(ty, true, .eager, zcu, {}) catch unreachable;
}
@ -471,7 +471,7 @@ pub fn hasRuntimeBitsInner(
ty: Type,
ignore_comptime_only: bool,
comptime strat: ResolveStratLazy,
zcu: *Zcu,
zcu: strat.ZcuPtr(),
tid: strat.Tid(),
) RuntimeBitsError!bool {
const ip = &zcu.intern_pool;
@ -560,7 +560,7 @@ pub fn hasRuntimeBitsInner(
},
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) {
if (strat != .eager and struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) {
// In this case, we guess that hasRuntimeBits() for this type is true,
// and then later if our guess was incorrect, we emit a compile error.
return true;
@ -596,7 +596,7 @@ pub fn hasRuntimeBitsInner(
const union_type = ip.loadUnionType(ty.toIntern());
const union_flags = union_type.flagsUnordered(ip);
switch (union_flags.runtime_tag) {
.none => {
.none => if (strat != .eager) {
// In this case, we guess that hasRuntimeBits() for this type is true,
// and then later if our guess was incorrect, we emit a compile error.
if (union_type.assumeRuntimeBitsIfFieldTypesWip(ip)) return true;
@ -774,7 +774,7 @@ pub fn fnHasRuntimeBitsSema(ty: Type, pt: Zcu.PerThread) SemaError!bool {
pub fn fnHasRuntimeBitsInner(
ty: Type,
comptime strat: ResolveStrat,
zcu: *Zcu,
zcu: strat.ZcuPtr(),
tid: strat.Tid(),
) SemaError!bool {
const fn_info = zcu.typeToFunc(ty).?;
@ -815,7 +815,7 @@ pub fn ptrAlignmentSema(ty: Type, pt: Zcu.PerThread) SemaError!Alignment {
pub fn ptrAlignmentInner(
ty: Type,
comptime strat: ResolveStrat,
zcu: *Zcu,
zcu: strat.ZcuPtr(),
tid: strat.Tid(),
) !Alignment {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
@ -868,14 +868,25 @@ pub const ResolveStratLazy = enum {
/// This should typically be used from semantic analysis.
sema,
pub fn Tid(comptime strat: ResolveStratLazy) type {
pub fn Tid(strat: ResolveStratLazy) type {
return switch (strat) {
.lazy, .sema => Zcu.PerThread.Id,
.eager => void,
};
}
pub fn pt(comptime strat: ResolveStratLazy, zcu: *Zcu, tid: strat.Tid()) switch (strat) {
pub fn ZcuPtr(strat: ResolveStratLazy) type {
return switch (strat) {
.eager => *const Zcu,
.sema, .lazy => *Zcu,
};
}
pub fn pt(
comptime strat: ResolveStratLazy,
zcu: strat.ZcuPtr(),
tid: strat.Tid(),
) switch (strat) {
.lazy, .sema => Zcu.PerThread,
.eager => void,
} {
@ -896,14 +907,21 @@ pub const ResolveStrat = enum {
/// This should typically be used from semantic analysis.
sema,
pub fn Tid(comptime strat: ResolveStrat) type {
pub fn Tid(strat: ResolveStrat) type {
return switch (strat) {
.sema => Zcu.PerThread.Id,
.normal => void,
};
}
pub fn pt(comptime strat: ResolveStrat, zcu: *Zcu, tid: strat.Tid()) switch (strat) {
pub fn ZcuPtr(strat: ResolveStrat) type {
return switch (strat) {
.normal => *const Zcu,
.sema => *Zcu,
};
}
pub fn pt(comptime strat: ResolveStrat, zcu: strat.ZcuPtr(), tid: strat.Tid()) switch (strat) {
.sema => Zcu.PerThread,
.normal => void,
} {
@ -922,7 +940,7 @@ pub const ResolveStrat = enum {
};
/// Never returns `none`. Asserts that all necessary type resolution is already done.
pub fn abiAlignment(ty: Type, zcu: *Zcu) Alignment {
pub fn abiAlignment(ty: Type, zcu: *const Zcu) Alignment {
return (ty.abiAlignmentInner(.eager, zcu, {}) catch unreachable).scalar;
}
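The new `ZcuPtr` helper makes const-correctness follow the resolution strategy: eager queries receive `*const Zcu` and cannot mutate, while sema/lazy queries keep mutable access. Because the strategy is comptime-known, branches that mutate are never analyzed for the read-only strategy. A self-contained sketch of the device, with stand-in types:

```zig
const std = @import("std");

const State = struct { counter: u32 };

const Strat = enum {
    eager,
    sema,

    // The strategy picks the pointer type at comptime, as ZcuPtr does above.
    fn StatePtr(strat: Strat) type {
        return switch (strat) {
            .eager => *const State,
            .sema => *State,
        };
    }
};

fn query(comptime strat: Strat, state: strat.StatePtr()) u32 {
    // Only analyzed for .sema, where `state` is mutable.
    if (strat == .sema) state.counter += 1;
    return state.counter;
}

test Strat {
    var s: State = .{ .counter = 7 };
    try std.testing.expectEqual(@as(u32, 8), query(.sema, &s));
    const cs: *const State = &s;
    try std.testing.expectEqual(@as(u32, 8), query(.eager, cs));
}
```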
@ -939,7 +957,7 @@ pub fn abiAlignmentSema(ty: Type, pt: Zcu.PerThread) SemaError!Alignment {
pub fn abiAlignmentInner(
ty: Type,
comptime strat: ResolveStratLazy,
zcu: *Zcu,
zcu: strat.ZcuPtr(),
tid: strat.Tid(),
) SemaError!AbiAlignmentInner {
const pt = strat.pt(zcu, tid);
@ -1156,7 +1174,7 @@ pub fn abiAlignmentInner(
fn abiAlignmentInnerErrorUnion(
ty: Type,
comptime strat: ResolveStratLazy,
zcu: *Zcu,
zcu: strat.ZcuPtr(),
tid: strat.Tid(),
payload_ty: Type,
) SemaError!AbiAlignmentInner {
@ -1198,7 +1216,7 @@ fn abiAlignmentInnerErrorUnion(
fn abiAlignmentInnerOptional(
ty: Type,
comptime strat: ResolveStratLazy,
zcu: *Zcu,
zcu: strat.ZcuPtr(),
tid: strat.Tid(),
) SemaError!AbiAlignmentInner {
const pt = strat.pt(zcu, tid);
@ -1244,7 +1262,7 @@ const AbiSizeInner = union(enum) {
/// Asserts the type has the ABI size already resolved.
/// Types that return false for hasRuntimeBits() return 0.
pub fn abiSize(ty: Type, zcu: *Zcu) u64 {
pub fn abiSize(ty: Type, zcu: *const Zcu) u64 {
return (abiSizeInner(ty, .eager, zcu, {}) catch unreachable).scalar;
}
@ -1269,7 +1287,7 @@ pub fn abiSizeSema(ty: Type, pt: Zcu.PerThread) SemaError!u64 {
pub fn abiSizeInner(
ty: Type,
comptime strat: ResolveStratLazy,
zcu: *Zcu,
zcu: strat.ZcuPtr(),
tid: strat.Tid(),
) SemaError!AbiSizeInner {
const target = zcu.getTarget();
@ -1542,7 +1560,7 @@ pub fn abiSizeInner(
fn abiSizeInnerOptional(
ty: Type,
comptime strat: ResolveStratLazy,
zcu: *Zcu,
zcu: strat.ZcuPtr(),
tid: strat.Tid(),
) SemaError!AbiSizeInner {
const child_ty = ty.optionalChild(zcu);
@ -1701,7 +1719,7 @@ pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 {
};
}
pub fn bitSize(ty: Type, zcu: *Zcu) u64 {
pub fn bitSize(ty: Type, zcu: *const Zcu) u64 {
return bitSizeInner(ty, .normal, zcu, {}) catch unreachable;
}
@ -1712,7 +1730,7 @@ pub fn bitSizeSema(ty: Type, pt: Zcu.PerThread) SemaError!u64 {
pub fn bitSizeInner(
ty: Type,
comptime strat: ResolveStrat,
zcu: *Zcu,
zcu: strat.ZcuPtr(),
tid: strat.Tid(),
) SemaError!u64 {
const target = zcu.getTarget();
@ -2148,7 +2166,7 @@ pub fn unionBackingType(ty: Type, pt: Zcu.PerThread) !Type {
};
}
pub fn unionGetLayout(ty: Type, zcu: *Zcu) Zcu.UnionLayout {
pub fn unionGetLayout(ty: Type, zcu: *const Zcu) Zcu.UnionLayout {
const union_obj = zcu.intern_pool.loadUnionType(ty.toIntern());
return Type.getUnionLayout(union_obj, zcu);
}
@ -2746,7 +2764,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
/// During semantic analysis, instead call `ty.comptimeOnlySema` which
/// resolves field types rather than asserting they are already resolved.
pub fn comptimeOnly(ty: Type, zcu: *Zcu) bool {
pub fn comptimeOnly(ty: Type, zcu: *const Zcu) bool {
return ty.comptimeOnlyInner(.normal, zcu, {}) catch unreachable;
}
@ -2759,7 +2777,7 @@ pub fn comptimeOnlySema(ty: Type, pt: Zcu.PerThread) SemaError!bool {
pub fn comptimeOnlyInner(
ty: Type,
comptime strat: ResolveStrat,
zcu: *Zcu,
zcu: strat.ZcuPtr(),
tid: strat.Tid(),
) SemaError!bool {
const ip = &zcu.intern_pool;
@ -2834,40 +2852,44 @@ pub fn comptimeOnlyInner(
if (struct_type.layout == .@"packed")
return false;
// A struct with no fields is not comptime-only.
return switch (struct_type.setRequiresComptimeWip(ip)) {
.no, .wip => false,
.yes => true,
.unknown => {
// Inlined `assert` so that the resolution calls below are not statically reachable.
if (strat != .sema) unreachable;
if (struct_type.flagsUnordered(ip).field_types_wip) {
struct_type.setRequiresComptime(ip, .unknown);
return false;
}
errdefer struct_type.setRequiresComptime(ip, .unknown);
const pt = strat.pt(zcu, tid);
try ty.resolveFields(pt);
for (0..struct_type.field_types.len) |i_usize| {
const i: u32 = @intCast(i_usize);
if (struct_type.fieldIsComptime(ip, i)) continue;
const field_ty = struct_type.field_types.get(ip)[i];
if (try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) {
// Note that this does not cause the layout to
// be considered resolved. Comptime-only types
// still maintain a layout of their
// runtime-known fields.
struct_type.setRequiresComptime(ip, .yes);
return true;
return switch (strat) {
.normal => switch (struct_type.requiresComptime(ip)) {
.wip => unreachable,
.no => false,
.yes => true,
.unknown => unreachable,
},
.sema => switch (struct_type.setRequiresComptimeWip(ip)) {
.no, .wip => false,
.yes => true,
.unknown => {
if (struct_type.flagsUnordered(ip).field_types_wip) {
struct_type.setRequiresComptime(ip, .unknown);
return false;
}
}
struct_type.setRequiresComptime(ip, .no);
return false;
errdefer struct_type.setRequiresComptime(ip, .unknown);
const pt = strat.pt(zcu, tid);
try ty.resolveFields(pt);
for (0..struct_type.field_types.len) |i_usize| {
const i: u32 = @intCast(i_usize);
if (struct_type.fieldIsComptime(ip, i)) continue;
const field_ty = struct_type.field_types.get(ip)[i];
if (try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) {
// Note that this does not cause the layout to
// be considered resolved. Comptime-only types
// still maintain a layout of their
// runtime-known fields.
struct_type.setRequiresComptime(ip, .yes);
return true;
}
}
struct_type.setRequiresComptime(ip, .no);
return false;
},
},
};
},
@ -2882,35 +2904,40 @@ pub fn comptimeOnlyInner(
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
switch (union_type.setRequiresComptimeWip(ip)) {
.no, .wip => return false,
.yes => return true,
.unknown => {
// Inlined `assert` so that the resolution calls below are not statically reachable.
if (strat != .sema) unreachable;
if (union_type.flagsUnordered(ip).status == .field_types_wip) {
union_type.setRequiresComptime(ip, .unknown);
return false;
}
errdefer union_type.setRequiresComptime(ip, .unknown);
const pt = strat.pt(zcu, tid);
try ty.resolveFields(pt);
for (0..union_type.field_types.len) |field_idx| {
const field_ty = union_type.field_types.get(ip)[field_idx];
if (try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) {
union_type.setRequiresComptime(ip, .yes);
return true;
}
}
union_type.setRequiresComptime(ip, .no);
return false;
return switch (strat) {
.normal => switch (union_type.requiresComptime(ip)) {
.wip => unreachable,
.no => false,
.yes => true,
.unknown => unreachable,
},
}
.sema => switch (union_type.setRequiresComptimeWip(ip)) {
.no, .wip => return false,
.yes => return true,
.unknown => {
if (union_type.flagsUnordered(ip).status == .field_types_wip) {
union_type.setRequiresComptime(ip, .unknown);
return false;
}
errdefer union_type.setRequiresComptime(ip, .unknown);
const pt = strat.pt(zcu, tid);
try ty.resolveFields(pt);
for (0..union_type.field_types.len) |field_idx| {
const field_ty = union_type.field_types.get(ip)[field_idx];
if (try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) {
union_type.setRequiresComptime(ip, .yes);
return true;
}
}
union_type.setRequiresComptime(ip, .no);
return false;
},
},
};
},
.opaque_type => false,
@ -3207,7 +3234,7 @@ pub fn fieldAlignmentInner(
ty: Type,
index: usize,
comptime strat: ResolveStrat,
zcu: *Zcu,
zcu: strat.ZcuPtr(),
tid: strat.Tid(),
) SemaError!Alignment {
const ip = &zcu.intern_pool;
@ -3281,7 +3308,7 @@ pub fn structFieldAlignmentInner(
explicit_alignment: Alignment,
layout: std.builtin.Type.ContainerLayout,
comptime strat: Type.ResolveStrat,
zcu: *Zcu,
zcu: strat.ZcuPtr(),
tid: strat.Tid(),
) SemaError!Alignment {
assert(layout != .@"packed");
@ -3323,7 +3350,7 @@ pub fn unionFieldAlignmentInner(
explicit_alignment: Alignment,
layout: std.builtin.Type.ContainerLayout,
comptime strat: Type.ResolveStrat,
zcu: *Zcu,
zcu: strat.ZcuPtr(),
tid: strat.Tid(),
) SemaError!Alignment {
assert(layout != .@"packed");
@ -3392,11 +3419,7 @@ pub const FieldOffset = struct {
};
/// Supports structs and unions.
pub fn structFieldOffset(
ty: Type,
index: usize,
zcu: *Zcu,
) u64 {
pub fn structFieldOffset(ty: Type, index: usize, zcu: *const Zcu) u64 {
const ip = &zcu.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
@ -3944,7 +3967,7 @@ fn resolveUnionInner(
};
}
pub fn getUnionLayout(loaded_union: InternPool.LoadedUnionType, zcu: *Zcu) Zcu.UnionLayout {
pub fn getUnionLayout(loaded_union: InternPool.LoadedUnionType, zcu: *const Zcu) Zcu.UnionLayout {
const ip = &zcu.intern_pool;
assert(loaded_union.haveLayout(ip));
var most_aligned_field: u32 = undefined;

View File

@ -241,12 +241,12 @@ pub fn getVariable(val: Value, mod: *Zcu) ?InternPool.Key.Variable {
/// If the value fits in a u64, return it, otherwise null.
/// Asserts not undefined.
pub fn getUnsignedInt(val: Value, zcu: *Zcu) ?u64 {
pub fn getUnsignedInt(val: Value, zcu: *const Zcu) ?u64 {
return getUnsignedIntInner(val, .normal, zcu, {}) catch unreachable;
}
/// Asserts the value is an integer and it fits in a u64
pub fn toUnsignedInt(val: Value, zcu: *Zcu) u64 {
pub fn toUnsignedInt(val: Value, zcu: *const Zcu) u64 {
return getUnsignedInt(val, zcu).?;
}
@ -259,7 +259,7 @@ pub fn getUnsignedIntSema(val: Value, pt: Zcu.PerThread) !?u64 {
pub fn getUnsignedIntInner(
val: Value,
comptime strat: ResolveStrat,
zcu: *Zcu,
zcu: strat.ZcuPtr(),
tid: strat.Tid(),
) !?u64 {
return switch (val.toIntern()) {
@ -304,7 +304,7 @@ pub fn toUnsignedIntSema(val: Value, pt: Zcu.PerThread) !u64 {
}
/// Asserts the value is an integer and it fits in a i64
pub fn toSignedInt(val: Value, zcu: *Zcu) i64 {
pub fn toSignedInt(val: Value, zcu: *const Zcu) i64 {
return switch (val.toIntern()) {
.bool_false => 0,
.bool_true => 1,

View File

@ -19,8 +19,8 @@ const Ast = std.zig.Ast;
const Zcu = @This();
const Compilation = @import("Compilation.zig");
const Cache = std.Build.Cache;
const Value = @import("Value.zig");
const Type = @import("Type.zig");
pub const Value = @import("Value.zig");
pub const Type = @import("Type.zig");
const Package = @import("Package.zig");
const link = @import("link.zig");
const Air = @import("Air.zig");
@ -79,11 +79,11 @@ local_zir_cache: Compilation.Directory,
all_exports: std.ArrayListUnmanaged(Export) = .empty,
/// This is a list of free indices in `all_exports`. These indices may be reused by exports from
/// future semantic analysis.
free_exports: std.ArrayListUnmanaged(u32) = .empty,
free_exports: std.ArrayListUnmanaged(Export.Index) = .empty,
/// Maps from an `AnalUnit` which performs a single export, to the index into `all_exports` of
/// the export it performs. Note that the key is not the `Decl` being exported, but the `AnalUnit`
/// whose analysis triggered the export.
single_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
single_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, Export.Index) = .empty,
/// Like `single_exports`, but for `AnalUnit`s which perform multiple exports.
/// The exports are `all_exports.items[index..][0..len]`.
multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
@ -127,6 +127,7 @@ transitive_failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .emp
/// This may be a simple "value" `Nav`, or it may be a function.
/// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator.
failed_codegen: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, *ErrorMsg) = .empty,
failed_types: std.AutoArrayHashMapUnmanaged(InternPool.Index, *ErrorMsg) = .empty,
/// Keep track of one `@compileLog` callsite per `AnalUnit`.
/// The value is the source location of the `@compileLog` call, convertible to a `LazySrcLoc`.
compile_log_sources: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
@ -144,8 +145,7 @@ compile_log_sources: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .empty,
/// The ErrorMsg memory is owned by the `EmbedFile`, using Module's general purpose allocator.
failed_embed_files: std.AutoArrayHashMapUnmanaged(*EmbedFile, *ErrorMsg) = .empty,
/// Key is index into `all_exports`.
failed_exports: std.AutoArrayHashMapUnmanaged(u32, *ErrorMsg) = .empty,
failed_exports: std.AutoArrayHashMapUnmanaged(Export.Index, *ErrorMsg) = .empty,
/// If analysis failed due to a cimport error, the corresponding Clang errors
/// are stored here.
cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .empty,
@ -524,6 +524,15 @@ pub const Export = struct {
section: InternPool.OptionalNullTerminatedString = .none,
visibility: std.builtin.SymbolVisibility = .default,
};
/// Index into `all_exports`.
pub const Index = enum(u32) {
_,
pub fn ptr(i: Index, zcu: *const Zcu) *Export {
return &zcu.all_exports.items[@intFromEnum(i)];
}
};
};
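`Export.Index` is the typed-index pattern: a non-exhaustive `enum(u32)` that cannot be confused with other integers, plus a `ptr` accessor that recovers the element. A standalone sketch with simplified types:

```zig
const std = @import("std");

const Export = struct { name: []const u8 };

// Wrapping the raw u32 in an enum makes mixing it up with other
// integer indices a compile error.
const Index = enum(u32) {
    _,

    fn ptr(i: Index, list: []Export) *Export {
        return &list[@intFromEnum(i)];
    }
};

test Index {
    var all_exports = [_]Export{ .{ .name = "a" }, .{ .name = "b" } };
    const idx: Index = @enumFromInt(1);
    try std.testing.expectEqualStrings("b", idx.ptr(&all_exports).name);
}
```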
pub const Reference = struct {
@ -2439,16 +2448,14 @@ pub fn deinit(zcu: *Zcu) void {
zcu.local_zir_cache.handle.close();
zcu.global_zir_cache.handle.close();
for (zcu.failed_analysis.values()) |value| {
value.destroy(gpa);
}
for (zcu.failed_codegen.values()) |value| {
value.destroy(gpa);
}
for (zcu.failed_analysis.values()) |value| value.destroy(gpa);
for (zcu.failed_codegen.values()) |value| value.destroy(gpa);
for (zcu.failed_types.values()) |value| value.destroy(gpa);
zcu.analysis_in_progress.deinit(gpa);
zcu.failed_analysis.deinit(gpa);
zcu.transitive_failed_analysis.deinit(gpa);
zcu.failed_codegen.deinit(gpa);
zcu.failed_types.deinit(gpa);
for (zcu.failed_files.values()) |value| {
if (value) |msg| msg.destroy(gpa);
@ -3093,7 +3100,7 @@ pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void {
const gpa = zcu.gpa;
const exports_base, const exports_len = if (zcu.single_exports.fetchSwapRemove(anal_unit)) |kv|
.{ kv.value, 1 }
.{ @intFromEnum(kv.value), 1 }
else if (zcu.multi_exports.fetchSwapRemove(anal_unit)) |info|
.{ info.value.index, info.value.len }
else
@ -3107,11 +3114,12 @@ pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void {
// This case is needed because in some rare edge cases, `Sema` wants to add and delete exports
// within a single update.
if (dev.env.supports(.incremental)) {
for (exports, exports_base..) |exp, export_idx| {
for (exports, exports_base..) |exp, export_index_usize| {
const export_idx: Export.Index = @enumFromInt(export_index_usize);
if (zcu.comp.bin_file) |lf| {
lf.deleteExport(exp.exported, exp.opts.name);
}
if (zcu.failed_exports.fetchSwapRemove(@intCast(export_idx))) |failed_kv| {
if (zcu.failed_exports.fetchSwapRemove(export_idx)) |failed_kv| {
failed_kv.value.destroy(gpa);
}
}
@ -3123,7 +3131,7 @@ pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void {
return;
};
for (exports_base..exports_base + exports_len) |export_idx| {
zcu.free_exports.appendAssumeCapacity(@intCast(export_idx));
zcu.free_exports.appendAssumeCapacity(@enumFromInt(export_idx));
}
}
@ -3269,7 +3277,7 @@ fn lockAndClearFileCompileError(zcu: *Zcu, file: *File) void {
pub fn handleUpdateExports(
zcu: *Zcu,
export_indices: []const u32,
export_indices: []const Export.Index,
result: link.File.UpdateExportsError!void,
) Allocator.Error!void {
const gpa = zcu.gpa;
@ -3277,12 +3285,10 @@ pub fn handleUpdateExports(
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
const export_idx = export_indices[0];
const new_export = &zcu.all_exports.items[export_idx];
const new_export = export_idx.ptr(zcu);
new_export.status = .failed_retryable;
try zcu.failed_exports.ensureUnusedCapacity(gpa, 1);
const msg = try ErrorMsg.create(gpa, new_export.src, "unable to export: {s}", .{
@errorName(err),
});
const msg = try ErrorMsg.create(gpa, new_export.src, "unable to export: {s}", .{@errorName(err)});
zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg);
},
};
@ -3443,7 +3449,7 @@ pub fn atomicPtrAlignment(
/// * `@TypeOf(.{})`
/// * A struct which has no fields (`struct {}`).
/// * Not a struct.
pub fn typeToStruct(zcu: *Zcu, ty: Type) ?InternPool.LoadedStructType {
pub fn typeToStruct(zcu: *const Zcu, ty: Type) ?InternPool.LoadedStructType {
if (ty.ip_index == .none) return null;
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.ip_index)) {
@ -3452,7 +3458,7 @@ pub fn typeToStruct(zcu: *Zcu, ty: Type) ?InternPool.LoadedStructType {
};
}
pub fn typeToPackedStruct(zcu: *Zcu, ty: Type) ?InternPool.LoadedStructType {
pub fn typeToPackedStruct(zcu: *const Zcu, ty: Type) ?InternPool.LoadedStructType {
const s = zcu.typeToStruct(ty) orelse return null;
if (s.layout != .@"packed") return null;
return s;
@ -3477,7 +3483,7 @@ pub fn iesFuncIndex(zcu: *const Zcu, ies_index: InternPool.Index) InternPool.Ind
}
pub fn funcInfo(zcu: *const Zcu, func_index: InternPool.Index) InternPool.Key.Func {
return zcu.intern_pool.indexToKey(func_index).func;
return zcu.intern_pool.toFunc(func_index);
}
pub fn toEnum(zcu: *const Zcu, comptime E: type, val: Value) E {
@ -3791,6 +3797,18 @@ pub fn navSrcLoc(zcu: *const Zcu, nav_index: InternPool.Nav.Index) LazySrcLoc {
};
}
pub fn typeSrcLoc(zcu: *const Zcu, ty_index: InternPool.Index) LazySrcLoc {
_ = zcu;
_ = ty_index;
@panic("TODO");
}
pub fn typeFileScope(zcu: *Zcu, ty_index: InternPool.Index) *File {
_ = zcu;
_ = ty_index;
@panic("TODO");
}
pub fn navSrcLine(zcu: *Zcu, nav_index: InternPool.Nav.Index) u32 {
const ip = &zcu.intern_pool;
const inst_info = ip.getNav(nav_index).srcInst(ip).resolveFull(ip).?;
@ -4051,3 +4069,54 @@ pub fn navValIsConst(zcu: *const Zcu, val: InternPool.Index) bool {
else => true,
};
}
pub const CodegenFailError = error{
/// Indicates the error message has already been stored in `Zcu.failed_codegen`.
CodegenFail,
OutOfMemory,
};
pub fn codegenFail(
zcu: *Zcu,
nav_index: InternPool.Nav.Index,
comptime format: []const u8,
args: anytype,
) CodegenFailError {
const gpa = zcu.gpa;
try zcu.failed_codegen.ensureUnusedCapacity(gpa, 1);
const msg = try Zcu.ErrorMsg.create(gpa, zcu.navSrcLoc(nav_index), format, args);
zcu.failed_codegen.putAssumeCapacityNoClobber(nav_index, msg);
return error.CodegenFail;
}
pub fn codegenFailMsg(zcu: *Zcu, nav_index: InternPool.Nav.Index, msg: *ErrorMsg) CodegenFailError {
const gpa = zcu.gpa;
{
errdefer msg.deinit(gpa);
try zcu.failed_codegen.putNoClobber(gpa, nav_index, msg);
}
return error.CodegenFail;
}
pub fn codegenFailType(
zcu: *Zcu,
ty_index: InternPool.Index,
comptime format: []const u8,
args: anytype,
) CodegenFailError {
const gpa = zcu.gpa;
try zcu.failed_types.ensureUnusedCapacity(gpa, 1);
const msg = try Zcu.ErrorMsg.create(gpa, zcu.typeSrcLoc(ty_index), format, args);
zcu.failed_types.putAssumeCapacityNoClobber(ty_index, msg);
return error.CodegenFail;
}
pub fn codegenFailTypeMsg(zcu: *Zcu, ty_index: InternPool.Index, msg: *ErrorMsg) CodegenFailError {
const gpa = zcu.gpa;
{
errdefer msg.deinit(gpa);
try zcu.failed_types.ensureUnusedCapacity(gpa, 1);
}
zcu.failed_types.putAssumeCapacityNoClobber(ty_index, msg);
return error.CodegenFail;
}
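Reviewer note: the contract of these helpers seems to be that `error.CodegenFail` is only returned once the message is parked in `zcu.failed_codegen` or `zcu.failed_types`, which is what the `assert(zcu.failed_codegen.contains(...))` call sites elsewhere in this commit rely on. A hedged sketch of the calling convention, assuming this file's imports; `emitNav` and its size check are hypothetical:

fn emitNav(zcu: *Zcu, nav_index: InternPool.Nav.Index) CodegenFailError!void {
    const size: u64 = 1 << 40; // pretend we computed something unsupported
    if (size > std.math.maxInt(u32)) {
        // Formats the message, stores it in `zcu.failed_codegen`, and returns
        // `error.CodegenFail`, so callers never handle a raw message pointer.
        return zcu.codegenFail(nav_index, "object too large: {d} bytes", .{size});
    }
}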

View File

@ -1722,22 +1722,18 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
// Correcting this failure will involve changing a type this function
// depends on, hence triggering re-analysis of this function, so this
// interacts correctly with incremental compilation.
// TODO: do we need to mark this failure anywhere? I don't think so, since compilation
// will fail due to the type error anyway.
} else if (comp.bin_file) |lf| {
lf.updateFunc(pt, func_index, air, liveness) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
assert(zcu.failed_codegen.contains(nav_index));
},
else => {
error.CodegenFail => assert(zcu.failed_codegen.contains(nav_index)),
error.Overflow => {
try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create(
gpa,
zcu.navSrcLoc(nav_index),
"unable to codegen: {s}",
.{@errorName(err)},
));
try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .func = func_index }));
// Not a retryable failure.
},
};
} else if (zcu.llvm_object) |llvm_object| {
@ -2819,8 +2815,8 @@ pub fn processExports(pt: Zcu.PerThread) !void {
const gpa = zcu.gpa;
// First, construct a mapping of every exported value and Nav to the indices of all its different exports.
var nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, std.ArrayListUnmanaged(u32)) = .empty;
var uav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, std.ArrayListUnmanaged(u32)) = .empty;
var nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, std.ArrayListUnmanaged(Zcu.Export.Index)) = .empty;
var uav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, std.ArrayListUnmanaged(Zcu.Export.Index)) = .empty;
defer {
for (nav_exports.values()) |*exports| {
exports.deinit(gpa);
@ -2839,7 +2835,7 @@ pub fn processExports(pt: Zcu.PerThread) !void {
try nav_exports.ensureTotalCapacity(gpa, zcu.single_exports.count() + zcu.multi_exports.count());
for (zcu.single_exports.values()) |export_idx| {
const exp = zcu.all_exports.items[export_idx];
const exp = export_idx.ptr(zcu);
const value_ptr, const found_existing = switch (exp.exported) {
.nav => |nav| gop: {
const gop = try nav_exports.getOrPut(gpa, nav);
@ -2867,7 +2863,7 @@ pub fn processExports(pt: Zcu.PerThread) !void {
},
};
if (!found_existing) value_ptr.* = .{};
try value_ptr.append(gpa, @intCast(export_idx));
try value_ptr.append(gpa, @enumFromInt(export_idx));
}
}
@ -2886,20 +2882,20 @@ pub fn processExports(pt: Zcu.PerThread) !void {
}
}
const SymbolExports = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, u32);
const SymbolExports = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Zcu.Export.Index);
fn processExportsInner(
pt: Zcu.PerThread,
symbol_exports: *SymbolExports,
exported: Zcu.Exported,
export_indices: []const u32,
export_indices: []const Zcu.Export.Index,
) error{OutOfMemory}!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
for (export_indices) |export_idx| {
const new_export = &zcu.all_exports.items[export_idx];
const new_export = export_idx.ptr(zcu);
const gop = try symbol_exports.getOrPut(gpa, new_export.opts.name);
if (gop.found_existing) {
new_export.status = .failed_retryable;
@ -2908,7 +2904,7 @@ fn processExportsInner(
new_export.opts.name.fmt(ip),
});
errdefer msg.destroy(gpa);
const other_export = zcu.all_exports.items[gop.value_ptr.*];
const other_export = gop.value_ptr.ptr(zcu);
try zcu.errNote(other_export.src, msg, "other symbol here", .{});
zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg);
new_export.status = .failed;
@ -3100,6 +3096,7 @@ pub fn populateTestFunctions(
pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error{OutOfMemory}!void {
const zcu = pt.zcu;
const comp = zcu.comp;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const nav = zcu.intern_pool.getNav(nav_index);
@ -3113,26 +3110,15 @@ pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error
} else if (comp.bin_file) |lf| {
lf.updateNav(pt, nav_index) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
assert(zcu.failed_codegen.contains(nav_index));
},
else => {
const gpa = zcu.gpa;
try zcu.failed_codegen.ensureUnusedCapacity(gpa, 1);
zcu.failed_codegen.putAssumeCapacityNoClobber(nav_index, try Zcu.ErrorMsg.create(
error.CodegenFail => assert(zcu.failed_codegen.contains(nav_index)),
error.Overflow => {
try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create(
gpa,
zcu.navSrcLoc(nav_index),
"unable to codegen: {s}",
.{@errorName(err)},
));
if (nav.analysis != null) {
try zcu.retryable_failures.append(zcu.gpa, .wrap(.{ .nav_val = nav_index }));
} else {
// TODO: we don't have a way to indicate that this failure is retryable!
// Since these are really rare, we could as a cop-out retry the whole build next update.
// But perhaps we can do better...
@panic("TODO: retryable failure codegenning non-declaration Nav");
}
// Not a retryable failure.
},
};
} else if (zcu.llvm_object) |llvm_object| {
@ -3142,24 +3128,26 @@ pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error
}
}
pub fn linkerUpdateContainerType(pt: Zcu.PerThread, ty: InternPool.Index) !void {
pub fn linkerUpdateContainerType(pt: Zcu.PerThread, ty: InternPool.Index) error{OutOfMemory}!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const comp = zcu.comp;
const ip = &zcu.intern_pool;
const codegen_prog_node = zcu.codegen_prog_node.start(Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), 0);
defer codegen_prog_node.end();
if (zcu.failed_types.fetchSwapRemove(ty)) |*entry| entry.value.deinit(gpa);
if (!Air.typeFullyResolved(Type.fromInterned(ty), zcu)) {
// This type failed to resolve. This is a transitive failure.
// TODO: do we need to mark this failure anywhere? I don't think so, since compilation
// will fail due to the type error anyway.
} else if (comp.bin_file) |lf| {
lf.updateContainerType(pt, ty) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| log.err("codegen type failed: {s}", .{@errorName(e)}),
};
return;
}
if (comp.bin_file) |lf| lf.updateContainerType(pt, ty) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.TypeFailureReported => assert(zcu.failed_types.contains(ty)),
};
}
pub fn linkerUpdateLineNumber(pt: Zcu.PerThread, ti: InternPool.TrackedInst.Index) !void {

File diff suppressed because it is too large

View File

@ -20,7 +20,7 @@ debug_output: link.File.DebugInfoOutput,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
src_loc: Zcu.LazySrcLoc,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
prev_di_line: u32,
prev_di_column: u32,
@ -424,8 +424,10 @@ fn lowerBranches(emit: *Emit) !void {
}
fn writeInstruction(emit: *Emit, instruction: Instruction) !void {
const comp = emit.bin_file.comp;
const gpa = comp.gpa;
const endian = emit.target.cpu.arch.endian();
std.mem.writeInt(u32, try emit.code.addManyAsArray(4), instruction.toU32(), endian);
std.mem.writeInt(u32, try emit.code.addManyAsArray(gpa, 4), instruction.toU32(), endian);
}
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {

View File

@ -23,7 +23,6 @@ const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
const Alignment = InternPool.Alignment;
const Result = codegen.Result;
const CodeGenError = codegen.CodeGenError;
const bits = @import("bits.zig");
@ -333,9 +332,9 @@ pub fn generate(
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!Result {
) CodeGenError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
@ -377,10 +376,7 @@ pub fn generate(
defer function.dbg_info_relocs.deinit(gpa);
var call_info = function.resolveCallingConventionValues(func_ty) catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
error.CodegenFail => return error.CodegenFail,
else => |e| return e,
};
defer call_info.deinit(&function);
@ -391,15 +387,14 @@ pub fn generate(
function.max_end_stack = call_info.stack_byte_count;
function.gen() catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
error.CodegenFail => return error.CodegenFail,
error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}),
else => |e| return e,
};
for (function.dbg_info_relocs.items) |reloc| {
try reloc.genDbgInfo(function);
reloc.genDbgInfo(function) catch |err|
return function.fail("failed to generate debug info: {s}", .{@errorName(err)});
}
var mir = Mir{
@ -424,15 +419,9 @@ pub fn generate(
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
error.EmitFail => return Result{ .fail = emit.err_msg.? },
error.EmitFail => return function.failMsg(emit.err_msg.?),
else => |e| return e,
};
if (function.err_msg) |em| {
return Result{ .fail = em };
} else {
return Result.ok;
}
}
fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
@ -6310,20 +6299,19 @@ fn wantSafety(self: *Self) bool {
};
}
fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError {
fn fail(self: *Self, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
assert(self.err_msg == null);
const gpa = self.gpa;
self.err_msg = try ErrorMsg.create(gpa, self.src_loc, format, args);
return error.CodegenFail;
const zcu = self.pt.zcu;
const func = zcu.funcInfo(self.func_index);
const msg = try ErrorMsg.create(zcu.gpa, self.src_loc, format, args);
return zcu.codegenFailMsg(func.owner_nav, msg);
}
fn failSymbol(self: *Self, comptime format: []const u8, args: anytype) InnerError {
fn failMsg(self: *Self, msg: *ErrorMsg) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
assert(self.err_msg == null);
const gpa = self.gpa;
self.err_msg = try ErrorMsg.create(gpa, self.src_loc, format, args);
return error.CodegenFail;
const zcu = self.pt.zcu;
const func = zcu.funcInfo(self.func_index);
return zcu.codegenFailMsg(func.owner_nav, msg);
}
fn parseRegName(name: []const u8) ?Register {

View File

@ -24,7 +24,7 @@ debug_output: link.File.DebugInfoOutput,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
src_loc: Zcu.LazySrcLoc,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
prev_di_line: u32,
prev_di_column: u32,
@ -342,8 +342,10 @@ fn lowerBranches(emit: *Emit) !void {
}
fn writeInstruction(emit: *Emit, instruction: Instruction) !void {
const comp = emit.bin_file.comp;
const gpa = comp.gpa;
const endian = emit.target.cpu.arch.endian();
std.mem.writeInt(u32, try emit.code.addManyAsArray(4), instruction.toU32(), endian);
std.mem.writeInt(u32, try emit.code.addManyAsArray(gpa, 4), instruction.toU32(), endian);
}
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {

View File

@ -32,7 +32,6 @@ const wip_mir_log = std.log.scoped(.wip_mir);
const Alignment = InternPool.Alignment;
const CodeGenError = codegen.CodeGenError;
const Result = codegen.Result;
const bits = @import("bits.zig");
const abi = @import("abi.zig");
@ -62,7 +61,6 @@ gpa: Allocator,
mod: *Package.Module,
target: *const std.Target,
debug_output: link.File.DebugInfoOutput,
err_msg: ?*ErrorMsg,
args: []MCValue,
ret_mcv: InstTracking,
fn_type: Type,
@ -759,9 +757,9 @@ pub fn generate(
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!Result {
) CodeGenError!void {
const zcu = pt.zcu;
const comp = zcu.comp;
const gpa = zcu.gpa;
@ -788,7 +786,6 @@ pub fn generate(
.target = &mod.resolved_target.result,
.debug_output = debug_output,
.owner = .{ .nav_index = func.owner_nav },
.err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
.fn_type = fn_type,
@ -829,10 +826,7 @@ pub fn generate(
const fn_info = zcu.typeToFunc(fn_type).?;
var call_info = function.resolveCallingConventionValues(fn_info, &.{}) catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
error.CodegenFail => return error.CodegenFail,
else => |e| return e,
};
@ -861,10 +855,8 @@ pub fn generate(
}));
function.gen() catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
error.CodegenFail => return error.CodegenFail,
error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}),
else => |e| return e,
};
@ -895,28 +887,10 @@ pub fn generate(
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
error.LowerFail, error.EmitFail => return Result{ .fail = emit.lower.err_msg.? },
error.InvalidInstruction => |e| {
const msg = switch (e) {
error.InvalidInstruction => "CodeGen failed to find a viable instruction.",
};
return Result{
.fail = try ErrorMsg.create(
gpa,
src_loc,
"{s} This is a bug in the Zig compiler.",
.{msg},
),
};
},
error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?),
error.InvalidInstruction => |e| return function.fail("emit MIR failed: {s} (Zig compiler bug)", .{@errorName(e)}),
else => |e| return e,
};
if (function.err_msg) |em| {
return Result{ .fail = em };
} else {
return Result.ok;
}
}
pub fn generateLazy(
@ -924,9 +898,9 @@ pub fn generateLazy(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
lazy_sym: link.File.LazySymbol,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!Result {
) CodeGenError!void {
const comp = bin_file.comp;
const gpa = comp.gpa;
const mod = comp.root_mod;
@ -941,7 +915,6 @@ pub fn generateLazy(
.target = &mod.resolved_target.result,
.debug_output = debug_output,
.owner = .{ .lazy_sym = lazy_sym },
.err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
.fn_type = undefined,
@ -957,10 +930,8 @@ pub fn generateLazy(
defer function.mir_instructions.deinit(gpa);
function.genLazy(lazy_sym) catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
error.CodegenFail => return error.CodegenFail,
error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}),
else => |e| return e,
};
@ -991,28 +962,10 @@ pub fn generateLazy(
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
error.LowerFail, error.EmitFail => return Result{ .fail = emit.lower.err_msg.? },
error.InvalidInstruction => |e| {
const msg = switch (e) {
error.InvalidInstruction => "CodeGen failed to find a viable instruction.",
};
return Result{
.fail = try ErrorMsg.create(
gpa,
src_loc,
"{s} This is a bug in the Zig compiler.",
.{msg},
),
};
},
error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?),
error.InvalidInstruction => |e| return function.fail("emit MIR failed: {s} (Zig compiler bug)", .{@errorName(e)}),
else => |e| return e,
};
if (function.err_msg) |em| {
return Result{ .fail = em };
} else {
return Result.ok;
}
}
const FormatWipMirData = struct {
@ -4758,19 +4711,19 @@ fn airFieldParentPtr(func: *Func, inst: Air.Inst.Index) !void {
return func.fail("TODO implement codegen airFieldParentPtr", .{});
}
fn genArgDbgInfo(func: Func, inst: Air.Inst.Index, mcv: MCValue) !void {
fn genArgDbgInfo(func: *const Func, inst: Air.Inst.Index, mcv: MCValue) InnerError!void {
const arg = func.air.instructions.items(.data)[@intFromEnum(inst)].arg;
const ty = arg.ty.toType();
if (arg.name == .none) return;
switch (func.debug_output) {
.dwarf => |dw| switch (mcv) {
.register => |reg| try dw.genLocalDebugInfo(
.register => |reg| dw.genLocalDebugInfo(
.local_arg,
arg.name.toSlice(func.air),
ty,
.{ .reg = reg.dwarfNum() },
),
) catch |err| return func.fail("failed to generate debug info: {s}", .{@errorName(err)}),
.load_frame => {},
else => {},
},
@ -4779,7 +4732,7 @@ fn genArgDbgInfo(func: Func, inst: Air.Inst.Index, mcv: MCValue) !void {
}
}
fn airArg(func: *Func, inst: Air.Inst.Index) !void {
fn airArg(func: *Func, inst: Air.Inst.Index) InnerError!void {
var arg_index = func.arg_index;
// we skip over args that have no bits
@ -5255,7 +5208,7 @@ fn airDbgInlineBlock(func: *Func, inst: Air.Inst.Index) !void {
try func.lowerBlock(inst, @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]));
}
fn airDbgVar(func: *Func, inst: Air.Inst.Index) !void {
fn airDbgVar(func: *Func, inst: Air.Inst.Index) InnerError!void {
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const operand = pl_op.operand;
const ty = func.typeOf(operand);
@ -5263,7 +5216,8 @@ fn airDbgVar(func: *Func, inst: Air.Inst.Index) !void {
const name: Air.NullTerminatedString = @enumFromInt(pl_op.payload);
const tag = func.air.instructions.items(.tag)[@intFromEnum(inst)];
try func.genVarDbgInfo(tag, ty, mcv, name.toSlice(func.air));
func.genVarDbgInfo(tag, ty, mcv, name.toSlice(func.air)) catch |err|
return func.fail("failed to generate variable debug info: {s}", .{@errorName(err)});
return func.finishAir(inst, .unreach, .{ operand, .none, .none });
}
@ -8236,10 +8190,7 @@ fn genTypedValue(func: *Func, val: Value) InnerError!MCValue {
return func.fail("TODO: genTypedValue {s}", .{@tagName(mcv)});
},
},
.fail => |msg| {
func.err_msg = msg;
return error.CodegenFail;
},
.fail => |msg| return func.failMsg(msg),
};
return mcv;
}
@ -8427,17 +8378,23 @@ fn wantSafety(func: *Func) bool {
};
}
fn fail(func: *Func, comptime format: []const u8, args: anytype) InnerError {
fn fail(func: *const Func, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
assert(func.err_msg == null);
func.err_msg = try ErrorMsg.create(func.gpa, func.src_loc, format, args);
const zcu = func.pt.zcu;
switch (func.owner) {
.nav_index => |i| return zcu.codegenFail(i, format, args),
.lazy_sym => |s| return zcu.codegenFailType(s.ty, format, args),
}
return error.CodegenFail;
}
fn failSymbol(func: *Func, comptime format: []const u8, args: anytype) InnerError {
fn failMsg(func: *const Func, msg: *ErrorMsg) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
assert(func.err_msg == null);
func.err_msg = try ErrorMsg.create(func.gpa, func.src_loc, format, args);
const zcu = func.pt.zcu;
switch (func.owner) {
.nav_index => |i| return zcu.codegenFailMsg(i, msg),
.lazy_sym => |s| return zcu.codegenFailTypeMsg(s.ty, msg),
}
return error.CodegenFail;
}
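Reviewer note: with the per-function `err_msg` slot gone, failure routing hangs off the backend's `owner` union: function and variable bodies key their message by Nav, lazily generated symbols by type. A reduced sketch of the dispatch, with `Owner` and the imports assumed from this file:

fn reportFail(zcu: *Zcu, owner: Owner, msg: *Zcu.ErrorMsg) error{ OutOfMemory, CodegenFail } {
    return switch (owner) {
        // Body code: the message lands in `zcu.failed_codegen`, keyed by Nav.
        .nav_index => |i| zcu.codegenFailMsg(i, msg),
        // Lazy symbols have no Nav; the message goes to `zcu.failed_types`.
        .lazy_sym => |s| zcu.codegenFailTypeMsg(s.ty, msg),
    };
}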

View File

@ -3,7 +3,7 @@
bin_file: *link.File,
lower: Lower,
debug_output: link.File.DebugInfoOutput,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
prev_di_line: u32,
prev_di_column: u32,
@ -18,6 +18,7 @@ pub const Error = Lower.Error || error{
};
pub fn emitMir(emit: *Emit) Error!void {
const gpa = emit.bin_file.comp.gpa;
log.debug("mir instruction len: {}", .{emit.lower.mir.instructions.len});
for (0..emit.lower.mir.instructions.len) |mir_i| {
const mir_index: Mir.Inst.Index = @intCast(mir_i);
@ -30,7 +31,7 @@ pub fn emitMir(emit: *Emit) Error!void {
var lowered_relocs = lowered.relocs;
for (lowered.insts, 0..) |lowered_inst, lowered_index| {
const start_offset: u32 = @intCast(emit.code.items.len);
try lowered_inst.encode(emit.code.writer());
try lowered_inst.encode(emit.code.writer(gpa));
while (lowered_relocs.len > 0 and
lowered_relocs[0].lowered_inst_index == lowered_index) : ({
@ -56,13 +57,13 @@ pub fn emitMir(emit: *Emit) Error!void {
const hi_r_type: u32 = @intFromEnum(std.elf.R_RISCV.HI20);
const lo_r_type: u32 = @intFromEnum(std.elf.R_RISCV.LO12_I);
try atom_ptr.addReloc(elf_file.base.comp.gpa, .{
try atom_ptr.addReloc(gpa, .{
.r_offset = start_offset,
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | hi_r_type,
.r_addend = 0,
}, zo);
try atom_ptr.addReloc(elf_file.base.comp.gpa, .{
try atom_ptr.addReloc(gpa, .{
.r_offset = start_offset + 4,
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | lo_r_type,
.r_addend = 0,
@ -76,19 +77,19 @@ pub fn emitMir(emit: *Emit) Error!void {
const R_RISCV = std.elf.R_RISCV;
try atom_ptr.addReloc(elf_file.base.comp.gpa, .{
try atom_ptr.addReloc(gpa, .{
.r_offset = start_offset,
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | @intFromEnum(R_RISCV.TPREL_HI20),
.r_addend = 0,
}, zo);
try atom_ptr.addReloc(elf_file.base.comp.gpa, .{
try atom_ptr.addReloc(gpa, .{
.r_offset = start_offset + 4,
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | @intFromEnum(R_RISCV.TPREL_ADD),
.r_addend = 0,
}, zo);
try atom_ptr.addReloc(elf_file.base.comp.gpa, .{
try atom_ptr.addReloc(gpa, .{
.r_offset = start_offset + 8,
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | @intFromEnum(R_RISCV.TPREL_LO12_I),
.r_addend = 0,
@ -101,7 +102,7 @@ pub fn emitMir(emit: *Emit) Error!void {
const r_type: u32 = @intFromEnum(std.elf.R_RISCV.CALL_PLT);
try atom_ptr.addReloc(elf_file.base.comp.gpa, .{
try atom_ptr.addReloc(gpa, .{
.r_offset = start_offset,
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | r_type,
.r_addend = 0,

View File

@ -21,7 +21,6 @@ const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Type = @import("../../Type.zig");
const CodeGenError = codegen.CodeGenError;
const Result = @import("../../codegen.zig").Result;
const Endian = std.builtin.Endian;
const Alignment = InternPool.Alignment;
@ -55,7 +54,7 @@ liveness: Liveness,
bin_file: *link.File,
target: *const std.Target,
func_index: InternPool.Index,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
err_msg: ?*ErrorMsg,
args: []MCValue,
@ -266,9 +265,9 @@ pub fn generate(
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!Result {
) CodeGenError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
@ -284,7 +283,7 @@ pub fn generate(
}
try branch_stack.append(.{});
var function = Self{
var function: Self = .{
.gpa = gpa,
.pt = pt,
.air = air,
@ -310,10 +309,7 @@ pub fn generate(
defer function.exitlude_jump_relocs.deinit(gpa);
var call_info = function.resolveCallingConventionValues(func_ty, .callee) catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
error.CodegenFail => return error.CodegenFail,
else => |e| return e,
};
defer call_info.deinit(&function);
@ -324,10 +320,8 @@ pub fn generate(
function.max_end_stack = call_info.stack_byte_count;
function.gen() catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
error.CodegenFail => return error.CodegenFail,
error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}),
else => |e| return e,
};
@ -337,7 +331,7 @@ pub fn generate(
};
defer mir.deinit(gpa);
var emit = Emit{
var emit: Emit = .{
.mir = mir,
.bin_file = lf,
.debug_output = debug_output,
@ -351,15 +345,9 @@ pub fn generate(
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
error.EmitFail => return Result{ .fail = emit.err_msg.? },
error.EmitFail => return function.failMsg(emit.err_msg.?),
else => |e| return e,
};
if (function.err_msg) |em| {
return Result{ .fail = em };
} else {
return Result.ok;
}
}
fn gen(self: *Self) !void {
@ -1014,7 +1002,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
return bt.finishAir(result);
}
fn airArg(self: *Self, inst: Air.Inst.Index) !void {
fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const arg_index = self.arg_index;
@ -1036,7 +1024,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
}
};
try self.genArgDbgInfo(inst, mcv);
self.genArgDbgInfo(inst, mcv) catch |err|
return self.fail("failed to generate debug info for parameter: {s}", .{@errorName(err)});
if (self.liveness.isUnused(inst))
return self.finishAirBookkeeping();
@ -3511,12 +3500,19 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type)
}
}
fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError {
fn fail(self: *Self, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
assert(self.err_msg == null);
const gpa = self.gpa;
self.err_msg = try ErrorMsg.create(gpa, self.src_loc, format, args);
return error.CodegenFail;
const zcu = self.pt.zcu;
const func = zcu.funcInfo(self.func_index);
const msg = try ErrorMsg.create(zcu.gpa, self.src_loc, format, args);
return zcu.codegenFailMsg(func.owner_nav, msg);
}
fn failMsg(self: *Self, msg: *ErrorMsg) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
const zcu = self.pt.zcu;
const func = zcu.funcInfo(self.func_index);
return zcu.codegenFailMsg(func.owner_nav, msg);
}
/// Called when there are no operands, and the instruction is always unreferenced.

View File

@ -22,7 +22,7 @@ debug_output: link.File.DebugInfoOutput,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
src_loc: Zcu.LazySrcLoc,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
prev_di_line: u32,
prev_di_column: u32,
@ -678,10 +678,13 @@ fn optimalBranchType(emit: *Emit, tag: Mir.Inst.Tag, offset: i64) !BranchType {
}
fn writeInstruction(emit: *Emit, instruction: Instruction) !void {
const comp = emit.bin_file.comp;
const gpa = comp.gpa;
// SPARCv9 instructions are always arranged in BE regardless of the
// endianness mode the CPU is running in (Section 3.1 of the ISA specification).
// This is to ease porting in case someone wants to do a LE SPARCv9 backend.
const endian = Endian.big;
const endian: Endian = .big;
std.mem.writeInt(u32, try emit.code.addManyAsArray(4), instruction.toU32(), endian);
std.mem.writeInt(u32, try emit.code.addManyAsArray(gpa, 4), instruction.toU32(), endian);
}
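Reviewer note: worth spelling out why this works on any build host: `std.mem.writeInt` takes the byte order explicitly, so host endianness never leaks into the emitted stream; the hardcoded `.big` alone makes the SPARCv9 output correct. A self-contained check of that property:

const std = @import("std");

test "SPARCv9 words are emitted big-endian on any host" {
    var buf: [4]u8 = undefined;
    std.mem.writeInt(u32, &buf, 0x01020304, .big);
    try std.testing.expectEqualSlices(u8, &.{ 0x01, 0x02, 0x03, 0x04 }, &buf);
}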

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -7,11 +7,15 @@
//! and known jump labels for blocks.
const Mir = @This();
const InternPool = @import("../../InternPool.zig");
const Wasm = @import("../../link/Wasm.zig");
const builtin = @import("builtin");
const std = @import("std");
const assert = std.debug.assert;
/// A struct of arrays that represents each individual wasm instruction.
instructions: std.MultiArrayList(Inst).Slice,
instruction_tags: []const Inst.Tag,
instruction_datas: []const Inst.Data,
/// A slice of indexes where the meaning of the data is determined by the
/// `Inst.Tag` value.
extra: []const u32,
@ -26,16 +30,14 @@ pub const Inst = struct {
/// The position of a given MIR instruction within the instruction list.
pub const Index = u32;
/// Contains all possible wasm opcodes the Zig compiler may emit
/// Rather than re-using std.wasm.Opcode, we only declare the opcodes
/// we need, and also use this possibility to document how to access
/// their payload.
///
/// Note: Uses its actual opcode value representation to easily convert
/// to and from its binary representation.
/// Some tags match wasm opcode values to facilitate trivial lowering.
pub const Tag = enum(u8) {
/// Uses `nop`
/// Uses `tag`.
@"unreachable" = 0x00,
/// Emits epilogue begin debug information. Marks the end of the function.
///
/// Uses `tag` (no additional data).
dbg_epilogue_begin,
/// Creates a new block that can be jumped from.
///
/// Type of the block is given in data `block_type`
@ -44,56 +46,92 @@ pub const Inst = struct {
///
/// Type of the loop is given in data `block_type`
loop = 0x03,
/// Lowers to an i32_const (wasm32) or i64_const (wasm64) which is the
/// memory address of an unnamed constant. When emitting an object
/// file, this adds a relocation.
///
/// This may not refer to a function.
///
/// Uses `ip_index`.
uav_ref,
/// Lowers to an i32_const (wasm32) or i64_const (wasm64) which is the
/// memory address of an unnamed constant, offset by an integer value.
/// When emitting an object file, this adds a relocation.
///
/// This may not refer to a function.
///
/// Uses `payload` pointing to a `UavRefOff`.
uav_ref_off,
/// Lowers to an i32_const (wasm32) or i64_const (wasm64) which is the
/// memory address of a named constant.
///
/// May not refer to a function.
///
/// Uses `nav_index`.
nav_ref,
/// Lowers to an i32_const (wasm32) or i64_const (wasm64) which is the
/// memory address of a named constant, offset by an integer value.
/// When emitting an object file, this adds a relocation.
///
/// May not refer to a function.
///
/// Uses `payload` pointing to a `NavRefOff`.
nav_ref_off,
/// Lowers to an i32_const which is the index of the function in the
/// table section.
///
/// Uses `indirect_function_table_index`.
func_ref,
/// Inserts debug information about the current line and column
/// of the source code
///
/// Uses `payload` of which the payload type is `DbgLineColumn`
dbg_line = 0x06,
/// Emits epilogue begin debug information
///
/// Uses `nop`
dbg_epilogue_begin = 0x07,
/// Emits prologue end debug information
///
/// Uses `nop`
dbg_prologue_end = 0x08,
dbg_line,
/// Lowers to an i32_const containing the number of unique Zig error
/// names.
/// Uses `tag`.
errors_len,
/// Represents the end of a function body or an initialization expression
///
/// Payload is `nop`
/// Uses `tag` (no additional data).
end = 0x0B,
/// Breaks from the current block to a label
///
/// Data is `label` where index represents the label to jump to
/// Uses `label` where index represents the label to jump to
br = 0x0C,
/// Breaks from the current block if the stack value is non-zero
///
/// Data is `label` where index represents the label to jump to
/// Uses `label` where index represents the label to jump to
br_if = 0x0D,
/// Jump table that takes the stack value as an index where each value
/// represents the label to jump to.
///
/// Data is extra of which the Payload's type is `JumpTable`
br_table = 0x0E,
br_table,
/// Returns from the function
///
/// Uses `nop`
/// Uses `tag`.
@"return" = 0x0F,
/// Calls a function by its index
/// Lowers to an i32_const (wasm32) or i64_const (wasm64) containing
/// the base address of the table of error code names, with each
/// element being a null-terminated slice.
///
/// Uses `label`
call = 0x10,
/// Uses `tag`.
error_name_table_ref,
/// Calls a function using `nav_index`.
call_nav,
/// Calls a function pointer by its function signature
/// and index into the function table.
///
/// Uses `label`
call_indirect = 0x11,
/// Contains a symbol to a function pointer
/// uses `label`
/// Uses `func_ty`
call_indirect,
/// Calls a function by its index.
///
/// Note: This uses `0x16` as its value, which is reserved but unused by the
/// WebAssembly specification, meaning we must update this if the specification
/// were to use this value.
function_index = 0x16,
/// The function is the auto-generated tag name function for the type
/// provided in `ip_index`.
call_tag_name,
/// Lowers to a `call` instruction, using `intrinsic`.
call_intrinsic,
/// Pops three values from the stack and pushes
/// the first or second value dependent on the third value.
/// Uses `tag`
@ -112,15 +150,11 @@ pub const Inst = struct {
///
/// Uses `label`
local_tee = 0x22,
/// Loads a (mutable) global at given index onto the stack
/// Pops a value from the stack and sets the stack pointer global.
/// The value must be the same type as the stack pointer global.
///
/// Uses `label`
global_get = 0x23,
/// Pops a value from the stack and sets the global at given index.
/// Note: Both types must be equal and global must be marked mutable.
///
/// Uses `label`.
global_set = 0x24,
/// Uses `tag` (no additional data).
global_set_sp,
/// Loads a 32-bit integer from memory (data section) onto the stack
/// Pops the value from the stack which represents the offset into memory.
///
@ -256,19 +290,19 @@ pub const Inst = struct {
/// Loads a 32-bit signed immediate value onto the stack
///
/// Uses `imm32`
i32_const = 0x41,
i32_const,
/// Loads a 64-bit signed immediate value onto the stack
///
/// Uses `payload` of type `Imm64`
i64_const = 0x42,
i64_const,
/// Loads a 32-bit float value onto the stack.
///
/// Uses `float32`
f32_const = 0x43,
f32_const,
/// Loads a 64-bit float value onto the stack.
///
/// Uses `payload` of type `Float64`
f64_const = 0x44,
f64_const,
/// Uses `tag`
i32_eqz = 0x45,
/// Uses `tag`
@ -522,25 +556,19 @@ pub const Inst = struct {
///
/// The `data` field depends on the extension instruction and
/// may contain additional data.
misc_prefix = 0xFC,
misc_prefix,
/// The instruction consists of a simd opcode.
/// The actual simd-opcode is found at payload's index.
///
/// The `data` field depends on the simd instruction and
/// may contain additional data.
simd_prefix = 0xFD,
simd_prefix,
/// The instruction consists of an atomics opcode.
/// The actual atomics-opcode is found at payload's index.
///
/// The `data` field depends on the atomics instruction and
/// may contain additional data.
atomics_prefix = 0xFE,
/// Contains a symbol to a memory address
/// Uses `label`
///
/// Note: This uses `0xFF` as value as it is unused and not reserved
/// by the wasm specification, making it safe to use.
memory_address = 0xFF,
/// From a given wasm opcode, returns a MIR tag.
pub fn fromOpcode(opcode: std.wasm.Opcode) Tag {
@ -560,26 +588,41 @@ pub const Inst = struct {
/// Uses no additional data
tag: void,
/// Contains the result type of a block
///
/// Used by `block` and `loop`
block_type: u8,
/// Contains a u32 index into a wasm section entry, such as a local.
/// Note: This is not an index to another instruction.
///
/// Used by e.g. `local_get`, `local_set`, etc.
block_type: std.wasm.BlockType,
/// Label: Each structured control instruction introduces an implicit label.
/// Labels are targets for branch instructions that reference them with
/// label indices. Unlike with other index spaces, indexing of labels
/// is relative by nesting depth, that is, label 0 refers to the
/// innermost structured control instruction enclosing the referring
/// branch instruction, while increasing indices refer to those farther
/// out. Consequently, labels can only be referenced from within the
/// associated structured control instruction.
label: u32,
/// Local: The index space for locals is only accessible inside a function and
/// includes the parameters of that function, which precede the local
/// variables.
local: u32,
/// A 32-bit immediate value.
///
/// Used by `i32_const`
imm32: i32,
/// A 32-bit float value
///
/// Used by `f32_const`
float32: f32,
/// Index into `extra`. Meaning of what can be found there is context-dependent.
///
/// Used by e.g. `br_table`
payload: u32,
ip_index: InternPool.Index,
nav_index: InternPool.Nav.Index,
func_ty: Wasm.FunctionType.Index,
intrinsic: Intrinsic,
uav_obj: Wasm.UavsObjIndex,
uav_exe: Wasm.UavsExeIndex,
indirect_function_table_index: Wasm.ZcuIndirectFunctionSetIndex,
comptime {
switch (builtin.mode) {
.Debug, .ReleaseSafe => {},
.ReleaseFast, .ReleaseSmall => assert(@sizeOf(Data) == 4),
}
}
};
};
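Reviewer note: the `comptime` assertion above is the load-bearing DOD check: every `Data` variant must pack into 4 bytes so the `MultiArrayList` keeps one dense tag array and one dense 4-byte payload array, with anything wider pushed out to `extra`. A reduced sketch of the same guardrail:

const std = @import("std");
const builtin = @import("builtin");

const Data = union {
    label: u32,
    imm32: i32,
    float32: f32,
    payload: u32, // index into a side `extra` array for wider operands

    comptime {
        // Untagged unions carry a hidden safety tag in Debug/ReleaseSafe,
        // so the 4-byte guarantee is only checked in release modes, exactly
        // as the diff above does.
        switch (builtin.mode) {
            .Debug, .ReleaseSafe => {},
            .ReleaseFast, .ReleaseSmall => std.debug.assert(@sizeOf(Data) == 4),
        }
    }
};

test "Data packs into 4 bytes in release modes" {
    _ = Data; // force analysis of the container and its comptime check
}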
@ -596,6 +639,11 @@ pub fn extraData(self: *const Mir, comptime T: type, index: usize) struct { data
inline for (fields) |field| {
@field(result, field.name) = switch (field.type) {
u32 => self.extra[i],
i32 => @bitCast(self.extra[i]),
Wasm.UavsObjIndex,
Wasm.UavsExeIndex,
InternPool.Nav.Index,
=> @enumFromInt(self.extra[i]),
else => |field_type| @compileError("Unsupported field type " ++ @typeName(field_type)),
};
i += 1;
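Reviewer note: the new branches mean typed indices live in `extra` as their raw u32 backing and `extraData` re-types them on the way out. A standalone sketch of the decode loop with a stand-in payload struct; the real function also returns the end position:

const std = @import("std");

const Payload = struct {
    nav_index: enum(u32) { _ }, // stand-in for InternPool.Nav.Index
    offset: i32,
};

fn extraData(comptime T: type, extra: []const u32, index: usize) T {
    var result: T = undefined;
    var i = index;
    inline for (@typeInfo(T).@"struct".fields) |field| {
        @field(result, field.name) = switch (field.type) {
            u32 => extra[i],
            i32 => @bitCast(extra[i]),
            else => @enumFromInt(extra[i]), // typed index fields
        };
        i += 1;
    }
    return result;
}

test "payload round trip through extra" {
    const extra = [_]u32{ 7, @bitCast(@as(i32, -4)) };
    const p = extraData(Payload, &extra, 0);
    try std.testing.expectEqual(@as(u32, 7), @intFromEnum(p.nav_index));
    try std.testing.expectEqual(@as(i32, -4), p.offset);
}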
@ -609,28 +657,19 @@ pub const JumpTable = struct {
length: u32,
};
/// Stores an unsigned 64bit integer
/// into a 32bit most significant bits field
/// and a 32bit least significant bits field.
///
/// This uses an unsigned integer rather than a signed integer
/// as we can easily store those into `extra`
pub const Imm64 = struct {
msb: u32,
lsb: u32,
pub fn fromU64(imm: u64) Imm64 {
pub fn init(full: u64) Imm64 {
return .{
.msb = @as(u32, @truncate(imm >> 32)),
.lsb = @as(u32, @truncate(imm)),
.msb = @truncate(full >> 32),
.lsb = @truncate(full),
};
}
pub fn toU64(self: Imm64) u64 {
var result: u64 = 0;
result |= @as(u64, self.msb) << 32;
result |= @as(u64, self.lsb);
return result;
pub fn toInt(i: Imm64) u64 {
return (@as(u64, i.msb) << 32) | @as(u64, i.lsb);
}
};
@ -638,23 +677,16 @@ pub const Float64 = struct {
msb: u32,
lsb: u32,
pub fn fromFloat64(float: f64) Float64 {
const tmp = @as(u64, @bitCast(float));
pub fn init(f: f64) Float64 {
const int: u64 = @bitCast(f);
return .{
.msb = @as(u32, @truncate(tmp >> 32)),
.lsb = @as(u32, @truncate(tmp)),
.msb = @truncate(int >> 32),
.lsb = @truncate(int),
};
}
pub fn toF64(self: Float64) f64 {
@as(f64, @bitCast(self.toU64()));
}
pub fn toU64(self: Float64) u64 {
var result: u64 = 0;
result |= @as(u64, self.msb) << 32;
result |= @as(u64, self.lsb);
return result;
pub fn toInt(f: Float64) u64 {
return (@as(u64, f.msb) << 32) | @as(u64, f.lsb);
}
};
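Reviewer note: `Imm64` and `Float64` exist only because `extra` is a flat `[]const u32`; 64-bit operands travel as explicit msb/lsb halves. A quick round-trip check of the renamed helpers, assuming it sits next to them in this file:

test "Imm64 and Float64 round trip through two u32 halves" {
    const imm = Imm64.init(0xDEAD_BEEF_0123_4567);
    try std.testing.expectEqual(@as(u64, 0xDEAD_BEEF_0123_4567), imm.toInt());

    const f = Float64.init(3.5);
    try std.testing.expectEqual(@as(u64, @bitCast(@as(f64, 3.5))), f.toInt());
}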
@ -663,11 +695,19 @@ pub const MemArg = struct {
alignment: u32,
};
/// Represents a memory address, which holds both the pointer
/// or the parent pointer and the offset to it.
pub const Memory = struct {
pointer: u32,
offset: u32,
pub const UavRefOffObj = struct {
uav_obj: Wasm.UavsObjIndex,
offset: i32,
};
pub const UavRefOffExe = struct {
uav_exe: Wasm.UavsExeIndex,
offset: i32,
};
pub const NavRefOff = struct {
nav_index: InternPool.Nav.Index,
offset: i32,
};
/// Maps a source line with wasm bytecode
@ -675,3 +715,199 @@ pub const DbgLineColumn = struct {
line: u32,
column: u32,
};
/// Tag names exactly match the corresponding symbol name.
pub const Intrinsic = enum(u32) {
__addhf3,
__addtf3,
__addxf3,
__ashlti3,
__ashrti3,
__bitreversedi2,
__bitreversesi2,
__bswapdi2,
__bswapsi2,
__ceilh,
__ceilx,
__cosh,
__cosx,
__divhf3,
__divtf3,
__divti3,
__divxf3,
__eqtf2,
__eqxf2,
__exp2h,
__exp2x,
__exph,
__expx,
__extenddftf2,
__extenddfxf2,
__extendhfsf2,
__extendhftf2,
__extendhfxf2,
__extendsftf2,
__extendsfxf2,
__extendxftf2,
__fabsh,
__fabsx,
__fixdfdi,
__fixdfsi,
__fixdfti,
__fixhfdi,
__fixhfsi,
__fixhfti,
__fixsfdi,
__fixsfsi,
__fixsfti,
__fixtfdi,
__fixtfsi,
__fixtfti,
__fixunsdfdi,
__fixunsdfsi,
__fixunsdfti,
__fixunshfdi,
__fixunshfsi,
__fixunshfti,
__fixunssfdi,
__fixunssfsi,
__fixunssfti,
__fixunstfdi,
__fixunstfsi,
__fixunstfti,
__fixunsxfdi,
__fixunsxfsi,
__fixunsxfti,
__fixxfdi,
__fixxfsi,
__fixxfti,
__floatdidf,
__floatdihf,
__floatdisf,
__floatditf,
__floatdixf,
__floatsidf,
__floatsihf,
__floatsisf,
__floatsitf,
__floatsixf,
__floattidf,
__floattihf,
__floattisf,
__floattitf,
__floattixf,
__floatundidf,
__floatundihf,
__floatundisf,
__floatunditf,
__floatundixf,
__floatunsidf,
__floatunsihf,
__floatunsisf,
__floatunsitf,
__floatunsixf,
__floatuntidf,
__floatuntihf,
__floatuntisf,
__floatuntitf,
__floatuntixf,
__floorh,
__floorx,
__fmah,
__fmax,
__fmaxh,
__fmaxx,
__fminh,
__fminx,
__fmodh,
__fmodx,
__getf2,
__gexf2,
__gttf2,
__gtxf2,
__letf2,
__lexf2,
__log10h,
__log10x,
__log2h,
__log2x,
__logh,
__logx,
__lshrti3,
__lttf2,
__ltxf2,
__modti3,
__mulhf3,
__mulodi4,
__muloti4,
__multf3,
__multi3,
__mulxf3,
__netf2,
__nexf2,
__roundh,
__roundx,
__sinh,
__sinx,
__sqrth,
__sqrtx,
__subhf3,
__subtf3,
__subxf3,
__tanh,
__tanx,
__trunch,
__truncsfhf2,
__trunctfdf2,
__trunctfhf2,
__trunctfsf2,
__trunctfxf2,
__truncx,
__truncxfdf2,
__truncxfhf2,
__truncxfsf2,
__udivti3,
__umodti3,
ceilq,
cos,
cosf,
cosq,
exp,
exp2,
exp2f,
exp2q,
expf,
expq,
fabsq,
floorq,
fma,
fmaf,
fmaq,
fmax,
fmaxf,
fmaxq,
fmin,
fminf,
fminq,
fmod,
fmodf,
fmodq,
log,
log10,
log10f,
log10q,
log2,
log2f,
log2q,
logf,
logq,
roundq,
sin,
sinf,
sinq,
sqrtq,
tan,
tanf,
tanq,
truncq,
};
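Reviewer note: because tag names are guaranteed to match symbol names exactly, the lowering side presumably needs nothing beyond `@tagName` to resolve the compiler-rt symbol, with no lookup table to keep in sync. Sketch; the helper name is hypothetical:

/// The symbol to call for a given intrinsic, e.g. `.__addtf3` -> "__addtf3".
fn intrinsicSymbolName(i: Intrinsic) [:0]const u8 {
    return @tagName(i);
}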

View File

@ -22,7 +22,7 @@ const direct: [2]Class = .{ .direct, .none };
/// Classifies a given Zig type to determine how it must be passed
/// or returned as a value within a wasm function.
/// When all elements are `.none`, no value is passed in or returned.
pub fn classifyType(ty: Type, zcu: *Zcu) [2]Class {
pub fn classifyType(ty: Type, zcu: *const Zcu) [2]Class {
const ip = &zcu.intern_pool;
const target = zcu.getTarget();
if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return none;
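Reviewer note: callers look at both slots of the returned pair: `.{ .none, .none }` means the type contributes no wasm value at all, and two non-`.none` slots mean the value is split in two. A hedged sketch of the consuming side, with branch bodies elided and the exact `Class` variants assumed:

fn lowerArg(ty: Type, zcu: *const Zcu) void {
    const classes = classifyType(ty, zcu);
    if (classes[0] == .none) return; // zero-bit type: nothing is passed
    if (classes[1] != .none) {
        // split across two wasm values, e.g. a 128-bit integer
    } else if (classes[0] == .direct) {
        // a single scalar wasm value
    } else {
        // assumed here: everything else goes by reference into linear memory
    }
}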

View File

@ -19,7 +19,6 @@ const Allocator = mem.Allocator;
const CodeGenError = codegen.CodeGenError;
const Compilation = @import("../../Compilation.zig");
const ErrorMsg = Zcu.ErrorMsg;
const Result = codegen.Result;
const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Lower = @import("Lower.zig");
@ -59,7 +58,6 @@ target: *const std.Target,
owner: Owner,
inline_func: InternPool.Index,
mod: *Package.Module,
err_msg: ?*ErrorMsg,
arg_index: u32,
args: []MCValue,
va_info: union {
@ -819,9 +817,9 @@ pub fn generate(
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!Result {
) CodeGenError!void {
const zcu = pt.zcu;
const comp = zcu.comp;
const gpa = zcu.gpa;
@ -841,7 +839,6 @@ pub fn generate(
.debug_output = debug_output,
.owner = .{ .nav_index = func.owner_nav },
.inline_func = func_index,
.err_msg = null,
.arg_index = undefined,
.args = undefined, // populated after `resolveCallingConventionValues`
.va_info = undefined, // populated after `resolveCallingConventionValues`
@ -881,15 +878,7 @@ pub fn generate(
const fn_info = zcu.typeToFunc(fn_type).?;
const cc = abi.resolveCallingConvention(fn_info.cc, function.target.*);
var call_info = function.resolveCallingConventionValues(fn_info, &.{}, .args_frame) catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(
gpa,
src_loc,
"CodeGen ran out of registers. This is a bug in the Zig compiler.",
.{},
),
},
error.CodegenFail => return error.CodegenFail,
else => |e| return e,
};
defer call_info.deinit(&function);
@ -926,10 +915,8 @@ pub fn generate(
};
function.gen() catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
error.CodegenFail => return error.CodegenFail,
error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}),
else => |e| return e,
};
@ -953,10 +940,7 @@ pub fn generate(
.pic = mod.pic,
},
.atom_index = function.owner.getSymbolIndex(&function) catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
error.CodegenFail => return error.CodegenFail,
else => |e| return e,
},
.debug_output = debug_output,
@ -974,29 +958,11 @@ pub fn generate(
};
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
error.LowerFail, error.EmitFail => return Result{ .fail = emit.lower.err_msg.? },
error.InvalidInstruction, error.CannotEncode => |e| {
const msg = switch (e) {
error.InvalidInstruction => "CodeGen failed to find a viable instruction.",
error.CannotEncode => "CodeGen failed to encode the instruction.",
};
return Result{
.fail = try ErrorMsg.create(
gpa,
src_loc,
"{s} This is a bug in the Zig compiler.",
.{msg},
),
};
},
else => |e| return e,
};
error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?),
if (function.err_msg) |em| {
return Result{ .fail = em };
} else {
return Result.ok;
}
error.InvalidInstruction, error.CannotEncode => |e| return function.fail("emit MIR failed: {s} (Zig compiler bug)", .{@errorName(e)}),
else => |e| return function.fail("emit MIR failed: {s}", .{@errorName(e)}),
};
}
pub fn generateLazy(
@ -1004,9 +970,9 @@ pub fn generateLazy(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
lazy_sym: link.File.LazySymbol,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!Result {
) CodeGenError!void {
const comp = bin_file.comp;
const gpa = comp.gpa;
// This function is for generating global code, so we use the root module.
@ -1022,7 +988,6 @@ pub fn generateLazy(
.debug_output = debug_output,
.owner = .{ .lazy_sym = lazy_sym },
.inline_func = undefined,
.err_msg = null,
.arg_index = undefined,
.args = undefined,
.va_info = undefined,
@ -1038,10 +1003,8 @@ pub fn generateLazy(
}
function.genLazy(lazy_sym) catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
error.CodegenFail => return error.CodegenFail,
error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}),
else => |e| return e,
};
@ -1065,10 +1028,7 @@ pub fn generateLazy(
.pic = mod.pic,
},
.atom_index = function.owner.getSymbolIndex(&function) catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
error.CodegenFail => return error.CodegenFail,
else => |e| return e,
},
.debug_output = debug_output,
@ -1078,29 +1038,11 @@ pub fn generateLazy(
};
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
error.LowerFail, error.EmitFail => return Result{ .fail = emit.lower.err_msg.? },
error.InvalidInstruction, error.CannotEncode => |e| {
const msg = switch (e) {
error.InvalidInstruction => "CodeGen failed to find a viable instruction.",
error.CannotEncode => "CodeGen failed to encode the instruction.",
};
return Result{
.fail = try ErrorMsg.create(
gpa,
src_loc,
"{s} This is a bug in the Zig compiler.",
.{msg},
),
};
},
else => |e| return e,
error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?),
error.InvalidInstruction => return function.fail("failed to find a viable x86 instruction (Zig compiler bug)", .{}),
error.CannotEncode => return function.fail("failed to encode x86 instruction (Zig compiler bug)", .{}),
else => |e| return function.fail("failed to emit MIR: {s}", .{@errorName(e)}),
};
if (function.err_msg) |em| {
return Result{ .fail = em };
} else {
return Result.ok;
}
}
const FormatNavData = struct {
@ -19276,10 +19218,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
.load_got => |sym_index| .{ .lea_got = sym_index },
.load_tlv => |sym_index| .{ .lea_tlv = sym_index },
},
.fail => |msg| {
self.err_msg = msg;
return error.CodegenFail;
},
.fail => |msg| return self.failMsg(msg),
};
}
@ -19592,11 +19531,23 @@ fn resolveCallingConventionValues(
return result;
}
fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError {
fn fail(self: *Self, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
assert(self.err_msg == null);
const gpa = self.gpa;
self.err_msg = try ErrorMsg.create(gpa, self.src_loc, format, args);
const zcu = self.pt.zcu;
switch (self.owner) {
.nav_index => |i| return zcu.codegenFail(i, format, args),
.lazy_sym => |s| return zcu.codegenFailType(s.ty, format, args),
}
return error.CodegenFail;
}
fn failMsg(self: *Self, msg: *ErrorMsg) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
const zcu = self.pt.zcu;
switch (self.owner) {
.nav_index => |i| return zcu.codegenFailMsg(i, msg),
.lazy_sym => |s| return zcu.codegenFailTypeMsg(s.ty, msg),
}
return error.CodegenFail;
}

View File

@ -4,7 +4,7 @@ air: Air,
lower: Lower,
atom_index: u32,
debug_output: link.File.DebugInfoOutput,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
prev_di_loc: Loc,
/// Relative to the beginning of `code`.
@ -18,6 +18,7 @@ pub const Error = Lower.Error || error{
} || link.File.UpdateDebugInfoError;
pub fn emitMir(emit: *Emit) Error!void {
const gpa = emit.lower.bin_file.comp.gpa;
for (0..emit.lower.mir.instructions.len) |mir_i| {
const mir_index: Mir.Inst.Index = @intCast(mir_i);
try emit.code_offset_mapping.putNoClobber(
@ -82,7 +83,7 @@ pub fn emitMir(emit: *Emit) Error!void {
}
continue;
}
try lowered_inst.encode(emit.code.writer(), .{});
try lowered_inst.encode(emit.code.writer(gpa), .{});
const end_offset: u32 = @intCast(emit.code.items.len);
while (lowered_relocs.len > 0 and
lowered_relocs[0].lowered_inst_index == lowered_index) : ({
@ -100,7 +101,7 @@ pub fn emitMir(emit: *Emit) Error!void {
const zo = elf_file.zigObjectPtr().?;
const atom_ptr = zo.symbol(emit.atom_index).atom(elf_file).?;
const r_type = @intFromEnum(std.elf.R_X86_64.PLT32);
try atom_ptr.addReloc(elf_file.base.comp.gpa, .{
try atom_ptr.addReloc(gpa, .{
.r_offset = end_offset - 4,
.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
.r_addend = lowered_relocs[0].off - 4,
@ -147,7 +148,7 @@ pub fn emitMir(emit: *Emit) Error!void {
const zo = elf_file.zigObjectPtr().?;
const atom = zo.symbol(emit.atom_index).atom(elf_file).?;
const r_type = @intFromEnum(std.elf.R_X86_64.TLSLD);
try atom.addReloc(elf_file.base.comp.gpa, .{
try atom.addReloc(gpa, .{
.r_offset = end_offset - 4,
.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
.r_addend = lowered_relocs[0].off - 4,
@ -158,7 +159,7 @@ pub fn emitMir(emit: *Emit) Error!void {
const zo = elf_file.zigObjectPtr().?;
const atom = zo.symbol(emit.atom_index).atom(elf_file).?;
const r_type = @intFromEnum(std.elf.R_X86_64.DTPOFF32);
try atom.addReloc(elf_file.base.comp.gpa, .{
try atom.addReloc(gpa, .{
.r_offset = end_offset - 4,
.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
.r_addend = lowered_relocs[0].off,
@ -173,7 +174,7 @@ pub fn emitMir(emit: *Emit) Error!void {
@intFromEnum(std.elf.R_X86_64.GOTPCREL)
else
@intFromEnum(std.elf.R_X86_64.PC32);
try atom.addReloc(elf_file.base.comp.gpa, .{
try atom.addReloc(gpa, .{
.r_offset = end_offset - 4,
.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
.r_addend = lowered_relocs[0].off - 4,
@ -183,7 +184,7 @@ pub fn emitMir(emit: *Emit) Error!void {
@intFromEnum(std.elf.R_X86_64.TPOFF32)
else
@intFromEnum(std.elf.R_X86_64.@"32");
try atom.addReloc(elf_file.base.comp.gpa, .{
try atom.addReloc(gpa, .{
.r_offset = end_offset - 4,
.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
.r_addend = lowered_relocs[0].off,

View File

@ -2,7 +2,6 @@ const std = @import("std");
const build_options = @import("build_options");
const builtin = @import("builtin");
const assert = std.debug.assert;
const leb128 = std.leb;
const link = @import("link.zig");
const log = std.log.scoped(.codegen);
const mem = std.mem;
@ -24,19 +23,13 @@ const Zir = std.zig.Zir;
const Alignment = InternPool.Alignment;
const dev = @import("dev.zig");
pub const Result = union(enum) {
/// The `code` parameter passed to `generateSymbol` has the value ok.
ok,
/// There was a codegen error.
fail: *ErrorMsg,
};
pub const CodeGenError = error{
OutOfMemory,
/// Compiler was asked to operate on a number larger than supported.
Overflow,
/// Indicates the error is already stored in `Zcu.failed_codegen`.
CodegenFail,
} || link.File.UpdateDebugInfoError;
};
fn devFeatureForBackend(comptime backend: std.builtin.CompilerBackend) dev.Feature {
comptime assert(mem.startsWith(u8, @tagName(backend), "stage2_"));
@ -49,7 +42,6 @@ fn importBackend(comptime backend: std.builtin.CompilerBackend) type {
.stage2_arm => @import("arch/arm/CodeGen.zig"),
.stage2_riscv64 => @import("arch/riscv64/CodeGen.zig"),
.stage2_sparc64 => @import("arch/sparc64/CodeGen.zig"),
.stage2_wasm => @import("arch/wasm/CodeGen.zig"),
.stage2_x86_64 => @import("arch/x86_64/CodeGen.zig"),
else => unreachable,
};
@ -62,9 +54,9 @@ pub fn generateFunction(
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!Result {
) CodeGenError!void {
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
const target = zcu.navFileScope(func.owner_nav).mod.resolved_target.result;
@ -74,7 +66,6 @@ pub fn generateFunction(
.stage2_arm,
.stage2_riscv64,
.stage2_sparc64,
.stage2_wasm,
.stage2_x86_64,
=> |backend| {
dev.check(devFeatureForBackend(backend));
@ -88,17 +79,15 @@ pub fn generateLazyFunction(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
lazy_sym: link.File.LazySymbol,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!Result {
) CodeGenError!void {
const zcu = pt.zcu;
const file = Type.fromInterned(lazy_sym.ty).typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(&zcu.intern_pool);
const target = zcu.fileByIndex(file).mod.resolved_target.result;
switch (target_util.zigBackend(target, false)) {
else => unreachable,
inline .stage2_x86_64,
.stage2_riscv64,
=> |backend| {
inline .stage2_x86_64, .stage2_riscv64 => |backend| {
dev.check(devFeatureForBackend(backend));
return importBackend(backend).generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output);
},
@ -120,20 +109,21 @@ pub fn generateLazySymbol(
lazy_sym: link.File.LazySymbol,
// TODO don't use an "out" parameter like this; put it in the result instead
alignment: *Alignment,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
reloc_parent: link.File.RelocInfo.Parent,
) CodeGenError!Result {
) CodeGenError!void {
_ = reloc_parent;
const tracy = trace(@src());
defer tracy.end();
const comp = bin_file.comp;
const ip = &pt.zcu.intern_pool;
const gpa = comp.gpa;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const target = comp.root_mod.resolved_target.result;
const endian = target.cpu.arch.endian();
const gpa = comp.gpa;
log.debug("generateLazySymbol: kind = {s}, ty = {}", .{
@tagName(lazy_sym.kind),
@ -150,52 +140,56 @@ pub fn generateLazySymbol(
const err_names = ip.global_error_set.getNamesFromMainThread();
var offset_index: u32 = @intCast(code.items.len);
var string_index: u32 = @intCast(4 * (1 + err_names.len + @intFromBool(err_names.len > 0)));
try code.resize(offset_index + string_index);
try code.resize(gpa, offset_index + string_index);
mem.writeInt(u32, code.items[offset_index..][0..4], @intCast(err_names.len), endian);
if (err_names.len == 0) return .ok;
if (err_names.len == 0) return;
offset_index += 4;
for (err_names) |err_name_nts| {
const err_name = err_name_nts.toSlice(ip);
mem.writeInt(u32, code.items[offset_index..][0..4], string_index, endian);
offset_index += 4;
try code.ensureUnusedCapacity(err_name.len + 1);
try code.ensureUnusedCapacity(gpa, err_name.len + 1);
code.appendSliceAssumeCapacity(err_name);
code.appendAssumeCapacity(0);
string_index += @intCast(err_name.len + 1);
}
mem.writeInt(u32, code.items[offset_index..][0..4], string_index, endian);
return .ok;
} else if (Type.fromInterned(lazy_sym.ty).zigTypeTag(pt.zcu) == .@"enum") {
} else if (Type.fromInterned(lazy_sym.ty).zigTypeTag(zcu) == .@"enum") {
alignment.* = .@"1";
const enum_ty = Type.fromInterned(lazy_sym.ty);
const tag_names = enum_ty.enumFields(pt.zcu);
const tag_names = enum_ty.enumFields(zcu);
for (0..tag_names.len) |tag_index| {
const tag_name = tag_names.get(ip)[tag_index].toSlice(ip);
try code.ensureUnusedCapacity(tag_name.len + 1);
try code.ensureUnusedCapacity(gpa, tag_name.len + 1);
code.appendSliceAssumeCapacity(tag_name);
code.appendAssumeCapacity(0);
}
return .ok;
} else return .{ .fail = try .create(
gpa,
src_loc,
"TODO implement generateLazySymbol for {s} {}",
.{ @tagName(lazy_sym.kind), Type.fromInterned(lazy_sym.ty).fmt(pt) },
) };
} else {
return zcu.codegenFailType(lazy_sym.ty, "TODO implement generateLazySymbol for {s} {}", .{
@tagName(lazy_sym.kind), Type.fromInterned(lazy_sym.ty).fmt(pt),
});
}
}
pub const GenerateSymbolError = error{
OutOfMemory,
/// Compiler was asked to operate on a number larger than supported.
Overflow,
};
pub fn generateSymbol(
bin_file: *link.File,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
val: Value,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
reloc_parent: link.File.RelocInfo.Parent,
) CodeGenError!Result {
) GenerateSymbolError!void {
const tracy = trace(@src());
defer tracy.end();
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const ty = val.typeOf(zcu);
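
// Illustrative sketch, not part of this commit: the anyerror name table built
// in the hunk above is `u32 count`, then string offsets relative to the table
// start (plus one final offset holding the total size), then NUL-terminated
// names. A small reader for that layout, to make the format concrete:
const std = @import("std");

fn errorName(table: []const u8, index: u32, endian: std.builtin.Endian) [:0]const u8 {
    const off = std.mem.readInt(u32, table[4 * (1 + index) ..][0..4], endian);
    const bytes = table[off..];
    return bytes[0..std.mem.indexOfScalar(u8, bytes, 0).? :0];
}

test errorName {
    // count=1, offsets = {12}, total size = 16, then "oom\x00".
    const table = [_]u8{ 1, 0, 0, 0, 12, 0, 0, 0, 16, 0, 0, 0, 'o', 'o', 'm', 0 };
    try std.testing.expectEqualStrings("oom", errorName(&table, 0, .little));
}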
@ -206,8 +200,8 @@ pub fn generateSymbol(
if (val.isUndefDeep(zcu)) {
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
try code.appendNTimes(0xaa, abi_size);
return .ok;
try code.appendNTimes(gpa, 0xaa, abi_size);
return;
}
switch (ip.indexToKey(val.toIntern())) {
@ -231,14 +225,13 @@ pub fn generateSymbol(
.undef => unreachable, // handled above
.simple_value => |simple_value| switch (simple_value) {
.undefined,
.void,
.null,
.empty_tuple,
.@"unreachable",
.generic_poison,
=> unreachable, // non-runtime values
.false, .true => try code.append(switch (simple_value) {
.undefined => unreachable, // non-runtime value
.void => unreachable, // non-runtime value
.null => unreachable, // non-runtime value
.@"unreachable" => unreachable, // non-runtime value
.generic_poison => unreachable, // non-runtime value
.empty_tuple => return,
.false, .true => try code.append(gpa, switch (simple_value) {
.false => 0,
.true => 1,
else => unreachable,
@ -254,11 +247,11 @@ pub fn generateSymbol(
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
var space: Value.BigIntSpace = undefined;
const int_val = val.toBigInt(&space, zcu);
int_val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian);
int_val.writeTwosComplement(try code.addManyAsSlice(gpa, abi_size), endian);
},
.err => |err| {
const int = try pt.getErrorValue(err.name);
try code.writer().writeInt(u16, @intCast(int), endian);
try code.writer(gpa).writeInt(u16, @intCast(int), endian);
},
.error_union => |error_union| {
const payload_ty = ty.errorUnionPayload(zcu);
@ -268,8 +261,8 @@ pub fn generateSymbol(
};
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try code.writer().writeInt(u16, err_val, endian);
return .ok;
try code.writer(gpa).writeInt(u16, err_val, endian);
return;
}
const payload_align = payload_ty.abiAlignment(zcu);
@ -278,72 +271,57 @@ pub fn generateSymbol(
// error value first when its type is larger than the error union's payload
if (error_align.order(payload_align) == .gt) {
try code.writer().writeInt(u16, err_val, endian);
try code.writer(gpa).writeInt(u16, err_val, endian);
}
// emit payload part of the error union
{
const begin = code.items.len;
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (error_union.val) {
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (error_union.val) {
.err_name => try pt.intern(.{ .undef = payload_ty.toIntern() }),
.payload => |payload| payload,
}), code, reloc_parent)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
}), code, reloc_parent);
const unpadded_end = code.items.len - begin;
const padded_end = abi_align.forward(unpadded_end);
const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
if (padding > 0) {
try code.appendNTimes(0, padding);
try code.appendNTimes(gpa, 0, padding);
}
}
// Payload size is larger than error set, so emit our error set last
if (error_align.compare(.lte, payload_align)) {
const begin = code.items.len;
try code.writer().writeInt(u16, err_val, endian);
try code.writer(gpa).writeInt(u16, err_val, endian);
const unpadded_end = code.items.len - begin;
const padded_end = abi_align.forward(unpadded_end);
const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
if (padding > 0) {
try code.appendNTimes(0, padding);
try code.appendNTimes(gpa, 0, padding);
}
}
},
.enum_tag => |enum_tag| {
const int_tag_ty = ty.intTagType(zcu);
switch (try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, reloc_parent)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, reloc_parent);
},
.float => |float| switch (float.storage) {
.f16 => |f16_val| writeFloat(f16, f16_val, target, endian, try code.addManyAsArray(2)),
.f32 => |f32_val| writeFloat(f32, f32_val, target, endian, try code.addManyAsArray(4)),
.f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(8)),
.f16 => |f16_val| writeFloat(f16, f16_val, target, endian, try code.addManyAsArray(gpa, 2)),
.f32 => |f32_val| writeFloat(f32, f32_val, target, endian, try code.addManyAsArray(gpa, 4)),
.f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(gpa, 8)),
.f80 => |f80_val| {
writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(10));
writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(gpa, 10));
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
try code.appendNTimes(0, abi_size - 10);
try code.appendNTimes(gpa, 0, abi_size - 10);
},
.f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)),
},
.ptr => switch (try lowerPtr(bin_file, pt, src_loc, val.toIntern(), code, reloc_parent, 0)) {
.ok => {},
.fail => |em| return .{ .fail = em },
.f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(gpa, 16)),
},
.ptr => try lowerPtr(bin_file, pt, src_loc, val.toIntern(), code, reloc_parent, 0),
.slice => |slice| {
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.ptr), code, reloc_parent)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.len), code, reloc_parent)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.ptr), code, reloc_parent);
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.len), code, reloc_parent);
},
.opt => {
const payload_type = ty.optionalChild(zcu);
@ -352,12 +330,9 @@ pub fn generateSymbol(
if (ty.optionalReprIsPayload(zcu)) {
if (payload_val) |value| {
switch (try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent);
} else {
try code.appendNTimes(0, abi_size);
try code.appendNTimes(gpa, 0, abi_size);
}
} else {
const padding = abi_size - (math.cast(usize, payload_type.abiSize(zcu)) orelse return error.Overflow) - 1;
@ -365,39 +340,33 @@ pub fn generateSymbol(
const value = payload_val orelse Value.fromInterned(try pt.intern(.{
.undef = payload_type.toIntern(),
}));
switch (try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent);
}
try code.writer().writeByte(@intFromBool(payload_val != null));
try code.appendNTimes(0, padding);
try code.writer(gpa).writeByte(@intFromBool(payload_val != null));
try code.appendNTimes(gpa, 0, padding);
}
},
.aggregate => |aggregate| switch (ip.indexToKey(ty.toIntern())) {
.array_type => |array_type| switch (aggregate.storage) {
.bytes => |bytes| try code.appendSlice(bytes.toSlice(array_type.lenIncludingSentinel(), ip)),
.bytes => |bytes| try code.appendSlice(gpa, bytes.toSlice(array_type.lenIncludingSentinel(), ip)),
.elems, .repeated_elem => {
var index: u64 = 0;
while (index < array_type.lenIncludingSentinel()) : (index += 1) {
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) {
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[@intCast(index)],
.repeated_elem => |elem| if (index < array_type.len)
elem
else
array_type.sentinel,
}), code, reloc_parent)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
}), code, reloc_parent);
}
},
},
.vector_type => |vector_type| {
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
if (vector_type.child == .bool_type) {
const bytes = try code.addManyAsSlice(abi_size);
const bytes = try code.addManyAsSlice(gpa, abi_size);
@memset(bytes, 0xaa);
var index: usize = 0;
const len = math.cast(usize, vector_type.len) orelse return error.Overflow;
@ -436,20 +405,17 @@ pub fn generateSymbol(
}
} else {
switch (aggregate.storage) {
.bytes => |bytes| try code.appendSlice(bytes.toSlice(vector_type.len, ip)),
.bytes => |bytes| try code.appendSlice(gpa, bytes.toSlice(vector_type.len, ip)),
.elems, .repeated_elem => {
var index: u64 = 0;
while (index < vector_type.len) : (index += 1) {
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) {
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[
math.cast(usize, index) orelse return error.Overflow
],
.repeated_elem => |elem| elem,
}), code, reloc_parent)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
}), code, reloc_parent);
}
},
}
@ -457,7 +423,7 @@ pub fn generateSymbol(
const padding = abi_size -
(math.cast(usize, Type.fromInterned(vector_type.child).abiSize(zcu) * vector_type.len) orelse
return error.Overflow);
if (padding > 0) try code.appendNTimes(0, padding);
if (padding > 0) try code.appendNTimes(gpa, 0, padding);
}
},
.tuple_type => |tuple| {
@ -479,10 +445,7 @@ pub fn generateSymbol(
.repeated_elem => |elem| elem,
};
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent);
const unpadded_field_end = code.items.len - struct_begin;
// Pad struct members if required
@ -491,7 +454,7 @@ pub fn generateSymbol(
return error.Overflow;
if (padding > 0) {
try code.appendNTimes(0, padding);
try code.appendNTimes(gpa, 0, padding);
}
}
},
@ -501,7 +464,7 @@ pub fn generateSymbol(
.@"packed" => {
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
const current_pos = code.items.len;
try code.appendNTimes(0, abi_size);
try code.appendNTimes(gpa, 0, abi_size);
var bits: u16 = 0;
for (struct_type.field_types.get(ip), 0..) |field_ty, index| {
@ -519,12 +482,10 @@ pub fn generateSymbol(
if (Type.fromInterned(field_ty).zigTypeTag(zcu) == .pointer) {
const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(zcu)) orelse
return error.Overflow;
var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
defer tmp_list.deinit();
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), &tmp_list, reloc_parent)) {
.ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
.fail => |em| return Result{ .fail = em },
}
var tmp_list = try std.ArrayListUnmanaged(u8).initCapacity(gpa, field_size);
defer tmp_list.deinit(gpa);
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), &tmp_list, reloc_parent);
@memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items);
} else {
Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), pt, code.items[current_pos..], bits) catch unreachable;
}
@ -554,12 +515,9 @@ pub fn generateSymbol(
usize,
offsets[field_index] - (code.items.len - struct_begin),
) orelse return error.Overflow;
if (padding > 0) try code.appendNTimes(0, padding);
if (padding > 0) try code.appendNTimes(gpa, 0, padding);
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent);
}
const size = struct_type.sizeUnordered(ip);
@ -570,7 +528,7 @@ pub fn generateSymbol(
std.mem.alignForward(u64, size, @max(alignment, 1)) -
(code.items.len - struct_begin),
) orelse return error.Overflow;
if (padding > 0) try code.appendNTimes(0, padding);
if (padding > 0) try code.appendNTimes(gpa, 0, padding);
},
}
},
@ -585,10 +543,7 @@ pub fn generateSymbol(
// Check if we should store the tag first.
if (layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align)) {
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent);
}
const union_obj = zcu.typeToUnion(ty).?;
@ -596,39 +551,29 @@ pub fn generateSymbol(
const field_index = ty.unionTagFieldIndex(Value.fromInterned(un.tag), zcu).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBits(zcu)) {
try code.appendNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
try code.appendNTimes(gpa, 0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
} else {
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, reloc_parent)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, reloc_parent);
const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(zcu)) orelse return error.Overflow;
if (padding > 0) {
try code.appendNTimes(0, padding);
try code.appendNTimes(gpa, 0, padding);
}
}
} else {
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, reloc_parent)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, reloc_parent);
}
if (layout.tag_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) {
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent);
if (layout.padding > 0) {
try code.appendNTimes(0, layout.padding);
try code.appendNTimes(gpa, 0, layout.padding);
}
}
},
.memoized_call => unreachable,
}
return .ok;
}
fn lowerPtr(
@ -636,15 +581,15 @@ fn lowerPtr(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
ptr_val: InternPool.Index,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
reloc_parent: link.File.RelocInfo.Parent,
prev_offset: u64,
) CodeGenError!Result {
) GenerateSymbolError!void {
const zcu = pt.zcu;
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
const offset: u64 = prev_offset + ptr.byte_offset;
return switch (ptr.base_addr) {
.nav => |nav| try lowerNavRef(bin_file, pt, src_loc, nav, code, reloc_parent, offset),
.nav => |nav| try lowerNavRef(bin_file, pt, nav, code, reloc_parent, offset),
.uav => |uav| try lowerUavRef(bin_file, pt, src_loc, uav, code, reloc_parent, offset),
.int => try generateSymbol(bin_file, pt, src_loc, try pt.intValue(Type.usize, offset), code, reloc_parent),
.eu_payload => |eu_ptr| try lowerPtr(
@ -689,29 +634,62 @@ fn lowerUavRef(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
uav: InternPool.Key.Ptr.BaseAddr.Uav,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
reloc_parent: link.File.RelocInfo.Parent,
offset: u64,
) CodeGenError!Result {
) GenerateSymbolError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const target = lf.comp.root_mod.resolved_target.result;
const comp = lf.comp;
const target = &comp.root_mod.resolved_target.result;
const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
const is_obj = comp.config.output_mode == .Obj;
const uav_val = uav.val;
const uav_ty = Type.fromInterned(ip.typeOf(uav_val));
log.debug("lowerUavRef: ty = {}", .{uav_ty.fmt(pt)});
const is_fn_body = uav_ty.zigTypeTag(zcu) == .@"fn";
log.debug("lowerUavRef: ty = {}", .{uav_ty.fmt(pt)});
try code.ensureUnusedCapacity(gpa, ptr_width_bytes);
if (!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) {
try code.appendNTimes(0xaa, ptr_width_bytes);
return Result.ok;
code.appendNTimesAssumeCapacity(0xaa, ptr_width_bytes);
return;
}
switch (lf.tag) {
.c => unreachable,
.spirv => unreachable,
.nvptx => unreachable,
.wasm => {
dev.check(link.File.Tag.wasm.devFeature());
const wasm = lf.cast(.wasm).?;
assert(reloc_parent == .none);
if (is_obj) {
try wasm.out_relocs.append(gpa, .{
.offset = @intCast(code.items.len),
.pointee = .{ .symbol_index = try wasm.uavSymbolIndex(uav.val) },
.tag = if (ptr_width_bytes == 4) .memory_addr_i32 else .memory_addr_i64,
.addend = @intCast(offset),
});
} else {
try wasm.uav_fixups.ensureUnusedCapacity(gpa, 1);
wasm.uav_fixups.appendAssumeCapacity(.{
.uavs_exe_index = try wasm.refUavExe(uav.val, uav.orig_ty),
.offset = @intCast(code.items.len),
.addend = @intCast(offset),
});
}
code.appendNTimesAssumeCapacity(0, ptr_width_bytes);
return;
},
else => {},
}
const uav_align = ip.indexToKey(uav.orig_ty).ptr_type.flags.alignment;
const res = try lf.lowerUav(pt, uav_val, uav_align, src_loc);
switch (res) {
switch (try lf.lowerUav(pt, uav_val, uav_align, src_loc)) {
.mcv => {},
.fail => |em| return .{ .fail = em },
.fail => |em| std.debug.panic("TODO rework lowerUav. internal error: {s}", .{em.msg}),
}
const vaddr = try lf.getUavVAddr(uav_val, .{
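
// Illustrative sketch, not part of this commit: in the wasm branch above the
// linker does not know the final address yet, so it appends placeholder zero
// bytes and records a fixup (offset + addend) to patch during flush. A
// standalone model of that two-phase scheme:
const std = @import("std");

const Fixup = struct { offset: u32, addend: u32 };

test "record fixups while emitting, patch at flush" {
    const gpa = std.testing.allocator;
    var code: std.ArrayListUnmanaged(u8) = .empty;
    defer code.deinit(gpa);
    var fixups: std.ArrayListUnmanaged(Fixup) = .empty;
    defer fixups.deinit(gpa);

    // Emit phase: reserve the slot, remember where it is.
    try fixups.append(gpa, .{ .offset = @intCast(code.items.len), .addend = 8 });
    try code.appendNTimes(gpa, 0, 4);

    // Flush phase: the target's virtual address is finally known.
    const vaddr: u32 = 0x1000;
    for (fixups.items) |f|
        std.mem.writeInt(u32, code.items[f.offset..][0..4], vaddr + f.addend, .little);

    try std.testing.expectEqual(@as(u32, 0x1008), std.mem.readInt(u32, code.items[0..4], .little));
}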
@ -721,51 +699,91 @@ fn lowerUavRef(
});
const endian = target.cpu.arch.endian();
switch (ptr_width_bytes) {
2 => mem.writeInt(u16, try code.addManyAsArray(2), @intCast(vaddr), endian),
4 => mem.writeInt(u32, try code.addManyAsArray(4), @intCast(vaddr), endian),
8 => mem.writeInt(u64, try code.addManyAsArray(8), vaddr, endian),
2 => mem.writeInt(u16, code.addManyAsArrayAssumeCapacity(2), @intCast(vaddr), endian),
4 => mem.writeInt(u32, code.addManyAsArrayAssumeCapacity(4), @intCast(vaddr), endian),
8 => mem.writeInt(u64, code.addManyAsArrayAssumeCapacity(8), vaddr, endian),
else => unreachable,
}
return Result.ok;
}
fn lowerNavRef(
lf: *link.File,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
nav_index: InternPool.Nav.Index,
code: *std.ArrayList(u8),
code: *std.ArrayListUnmanaged(u8),
reloc_parent: link.File.RelocInfo.Parent,
offset: u64,
) CodeGenError!Result {
_ = src_loc;
) GenerateSymbolError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const target = zcu.navFileScope(nav_index).mod.resolved_target.result;
const ptr_width = target.ptrBitWidth();
const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
const is_obj = lf.comp.config.output_mode == .Obj;
const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip));
const is_fn_body = nav_ty.zigTypeTag(zcu) == .@"fn";
try code.ensureUnusedCapacity(gpa, ptr_width_bytes);
if (!is_fn_body and !nav_ty.hasRuntimeBits(zcu)) {
try code.appendNTimes(0xaa, @divExact(ptr_width, 8));
return Result.ok;
code.appendNTimesAssumeCapacity(0xaa, ptr_width_bytes);
return;
}
const vaddr = try lf.getNavVAddr(pt, nav_index, .{
switch (lf.tag) {
.c => unreachable,
.spirv => unreachable,
.nvptx => unreachable,
.wasm => {
dev.check(link.File.Tag.wasm.devFeature());
const wasm = lf.cast(.wasm).?;
assert(reloc_parent == .none);
if (is_fn_body) {
const gop = try wasm.zcu_indirect_function_set.getOrPut(gpa, nav_index);
if (!gop.found_existing) gop.value_ptr.* = {};
if (is_obj) {
@panic("TODO add out_reloc for this");
} else {
try wasm.func_table_fixups.append(gpa, .{
.table_index = @enumFromInt(gop.index),
.offset = @intCast(code.items.len),
});
}
} else {
if (is_obj) {
try wasm.out_relocs.append(gpa, .{
.offset = @intCast(code.items.len),
.pointee = .{ .symbol_index = try wasm.navSymbolIndex(nav_index) },
.tag = if (ptr_width_bytes == 4) .memory_addr_i32 else .memory_addr_i64,
.addend = @intCast(offset),
});
} else {
try wasm.nav_fixups.ensureUnusedCapacity(gpa, 1);
wasm.nav_fixups.appendAssumeCapacity(.{
.navs_exe_index = try wasm.refNavExe(nav_index),
.offset = @intCast(code.items.len),
.addend = @intCast(offset),
});
}
}
code.appendNTimesAssumeCapacity(0, ptr_width_bytes);
return;
},
else => {},
}
const vaddr = lf.getNavVAddr(pt, nav_index, .{
.parent = reloc_parent,
.offset = code.items.len,
.addend = @intCast(offset),
});
}) catch @panic("TODO rework getNavVAddr");
const endian = target.cpu.arch.endian();
switch (ptr_width) {
16 => mem.writeInt(u16, try code.addManyAsArray(2), @intCast(vaddr), endian),
32 => mem.writeInt(u32, try code.addManyAsArray(4), @intCast(vaddr), endian),
64 => mem.writeInt(u64, try code.addManyAsArray(8), vaddr, endian),
switch (ptr_width_bytes) {
2 => mem.writeInt(u16, code.addManyAsArrayAssumeCapacity(2), @intCast(vaddr), endian),
4 => mem.writeInt(u32, code.addManyAsArrayAssumeCapacity(4), @intCast(vaddr), endian),
8 => mem.writeInt(u64, code.addManyAsArrayAssumeCapacity(8), vaddr, endian),
else => unreachable,
}
return Result.ok;
}
/// Helper struct to denote that the value is in memory but requires a linker relocation fixup:

View File

@ -3052,12 +3052,12 @@ pub fn genDeclValue(
try w.writeAll(";\n");
}
pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const u32) !void {
pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const Zcu.Export.Index) !void {
const zcu = dg.pt.zcu;
const ip = &zcu.intern_pool;
const fwd = dg.fwdDeclWriter();
const main_name = zcu.all_exports.items[export_indices[0]].opts.name;
const main_name = export_indices[0].ptr(zcu).opts.name;
try fwd.writeAll("#define ");
switch (exported) {
.nav => |nav| try dg.renderNavName(fwd, nav),
@ -3069,7 +3069,7 @@ pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const
const exported_val = exported.getValue(zcu);
if (ip.isFunctionType(exported_val.typeOf(zcu).toIntern())) return for (export_indices) |export_index| {
const @"export" = &zcu.all_exports.items[export_index];
const @"export" = export_index.ptr(zcu);
try fwd.writeAll("zig_extern ");
if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage_fn ");
try dg.renderFunctionSignature(
@ -3091,7 +3091,7 @@ pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const
else => true,
};
for (export_indices) |export_index| {
const @"export" = &zcu.all_exports.items[export_index];
const @"export" = export_index.ptr(zcu);
try fwd.writeAll("zig_extern ");
if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage ");
const extern_name = @"export".opts.name.toSlice(ip);
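
// Illustrative sketch, not part of this commit: replacing raw `u32` export
// indices with `Zcu.Export.Index` amounts to a typed index whose `ptr` method
// resolves through the backing array, as in this standalone model (the
// `Store` name is a placeholder for the Zcu):
const std = @import("std");

const Export = struct {
    name: []const u8,

    const Index = enum(u32) {
        _,

        fn ptr(i: Index, store: *const Store) *const Export {
            return &store.all_exports.items[@intFromEnum(i)];
        }
    };
};

const Store = struct { all_exports: std.ArrayListUnmanaged(Export) };

test "typed index resolves through the store" {
    const gpa = std.testing.allocator;
    var store: Store = .{ .all_exports = .empty };
    defer store.all_exports.deinit(gpa);
    try store.all_exports.append(gpa, .{ .name = "main" });
    const idx: Export.Index = @enumFromInt(0);
    try std.testing.expectEqualStrings("main", idx.ptr(&store).name);
}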

View File

@ -1059,9 +1059,10 @@ pub const Object = struct {
lto: Compilation.Config.LtoMode,
};
pub fn emit(o: *Object, options: EmitOptions) !void {
pub fn emit(o: *Object, options: EmitOptions) error{ LinkFailure, OutOfMemory }!void {
const zcu = o.pt.zcu;
const comp = zcu.comp;
const diags = &comp.link_diags;
{
try o.genErrorNameTable();
@ -1223,27 +1224,30 @@ pub const Object = struct {
o.builder.clearAndFree();
if (options.pre_bc_path) |path| {
var file = try std.fs.cwd().createFile(path, .{});
var file = std.fs.cwd().createFile(path, .{}) catch |err|
return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) });
defer file.close();
const ptr: [*]const u8 = @ptrCast(bitcode.ptr);
try file.writeAll(ptr[0..(bitcode.len * 4)]);
file.writeAll(ptr[0..(bitcode.len * 4)]) catch |err|
return diags.fail("failed to write to '{s}': {s}", .{ path, @errorName(err) });
}
if (options.asm_path == null and options.bin_path == null and
options.post_ir_path == null and options.post_bc_path == null) return;
if (options.post_bc_path) |path| {
var file = try std.fs.cwd().createFileZ(path, .{});
var file = std.fs.cwd().createFileZ(path, .{}) catch |err|
return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) });
defer file.close();
const ptr: [*]const u8 = @ptrCast(bitcode.ptr);
try file.writeAll(ptr[0..(bitcode.len * 4)]);
file.writeAll(ptr[0..(bitcode.len * 4)]) catch |err|
return diags.fail("failed to write to '{s}': {s}", .{ path, @errorName(err) });
}
if (!build_options.have_llvm or !comp.config.use_lib_llvm) {
log.err("emitting without libllvm not implemented", .{});
return error.FailedToEmit;
return diags.fail("emitting without libllvm not implemented", .{});
}
initializeLLVMTarget(comp.root_mod.resolved_target.result.cpu.arch);
@ -1263,8 +1267,7 @@ pub const Object = struct {
var module: *llvm.Module = undefined;
if (context.parseBitcodeInContext2(bitcode_memory_buffer, &module).toBool() or context.getBrokenDebugInfo()) {
log.err("Failed to parse bitcode", .{});
return error.FailedToEmit;
return diags.fail("Failed to parse bitcode", .{});
}
break :emit .{ context, module };
};
@ -1274,12 +1277,7 @@ pub const Object = struct {
var error_message: [*:0]const u8 = undefined;
if (llvm.Target.getFromTriple(target_triple_sentinel, &target, &error_message).toBool()) {
defer llvm.disposeMessage(error_message);
log.err("LLVM failed to parse '{s}': {s}", .{
target_triple_sentinel,
error_message,
});
@panic("Invalid LLVM triple");
return diags.fail("LLVM failed to parse '{s}': {s}", .{ target_triple_sentinel, error_message });
}
const optimize_mode = comp.root_mod.optimize_mode;
@ -1374,10 +1372,9 @@ pub const Object = struct {
if (options.asm_path != null and options.bin_path != null) {
if (target_machine.emitToFile(module, &error_message, &lowered_options)) {
defer llvm.disposeMessage(error_message);
log.err("LLVM failed to emit bin={s} ir={s}: {s}", .{
return diags.fail("LLVM failed to emit bin={s} ir={s}: {s}", .{
emit_bin_msg, post_llvm_ir_msg, error_message,
});
return error.FailedToEmit;
}
lowered_options.bin_filename = null;
lowered_options.llvm_ir_filename = null;
@ -1386,11 +1383,9 @@ pub const Object = struct {
lowered_options.asm_filename = options.asm_path;
if (target_machine.emitToFile(module, &error_message, &lowered_options)) {
defer llvm.disposeMessage(error_message);
log.err("LLVM failed to emit asm={s} bin={s} ir={s} bc={s}: {s}", .{
emit_asm_msg, emit_bin_msg, post_llvm_ir_msg, post_llvm_bc_msg,
error_message,
return diags.fail("LLVM failed to emit asm={s} bin={s} ir={s} bc={s}: {s}", .{
emit_asm_msg, emit_bin_msg, post_llvm_ir_msg, post_llvm_bc_msg, error_message,
});
return error.FailedToEmit;
}
}
@ -1815,7 +1810,7 @@ pub const Object = struct {
self: *Object,
pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
export_indices: []const Zcu.Export.Index,
) link.File.UpdateExportsError!void {
assert(std.meta.eql(pt, self.pt));
const zcu = pt.zcu;
@ -1843,11 +1838,11 @@ pub const Object = struct {
o: *Object,
zcu: *Zcu,
exported_value: InternPool.Index,
export_indices: []const u32,
export_indices: []const Zcu.Export.Index,
) link.File.UpdateExportsError!void {
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const main_exp_name = try o.builder.strtabString(zcu.all_exports.items[export_indices[0]].opts.name.toSlice(ip));
const main_exp_name = try o.builder.strtabString(export_indices[0].ptr(zcu).opts.name.toSlice(ip));
const global_index = i: {
const gop = try o.uav_map.getOrPut(gpa, exported_value);
if (gop.found_existing) {
@ -1878,11 +1873,11 @@ pub const Object = struct {
o: *Object,
zcu: *Zcu,
global_index: Builder.Global.Index,
export_indices: []const u32,
export_indices: []const Zcu.Export.Index,
) link.File.UpdateExportsError!void {
const comp = zcu.comp;
const ip = &zcu.intern_pool;
const first_export = zcu.all_exports.items[export_indices[0]];
const first_export = export_indices[0].ptr(zcu);
// We will rename this global to have a name matching `first_export`.
// Successive exports become aliases.
@ -1939,7 +1934,7 @@ pub const Object = struct {
// Until then we iterate over existing aliases and make them point
// to the correct decl, or otherwise add a new alias. Old aliases are leaked.
for (export_indices[1..]) |export_idx| {
const exp = zcu.all_exports.items[export_idx];
const exp = export_idx.ptr(zcu);
const exp_name = try o.builder.strtabString(exp.opts.name.toSlice(ip));
if (o.builder.getGlobal(exp_name)) |global| {
switch (global.ptrConst(&o.builder).kind) {
@ -1967,11 +1962,6 @@ pub const Object = struct {
}
}
pub fn freeDecl(self: *Object, decl_index: InternPool.DeclIndex) void {
const global = self.decl_map.get(decl_index) orelse return;
global.delete(&self.builder);
}
fn getDebugFile(o: *Object, file_index: Zcu.File.Index) Allocator.Error!Builder.Metadata {
const gpa = o.gpa;
const gop = try o.debug_file_map.getOrPut(gpa, file_index);

View File

@ -30,6 +30,10 @@ pub const Env = enum {
/// - `zig build-* -fno-llvm -fno-lld -target riscv64-linux`
@"riscv64-linux",
/// - sema
/// - `zig build-* -fno-llvm -fno-lld -target wasm32-* --listen=-`
wasm,
pub inline fn supports(comptime dev_env: Env, comptime feature: Feature) bool {
return switch (dev_env) {
.full => true,
@ -144,6 +148,14 @@ pub const Env = enum {
=> true,
else => Env.sema.supports(feature),
},
.wasm => switch (feature) {
.stdio_listen,
.incremental,
.wasm_backend,
.wasm_linker,
=> true,
else => Env.sema.supports(feature),
},
};
}
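
// Illustrative sketch, not part of this commit: `Env.supports` gates linker
// and backend features at compile time, so a build restricted to the new
// `wasm` env rejects unrelated features before they are even referenced.
// Standalone model with a trimmed feature set (the real `.wasm` prong falls
// back to `Env.sema.supports` rather than `false`):
const Feature = enum { stdio_listen, incremental, wasm_backend, wasm_linker, elf_linker };

const Env = enum {
    full,
    wasm,

    inline fn supports(comptime dev_env: Env, comptime feature: Feature) bool {
        return switch (dev_env) {
            .full => true,
            .wasm => switch (feature) {
                .stdio_listen, .incremental, .wasm_backend, .wasm_linker => true,
                else => false,
            },
        };
    }
};

comptime {
    if (!Env.wasm.supports(.wasm_linker)) @compileError("dev feature gated out");
    if (Env.wasm.supports(.elf_linker)) @compileError("unexpected dev feature");
}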

View File

@ -1217,6 +1217,18 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
});
}
pub fn sharedObjectsCount(target: *const std.Target) u8 {
const target_version = target.os.versionRange().gnuLibCVersion() orelse return 0;
var count: u8 = 0;
for (libs) |lib| {
if (lib.removed_in) |rem_in| {
if (target_version.order(rem_in) != .lt) continue;
}
count += 1;
}
return count;
}
fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
const target_version = comp.getTarget().os.versionRange().gnuLibCVersion().?;
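
// Illustrative sketch, not part of this commit: `sharedObjectsCount` keeps a
// lib only while the target glibc predates its `removed_in` version. The
// ordering test in isolation, using std.SemanticVersion:
const std = @import("std");

test "a lib is skipped once the target reaches removed_in" {
    const removed_in: std.SemanticVersion = .{ .major = 2, .minor = 34, .patch = 0 };
    const old_target: std.SemanticVersion = .{ .major = 2, .minor = 31, .patch = 0 };
    const new_target: std.SemanticVersion = .{ .major = 2, .minor = 34, .patch = 0 };
    try std.testing.expect(old_target.order(removed_in) == .lt); // still present: counted
    try std.testing.expect(new_target.order(removed_in) != .lt); // removed: skipped
}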

View File

@ -38,6 +38,11 @@ pub const Diags = struct {
flags: Flags,
lld: std.ArrayListUnmanaged(Lld),
pub const SourceLocation = union(enum) {
none,
wasm: File.Wasm.SourceLocation,
};
pub const Flags = packed struct {
no_entry_point_found: bool = false,
missing_libc: bool = false,
@ -70,9 +75,25 @@ pub const Diags = struct {
};
pub const Msg = struct {
source_location: SourceLocation = .none,
msg: []const u8,
notes: []Msg = &.{},
fn string(
msg: *const Msg,
bundle: *std.zig.ErrorBundle.Wip,
base: ?*File,
) Allocator.Error!std.zig.ErrorBundle.String {
return switch (msg.source_location) {
.none => try bundle.addString(msg.msg),
.wasm => |sl| {
dev.check(.wasm_linker);
const wasm = base.?.cast(.wasm).?;
return sl.string(msg.msg, bundle, wasm);
},
};
}
pub fn deinit(self: *Msg, gpa: Allocator) void {
for (self.notes) |*note| note.deinit(gpa);
gpa.free(self.notes);
@ -97,15 +118,12 @@ pub const Diags = struct {
err_msg.msg = try std.fmt.allocPrint(gpa, format, args);
}
pub fn addNote(
err: *ErrorWithNotes,
comptime format: []const u8,
args: anytype,
) error{OutOfMemory}!void {
pub fn addNote(err: *ErrorWithNotes, comptime format: []const u8, args: anytype) void {
const gpa = err.diags.gpa;
const msg = std.fmt.allocPrint(gpa, format, args) catch return err.diags.setAllocFailure();
const err_msg = &err.diags.msgs.items[err.index];
assert(err.note_slot < err_msg.notes.len);
err_msg.notes[err.note_slot] = .{ .msg = try std.fmt.allocPrint(gpa, format, args) };
err_msg.notes[err.note_slot] = .{ .msg = msg };
err.note_slot += 1;
}
};
@ -196,22 +214,35 @@ pub const Diags = struct {
return error.LinkFailure;
}
pub fn failSourceLocation(diags: *Diags, sl: SourceLocation, comptime format: []const u8, args: anytype) error{LinkFailure} {
@branchHint(.cold);
addErrorSourceLocation(diags, sl, format, args);
return error.LinkFailure;
}
pub fn addError(diags: *Diags, comptime format: []const u8, args: anytype) void {
return addErrorSourceLocation(diags, .none, format, args);
}
pub fn addErrorSourceLocation(diags: *Diags, sl: SourceLocation, comptime format: []const u8, args: anytype) void {
@branchHint(.cold);
const gpa = diags.gpa;
const eu_main_msg = std.fmt.allocPrint(gpa, format, args);
diags.mutex.lock();
defer diags.mutex.unlock();
addErrorLockedFallible(diags, eu_main_msg) catch |err| switch (err) {
addErrorLockedFallible(diags, sl, eu_main_msg) catch |err| switch (err) {
error.OutOfMemory => diags.setAllocFailureLocked(),
};
}
fn addErrorLockedFallible(diags: *Diags, eu_main_msg: Allocator.Error![]u8) Allocator.Error!void {
fn addErrorLockedFallible(diags: *Diags, sl: SourceLocation, eu_main_msg: Allocator.Error![]u8) Allocator.Error!void {
const gpa = diags.gpa;
const main_msg = try eu_main_msg;
errdefer gpa.free(main_msg);
try diags.msgs.append(gpa, .{ .msg = main_msg });
try diags.msgs.append(gpa, .{
.msg = main_msg,
.source_location = sl,
});
}
pub fn addErrorWithNotes(diags: *Diags, note_count: usize) error{OutOfMemory}!ErrorWithNotes {
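
// Illustrative sketch, not part of this commit: `addErrorSourceLocation`
// above formats the message *before* taking the mutex and hands the
// unresolved error union to the locked helper, so OOM is handled exactly once
// under the lock. Standalone model of that idiom:
const std = @import("std");

const Sink = struct {
    mutex: std.Thread.Mutex = .{},
    msgs: std.ArrayListUnmanaged([]u8) = .empty,
    alloc_failure: bool = false,

    fn add(s: *Sink, gpa: std.mem.Allocator, comptime fmt: []const u8, args: anytype) void {
        const eu_msg = std.fmt.allocPrint(gpa, fmt, args); // not unwrapped yet
        s.mutex.lock();
        defer s.mutex.unlock();
        s.addLockedFallible(gpa, eu_msg) catch |err| switch (err) {
            error.OutOfMemory => s.alloc_failure = true,
        };
    }

    fn addLockedFallible(s: *Sink, gpa: std.mem.Allocator, eu_msg: std.mem.Allocator.Error![]u8) !void {
        const msg = try eu_msg;
        errdefer gpa.free(msg);
        try s.msgs.append(gpa, msg);
    }
};

test Sink {
    const gpa = std.testing.allocator;
    var s: Sink = .{};
    defer {
        for (s.msgs.items) |m| gpa.free(m);
        s.msgs.deinit(gpa);
    }
    s.add(gpa, "undefined symbol: {s}", .{"foo"});
    try std.testing.expectEqualStrings("undefined symbol: foo", s.msgs.items[0]);
}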
@ -329,16 +360,16 @@ pub const Diags = struct {
diags.flags.alloc_failure_occurred = true;
}
pub fn addMessagesToBundle(diags: *const Diags, bundle: *std.zig.ErrorBundle.Wip) Allocator.Error!void {
pub fn addMessagesToBundle(diags: *const Diags, bundle: *std.zig.ErrorBundle.Wip, base: ?*File) Allocator.Error!void {
for (diags.msgs.items) |link_err| {
try bundle.addRootErrorMessage(.{
.msg = try bundle.addString(link_err.msg),
.msg = try link_err.string(bundle, base),
.notes_len = @intCast(link_err.notes.len),
});
const notes_start = try bundle.reserveNotes(@intCast(link_err.notes.len));
for (link_err.notes, 0..) |note, i| {
bundle.extra.items[notes_start + i] = @intFromEnum(try bundle.addErrorMessage(.{
.msg = try bundle.addString(note.msg),
.msg = try note.string(bundle, base),
}));
}
}
@ -364,6 +395,7 @@ pub const File = struct {
build_id: std.zig.BuildId,
allow_shlib_undefined: bool,
stack_size: u64,
post_prelink: bool = false,
/// Prevents other processes from clobbering files in the output directory
/// of this linking operation.
@ -400,6 +432,7 @@ pub const File = struct {
export_table: bool,
initial_memory: ?u64,
max_memory: ?u64,
object_host_name: ?[]const u8,
export_symbol_names: []const []const u8,
global_base: ?u64,
build_id: std.zig.BuildId,
@ -632,43 +665,15 @@ pub const File = struct {
pub const UpdateDebugInfoError = Dwarf.UpdateError;
pub const FlushDebugInfoError = Dwarf.FlushError;
/// Note that `LinkFailure` is not a member of this error set because the error message
/// must be attached to `Zcu.failed_codegen` rather than `Compilation.link_diags`.
pub const UpdateNavError = error{
OutOfMemory,
Overflow,
Underflow,
FileTooBig,
InputOutput,
FilesOpenedWithWrongFlags,
IsDir,
NoSpaceLeft,
Unseekable,
PermissionDenied,
SwapFile,
CorruptedData,
SystemResources,
OperationAborted,
BrokenPipe,
ConnectionResetByPeer,
ConnectionTimedOut,
SocketNotConnected,
NotOpenForReading,
WouldBlock,
Canceled,
AccessDenied,
Unexpected,
DiskQuota,
NotOpenForWriting,
AnalysisFail,
OutOfMemory,
/// Indicates the error is already reported and stored in
/// `failed_codegen` on the Zcu.
CodegenFail,
EmitFail,
NameTooLong,
CurrentWorkingDirectoryUnlinked,
LockViolation,
NetNameDeleted,
DeviceBusy,
InvalidArgument,
HotSwapUnavailableOnHostOperatingSystem,
} || UpdateDebugInfoError;
};
/// Called from within CodeGen to retrieve the symbol index of a global symbol.
/// If no symbol exists yet with this name, a new undefined global symbol will
@ -701,7 +706,13 @@ pub const File = struct {
}
}
pub fn updateContainerType(base: *File, pt: Zcu.PerThread, ty: InternPool.Index) UpdateNavError!void {
pub const UpdateContainerTypeError = error{
OutOfMemory,
/// `Zcu.failed_types` is already populated with the error message.
TypeFailureReported,
};
pub fn updateContainerType(base: *File, pt: Zcu.PerThread, ty: InternPool.Index) UpdateContainerTypeError!void {
switch (base.tag) {
else => {},
inline .elf => |tag| {
@ -727,9 +738,15 @@ pub const File = struct {
}
}
pub const UpdateLineNumberError = error{
OutOfMemory,
Overflow,
LinkFailure,
};
/// On an incremental update, fixup the line number of all `Nav`s at the given `TrackedInst`, because
/// its line number has changed. The ZIR instruction `ti_id` has tag `.declaration`.
pub fn updateLineNumber(base: *File, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) UpdateNavError!void {
pub fn updateLineNumber(base: *File, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) UpdateLineNumberError!void {
{
const ti = ti_id.resolveFull(&pt.zcu.intern_pool).?;
const file = pt.zcu.fileByIndex(ti.file);
@ -771,83 +788,11 @@ pub const File = struct {
}
}
/// TODO audit this error set. most of these should be collapsed into one error,
/// and Diags.Flags should be updated to convey the meaning to the user.
pub const FlushError = error{
CacheCheckFailed,
CurrentWorkingDirectoryUnlinked,
DivisionByZero,
DllImportLibraryNotFound,
ExpectedFuncType,
FailedToEmit,
FileSystem,
FilesOpenedWithWrongFlags,
/// Deprecated. Use `LinkFailure` instead.
/// Formerly used to indicate an error will be present in `Compilation.link_errors`.
FlushFailure,
/// Indicates an error will be present in `Compilation.link_errors`.
/// Indicates an error will be present in `Compilation.link_diags`.
LinkFailure,
FunctionSignatureMismatch,
GlobalTypeMismatch,
HotSwapUnavailableOnHostOperatingSystem,
InvalidCharacter,
InvalidEntryKind,
InvalidFeatureSet,
InvalidFormat,
InvalidIndex,
InvalidInitFunc,
InvalidMagicByte,
InvalidWasmVersion,
LLDCrashed,
LLDReportedFailure,
LLD_LinkingIsTODO_ForSpirV,
LibCInstallationMissingCrtDir,
LibCInstallationNotAvailable,
LinkingWithoutZigSourceUnimplemented,
MalformedArchive,
MalformedDwarf,
MalformedSection,
MemoryTooBig,
MemoryTooSmall,
MissAlignment,
MissingEndForBody,
MissingEndForExpression,
MissingSymbol,
MissingTableSymbols,
ModuleNameMismatch,
NoObjectsToLink,
NotObjectFile,
NotSupported,
OutOfMemory,
Overflow,
PermissionDenied,
StreamTooLong,
SwapFile,
SymbolCollision,
SymbolMismatchingType,
TODOImplementPlan9Objs,
TODOImplementWritingLibFiles,
UnableToSpawnSelf,
UnableToSpawnWasm,
UnableToWriteArchive,
UndefinedLocal,
UndefinedSymbol,
Underflow,
UnexpectedRemainder,
UnexpectedTable,
UnexpectedValue,
UnknownFeature,
UnrecognizedVolume,
Unseekable,
UnsupportedCpuArchitecture,
UnsupportedVersion,
UnexpectedEndOfFile,
} ||
fs.File.WriteFileError ||
fs.File.OpenError ||
std.process.Child.SpawnError ||
fs.Dir.CopyFileError ||
FlushDebugInfoError;
};
/// Commit pending changes and write headers. Takes into account final output mode
/// and `use_lld`, not only `effectiveOutputMode`.
@ -864,10 +809,17 @@ pub const File = struct {
assert(comp.c_object_table.count() == 1);
const the_key = comp.c_object_table.keys()[0];
const cached_pp_file_path = the_key.status.success.object_path;
try cached_pp_file_path.root_dir.handle.copyFile(cached_pp_file_path.sub_path, emit.root_dir.handle, emit.sub_path, .{});
cached_pp_file_path.root_dir.handle.copyFile(cached_pp_file_path.sub_path, emit.root_dir.handle, emit.sub_path, .{}) catch |err| {
const diags = &base.comp.link_diags;
return diags.fail("failed to copy '{'}' to '{'}': {s}", .{
@as(Path, cached_pp_file_path), @as(Path, emit), @errorName(err),
});
};
return;
}
assert(base.post_prelink);
const use_lld = build_options.have_llvm and comp.config.use_lld;
const output_mode = comp.config.output_mode;
const link_mode = comp.config.link_mode;
@ -893,16 +845,6 @@ pub const File = struct {
}
}
/// Called when a Decl is deleted from the Zcu.
pub fn freeDecl(base: *File, decl_index: InternPool.DeclIndex) void {
switch (base.tag) {
inline else => |tag| {
dev.check(tag.devFeature());
@as(*tag.Type(), @fieldParentPtr("base", base)).freeDecl(decl_index);
},
}
}
pub const UpdateExportsError = error{
OutOfMemory,
AnalysisFail,
@ -916,7 +858,7 @@ pub const File = struct {
base: *File,
pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
export_indices: []const Zcu.Export.Index,
) UpdateExportsError!void {
switch (base.tag) {
inline else => |tag| {
@ -932,6 +874,7 @@ pub const File = struct {
addend: u32,
pub const Parent = union(enum) {
none,
atom_index: u32,
debug_output: DebugInfoOutput,
};
@ -948,6 +891,7 @@ pub const File = struct {
.c => unreachable,
.spirv => unreachable,
.nvptx => unreachable,
.wasm => unreachable,
inline else => |tag| {
dev.check(tag.devFeature());
return @as(*tag.Type(), @fieldParentPtr("base", base)).getNavVAddr(pt, nav_index, reloc_info);
@ -966,6 +910,7 @@ pub const File = struct {
.c => unreachable,
.spirv => unreachable,
.nvptx => unreachable,
.wasm => unreachable,
inline else => |tag| {
dev.check(tag.devFeature());
return @as(*tag.Type(), @fieldParentPtr("base", base)).lowerUav(pt, decl_val, decl_align, src_loc);
@ -978,6 +923,7 @@ pub const File = struct {
.c => unreachable,
.spirv => unreachable,
.nvptx => unreachable,
.wasm => unreachable,
inline else => |tag| {
dev.check(tag.devFeature());
return @as(*tag.Type(), @fieldParentPtr("base", base)).getUavVAddr(decl_val, reloc_info);
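
// Illustrative sketch, not part of this commit: the dispatch pattern above
// embeds a common `base` in each backend, switches on the tag with an
// `inline else` prong, and recovers the concrete linker via @fieldParentPtr.
// Standalone model with two stand-in backends:
const std = @import("std");

const File = struct {
    tag: Tag,

    const Tag = enum { elf, wasm };

    fn name(base: *File) []const u8 {
        switch (base.tag) {
            inline else => |tag| {
                const Concrete = switch (tag) {
                    .elf => Elf,
                    .wasm => Wasm,
                };
                const concrete: *Concrete = @fieldParentPtr("base", base);
                return concrete.name;
            },
        }
    }
};

const Elf = struct { base: File, name: []const u8 = "elf" };
const Wasm = struct { base: File, name: []const u8 = "wasm" };

test "tag dispatch recovers the concrete backend" {
    var elf: Elf = .{ .base = .{ .tag = .elf } };
    try std.testing.expectEqualStrings("elf", elf.base.name());
}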
@ -1099,12 +1045,44 @@ pub const File = struct {
}
}
/// Called when all linker inputs have been sent via `loadInput`. After
/// this, `loadInput` will not be called anymore.
pub fn prelink(base: *File, prog_node: std.Progress.Node) FlushError!void {
assert(!base.post_prelink);
const use_lld = build_options.have_llvm and base.comp.config.use_lld;
if (use_lld) return;
// In this case, an object file is created by the LLVM backend, so
// there is no prelink phase. The Zig code is linked as a standard
// object along with the others.
if (base.zcu_object_sub_path != null) return;
switch (base.tag) {
inline .wasm => |tag| {
dev.check(tag.devFeature());
return @as(*tag.Type(), @fieldParentPtr("base", base)).prelink(prog_node);
},
else => {},
}
}
pub fn linkAsArchive(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void {
dev.check(.lld_linker);
const tracy = trace(@src());
defer tracy.end();
const comp = base.comp;
const diags = &comp.link_diags;
return linkAsArchiveInner(base, arena, tid, prog_node) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("failed to link as archive: {s}", .{@errorName(e)}),
};
}
fn linkAsArchiveInner(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
const comp = base.comp;
const directory = base.emit.root_dir; // Just an alias to make it shorter to type.
@ -1364,6 +1342,16 @@ pub const File = struct {
}, llvm_object, prog_node);
}
pub fn cgFail(
base: *File,
nav_index: InternPool.Nav.Index,
comptime format: []const u8,
args: anytype,
) error{ CodegenFail, OutOfMemory } {
@branchHint(.cold);
return base.comp.zcu.?.codegenFail(nav_index, format, args);
}
pub const C = @import("link/C.zig");
pub const Coff = @import("link/Coff.zig");
pub const Plan9 = @import("link/Plan9.zig");
@ -1379,12 +1367,32 @@ pub const File = struct {
/// from the rest of compilation. All tasks performed here are
/// single-threaded with respect to one another.
pub fn flushTaskQueue(tid: usize, comp: *Compilation) void {
const diags = &comp.link_diags;
// As soon as check() is called, another `flushTaskQueue` call could occur,
// so the safety lock must go after the check.
while (comp.link_task_queue.check()) |tasks| {
comp.link_task_queue_safety.lock();
defer comp.link_task_queue_safety.unlock();
if (comp.remaining_prelink_tasks > 0) {
comp.link_task_queue_postponed.ensureUnusedCapacity(comp.gpa, tasks.len) catch |err| switch (err) {
error.OutOfMemory => return diags.setAllocFailure(),
};
}
for (tasks) |task| doTask(comp, tid, task);
if (comp.remaining_prelink_tasks == 0) {
if (comp.bin_file) |base| if (!base.post_prelink) {
base.prelink(comp.work_queue_progress_node) catch |err| switch (err) {
error.OutOfMemory => diags.setAllocFailure(),
error.LinkFailure => continue,
};
base.post_prelink = true;
for (comp.link_task_queue_postponed.items) |task| doTask(comp, tid, task);
comp.link_task_queue_postponed.clearRetainingCapacity();
};
}
}
}
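
// Illustrative sketch, not part of this commit: codegen tasks that arrive
// before the prelink phase finishes are parked in a postponed queue and
// replayed, in order, once `remaining_prelink_tasks` reaches zero. A
// standalone model of that ordering:
const std = @import("std");

const Task = union(enum) { load: u32, codegen: u32 };

test "codegen tasks replay after prelink completes" {
    const gpa = std.testing.allocator;
    var postponed: std.ArrayListUnmanaged(Task) = .empty;
    defer postponed.deinit(gpa);
    var done: std.ArrayListUnmanaged(Task) = .empty;
    defer done.deinit(gpa);

    var remaining_prelink: u32 = 1;
    const incoming = [_]Task{ .{ .codegen = 7 }, .{ .load = 1 }, .{ .codegen = 9 } };
    for (incoming) |task| switch (task) {
        .load => {
            try done.append(gpa, task);
            remaining_prelink -= 1;
            if (remaining_prelink == 0) {
                // Prelink is done: replay everything that was parked.
                try done.appendSlice(gpa, postponed.items);
                postponed.clearRetainingCapacity();
            }
        },
        .codegen => if (remaining_prelink == 0)
            try done.append(gpa, task)
        else
            try postponed.append(gpa, task),
    };
    try std.testing.expectEqual(@as(usize, 3), done.items.len);
    try std.testing.expectEqual(Task{ .load = 1 }, done.items[0]);
    try std.testing.expectEqual(Task{ .codegen = 7 }, done.items[1]);
}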
@ -1428,6 +1436,7 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void {
const diags = &comp.link_diags;
switch (task) {
.load_explicitly_provided => if (comp.bin_file) |base| {
comp.remaining_prelink_tasks -= 1;
const prog_node = comp.work_queue_progress_node.start("Parse Linker Inputs", comp.link_inputs.len);
defer prog_node.end();
for (comp.link_inputs) |input| {
@ -1445,6 +1454,7 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void {
}
},
.load_host_libc => if (comp.bin_file) |base| {
comp.remaining_prelink_tasks -= 1;
const prog_node = comp.work_queue_progress_node.start("Linker Parse Host libc", 0);
defer prog_node.end();
@ -1504,6 +1514,7 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void {
}
},
.load_object => |path| if (comp.bin_file) |base| {
comp.remaining_prelink_tasks -= 1;
const prog_node = comp.work_queue_progress_node.start("Linker Parse Object", 0);
defer prog_node.end();
base.openLoadObject(path) catch |err| switch (err) {
@ -1512,6 +1523,7 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void {
};
},
.load_archive => |path| if (comp.bin_file) |base| {
comp.remaining_prelink_tasks -= 1;
const prog_node = comp.work_queue_progress_node.start("Linker Parse Archive", 0);
defer prog_node.end();
base.openLoadArchive(path, null) catch |err| switch (err) {
@ -1520,6 +1532,7 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void {
};
},
.load_dso => |path| if (comp.bin_file) |base| {
comp.remaining_prelink_tasks -= 1;
const prog_node = comp.work_queue_progress_node.start("Linker Parse Shared Library", 0);
defer prog_node.end();
base.openLoadDso(path, .{
@ -1531,6 +1544,7 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void {
};
},
.load_input => |input| if (comp.bin_file) |base| {
comp.remaining_prelink_tasks -= 1;
const prog_node = comp.work_queue_progress_node.start("Linker Parse Input", 0);
defer prog_node.end();
base.loadInput(input) catch |err| switch (err) {
@ -1545,26 +1559,38 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void {
};
},
.codegen_nav => |nav_index| {
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
defer pt.deactivate();
pt.linkerUpdateNav(nav_index) catch |err| switch (err) {
error.OutOfMemory => diags.setAllocFailure(),
};
if (comp.remaining_prelink_tasks == 0) {
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
defer pt.deactivate();
pt.linkerUpdateNav(nav_index) catch |err| switch (err) {
error.OutOfMemory => diags.setAllocFailure(),
};
} else {
comp.link_task_queue_postponed.appendAssumeCapacity(task);
}
},
.codegen_func => |func| {
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
defer pt.deactivate();
// This call takes ownership of `func.air`.
pt.linkerUpdateFunc(func.func, func.air) catch |err| switch (err) {
error.OutOfMemory => diags.setAllocFailure(),
};
if (comp.remaining_prelink_tasks == 0) {
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
defer pt.deactivate();
// This call takes ownership of `func.air`.
pt.linkerUpdateFunc(func.func, func.air) catch |err| switch (err) {
error.OutOfMemory => diags.setAllocFailure(),
};
} else {
comp.link_task_queue_postponed.appendAssumeCapacity(task);
}
},
.codegen_type => |ty| {
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
defer pt.deactivate();
pt.linkerUpdateContainerType(ty) catch |err| switch (err) {
error.OutOfMemory => diags.setAllocFailure(),
};
if (comp.remaining_prelink_tasks == 0) {
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
defer pt.deactivate();
pt.linkerUpdateContainerType(ty) catch |err| switch (err) {
error.OutOfMemory => diags.setAllocFailure(),
};
} else {
comp.link_task_queue_postponed.appendAssumeCapacity(task);
}
},
.update_line_number => |ti| {
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
@ -1593,7 +1619,7 @@ pub fn spawnLld(
const exit_code = try lldMain(arena, argv, false);
if (exit_code == 0) return;
if (comp.clang_passthrough_mode) std.process.exit(exit_code);
return error.LLDReportedFailure;
return error.LinkFailure;
}
var stderr: []u8 = &.{};
@ -1670,17 +1696,16 @@ pub fn spawnLld(
return error.UnableToSpawnSelf;
};
const diags = &comp.link_diags;
switch (term) {
.Exited => |code| if (code != 0) {
if (comp.clang_passthrough_mode) std.process.exit(code);
const diags = &comp.link_diags;
diags.lockAndParseLldStderr(argv[1], stderr);
return error.LLDReportedFailure;
return error.LinkFailure;
},
else => {
if (comp.clang_passthrough_mode) std.process.abort();
log.err("{s} terminated with stderr:\n{s}", .{ argv[0], stderr });
return error.LLDCrashed;
return diags.fail("{s} terminated with stderr:\n{s}", .{ argv[0], stderr });
},
}
@ -2239,7 +2264,7 @@ fn resolvePathInputLib(
try wip_errors.init(gpa);
defer wip_errors.deinit();
try diags.addMessagesToBundle(&wip_errors);
try diags.addMessagesToBundle(&wip_errors, null);
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);

View File

@ -175,21 +175,13 @@ pub fn deinit(self: *C) void {
self.lazy_code_buf.deinit(gpa);
}
pub fn freeDecl(self: *C, decl_index: InternPool.DeclIndex) void {
const gpa = self.base.comp.gpa;
if (self.decl_table.fetchSwapRemove(decl_index)) |kv| {
var decl_block = kv.value;
decl_block.deinit(gpa);
}
}
pub fn updateFunc(
self: *C,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) !void {
) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
@ -313,7 +305,7 @@ fn updateUav(self: *C, pt: Zcu.PerThread, i: usize) !void {
};
}
pub fn updateNav(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
pub fn updateNav(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) link.File.UpdateNavError!void {
const tracy = trace(@src());
defer tracy.end();
@ -390,7 +382,7 @@ pub fn updateLineNumber(self: *C, pt: Zcu.PerThread, ti_id: InternPool.TrackedIn
_ = ti_id;
}
pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
return self.flushModule(arena, tid, prog_node);
}
@ -409,7 +401,7 @@ fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) {
return defines;
}
pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
_ = arena; // Has the same lifetime as the call to Compilation.update.
const tracy = trace(@src());
@ -419,6 +411,7 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
defer sub_prog_node.end();
const comp = self.base.comp;
const diags = &comp.link_diags;
const gpa = comp.gpa;
const zcu = self.base.comp.zcu.?;
const ip = &zcu.intern_pool;
@ -476,7 +469,7 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
defer export_names.deinit(gpa);
try export_names.ensureTotalCapacity(gpa, @intCast(zcu.single_exports.count()));
for (zcu.single_exports.values()) |export_index| {
export_names.putAssumeCapacity(zcu.all_exports.items[export_index].opts.name, {});
export_names.putAssumeCapacity(export_index.ptr(zcu).opts.name, {});
}
for (zcu.multi_exports.values()) |info| {
try export_names.ensureUnusedCapacity(gpa, info.len);
@ -554,8 +547,10 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
}, self.getString(av_block.code));
const file = self.base.file.?;
try file.setEndPos(f.file_size);
try file.pwritevAll(f.all_buffers.items, 0);
file.setEndPos(f.file_size) catch |err| return diags.fail("failed to allocate file: {s}", .{@errorName(err)});
file.pwritevAll(f.all_buffers.items, 0) catch |err| return diags.fail("failed to write to '{'}': {s}", .{
self.base.emit, @errorName(err),
});
}
const Flush = struct {
@ -845,7 +840,7 @@ pub fn updateExports(
self: *C,
pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
export_indices: []const Zcu.Export.Index,
) !void {
const zcu = pt.zcu;
const gpa = zcu.gpa;

View File

@ -408,7 +408,7 @@ pub fn createEmpty(
max_file_offset = header.pointer_to_raw_data + header.size_of_raw_data;
}
}
try coff.base.file.?.pwriteAll(&[_]u8{0}, max_file_offset);
try coff.pwriteAll(&[_]u8{0}, max_file_offset);
}
return coff;
@ -858,7 +858,7 @@ fn writeAtom(coff: *Coff, atom_index: Atom.Index, code: []u8) !void {
}
coff.resolveRelocs(atom_index, relocs.items, code, coff.image_base);
try coff.base.file.?.pwriteAll(code, file_offset);
try coff.pwriteAll(code, file_offset);
// Now we can mark the relocs as resolved.
while (relocs.popOrNull()) |reloc| {
@ -891,7 +891,7 @@ fn writeOffsetTableEntry(coff: *Coff, index: usize) !void {
const sect_id = coff.got_section_index.?;
if (coff.got_table_count_dirty) {
const needed_size = @as(u32, @intCast(coff.got_table.entries.items.len * coff.ptr_width.size()));
const needed_size: u32 = @intCast(coff.got_table.entries.items.len * coff.ptr_width.size());
try coff.growSection(sect_id, needed_size);
coff.got_table_count_dirty = false;
}
@ -908,7 +908,7 @@ fn writeOffsetTableEntry(coff: *Coff, index: usize) !void {
switch (coff.ptr_width) {
.p32 => {
var buf: [4]u8 = undefined;
mem.writeInt(u32, &buf, @as(u32, @intCast(entry_value + coff.image_base)), .little);
mem.writeInt(u32, &buf, @intCast(entry_value + coff.image_base), .little);
try coff.base.file.?.pwriteAll(&buf, file_offset);
},
.p64 => {
@ -1093,7 +1093,13 @@ fn freeAtom(coff: *Coff, atom_index: Atom.Index) void {
coff.getAtomPtr(atom_index).sym_index = 0;
}
pub fn updateFunc(coff: *Coff, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(
coff: *Coff,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .coff) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@ -1106,34 +1112,41 @@ pub fn updateFunc(coff: *Coff, pt: Zcu.PerThread, func_index: InternPool.Index,
const zcu = pt.zcu;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
const nav_index = func.owner_nav;
const atom_index = try coff.getOrCreateAtomForNav(func.owner_nav);
const atom_index = try coff.getOrCreateAtomForNav(nav_index);
coff.freeRelocations(atom_index);
coff.navs.getPtr(func.owner_nav).?.section = coff.text_section_index.?;
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
const res = try codegen.generateFunction(
codegen.generateFunction(
&coff.base,
pt,
zcu.navSrcLoc(func.owner_nav),
zcu.navSrcLoc(nav_index),
func_index,
air,
liveness,
&code_buffer,
.none,
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(zcu.gpa, func.owner_nav, em);
return;
) catch |err| switch (err) {
error.CodegenFail => return error.CodegenFail,
error.OutOfMemory => return error.OutOfMemory,
error.Overflow => |e| {
try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create(
gpa,
zcu.navSrcLoc(nav_index),
"unable to codegen: {s}",
.{@errorName(e)},
));
try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .func = func_index }));
return error.CodegenFail;
},
};
try coff.updateNavCode(pt, func.owner_nav, code, .FUNCTION);
try coff.updateNavCode(pt, nav_index, code_buffer.items, .FUNCTION);
// Exports will be updated by `Zcu.processExports` after the update.
}
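The rewritten `updateFunc` shows the narrowing idiom used throughout this diff: call into codegen with its wide inferred error set, re-raise the errors the signature admits, and fold everything else into a reported `CodegenFail`. Reduced sketch with hypothetical names:

fn generate() error{ OutOfMemory, CodegenFail, Overflow }!void {
    return error.Overflow; // stand-in for a failure outside the caller's set
}

fn updateThing() error{ OutOfMemory, CodegenFail }!void {
    generate() catch |err| switch (err) {
        // Errors already in the declared set pass through unchanged.
        error.OutOfMemory => return error.OutOfMemory,
        error.CodegenFail => return error.CodegenFail,
        // Everything else is recorded (the real code creates a Zcu.ErrorMsg
        // and marks the failure retryable) and becomes CodegenFail.
        error.Overflow => return error.CodegenFail,
    };
}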
@ -1154,24 +1167,21 @@ fn lowerConst(
) !LowerConstResult {
const gpa = coff.base.comp.gpa;
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
const atom_index = try coff.createAtom();
const sym = coff.getAtom(atom_index).getSymbolPtr(coff);
try coff.setSymbolName(sym, name);
sym.section_number = @as(coff_util.SectionNumber, @enumFromInt(sect_id + 1));
const res = try codegen.generateSymbol(&coff.base, pt, src_loc, val, &code_buffer, .{
try codegen.generateSymbol(&coff.base, pt, src_loc, val, &code_buffer, .{
.atom_index = coff.getAtom(atom_index).getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| return .{ .fail = em },
};
const code = code_buffer.items;
const atom = coff.getAtomPtr(atom_index);
atom.size = @as(u32, @intCast(code.len));
atom.size = @intCast(code.len);
atom.getSymbolPtr(coff).value = try coff.allocateAtom(
atom_index,
atom.size,
@ -1227,10 +1237,10 @@ pub fn updateNav(
coff.navs.getPtr(nav_index).?.section = coff.getNavOutputSection(nav_index);
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
const res = try codegen.generateSymbol(
try codegen.generateSymbol(
&coff.base,
pt,
zcu.navSrcLoc(nav_index),
@ -1238,15 +1248,8 @@ pub fn updateNav(
&code_buffer,
.{ .atom_index = atom.getSymbolIndex().? },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(gpa, nav_index, em);
return;
},
};
try coff.updateNavCode(pt, nav_index, code, .NULL);
try coff.updateNavCode(pt, nav_index, code_buffer.items, .NULL);
}
// Exports will be updated by `Zcu.processExports` after the update.
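`updateNav` also picks up the commit-wide buffer migration from managed `std.ArrayList(u8)` to `std.ArrayListUnmanaged(u8)`, moving the allocator from the container to each call site. Minimal before/after sketch:

const std = @import("std");

fn fillManaged(gpa: std.mem.Allocator) !void {
    var buf = std.ArrayList(u8).init(gpa); // old: allocator stored in the list
    defer buf.deinit();
    try buf.appendSlice("code");
}

fn fillUnmanaged(gpa: std.mem.Allocator) !void {
    var buf: std.ArrayListUnmanaged(u8) = .empty; // new: no stored allocator
    defer buf.deinit(gpa);
    try buf.appendSlice(gpa, "code");
}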
@ -1260,11 +1263,12 @@ fn updateLazySymbolAtom(
section_index: u16,
) !void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const comp = coff.base.comp;
const gpa = comp.gpa;
var required_alignment: InternPool.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
const name = try allocPrint(gpa, "__lazy_{s}_{}", .{
@tagName(sym.kind),
@ -1276,7 +1280,7 @@ fn updateLazySymbolAtom(
const local_sym_index = atom.getSymbolIndex().?;
const src = Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded;
const res = try codegen.generateLazySymbol(
try codegen.generateLazySymbol(
&coff.base,
pt,
src,
@ -1286,13 +1290,7 @@ fn updateLazySymbolAtom(
.none,
.{ .atom_index = local_sym_index },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
log.err("{s}", .{em.msg});
return error.CodegenFail;
},
};
const code = code_buffer.items;
const code_len: u32 = @intCast(code.len);
const symbol = atom.getSymbolPtr(coff);
@ -1387,7 +1385,7 @@ fn updateNavCode(
nav_index: InternPool.Nav.Index,
code: []u8,
complex_type: coff_util.ComplexType,
) !void {
) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
@ -1405,18 +1403,21 @@ fn updateNavCode(
const atom = coff.getAtom(atom_index);
const sym_index = atom.getSymbolIndex().?;
const sect_index = nav_metadata.section;
const code_len = @as(u32, @intCast(code.len));
const code_len: u32 = @intCast(code.len);
if (atom.size != 0) {
const sym = atom.getSymbolPtr(coff);
try coff.setSymbolName(sym, nav.fqn.toSlice(ip));
sym.section_number = @as(coff_util.SectionNumber, @enumFromInt(sect_index + 1));
sym.section_number = @enumFromInt(sect_index + 1);
sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
const capacity = atom.capacity(coff);
const need_realloc = code.len > capacity or !required_alignment.check(sym.value);
if (need_realloc) {
const vaddr = try coff.growAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0));
const vaddr = coff.growAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return coff.base.cgFail(nav_index, "failed to grow atom: {s}", .{@errorName(e)}),
};
log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), sym.value, vaddr });
log.debug(" (required alignment 0x{x}", .{required_alignment});
@ -1424,7 +1425,10 @@ fn updateNavCode(
sym.value = vaddr;
log.debug(" (updating GOT entry)", .{});
const got_entry_index = coff.got_table.lookup.get(.{ .sym_index = sym_index }).?;
try coff.writeOffsetTableEntry(got_entry_index);
coff.writeOffsetTableEntry(got_entry_index) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return coff.base.cgFail(nav_index, "failed to write offset table entry: {s}", .{@errorName(e)}),
};
coff.markRelocsDirtyByTarget(.{ .sym_index = sym_index });
}
} else if (code_len < atom.size) {
@ -1434,26 +1438,34 @@ fn updateNavCode(
} else {
const sym = atom.getSymbolPtr(coff);
try coff.setSymbolName(sym, nav.fqn.toSlice(ip));
sym.section_number = @as(coff_util.SectionNumber, @enumFromInt(sect_index + 1));
sym.section_number = @enumFromInt(sect_index + 1);
sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
const vaddr = try coff.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0));
const vaddr = coff.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0)) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return coff.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(e)}),
};
errdefer coff.freeAtom(atom_index);
log.debug("allocated atom for {} at 0x{x}", .{ nav.fqn.fmt(ip), vaddr });
coff.getAtomPtr(atom_index).size = code_len;
sym.value = vaddr;
try coff.addGotEntry(.{ .sym_index = sym_index });
coff.addGotEntry(.{ .sym_index = sym_index }) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return coff.base.cgFail(nav_index, "failed to add GOT entry: {s}", .{@errorName(e)}),
};
}
try coff.writeAtom(atom_index, code);
coff.writeAtom(atom_index, code) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return coff.base.cgFail(nav_index, "failed to write atom: {s}", .{@errorName(e)}),
};
}
pub fn freeNav(coff: *Coff, nav_index: InternPool.Nav.Index) void {
if (coff.llvm_object) |llvm_object| return llvm_object.freeNav(nav_index);
const gpa = coff.base.comp.gpa;
log.debug("freeDecl 0x{x}", .{nav_index});
if (coff.decls.fetchOrderedRemove(nav_index)) |const_kv| {
var kv = const_kv;
@ -1466,7 +1478,7 @@ pub fn updateExports(
coff: *Coff,
pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
export_indices: []const Zcu.Export.Index,
) link.File.UpdateExportsError!void {
if (build_options.skip_non_native and builtin.object_format != .coff) {
@panic("Attempted to compile for object format that was disabled by build configuration");
@ -1481,7 +1493,7 @@ pub fn updateExports(
// Even in the case of LLVM, we need to notice certain exported symbols in order to
// detect the default subsystem.
for (export_indices) |export_idx| {
const exp = zcu.all_exports.items[export_idx];
const exp = export_idx.ptr(zcu);
const exported_nav_index = switch (exp.exported) {
.nav => |nav| nav,
.uav => continue,
@ -1524,7 +1536,7 @@ pub fn updateExports(
break :blk coff.navs.getPtr(nav).?;
},
.uav => |uav| coff.uavs.getPtr(uav) orelse blk: {
const first_exp = zcu.all_exports.items[export_indices[0]];
const first_exp = export_indices[0].ptr(zcu);
const res = try coff.lowerUav(pt, uav, .none, first_exp.src);
switch (res) {
.mcv => {},
@ -1543,7 +1555,7 @@ pub fn updateExports(
const atom = coff.getAtom(atom_index);
for (export_indices) |export_idx| {
const exp = zcu.all_exports.items[export_idx];
const exp = export_idx.ptr(zcu);
log.debug("adding new export '{}'", .{exp.opts.name.fmt(&zcu.intern_pool)});
if (exp.opts.section.toSlice(&zcu.intern_pool)) |section_name| {
@ -1671,12 +1683,17 @@ fn resolveGlobalSymbol(coff: *Coff, current: SymbolWithLoc) !void {
pub fn flush(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const comp = coff.base.comp;
const use_lld = build_options.have_llvm and comp.config.use_lld;
const diags = &comp.link_diags;
if (use_lld) {
return coff.linkWithLLD(arena, tid, prog_node);
return coff.linkWithLLD(arena, tid, prog_node) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("failed to link with LLD: {s}", .{@errorName(e)}),
};
}
switch (comp.config.output_mode) {
.Exe, .Obj => return coff.flushModule(arena, tid, prog_node),
.Lib => return error.TODOImplementWritingLibFiles,
.Lib => return diags.fail("writing lib files not yet implemented for COFF", .{}),
}
}
@ -2207,12 +2224,16 @@ fn findLib(arena: Allocator, name: []const u8, lib_directories: []const Director
return null;
}
pub fn flushModule(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flushModule(
coff: *Coff,
arena: Allocator,
tid: Zcu.PerThread.Id,
prog_node: std.Progress.Node,
) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();
const comp = coff.base.comp;
const gpa = comp.gpa;
const diags = &comp.link_diags;
if (coff.llvm_object) |llvm_object| {
@ -2223,8 +2244,22 @@ pub fn flushModule(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
const sub_prog_node = prog_node.start("COFF Flush", 0);
defer sub_prog_node.end();
return flushModuleInner(coff, arena, tid) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("COFF flush failed: {s}", .{@errorName(e)}),
};
}
fn flushModuleInner(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id) !void {
_ = arena;
const comp = coff.base.comp;
const gpa = comp.gpa;
const diags = &comp.link_diags;
const pt: Zcu.PerThread = .activate(
comp.zcu orelse return error.LinkingWithoutZigSourceUnimplemented,
comp.zcu orelse return diags.fail("linking without zig source is not yet implemented", .{}),
tid,
);
defer pt.deactivate();
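`flushModule` now delegates to `flushModuleInner`, the same outer/inner split applied to several entry points in this diff: the public function pins the error set at the API boundary, while the inner function keeps an inferred set. Shape sketch:

pub fn flushOuter() error{ OutOfMemory, LinkFailure }!void {
    return flushInner() catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.LinkFailure => return error.LinkFailure,
        // The real code reports the message through diags.fail here.
        else => return error.LinkFailure,
    };
}

fn flushInner() error{ OutOfMemory, LinkFailure, InputOutput }!void {
    // In the commit this has an inferred error set and is free to use any
    // fallible helper; the explicit set here keeps the sketch compilable.
}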
@ -2232,24 +2267,18 @@ pub fn flushModule(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
if (coff.lazy_syms.getPtr(.anyerror_type)) |metadata| {
// Most lazy symbols can be updated on first use, but
// anyerror needs to wait for everything to be flushed.
if (metadata.text_state != .unused) coff.updateLazySymbolAtom(
if (metadata.text_state != .unused) try coff.updateLazySymbolAtom(
pt,
.{ .kind = .code, .ty = .anyerror_type },
metadata.text_atom,
coff.text_section_index.?,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
else => |e| e,
};
if (metadata.rdata_state != .unused) coff.updateLazySymbolAtom(
);
if (metadata.rdata_state != .unused) try coff.updateLazySymbolAtom(
pt,
.{ .kind = .const_data, .ty = .anyerror_type },
metadata.rdata_atom,
coff.rdata_section_index.?,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
else => |e| e,
};
);
}
for (coff.lazy_syms.values()) |*metadata| {
if (metadata.text_state != .unused) metadata.text_state = .flushed;
@ -2594,7 +2623,7 @@ fn writeBaseRelocations(coff: *Coff) !void {
const needed_size = @as(u32, @intCast(buffer.items.len));
try coff.growSection(coff.reloc_section_index.?, needed_size);
try coff.base.file.?.pwriteAll(buffer.items, header.pointer_to_raw_data);
try coff.pwriteAll(buffer.items, header.pointer_to_raw_data);
coff.data_directories[@intFromEnum(coff_util.DirectoryEntry.BASERELOC)] = .{
.virtual_address = header.virtual_address,
@ -2727,7 +2756,7 @@ fn writeImportTables(coff: *Coff) !void {
assert(dll_names_offset == needed_size);
try coff.base.file.?.pwriteAll(buffer.items, header.pointer_to_raw_data);
try coff.pwriteAll(buffer.items, header.pointer_to_raw_data);
coff.data_directories[@intFromEnum(coff_util.DirectoryEntry.IMPORT)] = .{
.virtual_address = header.virtual_address + iat_size,
@ -2744,17 +2773,19 @@ fn writeImportTables(coff: *Coff) !void {
fn writeStrtab(coff: *Coff) !void {
if (coff.strtab_offset == null) return;
const comp = coff.base.comp;
const gpa = comp.gpa;
const diags = &comp.link_diags;
const allocated_size = coff.allocatedSize(coff.strtab_offset.?);
const needed_size = @as(u32, @intCast(coff.strtab.buffer.items.len));
const needed_size: u32 = @intCast(coff.strtab.buffer.items.len);
if (needed_size > allocated_size) {
coff.strtab_offset = null;
coff.strtab_offset = @as(u32, @intCast(coff.findFreeSpace(needed_size, @alignOf(u32))));
coff.strtab_offset = @intCast(coff.findFreeSpace(needed_size, @alignOf(u32)));
}
log.debug("writing strtab from 0x{x} to 0x{x}", .{ coff.strtab_offset.?, coff.strtab_offset.? + needed_size });
const gpa = coff.base.comp.gpa;
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
try buffer.ensureTotalCapacityPrecise(needed_size);
@ -2763,17 +2794,19 @@ fn writeStrtab(coff: *Coff) !void {
// we write the length of the strtab to a temporary buffer that goes to file.
mem.writeInt(u32, buffer.items[0..4], @as(u32, @intCast(coff.strtab.buffer.items.len)), .little);
try coff.base.file.?.pwriteAll(buffer.items, coff.strtab_offset.?);
coff.pwriteAll(buffer.items, coff.strtab_offset.?) catch |err| {
return diags.fail("failed to write: {s}", .{@errorName(err)});
};
}
fn writeSectionHeaders(coff: *Coff) !void {
const offset = coff.getSectionHeadersOffset();
try coff.base.file.?.pwriteAll(mem.sliceAsBytes(coff.sections.items(.header)), offset);
try coff.pwriteAll(mem.sliceAsBytes(coff.sections.items(.header)), offset);
}
fn writeDataDirectoriesHeaders(coff: *Coff) !void {
const offset = coff.getDataDirectoryHeadersOffset();
try coff.base.file.?.pwriteAll(mem.sliceAsBytes(&coff.data_directories), offset);
try coff.pwriteAll(mem.sliceAsBytes(&coff.data_directories), offset);
}
fn writeHeader(coff: *Coff) !void {
@ -2913,7 +2946,7 @@ fn writeHeader(coff: *Coff) !void {
},
}
try coff.base.file.?.pwriteAll(buffer.items, 0);
try coff.pwriteAll(buffer.items, 0);
}
pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
@ -3710,6 +3743,14 @@ const ImportTable = struct {
const ImportIndex = u32;
};
fn pwriteAll(coff: *Coff, bytes: []const u8, offset: u64) error{LinkFailure}!void {
const comp = coff.base.comp;
const diags = &comp.link_diags;
coff.base.file.?.pwriteAll(bytes, offset) catch |err| {
return diags.fail("failed to write: {s}", .{@errorName(err)});
};
}
const Coff = @This();
const std = @import("std");
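With this helper, the many call sites above shrink from `try coff.base.file.?.pwriteAll(buffer.items, offset);` to `try coff.pwriteAll(buffer.items, offset);`, and every write failure is reported uniformly through `diags.fail` as `error.LinkFailure` instead of leaking a raw filesystem error set.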

View File

@ -21,7 +21,6 @@ debug_rnglists: DebugRngLists,
debug_str: StringSection,
pub const UpdateError = error{
CodegenFail,
ReinterpretDeclRef,
Unimplemented,
OutOfMemory,
@ -451,7 +450,6 @@ pub const Section = struct {
const zo = elf_file.zigObjectPtr().?;
const atom = zo.symbol(sec.index).atom(elf_file).?;
if (atom.prevAtom(elf_file)) |_| {
// FIXME:JK trimming/shrinking has to be reworked on ZigObject/Elf level
atom.value += len;
} else {
const shdr = &elf_file.sections.items(.shdr)[atom.output_section_index];
@ -600,12 +598,13 @@ const Unit = struct {
fn move(unit: *Unit, sec: *Section, dwarf: *Dwarf, new_off: u32) UpdateError!void {
if (unit.off == new_off) return;
if (try dwarf.getFile().?.copyRangeAll(
const n = try dwarf.getFile().?.copyRangeAll(
sec.off(dwarf) + unit.off,
dwarf.getFile().?,
sec.off(dwarf) + new_off,
unit.len,
) != unit.len) return error.InputOutput;
);
if (n != unit.len) return error.InputOutput;
unit.off = new_off;
}
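The `move` change is purely structural: `copyRangeAll` returns how many bytes were actually copied, which can be short, so the count now gets a name before the comparison. Call shape, assuming two open `std.fs.File` handles:

const std = @import("std");

fn moveBytes(src: std.fs.File, src_off: u64, dst: std.fs.File, dst_off: u64, len: u64) !void {
    // copyRangeAll retries short copies internally but may still return
    // fewer bytes than requested (e.g. past end of file).
    const n = try src.copyRangeAll(src_off, dst, dst_off, len);
    if (n != len) return error.InputOutput;
}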
@ -1891,19 +1890,16 @@ pub const WipNav = struct {
const bytes = if (ty.hasRuntimeBits(wip_nav.pt.zcu)) ty.abiSize(wip_nav.pt.zcu) else 0;
try uleb128(diw, bytes);
if (bytes == 0) return;
var dim = wip_nav.debug_info.toManaged(wip_nav.dwarf.gpa);
defer wip_nav.debug_info = dim.moveToUnmanaged();
switch (try codegen.generateSymbol(
const old_len = wip_nav.debug_info.items.len;
try codegen.generateSymbol(
wip_nav.dwarf.bin_file,
wip_nav.pt,
src_loc,
val,
&dim,
&wip_nav.debug_info,
.{ .debug_output = .{ .dwarf = wip_nav } },
)) {
.ok => assert(dim.items.len == wip_nav.debug_info.items.len + bytes),
.fail => unreachable,
}
);
assert(old_len + bytes == wip_nav.debug_info.items.len);
}
const AbbrevCodeForForm = struct {
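`generateSymbol` no longer returns a result union; it appends into the caller's unmanaged buffer and reports failure as an error, so the caller checks growth directly, as the assert above does. Reduced shape with a stand-in generator:

const std = @import("std");

fn generateSymbol(buf: *std.ArrayListUnmanaged(u8), gpa: std.mem.Allocator) !void {
    try buf.appendSlice(gpa, &.{ 0xde, 0xad }); // stand-in for emitted bytes
}

fn emit(gpa: std.mem.Allocator, expected_bytes: usize) !void {
    var buf: std.ArrayListUnmanaged(u8) = .empty;
    defer buf.deinit(gpa);
    const old_len = buf.items.len;
    try generateSymbol(&buf, gpa);
    // Mirrors the assert in the hunk above: the value contributed exactly
    // its ABI size to the buffer.
    std.debug.assert(buf.items.len == old_len + expected_bytes);
}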
@ -2278,7 +2274,7 @@ pub fn deinit(dwarf: *Dwarf) void {
dwarf.* = undefined;
}
fn getUnit(dwarf: *Dwarf, mod: *Module) UpdateError!Unit.Index {
fn getUnit(dwarf: *Dwarf, mod: *Module) !Unit.Index {
const mod_gop = try dwarf.mods.getOrPut(dwarf.gpa, mod);
const unit: Unit.Index = @enumFromInt(mod_gop.index);
if (!mod_gop.found_existing) {
@ -2338,7 +2334,24 @@ fn getModInfo(dwarf: *Dwarf, unit: Unit.Index) *ModInfo {
return &dwarf.mods.values()[@intFromEnum(unit)];
}
pub fn initWipNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index, sym_index: u32) UpdateError!?WipNav {
pub fn initWipNav(
dwarf: *Dwarf,
pt: Zcu.PerThread,
nav_index: InternPool.Nav.Index,
sym_index: u32,
) error{ OutOfMemory, CodegenFail }!?WipNav {
return initWipNavInner(dwarf, pt, nav_index, sym_index) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return pt.zcu.codegenFail(nav_index, "failed to init dwarf: {s}", .{@errorName(e)}),
};
}
fn initWipNavInner(
dwarf: *Dwarf,
pt: Zcu.PerThread,
nav_index: InternPool.Nav.Index,
sym_index: u32,
) !?WipNav {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
@ -2667,7 +2680,14 @@ pub fn finishWipNav(
try wip_nav.updateLazy(zcu.navSrcLoc(nav_index));
}
pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) UpdateError!void {
pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error{ OutOfMemory, CodegenFail }!void {
return updateComptimeNavInner(dwarf, pt, nav_index) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return pt.zcu.codegenFail(nav_index, "failed to update dwarf: {s}", .{@errorName(e)}),
};
}
fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const nav_src_loc = zcu.navSrcLoc(nav_index);

View File

@ -795,9 +795,15 @@ pub fn loadInput(self: *Elf, input: link.Input) !void {
}
pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const use_lld = build_options.have_llvm and self.base.comp.config.use_lld;
const comp = self.base.comp;
const use_lld = build_options.have_llvm and comp.config.use_lld;
const diags = &comp.link_diags;
if (use_lld) {
return self.linkWithLLD(arena, tid, prog_node);
return self.linkWithLLD(arena, tid, prog_node) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("failed to link with LLD: {s}", .{@errorName(e)}),
};
}
try self.flushModule(arena, tid, prog_node);
}
@ -807,7 +813,6 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
defer tracy.end();
const comp = self.base.comp;
const gpa = comp.gpa;
const diags = &comp.link_diags;
if (self.llvm_object) |llvm_object| {
@ -821,6 +826,18 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
const sub_prog_node = prog_node.start("ELF Flush", 0);
defer sub_prog_node.end();
return flushModuleInner(self, arena, tid) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("ELF flush failed: {s}", .{@errorName(e)}),
};
}
fn flushModuleInner(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id) !void {
const comp = self.base.comp;
const gpa = comp.gpa;
const diags = &comp.link_diags;
const module_obj_path: ?Path = if (self.base.zcu_object_sub_path) |path| .{
.root_dir = self.base.emit.root_dir,
.sub_path = if (fs.path.dirname(self.base.emit.sub_path)) |dirname|
@ -842,12 +859,12 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
.Exe => {},
}
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
// If we haven't already, create a linker-generated input file comprising
// only linker-defined synthetic symbols such as `_DYNAMIC`, etc.
if (self.linker_defined_index == null) {
const index = @as(File.Index, @intCast(try self.files.addOne(gpa)));
const index: File.Index = @intCast(try self.files.addOne(gpa));
self.files.set(index, .{ .linker_defined = .{ .index = index } });
self.linker_defined_index = index;
const object = self.linkerDefinedPtr().?;
@ -878,7 +895,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
}
self.checkDuplicates() catch |err| switch (err) {
error.HasDuplicates => return error.FlushFailure,
error.HasDuplicates => return error.LinkFailure,
else => |e| return e,
};
@ -956,14 +973,14 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
return error.LinkFailure;
},
else => |e| return e,
};
try self.base.file.?.pwriteAll(code, file_offset);
try self.pwriteAll(code, file_offset);
}
if (has_reloc_errors) return error.FlushFailure;
if (has_reloc_errors) return error.LinkFailure;
}
try self.writePhdrTable();
@ -972,10 +989,10 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
try self.writeMergeSections();
self.writeSyntheticSections() catch |err| switch (err) {
error.RelocFailure => return error.FlushFailure,
error.RelocFailure => return error.LinkFailure,
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
return error.LinkFailure;
},
else => |e| return e,
};
@ -989,7 +1006,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
try self.writeElfHeader();
}
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
}
fn dumpArgvInit(self: *Elf, arena: Allocator) !void {
@ -1389,7 +1406,7 @@ fn scanRelocs(self: *Elf) !void {
error.RelaxFailure => unreachable,
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
return error.LinkFailure;
},
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
@ -1400,7 +1417,7 @@ fn scanRelocs(self: *Elf) !void {
error.RelaxFailure => unreachable,
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
return error.LinkFailure;
},
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
@ -1409,7 +1426,7 @@ fn scanRelocs(self: *Elf) !void {
try self.reportUndefinedSymbols(&undefs);
if (has_reloc_errors) return error.FlushFailure;
if (has_reloc_errors) return error.LinkFailure;
if (self.zigObjectPtr()) |zo| {
try zo.asFile().createSymbolIndirection(self);
@ -2117,7 +2134,7 @@ pub fn writeShdrTable(self: *Elf) !void {
mem.byteSwapAllFields(elf.Elf32_Shdr, shdr);
}
}
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
try self.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
},
.p64 => {
const buf = try gpa.alloc(elf.Elf64_Shdr, self.sections.items(.shdr).len);
@ -2130,7 +2147,7 @@ pub fn writeShdrTable(self: *Elf) !void {
mem.byteSwapAllFields(elf.Elf64_Shdr, shdr);
}
}
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
try self.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
},
}
}
@ -2157,7 +2174,7 @@ fn writePhdrTable(self: *Elf) !void {
mem.byteSwapAllFields(elf.Elf32_Phdr, phdr);
}
}
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset);
try self.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset);
},
.p64 => {
const buf = try gpa.alloc(elf.Elf64_Phdr, self.phdrs.items.len);
@ -2169,7 +2186,7 @@ fn writePhdrTable(self: *Elf) !void {
mem.byteSwapAllFields(elf.Elf64_Phdr, phdr);
}
}
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset);
try self.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset);
},
}
}
@ -2319,7 +2336,7 @@ pub fn writeElfHeader(self: *Elf) !void {
assert(index == e_ehsize);
try self.base.file.?.pwriteAll(hdr_buf[0..index], 0);
try self.pwriteAll(hdr_buf[0..index], 0);
}
pub fn freeNav(self: *Elf, nav: InternPool.Nav.Index) void {
@ -2327,7 +2344,13 @@ pub fn freeNav(self: *Elf, nav: InternPool.Nav.Index) void {
return self.zigObjectPtr().?.freeNav(self, nav);
}
pub fn updateFunc(self: *Elf, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(
self: *Elf,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@ -2351,19 +2374,32 @@ pub fn updateContainerType(
self: *Elf,
pt: Zcu.PerThread,
ty: InternPool.Index,
) link.File.UpdateNavError!void {
) link.File.UpdateContainerTypeError!void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (self.llvm_object) |_| return;
return self.zigObjectPtr().?.updateContainerType(pt, ty);
const zcu = pt.zcu;
const gpa = zcu.gpa;
return self.zigObjectPtr().?.updateContainerType(pt, ty) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| {
try zcu.failed_types.putNoClobber(gpa, ty, try Zcu.ErrorMsg.create(
gpa,
zcu.typeSrcLoc(ty),
"failed to update container type: {s}",
.{@errorName(e)},
));
return error.TypeFailureReported;
},
};
}
pub fn updateExports(
self: *Elf,
pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
export_indices: []const Zcu.Export.Index,
) link.File.UpdateExportsError!void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
@ -2441,7 +2477,7 @@ pub fn resolveMergeSections(self: *Elf) !void {
};
}
if (has_errors) return error.FlushFailure;
if (has_errors) return error.LinkFailure;
for (self.objects.items) |index| {
const object = self.file(index).?.object;
@ -2491,8 +2527,8 @@ pub fn writeMergeSections(self: *Elf) !void {
for (self.merge_sections.items) |*msec| {
const shdr = self.sections.items(.shdr)[msec.output_section_index];
const fileoff = math.cast(usize, msec.value + shdr.sh_offset) orelse return error.Overflow;
const size = math.cast(usize, msec.size) orelse return error.Overflow;
const fileoff = try self.cast(usize, msec.value + shdr.sh_offset);
const size = try self.cast(usize, msec.size);
try buffer.ensureTotalCapacity(size);
buffer.appendNTimesAssumeCapacity(0, size);
@ -2500,11 +2536,11 @@ pub fn writeMergeSections(self: *Elf) !void {
const msub = msec.mergeSubsection(msub_index);
assert(msub.alive);
const string = msub.getString(self);
const off = math.cast(usize, msub.value) orelse return error.Overflow;
const off = try self.cast(usize, msub.value);
@memcpy(buffer.items[off..][0..string.len], string);
}
try self.base.file.?.pwriteAll(buffer.items, fileoff);
try self.pwriteAll(buffer.items, fileoff);
buffer.clearRetainingCapacity();
}
}
@ -3121,9 +3157,6 @@ pub fn sortShdrs(
fileLookup(files, ref.file, zig_object_ptr).?.atom(ref.index).?.output_section_index = atom_list.output_section_index;
}
if (shdr.sh_type == elf.SHT_RELA) {
// FIXME:JK we should spin up .symtab potentially earlier, or set all non-dynamic RELA sections
// to point at symtab
// shdr.sh_link = backlinks[shdr.sh_link];
shdr.sh_link = section_indexes.symtab.?;
shdr.sh_info = backlinks[shdr.sh_info];
}
@ -3211,7 +3244,7 @@ fn updateSectionSizes(self: *Elf) !void {
atom_list.dirty = false;
}
// FIXME:JK this will hopefully not be needed once we create a link from Atom/Thunk to AtomList.
// This might not be needed if there was a link from Atom/Thunk to AtomList.
for (self.thunks.items) |*th| {
th.value += slice.items(.atom_list_2)[th.output_section_index].value;
}
@ -3297,7 +3330,6 @@ fn updateSectionSizes(self: *Elf) !void {
self.updateShStrtabSize();
}
// FIXME:JK this is very much obsolete, remove!
pub fn updateShStrtabSize(self: *Elf) void {
if (self.section_indexes.shstrtab) |index| {
self.sections.items(.shdr)[index].sh_size = self.shstrtab.items.len;
@ -3362,7 +3394,7 @@ fn allocatePhdrTable(self: *Elf) error{OutOfMemory}!void {
// TODO verify `getMaxNumberOfPhdrs()` is accurate and convert this into a no-op
var err = try diags.addErrorWithNotes(1);
try err.addMsg("fatal linker error: not enough space reserved for EHDR and PHDR table", .{});
try err.addNote("required 0x{x}, available 0x{x}", .{ needed_size, available_space });
err.addNote("required 0x{x}, available 0x{x}", .{ needed_size, available_space });
}
phdr_table_load.p_filesz = needed_size + ehsize;
@ -3658,7 +3690,7 @@ fn writeAtoms(self: *Elf) !void {
atom_list.write(&buffer, &undefs, self) catch |err| switch (err) {
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
return error.LinkFailure;
},
error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
else => |e| return e,
@ -3666,7 +3698,7 @@ fn writeAtoms(self: *Elf) !void {
}
try self.reportUndefinedSymbols(&undefs);
if (has_reloc_errors) return error.FlushFailure;
if (has_reloc_errors) return error.LinkFailure;
if (self.requiresThunks()) {
for (self.thunks.items) |th| {
@ -3676,7 +3708,7 @@ fn writeAtoms(self: *Elf) !void {
const offset = @as(u64, @intCast(th.value)) + shdr.sh_offset;
try th.write(self, buffer.writer());
assert(buffer.items.len == thunk_size);
try self.base.file.?.pwriteAll(buffer.items, offset);
try self.pwriteAll(buffer.items, offset);
buffer.clearRetainingCapacity();
}
}
@ -3784,12 +3816,12 @@ fn writeSyntheticSections(self: *Elf) !void {
const contents = buffer[0 .. interp.len + 1];
const shdr = slice.items(.shdr)[shndx];
assert(shdr.sh_size == contents.len);
try self.base.file.?.pwriteAll(contents, shdr.sh_offset);
try self.pwriteAll(contents, shdr.sh_offset);
}
if (self.section_indexes.hash) |shndx| {
const shdr = slice.items(.shdr)[shndx];
try self.base.file.?.pwriteAll(self.hash.buffer.items, shdr.sh_offset);
try self.pwriteAll(self.hash.buffer.items, shdr.sh_offset);
}
if (self.section_indexes.gnu_hash) |shndx| {
@ -3797,12 +3829,12 @@ fn writeSyntheticSections(self: *Elf) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.gnu_hash.size());
defer buffer.deinit();
try self.gnu_hash.write(self, buffer.writer());
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.versym) |shndx| {
const shdr = slice.items(.shdr)[shndx];
try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.versym.items), shdr.sh_offset);
try self.pwriteAll(mem.sliceAsBytes(self.versym.items), shdr.sh_offset);
}
if (self.section_indexes.verneed) |shndx| {
@ -3810,7 +3842,7 @@ fn writeSyntheticSections(self: *Elf) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.verneed.size());
defer buffer.deinit();
try self.verneed.write(buffer.writer());
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.dynamic) |shndx| {
@ -3818,7 +3850,7 @@ fn writeSyntheticSections(self: *Elf) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.dynamic.size(self));
defer buffer.deinit();
try self.dynamic.write(self, buffer.writer());
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.dynsymtab) |shndx| {
@ -3826,12 +3858,12 @@ fn writeSyntheticSections(self: *Elf) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.dynsym.size());
defer buffer.deinit();
try self.dynsym.write(self, buffer.writer());
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.dynstrtab) |shndx| {
const shdr = slice.items(.shdr)[shndx];
try self.base.file.?.pwriteAll(self.dynstrtab.items, shdr.sh_offset);
try self.pwriteAll(self.dynstrtab.items, shdr.sh_offset);
}
if (self.section_indexes.eh_frame) |shndx| {
@ -3841,21 +3873,21 @@ fn writeSyntheticSections(self: *Elf) !void {
break :existing_size sym.atom(self).?.size;
};
const shdr = slice.items(.shdr)[shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
const sh_size = try self.cast(usize, shdr.sh_size);
var buffer = try std.ArrayList(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
defer buffer.deinit();
try eh_frame.writeEhFrame(self, buffer.writer());
assert(buffer.items.len == sh_size - existing_size);
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset + existing_size);
try self.pwriteAll(buffer.items, shdr.sh_offset + existing_size);
}
if (self.section_indexes.eh_frame_hdr) |shndx| {
const shdr = slice.items(.shdr)[shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
const sh_size = try self.cast(usize, shdr.sh_size);
var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
defer buffer.deinit();
try eh_frame.writeEhFrameHdr(self, buffer.writer());
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.got) |index| {
@ -3863,7 +3895,7 @@ fn writeSyntheticSections(self: *Elf) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.got.size(self));
defer buffer.deinit();
try self.got.write(self, buffer.writer());
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.rela_dyn) |shndx| {
@ -3871,7 +3903,7 @@ fn writeSyntheticSections(self: *Elf) !void {
try self.got.addRela(self);
try self.copy_rel.addRela(self);
self.sortRelaDyn();
try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.rela_dyn.items), shdr.sh_offset);
try self.pwriteAll(mem.sliceAsBytes(self.rela_dyn.items), shdr.sh_offset);
}
if (self.section_indexes.plt) |shndx| {
@ -3879,7 +3911,7 @@ fn writeSyntheticSections(self: *Elf) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.plt.size(self));
defer buffer.deinit();
try self.plt.write(self, buffer.writer());
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.got_plt) |shndx| {
@ -3887,7 +3919,7 @@ fn writeSyntheticSections(self: *Elf) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.got_plt.size(self));
defer buffer.deinit();
try self.got_plt.write(self, buffer.writer());
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.plt_got) |shndx| {
@ -3895,25 +3927,24 @@ fn writeSyntheticSections(self: *Elf) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.plt_got.size(self));
defer buffer.deinit();
try self.plt_got.write(self, buffer.writer());
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.rela_plt) |shndx| {
const shdr = slice.items(.shdr)[shndx];
try self.plt.addRela(self);
try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.rela_plt.items), shdr.sh_offset);
try self.pwriteAll(mem.sliceAsBytes(self.rela_plt.items), shdr.sh_offset);
}
try self.writeSymtab();
try self.writeShStrtab();
}
// FIXME:JK again, why is this needed?
pub fn writeShStrtab(self: *Elf) !void {
if (self.section_indexes.shstrtab) |index| {
const shdr = self.sections.items(.shdr)[index];
log.debug("writing .shstrtab from 0x{x} to 0x{x}", .{ shdr.sh_offset, shdr.sh_offset + shdr.sh_size });
try self.base.file.?.pwriteAll(self.shstrtab.items, shdr.sh_offset);
try self.pwriteAll(self.shstrtab.items, shdr.sh_offset);
}
}
@ -3928,7 +3959,7 @@ pub fn writeSymtab(self: *Elf) !void {
.p32 => @sizeOf(elf.Elf32_Sym),
.p64 => @sizeOf(elf.Elf64_Sym),
};
const nsyms = math.cast(usize, @divExact(symtab_shdr.sh_size, sym_size)) orelse return error.Overflow;
const nsyms = try self.cast(usize, @divExact(symtab_shdr.sh_size, sym_size));
log.debug("writing {d} symbols in .symtab from 0x{x} to 0x{x}", .{
nsyms,
@ -3941,7 +3972,7 @@ pub fn writeSymtab(self: *Elf) !void {
});
try self.symtab.resize(gpa, nsyms);
const needed_strtab_size = math.cast(usize, strtab_shdr.sh_size - 1) orelse return error.Overflow;
const needed_strtab_size = try self.cast(usize, strtab_shdr.sh_size - 1);
// TODO we could resize instead and in ZigObject/Object always access as slice
self.strtab.clearRetainingCapacity();
self.strtab.appendAssumeCapacity(0);
@ -4010,17 +4041,17 @@ pub fn writeSymtab(self: *Elf) !void {
};
if (foreign_endian) mem.byteSwapAllFields(elf.Elf32_Sym, out);
}
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), symtab_shdr.sh_offset);
try self.pwriteAll(mem.sliceAsBytes(buf), symtab_shdr.sh_offset);
},
.p64 => {
if (foreign_endian) {
for (self.symtab.items) |*sym| mem.byteSwapAllFields(elf.Elf64_Sym, sym);
}
try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.symtab.items), symtab_shdr.sh_offset);
try self.pwriteAll(mem.sliceAsBytes(self.symtab.items), symtab_shdr.sh_offset);
},
}
try self.base.file.?.pwriteAll(self.strtab.items, strtab_shdr.sh_offset);
try self.pwriteAll(self.strtab.items, strtab_shdr.sh_offset);
}
/// Always 4 or 8 depending on whether this is 32-bit ELF or 64-bit ELF.
@ -4514,12 +4545,12 @@ fn reportUndefinedSymbols(self: *Elf, undefs: anytype) !void {
for (refs.items[0..nrefs]) |ref| {
const atom_ptr = self.atom(ref).?;
const file_ptr = atom_ptr.file(self).?;
try err.addNote("referenced by {s}:{s}", .{ file_ptr.fmtPath(), atom_ptr.name(self) });
err.addNote("referenced by {s}:{s}", .{ file_ptr.fmtPath(), atom_ptr.name(self) });
}
if (refs.items.len > max_notes) {
const remaining = refs.items.len - max_notes;
try err.addNote("referenced {d} more times", .{remaining});
err.addNote("referenced {d} more times", .{remaining});
}
}
}
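These hunks drop the `try` from `addNote`. As used across the diff, `addErrorWithNotes(n)` is the only fallible step, reserving room for the message and `n` notes up front, after which each `addNote` cannot fail. The resulting call shape (identifiers from the surrounding hunks, not a self-contained program):

var err = try diags.addErrorWithNotes(2);
try err.addMsg("duplicate symbol definition: {s}", .{sym_name});
err.addNote("defined by {}", .{first_path});
err.addNote("defined by {}", .{second_path});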
@ -4536,17 +4567,17 @@ fn reportDuplicates(self: *Elf, dupes: anytype) error{ HasDuplicates, OutOfMemor
var err = try diags.addErrorWithNotes(nnotes + 1);
try err.addMsg("duplicate symbol definition: {s}", .{sym.name(self)});
try err.addNote("defined by {}", .{sym.file(self).?.fmtPath()});
err.addNote("defined by {}", .{sym.file(self).?.fmtPath()});
var inote: usize = 0;
while (inote < @min(notes.items.len, max_notes)) : (inote += 1) {
const file_ptr = self.file(notes.items[inote]).?;
try err.addNote("defined by {}", .{file_ptr.fmtPath()});
err.addNote("defined by {}", .{file_ptr.fmtPath()});
}
if (notes.items.len > max_notes) {
const remaining = notes.items.len - max_notes;
try err.addNote("defined {d} more times", .{remaining});
err.addNote("defined {d} more times", .{remaining});
}
}
@ -4570,7 +4601,7 @@ pub fn addFileError(
const diags = &self.base.comp.link_diags;
var err = try diags.addErrorWithNotes(1);
try err.addMsg(format, args);
try err.addNote("while parsing {}", .{self.file(file_index).?.fmtPath()});
err.addNote("while parsing {}", .{self.file(file_index).?.fmtPath()});
}
pub fn failFile(
@ -5184,6 +5215,30 @@ pub fn stringTableLookup(strtab: []const u8, off: u32) [:0]const u8 {
return slice[0..mem.indexOfScalar(u8, slice, 0).? :0];
}
pub fn pwriteAll(elf_file: *Elf, bytes: []const u8, offset: u64) error{LinkFailure}!void {
const comp = elf_file.base.comp;
const diags = &comp.link_diags;
elf_file.base.file.?.pwriteAll(bytes, offset) catch |err| {
return diags.fail("failed to write: {s}", .{@errorName(err)});
};
}
pub fn setEndPos(elf_file: *Elf, length: u64) error{LinkFailure}!void {
const comp = elf_file.base.comp;
const diags = &comp.link_diags;
elf_file.base.file.?.setEndPos(length) catch |err| {
return diags.fail("failed to set file end pos: {s}", .{@errorName(err)});
};
}
pub fn cast(elf_file: *Elf, comptime T: type, x: anytype) error{LinkFailure}!T {
return std.math.cast(T, x) orelse {
const comp = elf_file.base.comp;
const diags = &comp.link_diags;
return diags.fail("encountered {d}, overflowing {d}-bit value", .{ x, @bitSizeOf(T) });
};
}
const std = @import("std");
const build_options = @import("build_options");
const builtin = @import("builtin");
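These three helpers account for most of the mechanical churn in this file: writes, truncation, and overflow checks all report through link diagnostics as `error.LinkFailure`. For instance, the repeated `math.cast(usize, x) orelse return error.Overflow` becomes `try self.cast(usize, x)`. A stripped-down model of the cast helper, minus the diagnostics plumbing:

const std = @import("std");

fn castOrFail(comptime T: type, x: anytype) error{LinkFailure}!T {
    return std.math.cast(T, x) orelse {
        // The real helper formats a diagnostic with the offending value here.
        return error.LinkFailure;
    };
}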

View File

@ -523,7 +523,7 @@ fn reportUnhandledRelocError(self: Atom, rel: elf.Elf64_Rela, elf_file: *Elf) Re
relocation.fmtRelocType(rel.r_type(), elf_file.getTarget().cpu.arch),
rel.r_offset,
});
try err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
return error.RelocFailure;
}
@ -539,7 +539,7 @@ fn reportTextRelocError(
rel.r_offset,
symbol.name(elf_file),
});
try err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
return error.RelocFailure;
}
@ -555,8 +555,8 @@ fn reportPicError(
rel.r_offset,
symbol.name(elf_file),
});
try err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
try err.addNote("recompile with -fPIC", .{});
err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
err.addNote("recompile with -fPIC", .{});
return error.RelocFailure;
}
@ -572,8 +572,8 @@ fn reportNoPicError(
rel.r_offset,
symbol.name(elf_file),
});
try err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
try err.addNote("recompile with -fno-PIC", .{});
err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
err.addNote("recompile with -fno-PIC", .{});
return error.RelocFailure;
}
@ -1187,7 +1187,7 @@ const x86_64 = struct {
x86_64.relaxGotPcTlsDesc(code[r_offset - 3 ..]) catch {
var err = try diags.addErrorWithNotes(1);
try err.addMsg("could not relax {s}", .{@tagName(r_type)});
try err.addNote("in {}:{s} at offset 0x{x}", .{
err.addNote("in {}:{s} at offset 0x{x}", .{
atom.file(elf_file).?.fmtPath(),
atom.name(elf_file),
rel.r_offset,
@ -1332,7 +1332,7 @@ const x86_64 = struct {
relocation.fmtRelocType(rels[0].r_type(), .x86_64),
relocation.fmtRelocType(rels[1].r_type(), .x86_64),
});
try err.addNote("in {}:{s} at offset 0x{x}", .{
err.addNote("in {}:{s} at offset 0x{x}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
rels[0].r_offset,
@ -1388,7 +1388,7 @@ const x86_64 = struct {
relocation.fmtRelocType(rels[0].r_type(), .x86_64),
relocation.fmtRelocType(rels[1].r_type(), .x86_64),
});
try err.addNote("in {}:{s} at offset 0x{x}", .{
err.addNote("in {}:{s} at offset 0x{x}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
rels[0].r_offset,
@ -1485,7 +1485,7 @@ const x86_64 = struct {
relocation.fmtRelocType(rels[0].r_type(), .x86_64),
relocation.fmtRelocType(rels[1].r_type(), .x86_64),
});
try err.addNote("in {}:{s} at offset 0x{x}", .{
err.addNote("in {}:{s} at offset 0x{x}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
rels[0].r_offset,
@ -1672,7 +1672,7 @@ const aarch64 = struct {
// TODO: relax
var err = try diags.addErrorWithNotes(1);
try err.addMsg("TODO: relax ADR_GOT_PAGE", .{});
try err.addNote("in {}:{s} at offset 0x{x}", .{
err.addNote("in {}:{s} at offset 0x{x}", .{
atom.file(elf_file).?.fmtPath(),
atom.name(elf_file),
r_offset,
@ -1959,7 +1959,7 @@ const riscv = struct {
// TODO: implement searching forward
var err = try diags.addErrorWithNotes(1);
try err.addMsg("TODO: find HI20 paired reloc scanning forward", .{});
try err.addNote("in {}:{s} at offset 0x{x}", .{
err.addNote("in {}:{s} at offset 0x{x}", .{
atom.file(elf_file).?.fmtPath(),
atom.name(elf_file),
rel.r_offset,

View File

@ -58,7 +58,7 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void {
if (expand_section) last_atom_ref.* = list.lastAtom(elf_file).ref();
shdr.sh_addralign = @max(shdr.sh_addralign, list.alignment.toByteUnits().?);
// FIXME:JK this currently ignores Thunks as valid chunks.
// This currently ignores Thunks as valid chunks.
{
var idx: usize = 0;
while (idx < list.atoms.keys().len) : (idx += 1) {
@ -78,7 +78,8 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void {
placement_atom.next_atom_ref = list.firstAtom(elf_file).ref();
}
// FIXME:JK if we had a link from Atom to parent AtomList we would not need to update Atom's value or osec index
// If we had a link from Atom to parent AtomList we would not need to
// update Atom's value or osec index.
for (list.atoms.keys()) |ref| {
const atom_ptr = elf_file.atom(ref).?;
atom_ptr.output_section_index = list.output_section_index;

View File

@ -797,7 +797,7 @@ pub fn initInputMergeSections(self: *Object, elf_file: *Elf) !void {
if (!isNull(data[end .. end + sh_entsize])) {
var err = try diags.addErrorWithNotes(1);
try err.addMsg("string not null terminated", .{});
try err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
return error.LinkFailure;
}
end += sh_entsize;
@ -812,7 +812,7 @@ pub fn initInputMergeSections(self: *Object, elf_file: *Elf) !void {
if (shdr.sh_size % sh_entsize != 0) {
var err = try diags.addErrorWithNotes(1);
try err.addMsg("size not a multiple of sh_entsize", .{});
try err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
return error.LinkFailure;
}
@ -889,8 +889,8 @@ pub fn resolveMergeSubsections(self: *Object, elf_file: *Elf) error{
const res = imsec.findSubsection(@intCast(esym.st_value)) orelse {
var err = try diags.addErrorWithNotes(2);
try err.addMsg("invalid symbol value: {x}", .{esym.st_value});
try err.addNote("for symbol {s}", .{sym.name(elf_file)});
try err.addNote("in {}", .{self.fmtPath()});
err.addNote("for symbol {s}", .{sym.name(elf_file)});
err.addNote("in {}", .{self.fmtPath()});
return error.LinkFailure;
};
@ -915,7 +915,7 @@ pub fn resolveMergeSubsections(self: *Object, elf_file: *Elf) error{
const res = imsec.findSubsection(@intCast(@as(i64, @intCast(esym.st_value)) + rel.r_addend)) orelse {
var err = try diags.addErrorWithNotes(1);
try err.addMsg("invalid relocation at offset 0x{x}", .{rel.r_offset});
try err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
return error.LinkFailure;
};

View File

@ -278,8 +278,8 @@ pub fn flush(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void {
.{ .kind = .code, .ty = .anyerror_type },
metadata.text_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
else => |e| e,
error.CodegenFail => error.LinkFailure,
else => |e| return e,
};
if (metadata.rodata_state != .unused) self.updateLazySymbol(
elf_file,
@ -287,8 +287,8 @@ pub fn flush(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void {
.{ .kind = .const_data, .ty = .anyerror_type },
metadata.rodata_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
else => |e| e,
error.CodegenFail => error.LinkFailure,
else => |e| return e,
};
}
for (self.lazy_syms.values()) |*metadata| {
@ -933,6 +933,7 @@ pub fn getNavVAddr(
const this_sym = self.symbol(this_sym_index);
const vaddr = this_sym.address(.{}, elf_file);
switch (reloc_info.parent) {
.none => unreachable,
.atom_index => |atom_index| {
const parent_atom = self.symbol(atom_index).atom(elf_file).?;
const r_type = relocation.encode(.abs, elf_file.getTarget().cpu.arch);
@ -965,6 +966,7 @@ pub fn getUavVAddr(
const sym = self.symbol(sym_index);
const vaddr = sym.address(.{}, elf_file);
switch (reloc_info.parent) {
.none => unreachable,
.atom_index => |atom_index| {
const parent_atom = self.symbol(atom_index).atom(elf_file).?;
const r_type = relocation.encode(.abs, elf_file.getTarget().cpu.arch);
@ -1261,7 +1263,7 @@ fn updateNavCode(
shdr_index: u32,
code: []const u8,
stt_bits: u8,
) !void {
) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
@ -1298,7 +1300,9 @@ fn updateNavCode(
const capacity = atom_ptr.capacity(elf_file);
const need_realloc = code.len > capacity or !required_alignment.check(@intCast(atom_ptr.value));
if (need_realloc) {
try self.allocateAtom(atom_ptr, true, elf_file);
self.allocateAtom(atom_ptr, true, elf_file) catch |err|
return elf_file.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(err)});
log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom_ptr.value });
if (old_vaddr != atom_ptr.value) {
sym.value = 0;
@ -1308,7 +1312,9 @@ fn updateNavCode(
// TODO shrink section size
}
} else {
try self.allocateAtom(atom_ptr, true, elf_file);
self.allocateAtom(atom_ptr, true, elf_file) catch |err|
return elf_file.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(err)});
errdefer self.freeNavMetadata(elf_file, sym_index);
sym.value = 0;
esym.st_value = 0;
@ -1333,14 +1339,15 @@ fn updateNavCode(
else => |errno| log.warn("process_vm_writev failure: {s}", .{@tagName(errno)}),
}
},
else => return error.HotSwapUnavailableOnHostOperatingSystem,
else => return elf_file.base.cgFail(nav_index, "ELF hot swap unavailable on host operating system '{s}'", .{@tagName(builtin.os.tag)}),
}
}
const shdr = elf_file.sections.items(.shdr)[shdr_index];
if (shdr.sh_type != elf.SHT_NOBITS) {
const file_offset = atom_ptr.offset(elf_file);
try elf_file.base.file.?.pwriteAll(code, file_offset);
elf_file.base.file.?.pwriteAll(code, file_offset) catch |err|
return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)});
log.debug("writing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), file_offset, file_offset + code.len });
}
}
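`updateNavCode` now attributes linker-side failures to the declaration being updated through `base.cgFail`, instead of letting raw allocation or filesystem errors escape the per-nav update. A minimal stand-in for that helper (hypothetical body; the real one records a `Zcu.ErrorMsg` keyed on the nav):

const std = @import("std");

fn cgFail(nav_index: u32, comptime fmt: []const u8, args: anytype) error{CodegenFail} {
    // This sketch just logs; the compiler stores the message for later
    // error reporting against nav_index.
    std.log.err("nav {d}: codegen failure", .{nav_index});
    std.log.err(fmt, args);
    return error.CodegenFail;
}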
@ -1353,7 +1360,7 @@ fn updateTlv(
sym_index: Symbol.Index,
shndx: u32,
code: []const u8,
) !void {
) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
@ -1383,7 +1390,8 @@ fn updateTlv(
const gop = try self.tls_variables.getOrPut(gpa, atom_ptr.atom_index);
assert(!gop.found_existing); // TODO incremental updates
try self.allocateAtom(atom_ptr, true, elf_file);
self.allocateAtom(atom_ptr, true, elf_file) catch |err|
return elf_file.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(err)});
sym.value = 0;
esym.st_value = 0;
@ -1392,7 +1400,8 @@ fn updateTlv(
const shdr = elf_file.sections.items(.shdr)[shndx];
if (shdr.sh_type != elf.SHT_NOBITS) {
const file_offset = atom_ptr.offset(elf_file);
try elf_file.base.file.?.pwriteAll(code, file_offset);
elf_file.base.file.?.pwriteAll(code, file_offset) catch |err|
return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)});
log.debug("writing TLV {s} from 0x{x} to 0x{x}", .{
atom_ptr.name(elf_file),
file_offset,
@ -1408,7 +1417,7 @@ pub fn updateFunc(
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) !void {
) link.File.UpdateNavError!void {
const tracy = trace(@src());
defer tracy.end();
@ -1422,13 +1431,13 @@ pub fn updateFunc(
const sym_index = try self.getOrCreateMetadataForNav(zcu, func.owner_nav);
self.atom(self.symbol(sym_index).ref.index).?.freeRelocs(self);
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, func.owner_nav, sym_index) else null;
defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();
const res = try codegen.generateFunction(
try codegen.generateFunction(
&elf_file.base,
pt,
zcu.navSrcLoc(func.owner_nav),
@ -1438,14 +1447,7 @@ pub fn updateFunc(
&code_buffer,
if (debug_wip_nav) |*dn| .{ .dwarf = dn } else .none,
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(gpa, func.owner_nav, em);
return;
},
};
const code = code_buffer.items;
const shndx = try self.getNavShdrIndex(elf_file, zcu, func.owner_nav, sym_index, code);
log.debug("setting shdr({x},{s}) for {}", .{
@ -1463,7 +1465,8 @@ pub fn updateFunc(
break :blk .{ atom_ptr.value, atom_ptr.alignment };
};
if (debug_wip_nav) |*wip_nav| try self.dwarf.?.finishWipNavFunc(pt, func.owner_nav, code.len, wip_nav);
if (debug_wip_nav) |*wip_nav| self.dwarf.?.finishWipNavFunc(pt, func.owner_nav, code.len, wip_nav) catch |err|
return elf_file.base.cgFail(func.owner_nav, "failed to finish dwarf function: {s}", .{@errorName(err)});
// Exports will be updated by `Zcu.processExports` after the update.
@ -1511,7 +1514,8 @@ pub fn updateFunc(
target_sym.flags.has_trampoline = true;
}
const target_sym = self.symbol(sym_index);
try writeTrampoline(self.symbol(target_sym.extra(elf_file).trampoline).*, target_sym.*, elf_file);
writeTrampoline(self.symbol(target_sym.extra(elf_file).trampoline).*, target_sym.*, elf_file) catch |err|
return elf_file.base.cgFail(func.owner_nav, "failed to write trampoline: {s}", .{@errorName(err)});
}
}
@ -1547,7 +1551,11 @@ pub fn updateNav(
if (self.dwarf) |*dwarf| dwarf: {
var debug_wip_nav = try dwarf.initWipNav(pt, nav_index, sym_index) orelse break :dwarf;
defer debug_wip_nav.deinit();
try dwarf.finishWipNav(pt, nav_index, &debug_wip_nav);
dwarf.finishWipNav(pt, nav_index, &debug_wip_nav) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.Overflow => return error.Overflow,
else => |e| return elf_file.base.cgFail(nav_index, "failed to finish dwarf nav: {s}", .{@errorName(e)}),
};
}
return;
},
@ -1558,13 +1566,13 @@ pub fn updateNav(
const sym_index = try self.getOrCreateMetadataForNav(zcu, nav_index);
self.symbol(sym_index).atom(elf_file).?.freeRelocs(self);
var code_buffer = std.ArrayList(u8).init(zcu.gpa);
defer code_buffer.deinit();
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(zcu.gpa);
var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, nav_index, sym_index) else null;
defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();
const res = try codegen.generateSymbol(
try codegen.generateSymbol(
&elf_file.base,
pt,
zcu.navSrcLoc(nav_index),
@ -1572,14 +1580,7 @@ pub fn updateNav(
&code_buffer,
.{ .atom_index = sym_index },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(zcu.gpa, nav_index, em);
return;
},
};
const code = code_buffer.items;
const shndx = try self.getNavShdrIndex(elf_file, zcu, nav_index, sym_index, code);
log.debug("setting shdr({x},{s}) for {}", .{
@ -1592,7 +1593,11 @@ pub fn updateNav(
else
try self.updateNavCode(elf_file, pt, nav_index, sym_index, shndx, code, elf.STT_OBJECT);
if (debug_wip_nav) |*wip_nav| try self.dwarf.?.finishWipNav(pt, nav_index, wip_nav);
if (debug_wip_nav) |*wip_nav| self.dwarf.?.finishWipNav(pt, nav_index, wip_nav) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.Overflow => return error.Overflow,
else => |e| return elf_file.base.cgFail(nav_index, "failed to finish dwarf nav: {s}", .{@errorName(e)}),
};
} else if (self.dwarf) |*dwarf| try dwarf.updateComptimeNav(pt, nav_index);
// Exports will be updated by `Zcu.processExports` after the update.
@ -1602,7 +1607,7 @@ pub fn updateContainerType(
self: *ZigObject,
pt: Zcu.PerThread,
ty: InternPool.Index,
) link.File.UpdateNavError!void {
) !void {
const tracy = trace(@src());
defer tracy.end();
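`updateContainerType` loosening its signature to an inferred `!void` (while `updateFunc`/`updateNav` elsewhere in this commit gain explicit `link.File.UpdateNavError` sets) trades between the two Zig error-set styles. A small illustrative sketch:

const std = @import("std");

const UpdateError = error{ OutOfMemory, CodegenFail };

// Explicit set: callers can exhaustively switch on exactly these errors.
fn updateExplicit(fail: bool) UpdateError!void {
    if (fail) return error.CodegenFail;
}

// Inferred set (`!void`): the compiler computes the set from the body,
// which tracks refactors automatically but is opaque at the signature.
fn updateInferred(fail: bool) !void {
    try updateExplicit(fail);
}

test "error sets" {
    try std.testing.expectError(error.CodegenFail, updateInferred(true));
}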
@ -1620,8 +1625,8 @@ fn updateLazySymbol(
const gpa = zcu.gpa;
var required_alignment: InternPool.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
const name_str_index = blk: {
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{
@ -1633,7 +1638,7 @@ fn updateLazySymbol(
};
const src = Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded;
const res = try codegen.generateLazySymbol(
try codegen.generateLazySymbol(
&elf_file.base,
pt,
src,
@ -1643,13 +1648,7 @@ fn updateLazySymbol(
.none,
.{ .atom_index = symbol_index },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
log.err("{s}", .{em.msg});
return error.CodegenFail;
},
};
const code = code_buffer.items;
const output_section_index = switch (sym.kind) {
.code => if (self.text_index) |sym_index|
@ -1696,7 +1695,7 @@ fn updateLazySymbol(
local_sym.value = 0;
local_esym.st_value = 0;
try elf_file.base.file.?.pwriteAll(code, atom_ptr.offset(elf_file));
try elf_file.pwriteAll(code, atom_ptr.offset(elf_file));
}
const LowerConstResult = union(enum) {
@ -1716,13 +1715,13 @@ fn lowerConst(
) !LowerConstResult {
const gpa = pt.zcu.gpa;
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
const name_off = try self.addString(gpa, name);
const sym_index = try self.newSymbolWithAtom(gpa, name_off);
const res = try codegen.generateSymbol(
try codegen.generateSymbol(
&elf_file.base,
pt,
src_loc,
@ -1730,10 +1729,7 @@ fn lowerConst(
&code_buffer,
.{ .atom_index = sym_index },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| return .{ .fail = em },
};
const code = code_buffer.items;
const local_sym = self.symbol(sym_index);
const local_esym = &self.symtab.items(.elf_sym)[local_sym.esym_index];
@ -1748,7 +1744,7 @@ fn lowerConst(
try self.allocateAtom(atom_ptr, true, elf_file);
errdefer self.freeNavMetadata(elf_file, sym_index);
try elf_file.base.file.?.pwriteAll(code, atom_ptr.offset(elf_file));
try elf_file.pwriteAll(code, atom_ptr.offset(elf_file));
return .{ .ok = sym_index };
}
@ -1758,7 +1754,7 @@ pub fn updateExports(
elf_file: *Elf,
pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
export_indices: []const Zcu.Export.Index,
) link.File.UpdateExportsError!void {
const tracy = trace(@src());
defer tracy.end();
@ -1771,7 +1767,7 @@ pub fn updateExports(
break :blk self.navs.getPtr(nav).?;
},
.uav => |uav| self.uavs.getPtr(uav) orelse blk: {
const first_exp = zcu.all_exports.items[export_indices[0]];
const first_exp = export_indices[0].ptr(zcu);
const res = try self.lowerUav(elf_file, pt, uav, .none, first_exp.src);
switch (res) {
.mcv => {},
@ -1792,7 +1788,7 @@ pub fn updateExports(
const esym_shndx = self.symtab.items(.shndx)[esym_index];
for (export_indices) |export_idx| {
const exp = zcu.all_exports.items[export_idx];
const exp = export_idx.ptr(zcu);
if (exp.opts.section.unwrap()) |section_name| {
if (!section_name.eqlSlice(".text", &zcu.intern_pool)) {
try zcu.failed_exports.ensureUnusedCapacity(zcu.gpa, 1);
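Switching `export_indices` from `[]const u32` to `[]const Zcu.Export.Index`, with `export_idx.ptr(zcu)` replacing direct indexing into `all_exports`, trades raw indices for a typed handle. A self-contained sketch of that pattern under illustrative names (`Export`, `Store`):

const std = @import("std");

const Export = struct {
    name: []const u8,

    // A distinct index type: it cannot be mixed up with other u32
    // indices, and `ptr` is the only way to reach the backing array.
    const Index = enum(u32) {
        _,
        fn ptr(i: Index, store: *const Store) *const Export {
            return &store.all_exports.items[@intFromEnum(i)];
        }
    };
};

const Store = struct {
    all_exports: std.ArrayListUnmanaged(Export) = .empty,
};

test "typed index round-trip" {
    const gpa = std.testing.allocator;
    var store: Store = .{};
    defer store.all_exports.deinit(gpa);
    try store.all_exports.append(gpa, .{ .name = "main" });
    const idx: Export.Index = @enumFromInt(0);
    try std.testing.expectEqualStrings("main", idx.ptr(&store).name);
}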
@ -1849,7 +1845,13 @@ pub fn updateExports(
pub fn updateLineNumber(self: *ZigObject, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void {
if (self.dwarf) |*dwarf| {
try dwarf.updateLineNumber(pt.zcu, ti_id);
const comp = dwarf.bin_file.comp;
const diags = &comp.link_diags;
dwarf.updateLineNumber(pt.zcu, ti_id) catch |err| switch (err) {
error.Overflow => return error.Overflow,
error.OutOfMemory => return error.OutOfMemory,
else => |e| return diags.fail("failed to update dwarf line numbers: {s}", .{@errorName(e)}),
};
}
}
@ -1935,8 +1937,8 @@ pub fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, requires_padding: bool, e
const shdr = &slice.items(.shdr)[atom_ptr.output_section_index];
const last_atom_ref = &slice.items(.last_atom)[atom_ptr.output_section_index];
// FIXME:JK this only works if this atom is the only atom in the output section
// In every other case, we need to redo the prev/next links
// This only works if this atom is the only atom in the output section. In
// every other case, we need to redo the prev/next links.
if (last_atom_ref.eql(atom_ptr.ref())) last_atom_ref.* = .{};
const alloc_res = try elf_file.allocateChunk(.{

View File

@ -611,7 +611,7 @@ fn reportInvalidReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela) !void {
relocation.fmtRelocType(rel.r_type(), elf_file.getTarget().cpu.arch),
rel.r_offset,
});
try err.addNote("in {}:.eh_frame", .{elf_file.file(rec.file_index).?.fmtPath()});
err.addNote("in {}:.eh_frame", .{elf_file.file(rec.file_index).?.fmtPath()});
return error.RelocFailure;
}

View File

@ -1,8 +1,8 @@
pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) link.File.FlushError!void {
pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void {
const gpa = comp.gpa;
const diags = &comp.link_diags;
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
// First, we flush the relocatable object file generated with our backends.
if (elf_file.zigObjectPtr()) |zig_object| {
@ -127,13 +127,13 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) link.File.FlushError!v
try elf_file.base.file.?.setEndPos(total_size);
try elf_file.base.file.?.pwriteAll(buffer.items, 0);
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
}
pub fn flushObject(elf_file: *Elf, comp: *Compilation) link.File.FlushError!void {
pub fn flushObject(elf_file: *Elf, comp: *Compilation) !void {
const diags = &comp.link_diags;
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
// Now, we are ready to resolve the symbols across all input files.
// We will first resolve the files in the ZigObject, next in the parsed
@ -179,7 +179,7 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation) link.File.FlushError!void
try elf_file.writeShdrTable();
try elf_file.writeElfHeader();
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
}
fn claimUnresolved(elf_file: *Elf) void {

View File

@ -434,7 +434,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
// libc/libSystem dep
self.resolveLibSystem(arena, comp, &system_libs) catch |err| switch (err) {
error.MissingLibSystem => {}, // already reported
else => |e| return e, // TODO: convert into an error
else => |e| return diags.fail("failed to resolve libSystem: {s}", .{@errorName(e)}),
};
for (comp.link_inputs) |link_input| switch (link_input) {
@ -481,7 +481,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
}
};
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
{
const index = @as(File.Index, @intCast(try self.files.addOne(gpa)));
@ -494,14 +494,17 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
try self.resolveSymbols();
try self.convertTentativeDefsAndResolveSpecialSymbols();
try self.dedupLiterals();
self.dedupLiterals() catch |err| switch (err) {
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("failed to deduplicate literals: {s}", .{@errorName(e)}),
};
if (self.base.gc_sections) {
try dead_strip.gcAtoms(self);
}
self.checkDuplicates() catch |err| switch (err) {
error.HasDuplicates => return error.FlushFailure,
error.HasDuplicates => return error.LinkFailure,
else => |e| return diags.fail("failed to check for duplicate symbol definitions: {s}", .{@errorName(e)}),
};
@ -516,7 +519,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
self.claimUnresolved();
self.scanRelocs() catch |err| switch (err) {
error.HasUndefinedSymbols => return error.FlushFailure,
error.HasUndefinedSymbols => return error.LinkFailure,
else => |e| return diags.fail("failed to scan relocations: {s}", .{@errorName(e)}),
};
@ -529,7 +532,10 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
try self.generateUnwindInfo();
try self.initSegments();
try self.allocateSections();
self.allocateSections() catch |err| switch (err) {
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("failed to allocate sections: {s}", .{@errorName(e)}),
};
self.allocateSegments();
self.allocateSyntheticSymbols();
@ -543,7 +549,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
if (self.getZigObject()) |zo| {
zo.resolveRelocs(self) catch |err| switch (err) {
error.ResolveFailed => return error.FlushFailure,
error.ResolveFailed => return error.LinkFailure,
else => |e| return e,
};
}
@ -551,7 +557,11 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
try self.writeSectionsToFile();
try self.allocateLinkeditSegment();
try self.writeLinkeditSectionsToFile();
self.writeLinkeditSectionsToFile() catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("failed to write linkedit sections to file: {s}", .{@errorName(e)}),
};
var codesig: ?CodeSignature = if (self.requiresCodeSig()) blk: {
// Preallocate space for the code signature.
@ -561,7 +571,8 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
// where the code signature goes into.
var codesig = CodeSignature.init(self.getPageSize());
codesig.code_directory.ident = fs.path.basename(self.base.emit.sub_path);
if (self.entitlements) |path| try codesig.addEntitlements(gpa, path);
if (self.entitlements) |path| codesig.addEntitlements(gpa, path) catch |err|
return diags.fail("failed to add entitlements from {s}: {s}", .{ path, @errorName(err) });
try self.writeCodeSignaturePadding(&codesig);
break :blk codesig;
} else null;
@ -573,15 +584,34 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
self.getPageSize(),
);
const ncmds, const sizeofcmds, const uuid_cmd_offset = try self.writeLoadCommands();
const ncmds, const sizeofcmds, const uuid_cmd_offset = self.writeLoadCommands() catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
};
try self.writeHeader(ncmds, sizeofcmds);
try self.writeUuid(uuid_cmd_offset, self.requiresCodeSig());
if (self.getDebugSymbols()) |dsym| try dsym.flushModule(self);
self.writeUuid(uuid_cmd_offset, self.requiresCodeSig()) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("failed to calculate and write uuid: {s}", .{@errorName(e)}),
};
if (self.getDebugSymbols()) |dsym| dsym.flushModule(self) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return diags.fail("failed to get debug symbols: {s}", .{@errorName(e)}),
};
// Code signing always comes last.
if (codesig) |*csig| {
try self.writeCodeSignature(csig); // code signing always comes last
self.writeCodeSignature(csig) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("failed to write code signature: {s}", .{@errorName(e)}),
};
const emit = self.base.emit;
try invalidateKernelCache(emit.root_dir.handle, emit.sub_path);
invalidateKernelCache(emit.root_dir.handle, emit.sub_path) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return diags.fail("failed to invalidate kernel cache: {s}", .{@errorName(e)}),
};
}
}
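The `error.NoSpaceLeft => unreachable` prong above narrows `writeLoadCommands`'s error set at the call site: its output buffer is sized from a computed `needed_size`, so the fixed-buffer writer can be shown never to run dry. A sketch of the idiom:

const std = @import("std");

test "statically sized fixed buffer cannot overflow" {
    const payload = "load commands";
    var buf: [payload.len]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    // The buffer was sized for exactly this write, so NoSpaceLeft is
    // impossible here and the branch can be marked unreachable.
    fbs.writer().writeAll(payload) catch |err| switch (err) {
        error.NoSpaceLeft => unreachable,
    };
    try std.testing.expectEqualStrings(payload, fbs.getWritten());
}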
@ -1545,21 +1575,21 @@ fn reportUndefs(self: *MachO) !void {
try err.addMsg("undefined symbol: {s}", .{undef_sym.getName(self)});
switch (notes) {
.force_undefined => try err.addNote("referenced with linker flag -u", .{}),
.entry => try err.addNote("referenced with linker flag -e", .{}),
.dyld_stub_binder, .objc_msgsend => try err.addNote("referenced implicitly", .{}),
.force_undefined => err.addNote("referenced with linker flag -u", .{}),
.entry => err.addNote("referenced with linker flag -e", .{}),
.dyld_stub_binder, .objc_msgsend => err.addNote("referenced implicitly", .{}),
.refs => |refs| {
var inote: usize = 0;
while (inote < @min(refs.items.len, max_notes)) : (inote += 1) {
const ref = refs.items[inote];
const file = self.getFile(ref.file).?;
const atom = ref.getAtom(self).?;
try err.addNote("referenced by {}:{s}", .{ file.fmtPath(), atom.getName(self) });
err.addNote("referenced by {}:{s}", .{ file.fmtPath(), atom.getName(self) });
}
if (refs.items.len > max_notes) {
const remaining = refs.items.len - max_notes;
try err.addNote("referenced {d} more times", .{remaining});
err.addNote("referenced {d} more times", .{remaining});
}
},
}
@ -2171,7 +2201,7 @@ fn allocateSections(self: *MachO) !void {
fileoff = mem.alignForward(u32, fileoff, page_size);
}
const alignment = try math.powi(u32, 2, header.@"align");
const alignment = try self.alignPow(header.@"align");
vmaddr = mem.alignForward(u64, vmaddr, alignment);
header.addr = vmaddr;
@ -2327,7 +2357,7 @@ fn allocateLinkeditSegment(self: *MachO) !void {
seg.vmaddr = mem.alignForward(u64, vmaddr, page_size);
seg.fileoff = mem.alignForward(u64, fileoff, page_size);
var off = math.cast(u32, seg.fileoff) orelse return error.Overflow;
var off = try self.cast(u32, seg.fileoff);
// DYLD_INFO_ONLY
{
const cmd = &self.dyld_info_cmd;
@ -2392,7 +2422,7 @@ fn resizeSections(self: *MachO) !void {
if (header.isZerofill()) continue;
if (self.isZigSection(@intCast(n_sect))) continue; // TODO this is horrible
const cpu_arch = self.getTarget().cpu.arch;
const size = math.cast(usize, header.size) orelse return error.Overflow;
const size = try self.cast(usize, header.size);
try out.resize(self.base.comp.gpa, size);
const padding_byte: u8 = if (header.isCode() and cpu_arch == .x86_64) 0xcc else 0;
@memset(out.items, padding_byte);
@ -2489,7 +2519,7 @@ fn writeThunkWorker(self: *MachO, thunk: Thunk) void {
const doWork = struct {
fn doWork(th: Thunk, buffer: []u8, macho_file: *MachO) !void {
const off = math.cast(usize, th.value) orelse return error.Overflow;
const off = try macho_file.cast(usize, th.value);
const size = th.size();
var stream = std.io.fixedBufferStream(buffer[off..][0..size]);
try th.write(macho_file, stream.writer());
@ -2601,7 +2631,7 @@ fn writeSectionsToFile(self: *MachO) !void {
const slice = self.sections.slice();
for (slice.items(.header), slice.items(.out)) |header, out| {
try self.base.file.?.pwriteAll(out.items, header.offset);
try self.pwriteAll(out.items, header.offset);
}
}
@ -2644,7 +2674,7 @@ fn writeDyldInfo(self: *MachO) !void {
try self.lazy_bind_section.write(writer);
try stream.seekTo(cmd.export_off - base_off);
try self.export_trie.write(writer);
try self.base.file.?.pwriteAll(buffer, cmd.rebase_off);
try self.pwriteAll(buffer, cmd.rebase_off);
}
pub fn writeDataInCode(self: *MachO) !void {
@ -2655,7 +2685,7 @@ pub fn writeDataInCode(self: *MachO) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.data_in_code.size());
defer buffer.deinit();
try self.data_in_code.write(self, buffer.writer());
try self.base.file.?.pwriteAll(buffer.items, cmd.dataoff);
try self.pwriteAll(buffer.items, cmd.dataoff);
}
fn writeIndsymtab(self: *MachO) !void {
@ -2667,15 +2697,15 @@ fn writeIndsymtab(self: *MachO) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, needed_size);
defer buffer.deinit();
try self.indsymtab.write(self, buffer.writer());
try self.base.file.?.pwriteAll(buffer.items, cmd.indirectsymoff);
try self.pwriteAll(buffer.items, cmd.indirectsymoff);
}
pub fn writeSymtabToFile(self: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
const cmd = self.symtab_cmd;
try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.symtab.items), cmd.symoff);
try self.base.file.?.pwriteAll(self.strtab.items, cmd.stroff);
try self.pwriteAll(mem.sliceAsBytes(self.symtab.items), cmd.symoff);
try self.pwriteAll(self.strtab.items, cmd.stroff);
}
fn writeUnwindInfo(self: *MachO) !void {
@ -2686,20 +2716,20 @@ fn writeUnwindInfo(self: *MachO) !void {
if (self.eh_frame_sect_index) |index| {
const header = self.sections.items(.header)[index];
const size = math.cast(usize, header.size) orelse return error.Overflow;
const size = try self.cast(usize, header.size);
const buffer = try gpa.alloc(u8, size);
defer gpa.free(buffer);
eh_frame.write(self, buffer);
try self.base.file.?.pwriteAll(buffer, header.offset);
try self.pwriteAll(buffer, header.offset);
}
if (self.unwind_info_sect_index) |index| {
const header = self.sections.items(.header)[index];
const size = math.cast(usize, header.size) orelse return error.Overflow;
const size = try self.cast(usize, header.size);
const buffer = try gpa.alloc(u8, size);
defer gpa.free(buffer);
try self.unwind_info.write(self, buffer);
try self.base.file.?.pwriteAll(buffer, header.offset);
try self.pwriteAll(buffer, header.offset);
}
}
@ -2890,7 +2920,7 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } {
assert(stream.pos == needed_size);
try self.base.file.?.pwriteAll(buffer, @sizeOf(macho.mach_header_64));
try self.pwriteAll(buffer, @sizeOf(macho.mach_header_64));
return .{ ncmds, buffer.len, uuid_cmd_offset };
}
@ -2944,7 +2974,7 @@ fn writeHeader(self: *MachO, ncmds: usize, sizeofcmds: usize) !void {
log.debug("writing Mach-O header {}", .{header});
try self.base.file.?.pwriteAll(mem.asBytes(&header), 0);
try self.pwriteAll(mem.asBytes(&header), 0);
}
fn writeUuid(self: *MachO, uuid_cmd_offset: u64, has_codesig: bool) !void {
@ -2954,7 +2984,7 @@ fn writeUuid(self: *MachO, uuid_cmd_offset: u64, has_codesig: bool) !void {
} else self.codesig_cmd.dataoff;
try calcUuid(self.base.comp, self.base.file.?, file_size, &self.uuid_cmd.uuid);
const offset = uuid_cmd_offset + @sizeOf(macho.load_command);
try self.base.file.?.pwriteAll(&self.uuid_cmd.uuid, offset);
try self.pwriteAll(&self.uuid_cmd.uuid, offset);
}
pub fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void {
@ -2968,7 +2998,7 @@ pub fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void {
log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
// Pad out the space. We need to do this to calculate valid hashes for everything in the file
// except for code signature data.
try self.base.file.?.pwriteAll(&[_]u8{0}, offset + needed_size - 1);
try self.pwriteAll(&[_]u8{0}, offset + needed_size - 1);
self.codesig_cmd.dataoff = @as(u32, @intCast(offset));
self.codesig_cmd.datasize = @as(u32, @intCast(needed_size));
@ -2995,10 +3025,16 @@ pub fn writeCodeSignature(self: *MachO, code_sig: *CodeSignature) !void {
offset + buffer.items.len,
});
try self.base.file.?.pwriteAll(buffer.items, offset);
try self.pwriteAll(buffer.items, offset);
}
pub fn updateFunc(self: *MachO, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(
self: *MachO,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@ -3006,7 +3042,7 @@ pub fn updateFunc(self: *MachO, pt: Zcu.PerThread, func_index: InternPool.Index,
return self.getZigObject().?.updateFunc(self, pt, func_index, air, liveness);
}
pub fn updateNav(self: *MachO, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !void {
pub fn updateNav(self: *MachO, pt: Zcu.PerThread, nav: InternPool.Nav.Index) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@ -3023,7 +3059,7 @@ pub fn updateExports(
self: *MachO,
pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
export_indices: []const Zcu.Export.Index,
) link.File.UpdateExportsError!void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
@ -3199,7 +3235,7 @@ fn copyRangeAllZeroOut(self: *MachO, old_offset: u64, new_offset: u64, size: u64
const gpa = self.base.comp.gpa;
try self.copyRangeAll(old_offset, new_offset, size);
const size_u = math.cast(usize, size) orelse return error.Overflow;
const zeroes = try gpa.alloc(u8, size_u);
const zeroes = try gpa.alloc(u8, size_u); // TODO no need to allocate here.
defer gpa.free(zeroes);
@memset(zeroes, 0);
try self.base.file.?.pwriteAll(zeroes, old_offset);
@ -3306,10 +3342,9 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
const allocSect = struct {
fn allocSect(macho_file: *MachO, sect_id: u8, size: u64) !void {
const sect = &macho_file.sections.items(.header)[sect_id];
const alignment = try math.powi(u32, 2, sect.@"align");
const alignment = try macho_file.alignPow(sect.@"align");
if (!sect.isZerofill()) {
sect.offset = math.cast(u32, try macho_file.findFreeSpace(size, alignment)) orelse
return error.Overflow;
sect.offset = try macho_file.cast(u32, try macho_file.findFreeSpace(size, alignment));
}
sect.addr = macho_file.findFreeSpaceVirtual(size, alignment);
sect.size = size;
@ -3441,8 +3476,8 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo
seg_id,
seg.segName(),
});
try err.addNote("TODO: emit relocations to memory locations in self-hosted backends", .{});
try err.addNote("as a workaround, try increasing pre-allocated virtual memory of each segment", .{});
err.addNote("TODO: emit relocations to memory locations in self-hosted backends", .{});
err.addNote("as a workaround, try increasing pre-allocated virtual memory of each segment", .{});
}
seg.vmsize = needed_size;
@ -3744,7 +3779,7 @@ pub fn reportParseError2(
const diags = &self.base.comp.link_diags;
var err = try diags.addErrorWithNotes(1);
try err.addMsg(format, args);
try err.addNote("while parsing {}", .{self.getFile(file_index).?.fmtPath()});
err.addNote("while parsing {}", .{self.getFile(file_index).?.fmtPath()});
}
fn reportMissingDependencyError(
@ -3758,10 +3793,10 @@ fn reportMissingDependencyError(
const diags = &self.base.comp.link_diags;
var err = try diags.addErrorWithNotes(2 + checked_paths.len);
try err.addMsg(format, args);
try err.addNote("while resolving {s}", .{path});
try err.addNote("a dependency of {}", .{self.getFile(parent).?.fmtPath()});
err.addNote("while resolving {s}", .{path});
err.addNote("a dependency of {}", .{self.getFile(parent).?.fmtPath()});
for (checked_paths) |p| {
try err.addNote("tried {s}", .{p});
err.addNote("tried {s}", .{p});
}
}
@ -3775,8 +3810,8 @@ fn reportDependencyError(
const diags = &self.base.comp.link_diags;
var err = try diags.addErrorWithNotes(2);
try err.addMsg(format, args);
try err.addNote("while parsing {s}", .{path});
try err.addNote("a dependency of {}", .{self.getFile(parent).?.fmtPath()});
err.addNote("while parsing {s}", .{path});
err.addNote("a dependency of {}", .{self.getFile(parent).?.fmtPath()});
}
fn reportDuplicates(self: *MachO) error{ HasDuplicates, OutOfMemory }!void {
@ -3806,17 +3841,17 @@ fn reportDuplicates(self: *MachO) error{ HasDuplicates, OutOfMemory }!void {
var err = try diags.addErrorWithNotes(nnotes + 1);
try err.addMsg("duplicate symbol definition: {s}", .{sym.getName(self)});
try err.addNote("defined by {}", .{sym.getFile(self).?.fmtPath()});
err.addNote("defined by {}", .{sym.getFile(self).?.fmtPath()});
var inote: usize = 0;
while (inote < @min(notes.items.len, max_notes)) : (inote += 1) {
const file = self.getFile(notes.items[inote]).?;
try err.addNote("defined by {}", .{file.fmtPath()});
err.addNote("defined by {}", .{file.fmtPath()});
}
if (notes.items.len > max_notes) {
const remaining = notes.items.len - max_notes;
try err.addNote("defined {d} more times", .{remaining});
err.addNote("defined {d} more times", .{remaining});
}
}
return error.HasDuplicates;
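`err.addNote(...)` losing its `try` across these hunks reflects `addErrorWithNotes(n)` reserving space for all `n` notes up front, so attaching one can no longer fail. A self-contained sketch of that preallocation idea (types here are illustrative, not the compiler's):

const std = @import("std");

const ErrorWithNotes = struct {
    notes: [][]const u8,
    used: usize = 0,

    // Infallible by construction: capacity was reserved when the
    // error was created, so appending a note cannot allocate.
    fn addNote(err: *ErrorWithNotes, note: []const u8) void {
        err.notes[err.used] = note;
        err.used += 1;
    }
};

fn addErrorWithNotes(gpa: std.mem.Allocator, note_count: usize) !ErrorWithNotes {
    return .{ .notes = try gpa.alloc([]const u8, note_count) };
}

test "preallocated notes" {
    var err = try addErrorWithNotes(std.testing.allocator, 2);
    defer std.testing.allocator.free(err.notes);
    err.addNote("defined here");
    err.addNote("also defined here");
    try std.testing.expectEqual(@as(usize, 2), err.used);
}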
@ -5310,6 +5345,40 @@ fn isReachable(atom: *const Atom, rel: Relocation, macho_file: *MachO) bool {
return true;
}
pub fn pwriteAll(macho_file: *MachO, bytes: []const u8, offset: u64) error{LinkFailure}!void {
const comp = macho_file.base.comp;
const diags = &comp.link_diags;
macho_file.base.file.?.pwriteAll(bytes, offset) catch |err| {
return diags.fail("failed to write: {s}", .{@errorName(err)});
};
}
pub fn setEndPos(macho_file: *MachO, length: u64) error{LinkFailure}!void {
const comp = macho_file.base.comp;
const diags = &comp.link_diags;
macho_file.base.file.?.setEndPos(length) catch |err| {
return diags.fail("failed to set file end pos: {s}", .{@errorName(err)});
};
}
pub fn cast(macho_file: *MachO, comptime T: type, x: anytype) error{LinkFailure}!T {
return std.math.cast(T, x) orelse {
const comp = macho_file.base.comp;
const diags = &comp.link_diags;
return diags.fail("encountered {d}, overflowing {d}-bit value", .{ x, @bitSizeOf(T) });
};
}
pub fn alignPow(macho_file: *MachO, x: u32) error{LinkFailure}!u32 {
const result, const ov = @shlWithOverflow(@as(u32, 1), try cast(macho_file, u5, x));
if (ov != 0) {
const comp = macho_file.base.comp;
const diags = &comp.link_diags;
return diags.fail("alignment overflow", .{});
}
return result;
}
/// Branch instruction has 26 bits immediate but is 4 byte aligned.
const jump_bits = @bitSizeOf(i28);
const max_distance = (1 << (jump_bits - 1));
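The new `cast`/`alignPow` helpers centralize overflow handling: `cast` reports a failed narrowing as a link failure, and `alignPow` computes 2^x with a checked shift instead of `math.powi`. A standalone analogue returning a plain error (the diagnostics plumbing is omitted):

const std = @import("std");

// Standalone analogue of `alignPow` above: 2^x via a checked shift,
// with overflow surfaced as an error instead of undefined behavior.
fn alignPow(x: u32) error{Overflow}!u32 {
    const shift = std.math.cast(u5, x) orelse return error.Overflow;
    const result, const ov = @shlWithOverflow(@as(u32, 1), shift);
    if (ov != 0) return error.Overflow;
    return result;
}

test "alignPow" {
    try std.testing.expectEqual(@as(u32, 1), try alignPow(0));
    try std.testing.expectEqual(@as(u32, 8), try alignPow(3));
    try std.testing.expectError(error.Overflow, alignPow(32));
}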

View File

@ -909,8 +909,8 @@ const x86_64 = struct {
rel.offset,
rel.fmtPretty(.x86_64),
});
try err.addNote("expected .mov instruction but found .{s}", .{@tagName(x)});
try err.addNote("while parsing {}", .{self.getFile(macho_file).fmtPath()});
err.addNote("expected .mov instruction but found .{s}", .{@tagName(x)});
err.addNote("while parsing {}", .{self.getFile(macho_file).fmtPath()});
return error.RelaxFailUnexpectedInstruction;
},
}
@ -971,7 +971,7 @@ pub fn calcNumRelocs(self: Atom, macho_file: *MachO) u32 {
}
}
pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.relocation_info) !void {
pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.relocation_info) error{ LinkFailure, OutOfMemory }!void {
const tracy = trace(@src());
defer tracy.end();
@ -983,15 +983,15 @@ pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.r
var i: usize = 0;
for (relocs) |rel| {
defer i += 1;
const rel_offset = math.cast(usize, rel.offset - self.off) orelse return error.Overflow;
const r_address: i32 = math.cast(i32, self.value + rel_offset) orelse return error.Overflow;
const rel_offset = try macho_file.cast(usize, rel.offset - self.off);
const r_address: i32 = try macho_file.cast(i32, self.value + rel_offset);
assert(r_address >= 0);
const r_symbolnum = r_symbolnum: {
const r_symbolnum: u32 = switch (rel.tag) {
.local => rel.getTargetAtom(self, macho_file).out_n_sect + 1,
.@"extern" => rel.getTargetSymbol(self, macho_file).getOutputSymtabIndex(macho_file).?,
};
break :r_symbolnum math.cast(u24, r_symbolnum) orelse return error.Overflow;
break :r_symbolnum try macho_file.cast(u24, r_symbolnum);
};
const r_extern = rel.tag == .@"extern";
var addend = rel.addend + rel.getRelocAddend(cpu_arch);
@ -1027,7 +1027,7 @@ pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.r
} else if (addend > 0) {
buffer[i] = .{
.r_address = r_address,
.r_symbolnum = @bitCast(math.cast(i24, addend) orelse return error.Overflow),
.r_symbolnum = @bitCast(try macho_file.cast(i24, addend)),
.r_pcrel = 0,
.r_length = 2,
.r_extern = 0,

View File

@ -414,10 +414,11 @@ pub fn resolveLiterals(self: *InternalObject, lp: *MachO.LiteralPool, macho_file
const rel = relocs[0];
assert(rel.tag == .@"extern");
const target = rel.getTargetSymbol(atom.*, macho_file).getAtom(macho_file).?;
const target_size = std.math.cast(usize, target.size) orelse return error.Overflow;
const target_size = try macho_file.cast(usize, target.size);
try buffer.ensureUnusedCapacity(target_size);
buffer.resize(target_size) catch unreachable;
@memcpy(buffer.items, try self.getSectionData(target.n_sect));
const section_data = try self.getSectionData(target.n_sect, macho_file);
@memcpy(buffer.items, section_data);
const res = try lp.insert(gpa, header.type(), buffer.items);
buffer.clearRetainingCapacity();
if (!res.found_existing) {
@ -607,10 +608,11 @@ pub fn writeAtoms(self: *InternalObject, macho_file: *MachO) !void {
if (!atom.isAlive()) continue;
const sect = atom.getInputSection(macho_file);
if (sect.isZerofill()) continue;
const off = std.math.cast(usize, atom.value) orelse return error.Overflow;
const size = std.math.cast(usize, atom.size) orelse return error.Overflow;
const off = try macho_file.cast(usize, atom.value);
const size = try macho_file.cast(usize, atom.size);
const buffer = macho_file.sections.items(.out)[atom.out_n_sect].items[off..][0..size];
@memcpy(buffer, try self.getSectionData(atom.n_sect));
const section_data = try self.getSectionData(atom.n_sect, macho_file);
@memcpy(buffer, section_data);
try atom.resolveRelocs(macho_file, buffer);
}
}
@ -644,13 +646,13 @@ fn addSection(self: *InternalObject, allocator: Allocator, segname: []const u8,
return n_sect;
}
fn getSectionData(self: *const InternalObject, index: u32) error{Overflow}![]const u8 {
fn getSectionData(self: *const InternalObject, index: u32, macho_file: *MachO) error{LinkFailure}![]const u8 {
const slice = self.sections.slice();
assert(index < slice.items(.header).len);
const sect = slice.items(.header)[index];
const extra = slice.items(.extra)[index];
if (extra.is_objc_methname) {
const size = std.math.cast(usize, sect.size) orelse return error.Overflow;
const size = try macho_file.cast(usize, sect.size);
return self.objc_methnames.items[sect.offset..][0..size];
} else if (extra.is_objc_selref)
return &self.objc_selrefs

View File

@ -582,7 +582,7 @@ fn initPointerLiterals(self: *Object, allocator: Allocator, macho_file: *MachO)
);
return error.MalformedObject;
}
const num_ptrs = math.cast(usize, @divExact(sect.size, rec_size)) orelse return error.Overflow;
const num_ptrs = try macho_file.cast(usize, @divExact(sect.size, rec_size));
for (0..num_ptrs) |i| {
const pos: u32 = @as(u32, @intCast(i)) * rec_size;
@ -650,8 +650,8 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO
for (subs.items) |sub| {
const atom = self.getAtom(sub.atom).?;
const atom_off = math.cast(usize, atom.off) orelse return error.Overflow;
const atom_size = math.cast(usize, atom.size) orelse return error.Overflow;
const atom_off = try macho_file.cast(usize, atom.off);
const atom_size = try macho_file.cast(usize, atom.size);
const atom_data = data[atom_off..][0..atom_size];
const res = try lp.insert(gpa, header.type(), atom_data);
if (!res.found_existing) {
@ -674,8 +674,8 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO
.local => rel.getTargetAtom(atom.*, macho_file),
.@"extern" => rel.getTargetSymbol(atom.*, macho_file).getAtom(macho_file).?,
};
const addend = math.cast(u32, rel.addend) orelse return error.Overflow;
const target_size = math.cast(usize, target.size) orelse return error.Overflow;
const addend = try macho_file.cast(u32, rel.addend);
const target_size = try macho_file.cast(usize, target.size);
try buffer.ensureUnusedCapacity(target_size);
buffer.resize(target_size) catch unreachable;
const gop = try sections_data.getOrPut(target.n_sect);
@ -683,7 +683,7 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO
gop.value_ptr.* = try self.readSectionData(gpa, file, @intCast(target.n_sect));
}
const data = gop.value_ptr.*;
const target_off = math.cast(usize, target.off) orelse return error.Overflow;
const target_off = try macho_file.cast(usize, target.off);
@memcpy(buffer.items, data[target_off..][0..target_size]);
const res = try lp.insert(gpa, header.type(), buffer.items[addend..]);
buffer.clearRetainingCapacity();
@ -1033,7 +1033,7 @@ fn initEhFrameRecords(self: *Object, allocator: Allocator, sect_id: u8, file: Fi
const sect = slice.items(.header)[sect_id];
const relocs = slice.items(.relocs)[sect_id];
const size = math.cast(usize, sect.size) orelse return error.Overflow;
const size = try macho_file.cast(usize, sect.size);
try self.eh_frame_data.resize(allocator, size);
const amt = try file.preadAll(self.eh_frame_data.items, sect.offset + self.offset);
if (amt != self.eh_frame_data.items.len) return error.InputOutput;
@ -1696,7 +1696,7 @@ pub fn updateArSize(self: *Object, macho_file: *MachO) !void {
pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writer: anytype) !void {
// Header
const size = std.math.cast(usize, self.output_ar_state.size) orelse return error.Overflow;
const size = try macho_file.cast(usize, self.output_ar_state.size);
const basename = std.fs.path.basename(self.path.sub_path);
try Archive.writeHeader(basename, size, ar_format, writer);
// Data
@ -1826,7 +1826,7 @@ pub fn writeAtoms(self: *Object, macho_file: *MachO) !void {
for (headers, 0..) |header, n_sect| {
if (header.isZerofill()) continue;
const size = math.cast(usize, header.size) orelse return error.Overflow;
const size = try macho_file.cast(usize, header.size);
const data = try gpa.alloc(u8, size);
const amt = try file.preadAll(data, header.offset + self.offset);
if (amt != data.len) return error.InputOutput;
@ -1837,9 +1837,9 @@ pub fn writeAtoms(self: *Object, macho_file: *MachO) !void {
if (!atom.isAlive()) continue;
const sect = atom.getInputSection(macho_file);
if (sect.isZerofill()) continue;
const value = math.cast(usize, atom.value) orelse return error.Overflow;
const off = math.cast(usize, atom.off) orelse return error.Overflow;
const size = math.cast(usize, atom.size) orelse return error.Overflow;
const value = try macho_file.cast(usize, atom.value);
const off = try macho_file.cast(usize, atom.off);
const size = try macho_file.cast(usize, atom.size);
const buffer = macho_file.sections.items(.out)[atom.out_n_sect].items;
const data = sections_data[atom.n_sect];
@memcpy(buffer[value..][0..size], data[off..][0..size]);
@ -1865,7 +1865,7 @@ pub fn writeAtomsRelocatable(self: *Object, macho_file: *MachO) !void {
for (headers, 0..) |header, n_sect| {
if (header.isZerofill()) continue;
const size = math.cast(usize, header.size) orelse return error.Overflow;
const size = try macho_file.cast(usize, header.size);
const data = try gpa.alloc(u8, size);
const amt = try file.preadAll(data, header.offset + self.offset);
if (amt != data.len) return error.InputOutput;
@ -1876,9 +1876,9 @@ pub fn writeAtomsRelocatable(self: *Object, macho_file: *MachO) !void {
if (!atom.isAlive()) continue;
const sect = atom.getInputSection(macho_file);
if (sect.isZerofill()) continue;
const value = math.cast(usize, atom.value) orelse return error.Overflow;
const off = math.cast(usize, atom.off) orelse return error.Overflow;
const size = math.cast(usize, atom.size) orelse return error.Overflow;
const value = try macho_file.cast(usize, atom.value);
const off = try macho_file.cast(usize, atom.off);
const size = try macho_file.cast(usize, atom.size);
const buffer = macho_file.sections.items(.out)[atom.out_n_sect].items;
const data = sections_data[atom.n_sect];
@memcpy(buffer[value..][0..size], data[off..][0..size]);
@ -1909,29 +1909,27 @@ pub fn calcCompactUnwindSizeRelocatable(self: *Object, macho_file: *MachO) void
}
}
fn addReloc(offset: u32, arch: std.Target.Cpu.Arch) !macho.relocation_info {
return .{
.r_address = std.math.cast(i32, offset) orelse return error.Overflow,
.r_symbolnum = 0,
.r_pcrel = 0,
.r_length = 3,
.r_extern = 0,
.r_type = switch (arch) {
.aarch64 => @intFromEnum(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @intFromEnum(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
else => unreachable,
},
};
}
pub fn writeCompactUnwindRelocatable(self: *Object, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
const cpu_arch = macho_file.getTarget().cpu.arch;
const addReloc = struct {
fn addReloc(offset: u32, arch: std.Target.Cpu.Arch) !macho.relocation_info {
return .{
.r_address = math.cast(i32, offset) orelse return error.Overflow,
.r_symbolnum = 0,
.r_pcrel = 0,
.r_length = 3,
.r_extern = 0,
.r_type = switch (arch) {
.aarch64 => @intFromEnum(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @intFromEnum(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
else => unreachable,
},
};
}
}.addReloc;
const nsect = macho_file.unwind_info_sect_index.?;
const buffer = macho_file.sections.items(.out)[nsect].items;
const relocs = macho_file.sections.items(.relocs)[nsect].items;
@ -1967,7 +1965,7 @@ pub fn writeCompactUnwindRelocatable(self: *Object, macho_file: *MachO) !void {
// Personality function
if (rec.getPersonality(macho_file)) |sym| {
const r_symbolnum = math.cast(u24, sym.getOutputSymtabIndex(macho_file).?) orelse return error.Overflow;
const r_symbolnum = try macho_file.cast(u24, sym.getOutputSymtabIndex(macho_file).?);
var reloc = try addReloc(offset + 16, cpu_arch);
reloc.r_symbolnum = r_symbolnum;
reloc.r_extern = 1;
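Moving `addReloc` from a file-scope function into `writeCompactUnwindRelocatable` uses Zig's local-function idiom, since Zig has no nested functions: declare a one-off struct and bind its method to a constant. A tiny sketch:

const std = @import("std");

test "function-local helper" {
    // A one-off struct whose declaration is immediately bound to a
    // local constant, scoping the helper to this function.
    const double = struct {
        fn double(x: u32) u32 {
            return x * 2;
        }
    }.double;
    try std.testing.expectEqual(@as(u32, 8), double(4));
}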

View File

@ -290,12 +290,15 @@ pub fn dedupLiterals(self: *ZigObject, lp: MachO.LiteralPool, macho_file: *MachO
/// We need this so that we can write to an archive.
/// TODO implement writing ZigObject data directly to a buffer instead.
pub fn readFileContents(self: *ZigObject, macho_file: *MachO) !void {
const diags = &macho_file.base.comp.link_diags;
// Size of the output object file is always the offset + size of the strtab
const size = macho_file.symtab_cmd.stroff + macho_file.symtab_cmd.strsize;
const gpa = macho_file.base.comp.gpa;
try self.data.resize(gpa, size);
const amt = try macho_file.base.file.?.preadAll(self.data.items, 0);
if (amt != size) return error.InputOutput;
const amt = macho_file.base.file.?.preadAll(self.data.items, 0) catch |err|
return diags.fail("failed to read output file: {s}", .{@errorName(err)});
if (amt != size)
return diags.fail("unexpected EOF reading from output file", .{});
}
pub fn updateArSymtab(self: ZigObject, ar_symtab: *Archive.ArSymtab, macho_file: *MachO) error{OutOfMemory}!void {
@ -376,7 +379,7 @@ pub fn resolveRelocs(self: *ZigObject, macho_file: *MachO) !void {
if (atom.getRelocs(macho_file).len == 0) continue;
// TODO: we will resolve and write ZigObject's TLS data twice:
// once here, and once in writeAtoms
const atom_size = std.math.cast(usize, atom.size) orelse return error.Overflow;
const atom_size = try macho_file.cast(usize, atom.size);
const code = try gpa.alloc(u8, atom_size);
defer gpa.free(code);
self.getAtomData(macho_file, atom.*, code) catch |err| {
@ -400,7 +403,7 @@ pub fn resolveRelocs(self: *ZigObject, macho_file: *MachO) !void {
has_error = true;
continue;
};
try macho_file.base.file.?.pwriteAll(code, file_offset);
try macho_file.pwriteAll(code, file_offset);
}
if (has_error) return error.ResolveFailed;
@ -419,7 +422,7 @@ pub fn calcNumRelocs(self: *ZigObject, macho_file: *MachO) void {
}
}
pub fn writeRelocs(self: *ZigObject, macho_file: *MachO) !void {
pub fn writeRelocs(self: *ZigObject, macho_file: *MachO) error{ LinkFailure, OutOfMemory }!void {
const gpa = macho_file.base.comp.gpa;
const diags = &macho_file.base.comp.link_diags;
@ -432,14 +435,14 @@ pub fn writeRelocs(self: *ZigObject, macho_file: *MachO) !void {
if (!macho_file.isZigSection(atom.out_n_sect) and !macho_file.isDebugSection(atom.out_n_sect)) continue;
if (atom.getRelocs(macho_file).len == 0) continue;
const extra = atom.getExtra(macho_file);
const atom_size = std.math.cast(usize, atom.size) orelse return error.Overflow;
const atom_size = try macho_file.cast(usize, atom.size);
const code = try gpa.alloc(u8, atom_size);
defer gpa.free(code);
self.getAtomData(macho_file, atom.*, code) catch |err|
return diags.fail("failed to fetch code for '{s}': {s}", .{ atom.getName(macho_file), @errorName(err) });
const file_offset = header.offset + atom.value;
try atom.writeRelocs(macho_file, code, relocs[extra.rel_out_index..][0..extra.rel_out_count]);
try macho_file.base.file.?.pwriteAll(code, file_offset);
try macho_file.pwriteAll(code, file_offset);
}
}
@ -457,8 +460,8 @@ pub fn writeAtomsRelocatable(self: *ZigObject, macho_file: *MachO) !void {
if (sect.isZerofill()) continue;
if (macho_file.isZigSection(atom.out_n_sect)) continue;
if (atom.getRelocs(macho_file).len == 0) continue;
const off = std.math.cast(usize, atom.value) orelse return error.Overflow;
const size = std.math.cast(usize, atom.size) orelse return error.Overflow;
const off = try macho_file.cast(usize, atom.value);
const size = try macho_file.cast(usize, atom.size);
const buffer = macho_file.sections.items(.out)[atom.out_n_sect].items;
try self.getAtomData(macho_file, atom.*, buffer[off..][0..size]);
const relocs = macho_file.sections.items(.relocs)[atom.out_n_sect].items;
@ -480,8 +483,8 @@ pub fn writeAtoms(self: *ZigObject, macho_file: *MachO) !void {
const sect = atom.getInputSection(macho_file);
if (sect.isZerofill()) continue;
if (macho_file.isZigSection(atom.out_n_sect)) continue;
const off = std.math.cast(usize, atom.value) orelse return error.Overflow;
const size = std.math.cast(usize, atom.size) orelse return error.Overflow;
const off = try macho_file.cast(usize, atom.value);
const size = try macho_file.cast(usize, atom.size);
const buffer = macho_file.sections.items(.out)[atom.out_n_sect].items;
try self.getAtomData(macho_file, atom.*, buffer[off..][0..size]);
try atom.resolveRelocs(macho_file, buffer[off..][0..size]);
@ -546,7 +549,9 @@ pub fn getInputSection(self: ZigObject, atom: Atom, macho_file: *MachO) macho.se
return sect;
}
pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) !void {
pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) link.File.FlushError!void {
const diags = &macho_file.base.comp.link_diags;
// Handle any lazy symbols that were emitted by incremental compilation.
if (self.lazy_syms.getPtr(.anyerror_type)) |metadata| {
const pt: Zcu.PerThread = .activate(macho_file.base.comp.zcu.?, tid);
@ -559,18 +564,20 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id)
pt,
.{ .kind = .code, .ty = .anyerror_type },
metadata.text_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
else => |e| e,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("failed to update lazy symbol: {s}", .{@errorName(e)}),
};
if (metadata.const_state != .unused) self.updateLazySymbol(
macho_file,
pt,
.{ .kind = .const_data, .ty = .anyerror_type },
metadata.const_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
else => |e| e,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("failed to update lazy symbol: {s}", .{@errorName(e)}),
};
}
for (self.lazy_syms.values()) |*metadata| {
@ -581,7 +588,10 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id)
if (self.dwarf) |*dwarf| {
const pt: Zcu.PerThread = .activate(macho_file.base.comp.zcu.?, tid);
defer pt.deactivate();
try dwarf.flushModule(pt);
dwarf.flushModule(pt) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return diags.fail("failed to flush dwarf module: {s}", .{@errorName(e)}),
};
self.debug_abbrev_dirty = false;
self.debug_aranges_dirty = false;
@ -616,6 +626,7 @@ pub fn getNavVAddr(
const sym = self.symbols.items[sym_index];
const vaddr = sym.getAddress(.{}, macho_file);
switch (reloc_info.parent) {
.none => unreachable,
.atom_index => |atom_index| {
const parent_atom = self.symbols.items[atom_index].getAtom(macho_file).?;
try parent_atom.addReloc(macho_file, .{
@ -655,6 +666,7 @@ pub fn getUavVAddr(
const sym = self.symbols.items[sym_index];
const vaddr = sym.getAddress(.{}, macho_file);
switch (reloc_info.parent) {
.none => unreachable,
.atom_index => |atom_index| {
const parent_atom = self.symbols.items[atom_index].getAtom(macho_file).?;
try parent_atom.addReloc(macho_file, .{
@ -766,7 +778,7 @@ pub fn updateFunc(
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) !void {
) link.File.UpdateNavError!void {
const tracy = trace(@src());
defer tracy.end();
@ -777,13 +789,13 @@ pub fn updateFunc(
const sym_index = try self.getOrCreateMetadataForNav(macho_file, func.owner_nav);
self.symbols.items[sym_index].getAtom(macho_file).?.freeRelocs(macho_file);
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, func.owner_nav, sym_index) else null;
defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();
const res = try codegen.generateFunction(
try codegen.generateFunction(
&macho_file.base,
pt,
zcu.navSrcLoc(func.owner_nav),
@ -793,14 +805,7 @@ pub fn updateFunc(
&code_buffer,
if (debug_wip_nav) |*wip_nav| .{ .dwarf = wip_nav } else .none,
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(gpa, func.owner_nav, em);
return;
},
};
const code = code_buffer.items;
const sect_index = try self.getNavOutputSection(macho_file, zcu, func.owner_nav, code);
const old_rva, const old_alignment = blk: {
@ -813,7 +818,8 @@ pub fn updateFunc(
break :blk .{ atom.value, atom.alignment };
};
if (debug_wip_nav) |*wip_nav| try self.dwarf.?.finishWipNavFunc(pt, func.owner_nav, code.len, wip_nav);
if (debug_wip_nav) |*wip_nav| self.dwarf.?.finishWipNavFunc(pt, func.owner_nav, code.len, wip_nav) catch |err|
return macho_file.base.cgFail(func.owner_nav, "falied to finish dwarf function: {s}", .{@errorName(err)});
// Exports will be updated by `Zcu.processExports` after the update.
if (old_rva != new_rva and old_rva > 0) {
@ -850,7 +856,8 @@ pub fn updateFunc(
}
const target_sym = self.symbols.items[sym_index];
const source_sym = self.symbols.items[target_sym.getExtra(macho_file).trampoline];
try writeTrampoline(source_sym, target_sym, macho_file);
writeTrampoline(source_sym, target_sym, macho_file) catch |err|
return macho_file.base.cgFail(func.owner_nav, "failed to write trampoline: {s}", .{@errorName(err)});
}
}
@ -883,7 +890,11 @@ pub fn updateNav(
if (self.dwarf) |*dwarf| dwarf: {
var debug_wip_nav = try dwarf.initWipNav(pt, nav_index, sym_index) orelse break :dwarf;
defer debug_wip_nav.deinit();
try dwarf.finishWipNav(pt, nav_index, &debug_wip_nav);
dwarf.finishWipNav(pt, nav_index, &debug_wip_nav) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.Overflow => return error.Overflow,
else => |e| return macho_file.base.cgFail(nav_index, "failed to finish dwarf nav: {s}", .{@errorName(e)}),
};
}
return;
},
@ -894,13 +905,13 @@ pub fn updateNav(
const sym_index = try self.getOrCreateMetadataForNav(macho_file, nav_index);
self.symbols.items[sym_index].getAtom(macho_file).?.freeRelocs(macho_file);
var code_buffer = std.ArrayList(u8).init(zcu.gpa);
defer code_buffer.deinit();
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(zcu.gpa);
var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, nav_index, sym_index) else null;
defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();
const res = try codegen.generateSymbol(
try codegen.generateSymbol(
&macho_file.base,
pt,
zcu.navSrcLoc(nav_index),
@ -908,21 +919,19 @@ pub fn updateNav(
&code_buffer,
.{ .atom_index = sym_index },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(zcu.gpa, nav_index, em);
return;
},
};
const code = code_buffer.items;
const sect_index = try self.getNavOutputSection(macho_file, zcu, nav_index, code);
if (isThreadlocal(macho_file, nav_index))
try self.updateTlv(macho_file, pt, nav_index, sym_index, sect_index, code)
else
try self.updateNavCode(macho_file, pt, nav_index, sym_index, sect_index, code);
if (debug_wip_nav) |*wip_nav| try self.dwarf.?.finishWipNav(pt, nav_index, wip_nav);
if (debug_wip_nav) |*wip_nav| self.dwarf.?.finishWipNav(pt, nav_index, wip_nav) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.Overflow => return error.Overflow,
else => |e| return macho_file.base.cgFail(nav_index, "failed to finish dwarf nav: {s}", .{@errorName(e)}),
};
} else if (self.dwarf) |*dwarf| try dwarf.updateComptimeNav(pt, nav_index);
// Exports will be updated by `Zcu.processExports` after the update.
@ -936,7 +945,7 @@ fn updateNavCode(
sym_index: Symbol.Index,
sect_index: u8,
code: []const u8,
) !void {
) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
@ -978,7 +987,8 @@ fn updateNavCode(
const need_realloc = code.len > capacity or !required_alignment.check(atom.value);
if (need_realloc) {
try atom.grow(macho_file);
atom.grow(macho_file) catch |err|
return macho_file.base.cgFail(nav_index, "failed to grow atom: {s}", .{@errorName(err)});
log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom.value });
if (old_vaddr != atom.value) {
sym.value = 0;
@ -991,7 +1001,8 @@ fn updateNavCode(
sect.size = needed_size;
}
} else {
try atom.allocate(macho_file);
atom.allocate(macho_file) catch |err|
return macho_file.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(err)});
errdefer self.freeNavMetadata(macho_file, sym_index);
sym.value = 0;
@ -1000,7 +1011,8 @@ fn updateNavCode(
if (!sect.isZerofill()) {
const file_offset = sect.offset + atom.value;
try macho_file.base.file.?.pwriteAll(code, file_offset);
macho_file.base.file.?.pwriteAll(code, file_offset) catch |err|
return macho_file.base.cgFail(nav_index, "failed to write output file: {s}", .{@errorName(err)});
}
}
@ -1198,13 +1210,13 @@ fn lowerConst(
) !LowerConstResult {
const gpa = macho_file.base.comp.gpa;
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
const name_str = try self.addString(gpa, name);
const sym_index = try self.newSymbolWithAtom(gpa, name_str, macho_file);
const res = try codegen.generateSymbol(
try codegen.generateSymbol(
&macho_file.base,
pt,
src_loc,
@ -1212,10 +1224,7 @@ fn lowerConst(
&code_buffer,
.{ .atom_index = sym_index },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| return .{ .fail = em },
};
const code = code_buffer.items;
const sym = &self.symbols.items[sym_index];
sym.out_n_sect = output_section_index;
@ -1236,7 +1245,7 @@ fn lowerConst(
const sect = macho_file.sections.items(.header)[output_section_index];
const file_offset = sect.offset + atom.value;
try macho_file.base.file.?.pwriteAll(code, file_offset);
try macho_file.pwriteAll(code, file_offset);
return .{ .ok = sym_index };
}
@ -1246,7 +1255,7 @@ pub fn updateExports(
macho_file: *MachO,
pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
export_indices: []const Zcu.Export.Index,
) link.File.UpdateExportsError!void {
const tracy = trace(@src());
defer tracy.end();
@ -1259,7 +1268,7 @@ pub fn updateExports(
break :blk self.navs.getPtr(nav).?;
},
.uav => |uav| self.uavs.getPtr(uav) orelse blk: {
const first_exp = zcu.all_exports.items[export_indices[0]];
const first_exp = export_indices[0].ptr(zcu);
const res = try self.lowerUav(macho_file, pt, uav, .none, first_exp.src);
switch (res) {
.mcv => {},
@ -1279,7 +1288,7 @@ pub fn updateExports(
const nlist = self.symtab.items(.nlist)[nlist_idx];
for (export_indices) |export_idx| {
const exp = zcu.all_exports.items[export_idx];
const exp = export_idx.ptr(zcu);
if (exp.opts.section.unwrap()) |section_name| {
if (!section_name.eqlSlice("__text", &zcu.intern_pool)) {
try zcu.failed_exports.ensureUnusedCapacity(zcu.gpa, 1);
@ -1352,8 +1361,8 @@ fn updateLazySymbol(
const gpa = zcu.gpa;
var required_alignment: Atom.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
const name_str = blk: {
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{
@ -1365,7 +1374,7 @@ fn updateLazySymbol(
};
const src = Type.fromInterned(lazy_sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded;
const res = try codegen.generateLazySymbol(
try codegen.generateLazySymbol(
&macho_file.base,
pt,
src,
@ -1375,13 +1384,7 @@ fn updateLazySymbol(
.none,
.{ .atom_index = symbol_index },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
log.err("{s}", .{em.msg});
return error.CodegenFail;
},
};
const code = code_buffer.items;
const output_section_index = switch (lazy_sym.kind) {
.code => macho_file.zig_text_sect_index.?,
@ -1412,12 +1415,18 @@ fn updateLazySymbol(
const sect = macho_file.sections.items(.header)[output_section_index];
const file_offset = sect.offset + atom.value;
try macho_file.base.file.?.pwriteAll(code, file_offset);
try macho_file.pwriteAll(code, file_offset);
}
pub fn updateLineNumber(self: *ZigObject, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void {
if (self.dwarf) |*dwarf| {
try dwarf.updateLineNumber(pt.zcu, ti_id);
const comp = dwarf.bin_file.comp;
const diags = &comp.link_diags;
dwarf.updateLineNumber(pt.zcu, ti_id) catch |err| switch (err) {
error.Overflow => return error.Overflow,
error.OutOfMemory => return error.OutOfMemory,
else => |e| return diags.fail("failed to update dwarf line numbers: {s}", .{@errorName(e)}),
};
}
}

View File

@ -18,13 +18,15 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
// Instead of invoking a full-blown `-r` mode on the input which sadly will strip all
// debug info segments/sections (this is apparently by design by Apple), we copy
// the *only* input file over.
// TODO: in the future, when we implement a `dsymutil` alternative directly in the
// Zig compiler, investigate if we can get rid of this `if` prong here.
const path = positionals.items[0].path().?;
const in_file = try path.root_dir.handle.openFile(path.sub_path, .{});
const stat = try in_file.stat();
const amt = try in_file.copyRangeAll(0, macho_file.base.file.?, 0, stat.size);
if (amt != stat.size) return error.InputOutput; // TODO: report an actual user error
const in_file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err|
return diags.fail("failed to open {}: {s}", .{ path, @errorName(err) });
const stat = in_file.stat() catch |err|
return diags.fail("failed to stat {}: {s}", .{ path, @errorName(err) });
const amt = in_file.copyRangeAll(0, macho_file.base.file.?, 0, stat.size) catch |err|
return diags.fail("failed to copy range of file {}: {s}", .{ path, @errorName(err) });
if (amt != stat.size)
return diags.fail("unexpected short write in copy range of file {}", .{path});
return;
}
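The copy above treats a short `copyRangeAll` result as an error, since the call only stops early when the source ends before `stat.size` bytes. A minimal sketch of that check (`copyExact` is illustrative):

const std = @import("std");

// Copy exactly `size` bytes from `in` to `out`, treating a short
// copy (source EOF before `size` bytes) as an error.
fn copyExact(in: std.fs.File, out: std.fs.File, size: u64) !void {
    const amt = try in.copyRangeAll(0, out, 0, size);
    if (amt != size) return error.UnexpectedEndOfFile;
}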
@ -33,14 +35,18 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)});
}
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
try macho_file.parseInputFiles();
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
try macho_file.resolveSymbols();
try macho_file.dedupLiterals();
macho_file.dedupLiterals() catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("failed to update ar size: {s}", .{@errorName(e)}),
};
markExports(macho_file);
claimUnresolved(macho_file);
try initOutputSections(macho_file);
@ -49,7 +55,10 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
try calcSectionSizes(macho_file);
try createSegment(macho_file);
try allocateSections(macho_file);
allocateSections(macho_file) catch |err| switch (err) {
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("failed to allocate sections: {s}", .{@errorName(e)}),
};
allocateSegment(macho_file);
if (build_options.enable_logging) {
@ -93,11 +102,11 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)});
}
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
try parseInputFilesAr(macho_file);
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
// First, we flush the relocatable object file generated with our backends.
if (macho_file.getZigObject()) |zo| {
@ -108,7 +117,8 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
try macho_file.addAtomsToSections();
try calcSectionSizes(macho_file);
try createSegment(macho_file);
try allocateSections(macho_file);
allocateSections(macho_file) catch |err|
return diags.fail("failed to allocate sections: {s}", .{@errorName(err)});
allocateSegment(macho_file);
if (build_options.enable_logging) {
@ -126,8 +136,6 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
const ncmds, const sizeofcmds = try writeLoadCommands(macho_file);
try writeHeader(macho_file, ncmds, sizeofcmds);
// TODO we can avoid reading in the file contents we just wrote if we give the linker
// ability to write directly to a buffer.
try zo.readFileContents(macho_file);
}
@ -152,7 +160,8 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
// Update sizes of contributing objects
for (files.items) |index| {
try macho_file.getFile(index).?.updateArSize(macho_file);
macho_file.getFile(index).?.updateArSize(macho_file) catch |err|
return diags.fail("failed to update ar size: {s}", .{@errorName(err)});
}
// Update file offsets of contributing objects
@ -171,7 +180,7 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
state.file_off = pos;
pos += @sizeOf(Archive.ar_hdr);
pos += mem.alignForward(usize, zo.basename.len + 1, ptr_width);
pos += math.cast(usize, state.size) orelse return error.Overflow;
pos += try macho_file.cast(usize, state.size);
},
.object => |o| {
const state = &o.output_ar_state;
@ -179,7 +188,7 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
state.file_off = pos;
pos += @sizeOf(Archive.ar_hdr);
pos += mem.alignForward(usize, o.path.basename().len + 1, ptr_width);
pos += math.cast(usize, state.size) orelse return error.Overflow;
pos += try macho_file.cast(usize, state.size);
},
else => unreachable,
}
@ -201,7 +210,10 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
try writer.writeAll(Archive.ARMAG);
// Write symtab
try ar_symtab.write(format, macho_file, writer);
ar_symtab.write(format, macho_file, writer) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return diags.fail("failed to write archive symbol table: {s}", .{@errorName(e)}),
};
// Write object files
for (files.items) |index| {
@ -210,15 +222,16 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
if (padding > 0) {
try writer.writeByteNTimes(0, padding);
}
try macho_file.getFile(index).?.writeAr(format, macho_file, writer);
macho_file.getFile(index).?.writeAr(format, macho_file, writer) catch |err|
return diags.fail("failed to write archive: {s}", .{@errorName(err)});
}
assert(buffer.items.len == total_size);
try macho_file.base.file.?.setEndPos(total_size);
try macho_file.base.file.?.pwriteAll(buffer.items, 0);
try macho_file.setEndPos(total_size);
try macho_file.pwriteAll(buffer.items, 0);
if (diags.hasErrors()) return error.FlushFailure;
if (diags.hasErrors()) return error.LinkFailure;
}
fn parseInputFilesAr(macho_file: *MachO) !void {
@ -452,11 +465,10 @@ fn allocateSections(macho_file: *MachO) !void {
for (slice.items(.header)) |*header| {
const needed_size = header.size;
header.size = 0;
const alignment = try math.powi(u32, 2, header.@"align");
const alignment = try macho_file.alignPow(header.@"align");
if (!header.isZerofill()) {
if (needed_size > macho_file.allocatedSize(header.offset)) {
header.offset = math.cast(u32, try macho_file.findFreeSpace(needed_size, alignment)) orelse
return error.Overflow;
header.offset = try macho_file.cast(u32, try macho_file.findFreeSpace(needed_size, alignment));
}
}
if (needed_size > macho_file.allocatedSizeVirtual(header.addr)) {
@ -572,7 +584,7 @@ fn sortRelocs(macho_file: *MachO) void {
}
}
fn writeSections(macho_file: *MachO) !void {
fn writeSections(macho_file: *MachO) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();
@ -583,7 +595,7 @@ fn writeSections(macho_file: *MachO) !void {
for (slice.items(.header), slice.items(.out), slice.items(.relocs), 0..) |header, *out, *relocs, n_sect| {
if (header.isZerofill()) continue;
if (!macho_file.isZigSection(@intCast(n_sect))) { // TODO this is wrong; what about debug sections?
const size = math.cast(usize, header.size) orelse return error.Overflow;
const size = try macho_file.cast(usize, header.size);
try out.resize(gpa, size);
const padding_byte: u8 = if (header.isCode() and cpu_arch == .x86_64) 0xcc else 0;
@memset(out.items, padding_byte);
@ -662,16 +674,16 @@ fn writeSectionsToFile(macho_file: *MachO) !void {
const slice = macho_file.sections.slice();
for (slice.items(.header), slice.items(.out), slice.items(.relocs)) |header, out, relocs| {
try macho_file.base.file.?.pwriteAll(out.items, header.offset);
try macho_file.base.file.?.pwriteAll(mem.sliceAsBytes(relocs.items), header.reloff);
try macho_file.pwriteAll(out.items, header.offset);
try macho_file.pwriteAll(mem.sliceAsBytes(relocs.items), header.reloff);
}
try macho_file.writeDataInCode();
try macho_file.base.file.?.pwriteAll(mem.sliceAsBytes(macho_file.symtab.items), macho_file.symtab_cmd.symoff);
try macho_file.base.file.?.pwriteAll(macho_file.strtab.items, macho_file.symtab_cmd.stroff);
try macho_file.pwriteAll(mem.sliceAsBytes(macho_file.symtab.items), macho_file.symtab_cmd.symoff);
try macho_file.pwriteAll(macho_file.strtab.items, macho_file.symtab_cmd.stroff);
}
fn writeLoadCommands(macho_file: *MachO) !struct { usize, usize } {
fn writeLoadCommands(macho_file: *MachO) error{ LinkFailure, OutOfMemory }!struct { usize, usize } {
const gpa = macho_file.base.comp.gpa;
const needed_size = load_commands.calcLoadCommandsSizeObject(macho_file);
const buffer = try gpa.alloc(u8, needed_size);
@ -686,31 +698,45 @@ fn writeLoadCommands(macho_file: *MachO) !struct { usize, usize } {
{
assert(macho_file.segments.items.len == 1);
const seg = macho_file.segments.items[0];
try writer.writeStruct(seg);
writer.writeStruct(seg) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
};
for (macho_file.sections.items(.header)) |header| {
try writer.writeStruct(header);
writer.writeStruct(header) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
};
}
ncmds += 1;
}
try writer.writeStruct(macho_file.data_in_code_cmd);
writer.writeStruct(macho_file.data_in_code_cmd) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
};
ncmds += 1;
try writer.writeStruct(macho_file.symtab_cmd);
writer.writeStruct(macho_file.symtab_cmd) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
};
ncmds += 1;
try writer.writeStruct(macho_file.dysymtab_cmd);
writer.writeStruct(macho_file.dysymtab_cmd) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
};
ncmds += 1;
if (macho_file.platform.isBuildVersionCompatible()) {
try load_commands.writeBuildVersionLC(macho_file.platform, macho_file.sdk_version, writer);
load_commands.writeBuildVersionLC(macho_file.platform, macho_file.sdk_version, writer) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
};
ncmds += 1;
} else {
try load_commands.writeVersionMinLC(macho_file.platform, macho_file.sdk_version, writer);
load_commands.writeVersionMinLC(macho_file.platform, macho_file.sdk_version, writer) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
};
ncmds += 1;
}
assert(stream.pos == needed_size);
try macho_file.base.file.?.pwriteAll(buffer, @sizeOf(macho.mach_header_64));
try macho_file.pwriteAll(buffer, @sizeOf(macho.mach_header_64));
return .{ ncmds, buffer.len };
}
@ -742,7 +768,7 @@ fn writeHeader(macho_file: *MachO, ncmds: usize, sizeofcmds: usize) !void {
header.ncmds = @intCast(ncmds);
header.sizeofcmds = @intCast(sizeofcmds);
try macho_file.base.file.?.pwriteAll(mem.asBytes(&header), 0);
try macho_file.pwriteAll(mem.asBytes(&header), 0);
}
const std = @import("std");

View File

@ -82,11 +82,17 @@ pub fn deinit(self: *NvPtx) void {
self.llvm_object.deinit();
}
pub fn updateFunc(self: *NvPtx, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(
self: *NvPtx,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) link.File.UpdateNavError!void {
try self.llvm_object.updateFunc(pt, func_index, air, liveness);
}
pub fn updateNav(self: *NvPtx, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !void {
pub fn updateNav(self: *NvPtx, pt: Zcu.PerThread, nav: InternPool.Nav.Index) link.File.UpdateNavError!void {
return self.llvm_object.updateNav(pt, nav);
}
@ -94,7 +100,7 @@ pub fn updateExports(
self: *NvPtx,
pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
export_indices: []const Zcu.Export.Index,
) !void {
if (build_options.skip_non_native and builtin.object_format != .nvptx)
@panic("Attempted to compile for object format that was disabled by build configuration");
@ -102,10 +108,6 @@ pub fn updateExports(
return self.llvm_object.updateExports(pt, exported, export_indices);
}
pub fn freeDecl(self: *NvPtx, decl_index: InternPool.DeclIndex) void {
return self.llvm_object.freeDecl(decl_index);
}
pub fn flush(self: *NvPtx, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
return self.flushModule(arena, tid, prog_node);
}

View File

@ -60,7 +60,7 @@ fn_nav_table: std.AutoArrayHashMapUnmanaged(
data_nav_table: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, []u8) = .empty,
/// When `updateExports` is called, we store the export indices here, to be used
/// during flush.
nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, []u32) = .empty,
nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, []Zcu.Export.Index) = .empty,
lazy_syms: LazySymbolTable = .{},
@ -345,6 +345,7 @@ fn putFn(self: *Plan9, nav_index: InternPool.Nav.Index, out: FnNavOutput) !void
try a.writer().writeInt(u16, 1, .big);
// getting the full file path
// TODO: don't call getcwd here; that is inappropriate
var buf: [std.fs.max_path_bytes]u8 = undefined;
const full_path = try std.fs.path.join(arena, &.{
file.mod.root.root_dir.path orelse try std.posix.getcwd(&buf),
@ -385,7 +386,13 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi
}
}
pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(
self: *Plan9,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .plan9) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@ -397,8 +404,8 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index,
const atom_idx = try self.seeNav(pt, func.owner_nav);
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
var dbg_info_output: DebugInfoOutput = .{
.dbg_line = std.ArrayList(u8).init(gpa),
.start_line = null,
@ -409,7 +416,7 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index,
};
defer dbg_info_output.dbg_line.deinit();
const res = try codegen.generateFunction(
try codegen.generateFunction(
&self.base,
pt,
zcu.navSrcLoc(func.owner_nav),
@ -419,10 +426,7 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index,
&code_buffer,
.{ .plan9 = &dbg_info_output },
);
const code = switch (res) {
.ok => try code_buffer.toOwnedSlice(),
.fail => |em| return zcu.failed_codegen.put(gpa, func.owner_nav, em),
};
const code = try code_buffer.toOwnedSlice(gpa);
self.getAtomPtr(atom_idx).code = .{
.code_ptr = null,
.other = .{ .nav_index = func.owner_nav },
@ -433,11 +437,13 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index,
.start_line = dbg_info_output.start_line.?,
.end_line = dbg_info_output.end_line,
};
try self.putFn(func.owner_nav, out);
// The awkward error handling here is due to putFn calling `std.posix.getcwd` which it should not do.
self.putFn(func.owner_nav, out) catch |err|
return zcu.codegenFail(func.owner_nav, "failed to put fn: {s}", .{@errorName(err)});
return self.updateFinish(pt, func.owner_nav);
}
pub fn updateNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
pub fn updateNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
@ -456,10 +462,10 @@ pub fn updateNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Inde
if (nav_init.typeOf(zcu).hasRuntimeBits(zcu)) {
const atom_idx = try self.seeNav(pt, nav_index);
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
// TODO we need the symbol index for symbol in the table of locals for the containing atom
const res = try codegen.generateSymbol(
try codegen.generateSymbol(
&self.base,
pt,
zcu.navSrcLoc(nav_index),
@ -467,10 +473,7 @@ pub fn updateNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Inde
&code_buffer,
.{ .atom_index = @intCast(atom_idx) },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| return zcu.failed_codegen.put(gpa, nav_index, em),
};
const code = code_buffer.items;
try self.data_nav_table.ensureUnusedCapacity(gpa, 1);
const duped_code = try gpa.dupe(u8, code);
self.getAtomPtr(self.navs.get(nav_index).?.index).code = .{ .code_ptr = null, .other = .{ .nav_index = nav_index } };
@ -529,16 +532,21 @@ fn allocateGotIndex(self: *Plan9) usize {
}
}
pub fn flush(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flush(
self: *Plan9,
arena: Allocator,
tid: Zcu.PerThread.Id,
prog_node: std.Progress.Node,
) link.File.FlushError!void {
const comp = self.base.comp;
const diags = &comp.link_diags;
const use_lld = build_options.have_llvm and comp.config.use_lld;
assert(!use_lld);
switch (link.File.effectiveOutputMode(use_lld, comp.config.output_mode)) {
.Exe => {},
// plan9 object files are totally different
.Obj => return error.TODOImplementPlan9Objs,
.Lib => return error.TODOImplementWritingLibFiles,
.Obj => return diags.fail("writing plan9 object files unimplemented", .{}),
.Lib => return diags.fail("writing plan9 lib files unimplemented", .{}),
}
return self.flushModule(arena, tid, prog_node);
}
@ -583,7 +591,13 @@ fn atomCount(self: *Plan9) usize {
return data_nav_count + fn_nav_count + lazy_atom_count + extern_atom_count + uav_atom_count;
}
pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flushModule(
self: *Plan9,
arena: Allocator,
/// TODO: stop using this
tid: Zcu.PerThread.Id,
prog_node: std.Progress.Node,
) link.File.FlushError!void {
if (build_options.skip_non_native and builtin.object_format != .plan9) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@ -594,6 +608,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
_ = arena; // Has the same lifetime as the call to Compilation.update.
const comp = self.base.comp;
const diags = &comp.link_diags;
const gpa = comp.gpa;
const target = comp.root_mod.resolved_target.result;
@ -605,7 +620,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
defer assert(self.hdr.entry != 0x0);
const pt: Zcu.PerThread = .activate(
self.base.comp.zcu orelse return error.LinkingWithoutZigSourceUnimplemented,
self.base.comp.zcu orelse return diags.fail("linking without zig source unimplemented", .{}),
tid,
);
defer pt.deactivate();
@ -614,22 +629,16 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
if (self.lazy_syms.getPtr(.none)) |metadata| {
// Most lazy symbols can be updated on first use, but
// anyerror needs to wait for everything to be flushed.
if (metadata.text_state != .unused) self.updateLazySymbolAtom(
if (metadata.text_state != .unused) try self.updateLazySymbolAtom(
pt,
.{ .kind = .code, .ty = .anyerror_type },
metadata.text_atom,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
else => |e| e,
};
if (metadata.rodata_state != .unused) self.updateLazySymbolAtom(
);
if (metadata.rodata_state != .unused) try self.updateLazySymbolAtom(
pt,
.{ .kind = .const_data, .ty = .anyerror_type },
metadata.rodata_atom,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
else => |e| e,
};
);
}
for (self.lazy_syms.values()) |*metadata| {
if (metadata.text_state != .unused) metadata.text_state = .flushed;
@ -902,30 +911,29 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
}
}
}
// write it all!
try file.pwritevAll(iovecs, 0);
file.pwritevAll(iovecs, 0) catch |err| return diags.fail("failed to write file: {s}", .{@errorName(err)});
}
fn addNavExports(
self: *Plan9,
mod: *Zcu,
zcu: *Zcu,
nav_index: InternPool.Nav.Index,
export_indices: []const u32,
export_indices: []const Zcu.Export.Index,
) !void {
const gpa = self.base.comp.gpa;
const metadata = self.navs.getPtr(nav_index).?;
const atom = self.getAtom(metadata.index);
for (export_indices) |export_idx| {
const exp = mod.all_exports.items[export_idx];
const exp_name = exp.opts.name.toSlice(&mod.intern_pool);
const exp = export_idx.ptr(zcu);
const exp_name = exp.opts.name.toSlice(&zcu.intern_pool);
// plan9 does not support custom sections
if (exp.opts.section.unwrap()) |section_name| {
if (!section_name.eqlSlice(".text", &mod.intern_pool) and
!section_name.eqlSlice(".data", &mod.intern_pool))
if (!section_name.eqlSlice(".text", &zcu.intern_pool) and
!section_name.eqlSlice(".data", &zcu.intern_pool))
{
try mod.failed_exports.put(mod.gpa, export_idx, try Zcu.ErrorMsg.create(
try zcu.failed_exports.put(zcu.gpa, export_idx, try Zcu.ErrorMsg.create(
gpa,
mod.navSrcLoc(nav_index),
zcu.navSrcLoc(nav_index),
"plan9 does not support extra sections",
.{},
));
@ -947,50 +955,6 @@ fn addNavExports(
}
}
pub fn freeDecl(self: *Plan9, decl_index: InternPool.DeclIndex) void {
const gpa = self.base.comp.gpa;
// TODO audit the lifetimes of decls table entries. It's possible to get
// freeDecl without any updateDecl in between.
const zcu = self.base.comp.zcu.?;
const decl = zcu.declPtr(decl_index);
const is_fn = decl.val.isFuncBody(zcu);
if (is_fn) {
const symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(zcu)).?;
var submap = symidx_and_submap.functions;
if (submap.fetchSwapRemove(decl_index)) |removed_entry| {
gpa.free(removed_entry.value.code);
gpa.free(removed_entry.value.lineinfo);
}
if (submap.count() == 0) {
self.syms.items[symidx_and_submap.sym_index] = aout.Sym.undefined_symbol;
self.syms_index_free_list.append(gpa, symidx_and_submap.sym_index) catch {};
submap.deinit(gpa);
}
} else {
if (self.data_decl_table.fetchSwapRemove(decl_index)) |removed_entry| {
gpa.free(removed_entry.value);
}
}
if (self.decls.fetchRemove(decl_index)) |const_kv| {
var kv = const_kv;
const atom = self.getAtom(kv.value.index);
if (atom.got_index) |i| {
// TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
self.got_index_free_list.append(gpa, i) catch {};
}
if (atom.sym_index) |i| {
self.syms_index_free_list.append(gpa, i) catch {};
self.syms.items[i] = aout.Sym.undefined_symbol;
}
kv.value.exports.deinit(gpa);
}
{
const atom_index = self.decls.get(decl_index).?.index;
const relocs = self.relocs.getPtr(atom_index) orelse return;
relocs.clearAndFree(gpa);
assert(self.relocs.remove(atom_index));
}
}
fn createAtom(self: *Plan9) !Atom.Index {
const gpa = self.base.comp.gpa;
const index = @as(Atom.Index, @intCast(self.atoms.items.len));
@ -1043,7 +1007,7 @@ pub fn updateExports(
self: *Plan9,
pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
export_indices: []const Zcu.Export.Index,
) !void {
const gpa = self.base.comp.gpa;
switch (exported) {
@ -1054,7 +1018,7 @@ pub fn updateExports(
gpa.free(kv.value);
}
try self.nav_exports.ensureUnusedCapacity(gpa, 1);
const duped_indices = try gpa.dupe(u32, export_indices);
const duped_indices = try gpa.dupe(Zcu.Export.Index, export_indices);
self.nav_exports.putAssumeCapacityNoClobber(nav, duped_indices);
},
}
@ -1085,12 +1049,19 @@ pub fn getOrCreateAtomForLazySymbol(self: *Plan9, pt: Zcu.PerThread, lazy_sym: F
return atom;
}
fn updateLazySymbolAtom(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol, atom_index: Atom.Index) !void {
fn updateLazySymbolAtom(
self: *Plan9,
pt: Zcu.PerThread,
sym: File.LazySymbol,
atom_index: Atom.Index,
) error{ LinkFailure, OutOfMemory }!void {
const gpa = pt.zcu.gpa;
const comp = self.base.comp;
const diags = &comp.link_diags;
var required_alignment: InternPool.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
// create the symbol for the name
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{
@ -1107,7 +1078,7 @@ fn updateLazySymbolAtom(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol, a
// generate the code
const src = Type.fromInterned(sym.ty).srcLocOrNull(pt.zcu) orelse Zcu.LazySrcLoc.unneeded;
const res = try codegen.generateLazySymbol(
codegen.generateLazySymbol(
&self.base,
pt,
src,
@ -1116,14 +1087,12 @@ fn updateLazySymbolAtom(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol, a
&code_buffer,
.none,
.{ .atom_index = @intCast(atom_index) },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
log.err("{s}", .{em.msg});
return error.CodegenFail;
},
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.CodegenFail => return error.LinkFailure,
error.Overflow => return diags.fail("codegen failure: encountered number too big for compiler", .{}),
};
const code = code_buffer.items;
// duped_code is freed when the atom is freed
const duped_code = try gpa.dupe(u8, code);
errdefer gpa.free(duped_code);
@ -1283,7 +1252,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
try self.writeSym(writer, sym);
if (self.nav_exports.get(nav_index)) |export_indices| {
for (export_indices) |export_idx| {
const exp = zcu.all_exports.items[export_idx];
const exp = export_idx.ptr(zcu);
if (nav_metadata.getExport(self, exp.opts.name.toSlice(ip))) |exp_i| {
try self.writeSym(writer, self.syms.items[exp_i]);
}
@ -1322,7 +1291,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
try self.writeSym(writer, sym);
if (self.nav_exports.get(nav_index)) |export_indices| {
for (export_indices) |export_idx| {
const exp = zcu.all_exports.items[export_idx];
const exp = export_idx.ptr(zcu);
if (nav_metadata.getExport(self, exp.opts.name.toSlice(ip))) |exp_i| {
const s = self.syms.items[exp_i];
if (mem.eql(u8, s.name, "_start"))
@ -1432,19 +1401,16 @@ pub fn lowerUav(
const got_index = self.allocateGotIndex();
gop.value_ptr.* = index;
// we need to free name later
var code_buffer = std.ArrayList(u8).init(gpa);
const res = try codegen.generateSymbol(&self.base, pt, src_loc, val, &code_buffer, .{ .atom_index = index });
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| return .{ .fail = em },
};
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
try codegen.generateSymbol(&self.base, pt, src_loc, val, &code_buffer, .{ .atom_index = index });
const atom_ptr = self.getAtomPtr(index);
atom_ptr.* = .{
.type = .d,
.offset = undefined,
.sym_index = null,
.got_index = got_index,
.code = Atom.CodePtr.fromSlice(code),
.code = Atom.CodePtr.fromSlice(try code_buffer.toOwnedSlice(gpa)),
};
_ = try atom_ptr.getOrCreateSymbolTableEntry(self);
self.syms.items[atom_ptr.sym_index.?] = .{

View File

@ -122,7 +122,13 @@ pub fn deinit(self: *SpirV) void {
self.object.deinit();
}
pub fn updateFunc(self: *SpirV, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(
self: *SpirV,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@ -134,7 +140,7 @@ pub fn updateFunc(self: *SpirV, pt: Zcu.PerThread, func_index: InternPool.Index,
try self.object.updateFunc(pt, func_index, air, liveness);
}
pub fn updateNav(self: *SpirV, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !void {
pub fn updateNav(self: *SpirV, pt: Zcu.PerThread, nav: InternPool.Nav.Index) link.File.UpdateNavError!void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@ -149,7 +155,7 @@ pub fn updateExports(
self: *SpirV,
pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
export_indices: []const Zcu.Export.Index,
) !void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
@ -184,7 +190,7 @@ pub fn updateExports(
};
for (export_indices) |export_idx| {
const exp = zcu.all_exports.items[export_idx];
const exp = export_idx.ptr(zcu);
try self.object.spv.declareEntryPoint(
spv_decl_index,
exp.opts.name.toSlice(ip),
@ -196,16 +202,21 @@ pub fn updateExports(
// TODO: Export regular functions, variables, etc using Linkage attributes.
}
pub fn freeDecl(self: *SpirV, decl_index: InternPool.DeclIndex) void {
_ = self;
_ = decl_index;
}
pub fn flush(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
return self.flushModule(arena, tid, prog_node);
}
pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flushModule(
self: *SpirV,
arena: Allocator,
tid: Zcu.PerThread.Id,
prog_node: std.Progress.Node,
) link.File.FlushError!void {
// The goal is to never use this because it's only needed if we need to
// write to InternPool, but flushModule is too late to be writing to the
// InternPool.
_ = tid;
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@ -216,12 +227,11 @@ pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
const sub_prog_node = prog_node.start("Flush Module", 0);
defer sub_prog_node.end();
const spv = &self.object.spv;
const comp = self.base.comp;
const spv = &self.object.spv;
const diags = &comp.link_diags;
const gpa = comp.gpa;
const target = comp.getTarget();
_ = tid;
try writeCapabilities(spv, target);
try writeMemoryModel(spv, target);
@ -264,13 +274,11 @@ pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
const linked_module = self.linkModule(arena, module, sub_prog_node) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |other| {
log.err("error while linking: {s}", .{@errorName(other)});
return error.FlushFailure;
},
else => |other| return diags.fail("error while linking: {s}", .{@errorName(other)}),
};
try self.base.file.?.writeAll(std.mem.sliceAsBytes(linked_module));
self.base.file.?.writeAll(std.mem.sliceAsBytes(linked_module)) catch |err|
return diags.fail("failed to write: {s}", .{@errorName(err)});
}
fn linkModule(self: *SpirV, a: Allocator, module: []Word, progress: std.Progress.Node) ![]Word {

File diff suppressed because it is too large

View File

@ -142,8 +142,18 @@ pub fn parse(gpa: Allocator, file_contents: []const u8) !Archive {
/// Starting at the given file offset, scans for a file header.
/// When found, parses the object file into an `Object` and returns it.
pub fn parseObject(archive: Archive, wasm: *Wasm, file_contents: []const u8, path: Path) !Object {
const header = mem.bytesAsValue(Header, file_contents[0..@sizeOf(Header)]);
pub fn parseObject(
archive: Archive,
wasm: *Wasm,
file_contents: []const u8,
object_offset: u32,
path: Path,
host_name: Wasm.OptionalString,
scratch_space: *Object.ScratchSpace,
must_link: bool,
gc_sections: bool,
) !Object {
const header = mem.bytesAsValue(Header, file_contents[object_offset..][0..@sizeOf(Header)]);
if (!mem.eql(u8, &header.fmag, ARFMAG)) return error.BadHeaderDelimiter;
const name_or_index = try header.nameOrIndex();
@ -157,8 +167,9 @@ pub fn parseObject(archive: Archive, wasm: *Wasm, file_contents: []const u8, pat
};
const object_file_size = try header.parsedSize();
const contents = file_contents[object_offset + @sizeOf(Header) ..][0..object_file_size];
return Object.create(wasm, file_contents[@sizeOf(Header)..][0..object_file_size], path, object_name);
return Object.parse(wasm, contents, path, object_name, host_name, scratch_space, must_link, gc_sections);
}
const Archive = @This();

src/link/Wasm/Flush.zig (new file, 1975 lines)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,210 +0,0 @@
//! Represents a WebAssembly symbol, containing all of its properties
//! as well as providing helper methods to determine its functionality
//! and how it will/must be linked.
//! The name of the symbol can be found by providing the offset, found
//! on the `name` field, to a string table in the wasm binary or object file.
/// Bitfield containing flags for a symbol
/// Can contain any of the flags defined in `Flag`
flags: u32,
/// Symbol name, when the symbol is undefined the name will be taken from the import.
/// Note: This is an index into the wasm string table.
name: wasm.String,
/// Index into the list of objects, interpreted according to the set `tag`.
/// NOTE: This will be set to `undefined` when `tag` is `data`
/// and the symbol is undefined.
index: u32,
/// Represents the kind of the symbol, such as a function or global.
tag: Tag,
/// Contains the virtual address of the symbol, relative to the start of its section.
/// This differs from the offset of an `Atom` which is relative to the start of a segment.
virtual_address: u32,
/// Represents a symbol index where `null` represents an invalid index.
pub const Index = enum(u32) {
null,
_,
};
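// A sketch of the intended usage (the caller below is hypothetical): `.null`
// stands in for "no symbol", so call sites test it explicitly instead of
// carrying an optional:
//
//   if (sym_index != .null) resolveSymbol(sym_index);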
pub const Tag = enum {
function,
data,
global,
section,
event,
table,
/// synthetic kind used by the wasm linker during incremental compilation
/// to note that a symbol has been freed, but still lives in the symbol list.
dead,
undefined,
/// From a given symbol tag, returns the `std.wasm.ExternalKind`.
/// Asserts the given tag can be represented as an external kind.
pub fn externalType(tag: Tag) std.wasm.ExternalKind {
return switch (tag) {
.function => .function,
.global => .global,
.data => unreachable, // Data symbols will generate a global
.section => unreachable, // Not an external type
.event => unreachable, // Not an external type
.dead => unreachable, // Dead symbols should not be referenced
.undefined => unreachable,
.table => .table,
};
}
};
pub const Flag = enum(u32) {
/// Indicates a weak symbol.
/// When linking multiple modules defining the same symbol, all weak definitions are discarded
/// in favour of the strong definition. When no strong definition exists, all but one of the weak definitions are discarded.
/// If multiple definitions remain, we get an error: symbol collision.
WASM_SYM_BINDING_WEAK = 0x1,
/// Indicates a local, non-exported, non-module-linked symbol.
/// The names of local symbols are not required to be unique, unlike non-local symbols.
WASM_SYM_BINDING_LOCAL = 0x2,
/// Represents the binding of a symbol, indicating if it's local or not, and weak or not.
WASM_SYM_BINDING_MASK = 0x3,
/// Indicates a hidden symbol. Hidden symbols will not be exported to the link result, but may
/// link to other modules.
WASM_SYM_VISIBILITY_HIDDEN = 0x4,
/// Indicates an undefined symbol. For non-data symbols, this must match whether the symbol is
/// an import or is defined. For data symbols, however, it determines whether a segment is specified.
WASM_SYM_UNDEFINED = 0x10,
/// Indicates a symbol that is intended to be exported from the wasm module to the host environment.
/// This differs from the visibility flag as this flag affects the static linker.
WASM_SYM_EXPORTED = 0x20,
/// Indicates the symbol uses an explicit symbol name, rather than reusing the name from a wasm import.
/// Allows remapping imports from foreign WASM modules into local symbols with a different name.
WASM_SYM_EXPLICIT_NAME = 0x40,
/// Indicates the symbol is to be included in the linker output, regardless of whether it is used or has any references to it.
WASM_SYM_NO_STRIP = 0x80,
/// Indicates a symbol is TLS
WASM_SYM_TLS = 0x100,
/// Zig-specific flag. Uses the most significant bit of the flags to annotate whether a symbol is
/// alive or not. Dead symbols are allowed to be garbage collected.
alive = 0x80000000,
};
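// Illustrative only: the two binding bits are read through the mask, which is
// what the helper methods below encapsulate:
//
//   const binding = symbol.flags & @intFromEnum(Flag.WASM_SYM_BINDING_MASK);
//   const is_weak = binding == @intFromEnum(Flag.WASM_SYM_BINDING_WEAK);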
/// Returns whether the given symbol should be imported from the
/// host environment.
pub fn requiresImport(symbol: Symbol) bool {
if (symbol.tag == .data) return false;
if (!symbol.isUndefined()) return false;
if (symbol.isWeak()) return false;
// if (symbol.isDefined() and symbol.isWeak()) return true; //TODO: Only when building shared lib
return true;
}
/// Marks a symbol as 'alive', ensuring the garbage collector will not collect it.
pub fn mark(symbol: *Symbol) void {
symbol.flags |= @intFromEnum(Flag.alive);
}
pub fn unmark(symbol: *Symbol) void {
symbol.flags &= ~@intFromEnum(Flag.alive);
}
pub fn isAlive(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.alive) != 0;
}
pub fn isDead(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.alive) == 0;
}
pub fn isTLS(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.WASM_SYM_TLS) != 0;
}
pub fn hasFlag(symbol: Symbol, flag: Flag) bool {
return symbol.flags & @intFromEnum(flag) != 0;
}
pub fn setFlag(symbol: *Symbol, flag: Flag) void {
symbol.flags |= @intFromEnum(flag);
}
pub fn isUndefined(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.WASM_SYM_UNDEFINED) != 0;
}
pub fn setUndefined(symbol: *Symbol, is_undefined: bool) void {
if (is_undefined) {
symbol.setFlag(.WASM_SYM_UNDEFINED);
} else {
symbol.flags &= ~@intFromEnum(Flag.WASM_SYM_UNDEFINED);
}
}
pub fn setGlobal(symbol: *Symbol, is_global: bool) void {
if (is_global) {
symbol.flags &= ~@intFromEnum(Flag.WASM_SYM_BINDING_LOCAL);
} else {
symbol.setFlag(.WASM_SYM_BINDING_LOCAL);
}
}
pub fn isDefined(symbol: Symbol) bool {
return !symbol.isUndefined();
}
pub fn isVisible(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.WASM_SYM_VISIBILITY_HIDDEN) == 0;
}
pub fn isLocal(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.WASM_SYM_BINDING_LOCAL) != 0;
}
pub fn isGlobal(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.WASM_SYM_BINDING_LOCAL) == 0;
}
pub fn isHidden(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.WASM_SYM_VISIBILITY_HIDDEN) != 0;
}
pub fn isNoStrip(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.WASM_SYM_NO_STRIP) != 0;
}
pub fn isExported(symbol: Symbol, is_dynamic: bool) bool {
if (symbol.isUndefined() or symbol.isLocal()) return false;
if (is_dynamic and symbol.isVisible()) return true;
return symbol.hasFlag(.WASM_SYM_EXPORTED);
}
pub fn isWeak(symbol: Symbol) bool {
return symbol.flags & @intFromEnum(Flag.WASM_SYM_BINDING_WEAK) != 0;
}
/// Formats the symbol into human-readable text
pub fn format(symbol: Symbol, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = fmt;
_ = options;
const kind_fmt: u8 = switch (symbol.tag) {
.function => 'F',
.data => 'D',
.global => 'G',
.section => 'S',
.event => 'E',
.table => 'T',
.dead => '-',
.undefined => unreachable,
};
const visible: []const u8 = if (symbol.isVisible()) "yes" else "no";
const binding: []const u8 = if (symbol.isLocal()) "local" else "global";
const undef: []const u8 = if (symbol.isUndefined()) "undefined" else "";
try writer.print(
"{c} binding={s} visible={s} id={d} name_offset={d} {s}",
.{ kind_fmt, binding, visible, symbol.index, symbol.name, undef },
);
}
const std = @import("std");
const Symbol = @This();
const wasm = @import("../Wasm.zig");

File diff suppressed because it is too large

View File

@ -75,6 +75,10 @@ pub fn fatal(comptime format: []const u8, args: anytype) noreturn {
process.exit(1);
}
/// Shaming all the locations that inappropriately use an O(N) search algorithm.
/// Please delete this and fix the compilation errors!
pub const @"bad O(N)" = void;
const normal_usage =
\\Usage: zig [command] [options]
\\

View File

@ -14,19 +14,14 @@ const link = @import("link.zig");
const log = std.log.scoped(.register_manager);
pub const AllocateRegistersError = error{
/// No registers are available anymore
pub const AllocationError = error{
OutOfRegisters,
/// Can happen when spilling an instruction in codegen runs out of
/// memory, so we propagate that error
OutOfMemory,
/// Can happen when spilling an instruction in codegen triggers integer
/// overflow, so we propagate that error
/// Compiler was asked to operate on a number larger than supported.
Overflow,
/// Can happen when spilling an instruction triggers a codegen
/// error, so we propagate that error
/// Indicates the error is already stored in `failed_codegen` on the Zcu.
CodegenFail,
} || link.File.UpdateDebugInfoError;
};
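// A hedged sketch of how a backend might consume this error set; `self.fail`
// and the register class `gp` are assumptions, not part of this file:
//
//   const reg = self.register_manager.allocReg(inst, gp) catch |err| switch (err) {
//       error.OutOfRegisters => return self.fail("ran out of registers", .{}),
//       else => |e| return e,
//   };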
pub fn RegisterManager(
comptime Function: type,
@ -281,7 +276,7 @@ pub fn RegisterManager(
comptime count: comptime_int,
insts: [count]?Air.Inst.Index,
register_class: RegisterBitSet,
) AllocateRegistersError![count]Register {
) AllocationError![count]Register {
comptime assert(count > 0 and count <= tracked_registers.len);
var locked_registers = self.locked_registers;
@ -338,7 +333,7 @@ pub fn RegisterManager(
self: *Self,
inst: ?Air.Inst.Index,
register_class: RegisterBitSet,
) AllocateRegistersError!Register {
) AllocationError!Register {
return (try self.allocRegs(1, .{inst}, register_class))[0];
}
@ -349,7 +344,7 @@ pub fn RegisterManager(
self: *Self,
tracked_index: TrackedIndex,
inst: ?Air.Inst.Index,
) AllocateRegistersError!void {
) AllocationError!void {
log.debug("getReg {} for inst {?}", .{ regAtTrackedIndex(tracked_index), inst });
if (!self.isRegIndexFree(tracked_index)) {
// Move the instruction that was previously there to a
@ -362,7 +357,7 @@ pub fn RegisterManager(
}
self.getRegIndexAssumeFree(tracked_index, inst);
}
pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) AllocateRegistersError!void {
pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) AllocationError!void {
log.debug("getting reg: {}", .{reg});
return self.getRegIndex(indexOfRegIntoTracked(reg) orelse return, inst);
}
@ -370,7 +365,7 @@ pub fn RegisterManager(
self: *Self,
comptime reg: Register,
inst: ?Air.Inst.Index,
) AllocateRegistersError!void {
) AllocationError!void {
return self.getRegIndex((comptime indexOfRegIntoTracked(reg)) orelse return, inst);
}

View File

@ -31,8 +31,6 @@ test {
_ = @import("behavior/error.zig");
_ = @import("behavior/eval.zig");
_ = @import("behavior/export_builtin.zig");
_ = @import("behavior/export_self_referential_type_info.zig");
_ = @import("behavior/extern.zig");
_ = @import("behavior/field_parent_ptr.zig");
_ = @import("behavior/floatop.zig");
_ = @import("behavior/fn.zig");
@ -45,7 +43,6 @@ test {
_ = @import("behavior/hasfield.zig");
_ = @import("behavior/if.zig");
_ = @import("behavior/import.zig");
_ = @import("behavior/import_c_keywords.zig");
_ = @import("behavior/incomplete_struct_param_tld.zig");
_ = @import("behavior/inline_switch.zig");
_ = @import("behavior/int128.zig");
@ -127,6 +124,16 @@ test {
{
_ = @import("behavior/export_keyword.zig");
}
if (!builtin.cpu.arch.isWasm()) {
// Due to lack of support for importing/exporting globals
// (https://github.com/ziglang/zig/issues/4866), these tests correctly
// cause linker errors, since a data symbol cannot be exported when
// building an executable.
_ = @import("behavior/export_self_referential_type_info.zig");
_ = @import("behavior/extern.zig");
_ = @import("behavior/import_c_keywords.zig");
}
}
// This bug only repros in the root file

View File

@ -6,6 +6,11 @@ test "exporting enum value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.cpu.arch.isWasm()) {
// https://github.com/ziglang/zig/issues/4866
return error.SkipZigTest;
}
const S = struct {
const E = enum(c_int) { one, two };
const e: E = .two;
@ -33,6 +38,11 @@ test "exporting using namespace access" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.cpu.arch.isWasm()) {
// https://github.com/ziglang/zig/issues/4866
return error.SkipZigTest;
}
const S = struct {
const Inner = struct {
const x: u32 = 5;
@ -46,7 +56,6 @@ test "exporting using namespace access" {
}
test "exporting comptime-known value" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
(builtin.target.ofmt != .elf and
@ -56,6 +65,11 @@ test "exporting comptime-known value" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.cpu.arch.isWasm()) {
// https://github.com/ziglang/zig/issues/4866
return error.SkipZigTest;
}
const x: u32 = 10;
@export(&x, .{ .name = "exporting_comptime_known_value_foo" });
const S = struct {

View File

@ -1,6 +1,7 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted
#update=initial version
#file=main.zig
const std = @import("std");

View File

@ -1,6 +1,7 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted
#update=initial version
#file=main.zig
const std = @import("std");

View File

@ -1,4 +1,5 @@
#target=x86_64-linux-selfhosted
#target=wasm32-wasi-selfhosted
#update=initial version
#file=main.zig
const std = @import("std");

View File

@ -1,4 +1,5 @@
#target=x86_64-linux-selfhosted
#target=wasm32-wasi-selfhosted
#update=initial version
#file=main.zig
const std = @import("std");

View File

@ -1,6 +1,7 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted
#update=initial version
#file=main.zig
pub fn main() !void {

View File

@ -1,6 +1,7 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted
#update=initial version
#file=main.zig
const S = extern struct { x: u8, y: u8 };

View File

@ -1,6 +1,7 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted
#update=initial version with compile error
#file=main.zig
comptime {

View File

@ -1,6 +1,7 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted
#update=initial version
#file=main.zig
pub fn main() void {}

View File

@ -1,6 +1,7 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted
#update=initial version with error
#file=main.zig
pub fn main() !void {

View File

@ -1,6 +1,7 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted
#update=initial version
#file=main.zig
const std = @import("std");

View File

@ -1,6 +1,7 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted
#update=initial version
#file=main.zig
const std = @import("std");

View File

@ -1,6 +1,7 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted
#update=initial version
#file=main.zig
const std = @import("std");

View File

@ -1,6 +1,7 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted
#update=initial version
#file=main.zig
pub fn main() !void {

View File

@ -1,6 +1,7 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted
#update=initial version
#file=main.zig
const MyEnum = enum(u8) {

View File

@ -1,6 +1,7 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted
#update=initial version
#file=main.zig
const E = enum { a, b, c };

View File

@ -1,6 +1,7 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted
#update=initial version
#file=main.zig
const std = @import("std");

View File

@ -1,6 +1,7 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted
#update=initial version
#file=main.zig
const SomeType = u32;

View File

@ -1,6 +1,7 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#target=wasm32-wasi-selfhosted
#update=initial version
#file=main.zig
const std = @import("std");

View File

@ -24,9 +24,6 @@
.wasm_basic_features = .{
.path = "wasm/basic-features",
},
.wasm_bss = .{
.path = "wasm/bss",
},
.wasm_export = .{
.path = "wasm/export",
},
@ -48,9 +45,6 @@
.wasm_producers = .{
.path = "wasm/producers",
},
.wasm_segments = .{
.path = "wasm/segments",
},
.wasm_shared_memory = .{
.path = "wasm/shared-memory",
},

View File

@ -1,7 +1,5 @@
const std = @import("std");
pub const requires_stage2 = true;
pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test it");
b.default_step = test_step;

View File

@ -1,7 +1,5 @@
const std = @import("std");
pub const requires_stage2 = true;
pub fn build(b: *std.Build) void {
// Library with explicitly set cpu features
const lib = b.addExecutable(.{

View File

@ -1,95 +0,0 @@
const std = @import("std");
pub const requires_stage2 = true;
pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test");
b.default_step = test_step;
add(b, test_step, .Debug, true);
add(b, test_step, .ReleaseFast, false);
add(b, test_step, .ReleaseSmall, false);
add(b, test_step, .ReleaseSafe, true);
}
fn add(b: *std.Build, test_step: *std.Build.Step, optimize_mode: std.builtin.OptimizeMode, is_safe: bool) void {
{
const lib = b.addExecutable(.{
.name = "lib",
.root_module = b.createModule(.{
.root_source_file = b.path("lib.zig"),
.target = b.resolveTargetQuery(.{ .cpu_arch = .wasm32, .os_tag = .freestanding }),
.optimize = optimize_mode,
.strip = false,
}),
});
lib.entry = .disabled;
lib.use_llvm = false;
lib.use_lld = false;
// to make sure the bss segment is emitted, we must import memory
lib.import_memory = true;
lib.link_gc_sections = false;
const check_lib = lib.checkObject();
// since we import memory, make sure it exists with the correct naming
check_lib.checkInHeaders();
check_lib.checkExact("Section import");
check_lib.checkExact("entries 1");
check_lib.checkExact("module env"); // default module name is "env"
check_lib.checkExact("name memory"); // as per linker specification
// since we are importing memory, ensure it's not exported
check_lib.checkInHeaders();
check_lib.checkNotPresent("Section export");
// validate the name of the stack pointer
check_lib.checkInHeaders();
check_lib.checkExact("Section custom");
check_lib.checkExact("type data_segment");
check_lib.checkExact("names 2");
check_lib.checkExact("index 0");
check_lib.checkExact("name .rodata");
// for safe optimization modes `undefined` is stored in data instead of bss.
if (is_safe) {
check_lib.checkExact("index 1");
check_lib.checkExact("name .data");
check_lib.checkNotPresent("name .bss");
} else {
check_lib.checkExact("index 1"); // bss section always last
check_lib.checkExact("name .bss");
}
test_step.dependOn(&check_lib.step);
}
// verify a zero'd declaration is stored in bss for all optimization modes.
{
const lib = b.addExecutable(.{
.name = "lib",
.root_module = b.createModule(.{
.root_source_file = b.path("lib2.zig"),
.target = b.resolveTargetQuery(.{ .cpu_arch = .wasm32, .os_tag = .freestanding }),
.optimize = optimize_mode,
.strip = false,
}),
});
lib.entry = .disabled;
lib.use_llvm = false;
lib.use_lld = false;
// to make sure the bss segment is emitted, we must import memory
lib.import_memory = true;
lib.link_gc_sections = false;
const check_lib = lib.checkObject();
check_lib.checkInHeaders();
check_lib.checkExact("Section custom");
check_lib.checkExact("type data_segment");
check_lib.checkExact("names 2");
check_lib.checkExact("index 0");
check_lib.checkExact("name .rodata");
check_lib.checkExact("index 1");
check_lib.checkExact("name .bss");
test_step.dependOn(&check_lib.step);
}
}

View File

@ -1,5 +0,0 @@
pub var bss: u32 = undefined;
export fn foo() void {
_ = bss;
}

View File

@ -1,5 +0,0 @@
pub var bss: u32 = 0;
export fn foo() void {
_ = bss;
}

View File

@ -4,48 +4,24 @@ pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test");
b.default_step = test_step;
if (@import("builtin").os.tag == .windows) {
// TODO: Fix open handle in wasm-linker preventing rename from working on Windows.
return;
}
const lib = b.addExecutable(.{
.name = "lib",
.root_module = b.createModule(.{
.root_source_file = b.path("lib.zig"),
.optimize = .ReleaseSafe, // to make the output deterministic in address positions
.optimize = .Debug,
.target = b.resolveTargetQuery(.{ .cpu_arch = .wasm32, .os_tag = .freestanding }),
}),
});
lib.entry = .disabled;
lib.use_lld = false;
lib.root_module.export_symbol_names = &.{ "foo", "bar" };
lib.global_base = 0; // put data section at address 0 to make data symbols easier to parse
// The object being linked has neither functions nor globals named "foo" or "bar", and
// so these names correctly fail to be exported when creating an executable.
lib.expect_errors = .{ .exact = &.{
"error: manually specified export name 'foo' undefined",
"error: manually specified export name 'bar' undefined",
} };
_ = lib.getEmittedBin();
const check_lib = lib.checkObject();
check_lib.checkInHeaders();
check_lib.checkExact("Section global");
check_lib.checkExact("entries 3");
check_lib.checkExact("type i32"); // stack pointer so skip other fields
check_lib.checkExact("type i32");
check_lib.checkExact("mutable false");
check_lib.checkExtract("i32.const {foo_address}");
check_lib.checkExact("type i32");
check_lib.checkExact("mutable false");
check_lib.checkExtract("i32.const {bar_address}");
check_lib.checkComputeCompare("foo_address", .{ .op = .eq, .value = .{ .literal = 4 } });
check_lib.checkComputeCompare("bar_address", .{ .op = .eq, .value = .{ .literal = 0 } });
check_lib.checkInHeaders();
check_lib.checkExact("Section export");
check_lib.checkExact("entries 3");
check_lib.checkExact("name foo");
check_lib.checkExact("kind global");
check_lib.checkExact("index 1");
check_lib.checkExact("name bar");
check_lib.checkExact("kind global");
check_lib.checkExact("index 2");
test_step.dependOn(&check_lib.step);
test_step.dependOn(&lib.step);
}

View File

@ -1,22 +1,17 @@
const std = @import("std");
pub const requires_stage2 = true;
pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test it");
b.default_step = test_step;
add(b, test_step, .Debug);
add(b, test_step, .ReleaseFast);
add(b, test_step, .ReleaseSmall);
add(b, test_step, .ReleaseSafe);
}
fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const no_export = b.addExecutable(.{
.name = "no-export",
.root_module = b.createModule(.{
.root_source_file = b.path("main.zig"),
.root_source_file = b.path("main-hidden.zig"),
.optimize = optimize,
.target = b.resolveTargetQuery(.{ .cpu_arch = .wasm32, .os_tag = .freestanding }),
}),
@ -41,7 +36,7 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize
const force_export = b.addExecutable(.{
.name = "force",
.root_module = b.createModule(.{
.root_source_file = b.path("main.zig"),
.root_source_file = b.path("main-hidden.zig"),
.optimize = optimize,
.target = b.resolveTargetQuery(.{ .cpu_arch = .wasm32, .os_tag = .freestanding }),
}),

View File

@ -0,0 +1,4 @@
fn foo() callconv(.c) void {}
comptime {
@export(&foo, .{ .name = "foo", .visibility = .hidden });
}

View File

@ -1,15 +1,10 @@
const std = @import("std");
pub const requires_stage2 = true;
pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test it");
b.default_step = test_step;
add(b, test_step, .Debug);
add(b, test_step, .ReleaseFast);
add(b, test_step, .ReleaseSmall);
add(b, test_step, .ReleaseSafe);
}
fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {

View File

@ -1,32 +1,13 @@
const std = @import("std");
pub const requires_stage2 = true;
pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test it");
b.default_step = test_step;
add(b, test_step, .Debug);
add(b, test_step, .ReleaseFast);
add(b, test_step, .ReleaseSmall);
add(b, test_step, .ReleaseSafe);
}
fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const import_table = b.addExecutable(.{
.name = "import_table",
.root_module = b.createModule(.{
.root_source_file = b.path("lib.zig"),
.target = b.resolveTargetQuery(.{ .cpu_arch = .wasm32, .os_tag = .freestanding }),
.optimize = optimize,
}),
});
import_table.entry = .disabled;
import_table.use_llvm = false;
import_table.use_lld = false;
import_table.import_table = true;
import_table.link_gc_sections = false;
const export_table = b.addExecutable(.{
.name = "export_table",
.root_module = b.createModule(.{
@ -54,24 +35,12 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize
regular_table.use_lld = false;
regular_table.link_gc_sections = false; // Ensure function table is not empty
const check_import = import_table.checkObject();
const check_export = export_table.checkObject();
const check_regular = regular_table.checkObject();
check_import.checkInHeaders();
check_import.checkExact("Section import");
check_import.checkExact("entries 1");
check_import.checkExact("module env");
check_import.checkExact("name __indirect_function_table");
check_import.checkExact("kind table");
check_import.checkExact("type funcref");
check_import.checkExact("min 1"); // 1 function pointer
check_import.checkNotPresent("max"); // when importing, we do not provide a max
check_import.checkNotPresent("Section table"); // we're importing it
check_export.checkInHeaders();
check_export.checkExact("Section export");
check_export.checkExact("entries 2");
check_export.checkExact("entries 3");
check_export.checkExact("name __indirect_function_table"); // as per linker specification
check_export.checkExact("kind table");
@ -89,7 +58,6 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize
check_regular.checkExact("i32.const 1"); // we want to start function indexes at 1
check_regular.checkExact("indexes 1"); // 1 function pointer
test_step.dependOn(&check_import.step);
test_step.dependOn(&check_export.step);
test_step.dependOn(&check_regular.step);
}

View File

@ -1,7 +1,5 @@
const std = @import("std");
pub const requires_stage2 = true;
pub fn build(b: *std.Build) void {
// Wasm Object file which we will use to infer the features from
const c_obj = b.addObject(.{
@ -37,27 +35,10 @@ pub fn build(b: *std.Build) void {
lib.use_lld = false;
lib.root_module.addObject(c_obj);
// Verify the result contains the features from the C Object file.
const check = lib.checkObject();
check.checkInHeaders();
check.checkExact("name target_features");
check.checkExact("features 14");
check.checkExact("+ atomics");
check.checkExact("+ bulk-memory");
check.checkExact("+ exception-handling");
check.checkExact("+ extended-const");
check.checkExact("+ half-precision");
check.checkExact("+ multimemory");
check.checkExact("+ multivalue");
check.checkExact("+ mutable-globals");
check.checkExact("+ nontrapping-fptoint");
check.checkExact("+ reference-types");
check.checkExact("+ relaxed-simd");
check.checkExact("+ sign-ext");
check.checkExact("+ simd128");
check.checkExact("+ tail-call");
lib.expect_errors = .{ .contains = "error: object requires atomics but specified target features exclude atomics" };
_ = lib.getEmittedBin();
const test_step = b.step("test", "Run linker test");
test_step.dependOn(&check.step);
test_step.dependOn(&lib.step);
b.default_step = test_step;
}
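
As a hedged aside on the feature-inference checks above: the enabled wasm proposals are also queryable from Zig code, which can help when reproducing the "object requires atomics" error locally. A small illustrative sketch (for a native build; nothing here is part of this test):

const std = @import("std");
const builtin = @import("builtin");

// Prints whether the atomics feature is enabled for the compilation target,
// using the generated std.Target.wasm feature-set helpers.
pub fn main() void {
    const has_atomics = std.Target.wasm.featureSetHas(builtin.cpu.features, .atomics);
    std.debug.print("atomics enabled: {}\n", .{has_atomics});
}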

View File

@ -1,8 +1,6 @@
const std = @import("std");
const builtin = @import("builtin");
pub const requires_stage2 = true;
pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test it");
b.default_step = test_step;

View File

@ -1,48 +0,0 @@
const std = @import("std");
pub const requires_stage2 = true;
pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test it");
b.default_step = test_step;
add(b, test_step, .Debug);
add(b, test_step, .ReleaseFast);
add(b, test_step, .ReleaseSmall);
add(b, test_step, .ReleaseSafe);
}
fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const lib = b.addExecutable(.{
.name = "lib",
.root_module = b.createModule(.{
.root_source_file = b.path("lib.zig"),
.target = b.resolveTargetQuery(.{ .cpu_arch = .wasm32, .os_tag = .freestanding }),
.optimize = optimize,
.strip = false,
}),
});
lib.entry = .disabled;
lib.use_llvm = false;
lib.use_lld = false;
lib.link_gc_sections = false; // so data is not garbage-collected and we can verify the data section
b.installArtifact(lib);
const check_lib = lib.checkObject();
check_lib.checkInHeaders();
check_lib.checkExact("Section data");
check_lib.checkExact("entries 2"); // rodata & data, no bss because we're exporting memory
check_lib.checkInHeaders();
check_lib.checkExact("Section custom");
check_lib.checkInHeaders();
check_lib.checkExact("name name"); // names custom section
check_lib.checkInHeaders();
check_lib.checkExact("type data_segment");
check_lib.checkExact("names 2");
check_lib.checkExact("index 0");
check_lib.checkExact("name .rodata");
check_lib.checkExact("index 1");
check_lib.checkExact("name .data");
test_step.dependOn(&check_lib.step);
}

View File

@ -1,9 +0,0 @@
pub const rodata: u32 = 5;
pub var data: u32 = 10;
pub var bss: u32 = undefined;
export fn foo() void {
_ = rodata;
_ = data;
_ = bss;
}
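
One hedged note on why bss above yields no data segment: because this test exports rather than imports linear memory, fresh memory is guaranteed zero-filled, so the linker can place bss without emitting any bytes; only .rodata and .data need segments, matching the "entries 2" check. An equivalent standalone sketch:

// Hypothetical mirror of lib.zig above: `zeroed` is placed in zero-filled
// memory (no data segment emitted), while `initialized` needs a .data entry.
pub var zeroed: u32 = undefined;
pub var initialized: u32 = 10;

export fn addrSum() usize {
    return @intFromPtr(&zeroed) +% @intFromPtr(&initialized);
}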

View File

@ -6,8 +6,6 @@ pub fn build(b: *std.Build) void {
add(b, test_step, .Debug);
add(b, test_step, .ReleaseFast);
add(b, test_step, .ReleaseSmall);
add(b, test_step, .ReleaseSafe);
}
fn add(b: *std.Build, test_step: *std.Build.Step, optimize_mode: std.builtin.OptimizeMode) void {
@ -45,6 +43,7 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize_mode: std.builtin.Opt
check_exe.checkInHeaders();
check_exe.checkExact("Section export");
check_exe.checkExact("entries 2");
check_exe.checkExact("name foo");
check_exe.checkExact("name memory"); // ensure we also export memory again
// This section *must* be emitted as the start function is set to the index
@ -71,23 +70,27 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize_mode: std.builtin.Opt
check_exe.checkExact("type function");
if (optimize_mode == .Debug) {
check_exe.checkExact("name __wasm_init_memory");
check_exe.checkExact("name __wasm_init_tls");
}
check_exe.checkExact("name __wasm_init_tls");
check_exe.checkExact("type global");
// In debug mode the symbol __tls_base is resolved to an undefined symbol
// from the object file, hence its placement differs from release modes,
// where the entire TLS segment is optimized away and __tls_base keeps its
// original position. (A minimal threadlocal example follows these checks.)
check_exe.checkExact("name __tls_base");
check_exe.checkExact("name __tls_size");
check_exe.checkExact("name __tls_align");
check_exe.checkExact("type data_segment");
if (optimize_mode == .Debug) {
check_exe.checkExact("name __tls_base");
check_exe.checkExact("name __tls_size");
check_exe.checkExact("name __tls_align");
check_exe.checkExact("type data_segment");
check_exe.checkExact("names 1");
check_exe.checkExact("index 0");
check_exe.checkExact("name .tdata");
} else {
check_exe.checkNotPresent("name __tls_base");
check_exe.checkNotPresent("name __tls_size");
check_exe.checkNotPresent("name __tls_align");
}
test_step.dependOn(&check_exe.step);
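
For context on the TLS assertions above, a single threadlocal variable is enough to trigger everything being checked: the .tdata segment, the __wasm_init_tls function, and (in Debug builds) the __tls_base/__tls_size/__tls_align globals. A hypothetical minimal input:

// Hypothetical TLS test input: one threadlocal variable produces a .tdata
// segment plus the TLS bookkeeping globals asserted in the checks above.
threadlocal var counter: u32 = 0;

export fn bump() u32 {
    counter += 1;
    return counter;
}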

Some files were not shown because too many files have changed in this diff.