Merge pull request #24124 from mlugg/better-backend-pipeline-2

compiler: threaded codegen (and more goodies)
This commit is contained in:
Andrew Kelley 2025-06-12 20:46:36 -04:00 committed by GitHub
commit dcdb4422b8
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
74 changed files with 8083 additions and 7631 deletions

View File

@ -535,7 +535,6 @@ set(ZIG_STAGE2_SOURCES
src/Sema.zig
src/Sema/bitcast.zig
src/Sema/comptime_ptr_access.zig
src/ThreadSafeQueue.zig
src/Type.zig
src/Value.zig
src/Zcu.zig
@ -624,6 +623,7 @@ set(ZIG_STAGE2_SOURCES
src/link/Elf/synthetic_sections.zig
src/link/Goff.zig
src/link/LdScript.zig
src/link/Lld.zig
src/link/MachO.zig
src/link/MachO/Archive.zig
src/link/MachO/Atom.zig
@ -652,6 +652,7 @@ set(ZIG_STAGE2_SOURCES
src/link/MachO/uuid.zig
src/link/Plan9.zig
src/link/Plan9/aout.zig
src/link/Queue.zig
src/link/SpirV.zig
src/link/SpirV/BinaryModule.zig
src/link/SpirV/deduplicate.zig

View File

@ -1834,47 +1834,16 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
lp.path = b.fmt("{}", .{output_dir});
}
// -femit-bin[=path] (default) Output machine code
if (compile.generated_bin) |bin| {
bin.path = output_dir.joinString(b.allocator, compile.out_filename) catch @panic("OOM");
}
const sep = std.fs.path.sep_str;
// output PDB if someone requested it
if (compile.generated_pdb) |pdb| {
pdb.path = b.fmt("{}" ++ sep ++ "{s}.pdb", .{ output_dir, compile.name });
}
// -femit-implib[=path] (default) Produce an import .lib when building a Windows DLL
if (compile.generated_implib) |implib| {
implib.path = b.fmt("{}" ++ sep ++ "{s}.lib", .{ output_dir, compile.name });
}
// -femit-h[=path] Generate a C header file (.h)
if (compile.generated_h) |lp| {
lp.path = b.fmt("{}" ++ sep ++ "{s}.h", .{ output_dir, compile.name });
}
// -femit-docs[=path] Create a docs/ dir with html documentation
if (compile.generated_docs) |generated_docs| {
generated_docs.path = output_dir.joinString(b.allocator, "docs") catch @panic("OOM");
}
// -femit-asm[=path] Output .s (assembly code)
if (compile.generated_asm) |lp| {
lp.path = b.fmt("{}" ++ sep ++ "{s}.s", .{ output_dir, compile.name });
}
// -femit-llvm-ir[=path] Produce a .ll file with optimized LLVM IR (requires LLVM extensions)
if (compile.generated_llvm_ir) |lp| {
lp.path = b.fmt("{}" ++ sep ++ "{s}.ll", .{ output_dir, compile.name });
}
// -femit-llvm-bc[=path] Produce an optimized LLVM module as a .bc file (requires LLVM extensions)
if (compile.generated_llvm_bc) |lp| {
lp.path = b.fmt("{}" ++ sep ++ "{s}.bc", .{ output_dir, compile.name });
}
// zig fmt: off
if (compile.generated_bin) |lp| lp.path = compile.outputPath(output_dir, .bin);
if (compile.generated_pdb) |lp| lp.path = compile.outputPath(output_dir, .pdb);
if (compile.generated_implib) |lp| lp.path = compile.outputPath(output_dir, .implib);
if (compile.generated_h) |lp| lp.path = compile.outputPath(output_dir, .h);
if (compile.generated_docs) |lp| lp.path = compile.outputPath(output_dir, .docs);
if (compile.generated_asm) |lp| lp.path = compile.outputPath(output_dir, .@"asm");
if (compile.generated_llvm_ir) |lp| lp.path = compile.outputPath(output_dir, .llvm_ir);
if (compile.generated_llvm_bc) |lp| lp.path = compile.outputPath(output_dir, .llvm_bc);
// zig fmt: on
}
if (compile.kind == .lib and compile.linkage != null and compile.linkage.? == .dynamic and
@ -1888,6 +1857,21 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
);
}
}
/// Computes the full output path for emit artifact `ea` under `out_dir`.
/// The artifact file name is derived from the compilation's root name, resolved
/// target, output mode, linkage, and version via `EmitArtifact.cacheName`.
/// The returned string is allocated in the build graph arena (lives for the
/// whole build); panics on OOM, matching build-system convention.
fn outputPath(c: *Compile, out_dir: std.Build.Cache.Path, ea: std.zig.EmitArtifact) []const u8 {
    const arena = c.step.owner.graph.arena;
    // Map the build-system artifact kind onto the compiler's output mode.
    const output_mode: std.builtin.OutputMode = switch (c.kind) {
        .lib => .Lib,
        .obj, .test_obj => .Obj,
        .exe, .@"test" => .Exe,
    };
    const file_name = ea.cacheName(arena, .{
        .root_name = c.name,
        .target = c.root_module.resolved_target.?.result,
        .output_mode = output_mode,
        .link_mode = c.linkage,
        .version = c.version,
    }) catch @panic("OOM");
    return out_dir.joinString(arena, file_name) catch @panic("OOM");
}
pub fn rebuildInFuzzMode(c: *Compile, progress_node: std.Progress.Node) !Path {
const gpa = c.step.owner.allocator;

View File

@ -234,6 +234,28 @@ pub const Node = struct {
_ = @atomicRmw(u32, &storage.completed_count, .Add, 1, .monotonic);
}
/// Thread-safe. Bytes after the first 0 (null) byte in `new_name` are ignored.
/// Names longer than `max_name_len` are truncated.
pub fn setName(n: Node, new_name: []const u8) void {
// The `none` node (progress disabled) has no storage; nothing to do.
const index = n.index.unwrap() orelse return;
const storage = storageByIndex(index);
// Truncate at buffer capacity or at the first null byte, whichever comes first.
const name_len = @min(max_name_len, std.mem.indexOfScalar(u8, new_name, 0) orelse new_name.len);
copyAtomicStore(storage.name[0..name_len], new_name[0..name_len]);
// Terminate the stored name when it does not fill the buffer, so concurrent
// readers do not see trailing bytes from a previous, longer name.
if (name_len < storage.name.len)
@atomicStore(u8, &storage.name[name_len], 0, .monotonic);
}
/// Gets the name of this `Node`.
/// If this is the `none` node (no storage), the returned bytes are left undefined.
/// A pointer to this array can later be passed to `setName` to restore the name.
pub fn getName(n: Node) [max_name_len]u8 {
// usize-aligned — presumably so `copyAtomicLoad` can read word-at-a-time; confirm
// against `copyAtomicLoad`'s implementation.
var dest: [max_name_len]u8 align(@alignOf(usize)) = undefined;
if (n.index.unwrap()) |index| {
copyAtomicLoad(&dest, &storageByIndex(index).name);
}
return dest;
}
/// Thread-safe.
pub fn setCompletedItems(n: Node, completed_items: usize) void {
const index = n.index.unwrap() orelse return;

View File

@ -212,8 +212,8 @@ pub fn DebugAllocator(comptime config: Config) type {
DummyMutex{};
const DummyMutex = struct {
inline fn lock(_: *DummyMutex) void {}
inline fn unlock(_: *DummyMutex) void {}
inline fn lock(_: DummyMutex) void {}
inline fn unlock(_: DummyMutex) void {}
};
const stack_n = config.stack_trace_frames;

View File

@ -135,6 +135,22 @@ pub fn MultiArrayList(comptime T: type) type {
self.* = undefined;
}
/// Returns a `Slice` covering `len` elements of `s` starting at offset `off`,
/// analogous to `arr[off..][0..len]` (`len` is a count, not an end index).
/// It is illegal to call `deinit` or `toMultiArrayList` on the returned `Slice`.
/// Asserts that `off + len <= s.len`.
pub fn subslice(s: Slice, off: usize, len: usize) Slice {
    assert(off + len <= s.len);
    var result: Slice = .{
        .ptrs = undefined,
        .len = len,
        // Capacity equals length: the returned slice does not own its memory,
        // which is why deinit/resize on it is illegal.
        .capacity = len,
    };
    // Advance each field's base pointer by `off` elements of that field's type.
    inline for (&result.ptrs, s.ptrs, fields) |*dst, src, field| {
        dst.* = src + off * @sizeOf(field.type);
    }
    return result;
}
/// This function is used in the debugger pretty formatters in tools/ to fetch the
/// child field order and entry type to facilitate fancy debug printing for this type.
fn dbHelper(self: *Slice, child: *Elem, field: *Field, entry: *Entry) void {

View File

@ -884,6 +884,35 @@ pub const SimpleComptimeReason = enum(u32) {
}
};
/// Every kind of artifact which the compiler can emit.
pub const EmitArtifact = enum {
    bin,
    @"asm",
    implib,
    llvm_ir,
    llvm_bc,
    docs,
    pdb,
    h,

    /// If using `Server` to communicate with the compiler, it will place requested artifacts in
    /// paths under the output directory, where those paths are named according to this function.
    /// Returned string is allocated with `gpa` and owned by the caller.
    pub fn cacheName(ea: EmitArtifact, gpa: Allocator, opts: BinNameOptions) Allocator.Error![]const u8 {
        // The binary's name depends on target conventions (prefix/extension),
        // so it is computed by `binNameAlloc` rather than a fixed suffix.
        if (ea == .bin) return binNameAlloc(gpa, opts);
        const suffix: []const u8 = switch (ea) {
            .bin => unreachable, // handled above
            .@"asm" => ".s",
            .implib => ".lib",
            .llvm_ir => ".ll",
            .llvm_bc => ".bc",
            // Documentation is emitted as a directory, named with a suffix.
            .docs => "-docs",
            .pdb => ".pdb",
            .h => ".h",
        };
        return std.fmt.allocPrint(gpa, "{s}{s}", .{ opts.root_name, suffix });
    }
};
test {
_ = Ast;
_ = AstRlAnnotate;

View File

@ -4861,6 +4861,15 @@ pub fn getParamBody(zir: Zir, fn_inst: Inst.Index) []const Zir.Inst.Index {
}
}
/// Returns the name of the function parameter declared by `param_inst`, or
/// `null` if the instruction is not a parameter instruction.
pub fn getParamName(zir: Zir, param_inst: Inst.Index) ?NullTerminatedString {
    const instruction = zir.instructions.get(@intFromEnum(param_inst));
    switch (instruction.tag) {
        // Typed parameters carry their name in the `Param` extra payload.
        .param, .param_comptime => {
            const extra = zir.extraData(Inst.Param, instruction.data.pl_tok.payload_index);
            return extra.data.name;
        },
        // `anytype` parameters store the name via the token's string start.
        .param_anytype, .param_anytype_comptime => return instruction.data.str_tok.start,
        else => return null,
    }
}
pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
const tags = zir.instructions.items(.tag);
const datas = zir.instructions.items(.data);

View File

@ -1153,9 +1153,7 @@ pub const Inst = struct {
ty: Type,
arg: struct {
ty: Ref,
/// Index into `extra` of a null-terminated string representing the parameter name.
/// This is `.none` if debug info is stripped.
name: NullTerminatedString,
zir_param_index: u32,
},
ty_op: struct {
ty: Ref,

View File

@ -363,10 +363,7 @@ const Writer = struct {
fn writeArg(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const arg = w.air.instructions.items(.data)[@intFromEnum(inst)].arg;
try w.writeType(s, arg.ty.toType());
switch (arg.name) {
.none => {},
_ => try s.print(", \"{}\"", .{std.zig.fmtEscapes(arg.name.toSlice(w.air))}),
}
try s.print(", {d}", .{arg.zir_param_index});
}
fn writeTyOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {

File diff suppressed because it is too large Load Diff

View File

@ -3249,6 +3249,9 @@ pub const LoadedUnionType = struct {
name: NullTerminatedString,
/// Represents the declarations inside this union.
namespace: NamespaceIndex,
/// If this is a declared type with the `.parent` name strategy, this is the `Nav` it was named after.
/// Otherwise, this is `.none`.
name_nav: Nav.Index.Optional,
/// The enum tag type.
enum_tag_ty: Index,
/// List of field types in declaration order.
@ -3567,6 +3570,7 @@ pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType {
.tid = unwrapped_index.tid,
.extra_index = data,
.name = type_union.data.name,
.name_nav = type_union.data.name_nav,
.namespace = type_union.data.namespace,
.enum_tag_ty = type_union.data.tag_ty,
.field_types = field_types,
@ -3584,6 +3588,9 @@ pub const LoadedStructType = struct {
/// The name of this struct type.
name: NullTerminatedString,
namespace: NamespaceIndex,
/// If this is a declared type with the `.parent` name strategy, this is the `Nav` it was named after.
/// Otherwise, or if this is a file's root struct type, this is `.none`.
name_nav: Nav.Index.Optional,
/// Index of the `struct_decl` or `reify` ZIR instruction.
zir_index: TrackedInst.Index,
layout: std.builtin.Type.ContainerLayout,
@ -4173,6 +4180,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
switch (item.tag) {
.type_struct => {
const name: NullTerminatedString = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "name").?]);
const name_nav: Nav.Index.Optional = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "name_nav").?]);
const namespace: NamespaceIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "namespace").?]);
const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?]);
const fields_len = extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "fields_len").?];
@ -4259,6 +4267,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
.tid = unwrapped_index.tid,
.extra_index = item.data,
.name = name,
.name_nav = name_nav,
.namespace = namespace,
.zir_index = zir_index,
.layout = if (flags.is_extern) .@"extern" else .auto,
@ -4275,6 +4284,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
},
.type_struct_packed, .type_struct_packed_inits => {
const name: NullTerminatedString = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "name").?]);
const name_nav: Nav.Index.Optional = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "name_nav").?]);
const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "zir_index").?]);
const fields_len = extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "fields_len").?];
const namespace: NamespaceIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").?]);
@ -4321,6 +4331,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
.tid = unwrapped_index.tid,
.extra_index = item.data,
.name = name,
.name_nav = name_nav,
.namespace = namespace,
.zir_index = zir_index,
.layout = .@"packed",
@ -4345,6 +4356,9 @@ pub const LoadedEnumType = struct {
name: NullTerminatedString,
/// Represents the declarations inside this enum.
namespace: NamespaceIndex,
/// If this is a declared type with the `.parent` name strategy, this is the `Nav` it was named after.
/// Otherwise, this is `.none`.
name_nav: Nav.Index.Optional,
/// An integer type which is used for the numerical value of the enum.
/// This field is present regardless of whether the enum has an
/// explicitly provided tag type or auto-numbered.
@ -4428,6 +4442,7 @@ pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType {
} else extra.data.captures_len;
return .{
.name = extra.data.name,
.name_nav = extra.data.name_nav,
.namespace = extra.data.namespace,
.tag_ty = extra.data.int_tag_type,
.names = .{
@ -4462,6 +4477,7 @@ pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType {
} else extra.data.captures_len;
return .{
.name = extra.data.name,
.name_nav = extra.data.name_nav,
.namespace = extra.data.namespace,
.tag_ty = extra.data.int_tag_type,
.names = .{
@ -4493,6 +4509,9 @@ pub const LoadedOpaqueType = struct {
// TODO: the non-fqn will be needed by the new dwarf structure
/// The name of this opaque type.
name: NullTerminatedString,
/// If this is a declared type with the `.parent` name strategy, this is the `Nav` it was named after.
/// Otherwise, this is `.none`.
name_nav: Nav.Index.Optional,
/// Index of the `opaque_decl` or `reify` instruction.
zir_index: TrackedInst.Index,
captures: CaptureValue.Slice,
@ -4509,6 +4528,7 @@ pub fn loadOpaqueType(ip: *const InternPool, index: Index) LoadedOpaqueType {
extra.data.captures_len;
return .{
.name = extra.data.name,
.name_nav = extra.data.name_nav,
.namespace = extra.data.namespace,
.zir_index = extra.data.zir_index,
.captures = .{
@ -6022,6 +6042,7 @@ pub const Tag = enum(u8) {
/// 4. field align: Alignment for each field; declaration order
pub const TypeUnion = struct {
name: NullTerminatedString,
name_nav: Nav.Index.Optional,
flags: Flags,
/// This could be provided through the tag type, but it is more convenient
/// to store it directly. This is also necessary for `dumpStatsFallible` to
@ -6061,6 +6082,7 @@ pub const Tag = enum(u8) {
/// 5. init: Index for each fields_len // if tag is type_struct_packed_inits
pub const TypeStructPacked = struct {
name: NullTerminatedString,
name_nav: Nav.Index.Optional,
zir_index: TrackedInst.Index,
fields_len: u32,
namespace: NamespaceIndex,
@ -6108,6 +6130,7 @@ pub const Tag = enum(u8) {
/// 8. field_offset: u32 // for each field in declared order, undef until layout_resolved
pub const TypeStruct = struct {
name: NullTerminatedString,
name_nav: Nav.Index.Optional,
zir_index: TrackedInst.Index,
namespace: NamespaceIndex,
fields_len: u32,
@ -6151,6 +6174,7 @@ pub const Tag = enum(u8) {
/// 0. capture: CaptureValue // for each `captures_len`
pub const TypeOpaque = struct {
name: NullTerminatedString,
name_nav: Nav.Index.Optional,
/// Contains the declarations inside this opaque.
namespace: NamespaceIndex,
/// The index of the `opaque_decl` instruction.
@ -6429,6 +6453,7 @@ pub const Array = struct {
/// 4. tag value: Index for each fields_len; declaration order
pub const EnumExplicit = struct {
name: NullTerminatedString,
name_nav: Nav.Index.Optional,
/// `std.math.maxInt(u32)` indicates this type is reified.
captures_len: u32,
namespace: NamespaceIndex,
@ -6454,6 +6479,7 @@ pub const EnumExplicit = struct {
/// 3. field name: NullTerminatedString for each fields_len; declaration order
pub const EnumAuto = struct {
name: NullTerminatedString,
name_nav: Nav.Index.Optional,
/// `std.math.maxInt(u32)` indicates this type is reified.
captures_len: u32,
namespace: NamespaceIndex,
@ -8666,6 +8692,7 @@ pub fn getUnionType(
.size = std.math.maxInt(u32),
.padding = std.math.maxInt(u32),
.name = undefined, // set by `finish`
.name_nav = undefined, // set by `finish`
.namespace = undefined, // set by `finish`
.tag_ty = ini.enum_tag_ty,
.zir_index = switch (ini.key) {
@ -8717,6 +8744,7 @@ pub fn getUnionType(
.tid = tid,
.index = gop.put(),
.type_name_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "name").?,
.name_nav_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "name_nav").?,
.namespace_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "namespace").?,
} };
}
@ -8726,15 +8754,20 @@ pub const WipNamespaceType = struct {
index: Index,
type_name_extra_index: u32,
namespace_extra_index: u32,
name_nav_extra_index: u32,
pub fn setName(
wip: WipNamespaceType,
ip: *InternPool,
type_name: NullTerminatedString,
/// This should be the `Nav` we are named after if we use the `.parent` name strategy; `.none` otherwise.
/// This is also `.none` if we use `.parent` because we are the root struct type for a file.
name_nav: Nav.Index.Optional,
) void {
const extra = ip.getLocalShared(wip.tid).extra.acquire();
const extra_items = extra.view().items(.@"0");
extra_items[wip.type_name_extra_index] = @intFromEnum(type_name);
extra_items[wip.name_nav_extra_index] = @intFromEnum(name_nav);
}
pub fn finish(
@ -8843,6 +8876,7 @@ pub fn getStructType(
ini.fields_len); // inits
const extra_index = addExtraAssumeCapacity(extra, Tag.TypeStructPacked{
.name = undefined, // set by `finish`
.name_nav = undefined, // set by `finish`
.zir_index = zir_index,
.fields_len = ini.fields_len,
.namespace = undefined, // set by `finish`
@ -8887,6 +8921,7 @@ pub fn getStructType(
.tid = tid,
.index = gop.put(),
.type_name_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "name").?,
.name_nav_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "name_nav").?,
.namespace_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").?,
} };
},
@ -8909,6 +8944,7 @@ pub fn getStructType(
1); // names_map
const extra_index = addExtraAssumeCapacity(extra, Tag.TypeStruct{
.name = undefined, // set by `finish`
.name_nav = undefined, // set by `finish`
.zir_index = zir_index,
.namespace = undefined, // set by `finish`
.fields_len = ini.fields_len,
@ -8977,6 +9013,7 @@ pub fn getStructType(
.tid = tid,
.index = gop.put(),
.type_name_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "name").?,
.name_nav_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "name_nav").?,
.namespace_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "namespace").?,
} };
}
@ -9766,6 +9803,7 @@ pub const WipEnumType = struct {
tag_ty_index: u32,
type_name_extra_index: u32,
namespace_extra_index: u32,
name_nav_extra_index: u32,
names_map: MapIndex,
names_start: u32,
values_map: OptionalMapIndex,
@ -9775,10 +9813,13 @@ pub const WipEnumType = struct {
wip: WipEnumType,
ip: *InternPool,
type_name: NullTerminatedString,
/// This should be the `Nav` we are named after if we use the `.parent` name strategy; `.none` otherwise.
name_nav: Nav.Index.Optional,
) void {
const extra = ip.getLocalShared(wip.tid).extra.acquire();
const extra_items = extra.view().items(.@"0");
extra_items[wip.type_name_extra_index] = @intFromEnum(type_name);
extra_items[wip.name_nav_extra_index] = @intFromEnum(name_nav);
}
pub fn prepare(
@ -9893,6 +9934,7 @@ pub fn getEnumType(
const extra_index = addExtraAssumeCapacity(extra, EnumAuto{
.name = undefined, // set by `prepare`
.name_nav = undefined, // set by `prepare`
.captures_len = switch (ini.key) {
inline .declared, .declared_owned_captures => |d| @intCast(d.captures.len),
.reified => std.math.maxInt(u32),
@ -9921,6 +9963,7 @@ pub fn getEnumType(
.index = gop.put(),
.tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?,
.type_name_extra_index = extra_index + std.meta.fieldIndex(EnumAuto, "name").?,
.name_nav_extra_index = extra_index + std.meta.fieldIndex(EnumAuto, "name_nav").?,
.namespace_extra_index = extra_index + std.meta.fieldIndex(EnumAuto, "namespace").?,
.names_map = names_map,
.names_start = @intCast(names_start),
@ -9950,6 +9993,7 @@ pub fn getEnumType(
const extra_index = addExtraAssumeCapacity(extra, EnumExplicit{
.name = undefined, // set by `prepare`
.name_nav = undefined, // set by `prepare`
.captures_len = switch (ini.key) {
inline .declared, .declared_owned_captures => |d| @intCast(d.captures.len),
.reified => std.math.maxInt(u32),
@ -9987,6 +10031,7 @@ pub fn getEnumType(
.index = gop.put(),
.tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?,
.type_name_extra_index = extra_index + std.meta.fieldIndex(EnumExplicit, "name").?,
.name_nav_extra_index = extra_index + std.meta.fieldIndex(EnumExplicit, "name_nav").?,
.namespace_extra_index = extra_index + std.meta.fieldIndex(EnumExplicit, "namespace").?,
.names_map = names_map,
.names_start = @intCast(names_start),
@ -10055,6 +10100,7 @@ pub fn getGeneratedTagEnumType(
.tag = .type_enum_auto,
.data = addExtraAssumeCapacity(extra, EnumAuto{
.name = ini.name,
.name_nav = .none,
.captures_len = 0,
.namespace = namespace,
.int_tag_type = ini.tag_ty,
@ -10088,6 +10134,7 @@ pub fn getGeneratedTagEnumType(
},
.data = addExtraAssumeCapacity(extra, EnumExplicit{
.name = ini.name,
.name_nav = .none,
.captures_len = 0,
.namespace = namespace,
.int_tag_type = ini.tag_ty,
@ -10161,6 +10208,7 @@ pub fn getOpaqueType(
});
const extra_index = addExtraAssumeCapacity(extra, Tag.TypeOpaque{
.name = undefined, // set by `finish`
.name_nav = undefined, // set by `finish`
.namespace = undefined, // set by `finish`
.zir_index = switch (ini.key) {
inline else => |x| x.zir_index,
@ -10183,6 +10231,7 @@ pub fn getOpaqueType(
.tid = tid,
.index = gop.put(),
.type_name_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "name").?,
.name_nav_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "name_nav").?,
.namespace_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "namespace").?,
},
};
@ -10299,6 +10348,7 @@ fn addExtraAssumeCapacity(extra: Local.Extra.Mutable, item: anytype) u32 {
extra.appendAssumeCapacity(.{switch (field.type) {
Index,
Nav.Index,
Nav.Index.Optional,
NamespaceIndex,
OptionalNamespaceIndex,
MapIndex,
@ -10361,6 +10411,7 @@ fn extraDataTrail(extra: Local.Extra, comptime T: type, index: u32) struct { dat
@field(result, field.name) = switch (field.type) {
Index,
Nav.Index,
Nav.Index.Optional,
NamespaceIndex,
OptionalNamespaceIndex,
MapIndex,

View File

@ -2963,13 +2963,14 @@ fn zirStructDecl(
};
errdefer wip_ty.cancel(ip, pt.tid);
wip_ty.setName(ip, try sema.createTypeName(
const type_name = try sema.createTypeName(
block,
small.name_strategy,
"struct",
inst,
wip_ty.index,
));
);
wip_ty.setName(ip, type_name.name, type_name.nav);
const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
.parent = block.namespace.toOptional(),
@ -2991,7 +2992,8 @@ fn zirStructDecl(
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
}
try sema.declareDependency(.{ .interned = wip_ty.index });
try sema.addTypeReferenceEntry(src, wip_ty.index);
@ -3007,7 +3009,10 @@ pub fn createTypeName(
inst: ?Zir.Inst.Index,
/// This is used purely to give the type a unique name in the `anon` case.
type_index: InternPool.Index,
) !InternPool.NullTerminatedString {
) !struct {
name: InternPool.NullTerminatedString,
nav: InternPool.Nav.Index.Optional,
} {
const pt = sema.pt;
const zcu = pt.zcu;
const gpa = zcu.gpa;
@ -3015,7 +3020,10 @@ pub fn createTypeName(
switch (name_strategy) {
.anon => {}, // handled after switch
.parent => return block.type_name_ctx,
.parent => return .{
.name = block.type_name_ctx,
.nav = sema.owner.unwrap().nav_val.toOptional(),
},
.func => func_strat: {
const fn_info = sema.code.getFnInfo(ip.funcZirBodyInst(sema.func_index).resolve(ip) orelse return error.AnalysisFail);
const zir_tags = sema.code.instructions.items(.tag);
@ -3057,7 +3065,10 @@ pub fn createTypeName(
};
try writer.writeByte(')');
return ip.getOrPutString(gpa, pt.tid, buf.items, .no_embedded_nulls);
return .{
.name = try ip.getOrPutString(gpa, pt.tid, buf.items, .no_embedded_nulls),
.nav = .none,
};
},
.dbg_var => {
// TODO: this logic is questionable. We ideally should be traversing the `Block` rather than relying on the order of AstGen instructions.
@ -3066,9 +3077,12 @@ pub fn createTypeName(
const zir_data = sema.code.instructions.items(.data);
for (@intFromEnum(inst.?)..zir_tags.len) |i| switch (zir_tags[i]) {
.dbg_var_ptr, .dbg_var_val => if (zir_data[i].str_op.operand == ref) {
return ip.getOrPutStringFmt(gpa, pt.tid, "{}.{s}", .{
block.type_name_ctx.fmt(ip), zir_data[i].str_op.getStr(sema.code),
}, .no_embedded_nulls);
return .{
.name = try ip.getOrPutStringFmt(gpa, pt.tid, "{}.{s}", .{
block.type_name_ctx.fmt(ip), zir_data[i].str_op.getStr(sema.code),
}, .no_embedded_nulls),
.nav = .none,
};
},
else => {},
};
@ -3086,9 +3100,12 @@ pub fn createTypeName(
// types appropriately. However, `@typeName` becomes a problem then. If we remove
// that builtin from the language, we can consider this.
return ip.getOrPutStringFmt(gpa, pt.tid, "{}__{s}_{d}", .{
block.type_name_ctx.fmt(ip), anon_prefix, @intFromEnum(type_index),
}, .no_embedded_nulls);
return .{
.name = try ip.getOrPutStringFmt(gpa, pt.tid, "{}__{s}_{d}", .{
block.type_name_ctx.fmt(ip), anon_prefix, @intFromEnum(type_index),
}, .no_embedded_nulls),
.nav = .none,
};
}
fn zirEnumDecl(
@ -3209,7 +3226,7 @@ fn zirEnumDecl(
inst,
wip_ty.index,
);
wip_ty.setName(ip, type_name);
wip_ty.setName(ip, type_name.name, type_name.nav);
const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
.parent = block.namespace.toOptional(),
@ -3236,7 +3253,7 @@ fn zirEnumDecl(
inst,
tracked_inst,
new_namespace_index,
type_name,
type_name.name,
small,
body,
tag_type_ref,
@ -3250,7 +3267,8 @@ fn zirEnumDecl(
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
}
return Air.internedToRef(wip_ty.index);
}
@ -3340,13 +3358,14 @@ fn zirUnionDecl(
};
errdefer wip_ty.cancel(ip, pt.tid);
wip_ty.setName(ip, try sema.createTypeName(
const type_name = try sema.createTypeName(
block,
small.name_strategy,
"union",
inst,
wip_ty.index,
));
);
wip_ty.setName(ip, type_name.name, type_name.nav);
const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
.parent = block.namespace.toOptional(),
@ -3368,7 +3387,8 @@ fn zirUnionDecl(
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
}
try sema.declareDependency(.{ .interned = wip_ty.index });
try sema.addTypeReferenceEntry(src, wip_ty.index);
@ -3432,13 +3452,14 @@ fn zirOpaqueDecl(
};
errdefer wip_ty.cancel(ip, pt.tid);
wip_ty.setName(ip, try sema.createTypeName(
const type_name = try sema.createTypeName(
block,
small.name_strategy,
"opaque",
inst,
wip_ty.index,
));
);
wip_ty.setName(ip, type_name.name, type_name.nav);
const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
.parent = block.namespace.toOptional(),
@ -3455,7 +3476,8 @@ fn zirOpaqueDecl(
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
}
try sema.addTypeReferenceEntry(src, wip_ty.index);
if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip_ty.index);
@ -20052,7 +20074,8 @@ fn structInitAnon(
}, false)) {
.wip => |wip| ty: {
errdefer wip.cancel(ip, pt.tid);
wip.setName(ip, try sema.createTypeName(block, .anon, "struct", inst, wip.index));
const type_name = try sema.createTypeName(block, .anon, "struct", inst, wip.index);
wip.setName(ip, type_name.name, type_name.nav);
const struct_type = ip.loadStructType(wip.index);
@ -20076,7 +20099,8 @@ fn structInitAnon(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
try zcu.comp.queueJob(.{ .codegen_type = wip.index });
zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
try zcu.comp.queueJob(.{ .link_type = wip.index });
}
if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index);
break :ty wip.finish(ip, new_namespace_index);
@ -21112,13 +21136,14 @@ fn zirReify(
};
errdefer wip_ty.cancel(ip, pt.tid);
wip_ty.setName(ip, try sema.createTypeName(
const type_name = try sema.createTypeName(
block,
name_strategy,
"opaque",
inst,
wip_ty.index,
));
);
wip_ty.setName(ip, type_name.name, type_name.nav);
const new_namespace_index = try pt.createNamespace(.{
.parent = block.namespace.toOptional(),
@ -21317,13 +21342,14 @@ fn reifyEnum(
return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{});
}
wip_ty.setName(ip, try sema.createTypeName(
const type_name = try sema.createTypeName(
block,
name_strategy,
"enum",
inst,
wip_ty.index,
));
);
wip_ty.setName(ip, type_name.name, type_name.nav);
const new_namespace_index = try pt.createNamespace(.{
.parent = block.namespace.toOptional(),
@ -21386,7 +21412,8 @@ fn reifyEnum(
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
}
return Air.internedToRef(wip_ty.index);
}
@ -21488,7 +21515,7 @@ fn reifyUnion(
inst,
wip_ty.index,
);
wip_ty.setName(ip, type_name);
wip_ty.setName(ip, type_name.name, type_name.nav);
const field_types = try sema.arena.alloc(InternPool.Index, fields_len);
const field_aligns = if (any_aligns) try sema.arena.alloc(InternPool.Alignment, fields_len) else undefined;
@ -21581,7 +21608,7 @@ fn reifyUnion(
}
}
const enum_tag_ty = try sema.generateUnionTagTypeSimple(block, field_names.keys(), wip_ty.index, type_name);
const enum_tag_ty = try sema.generateUnionTagTypeSimple(block, field_names.keys(), wip_ty.index, type_name.name);
break :tag_ty .{ enum_tag_ty, false };
};
errdefer if (!has_explicit_tag) ip.remove(pt.tid, enum_tag_ty); // remove generated tag type on error
@ -21640,7 +21667,8 @@ fn reifyUnion(
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
}
try sema.declareDependency(.{ .interned = wip_ty.index });
try sema.addTypeReferenceEntry(src, wip_ty.index);
@ -21843,13 +21871,14 @@ fn reifyStruct(
};
errdefer wip_ty.cancel(ip, pt.tid);
wip_ty.setName(ip, try sema.createTypeName(
const type_name = try sema.createTypeName(
block,
name_strategy,
"struct",
inst,
wip_ty.index,
));
);
wip_ty.setName(ip, type_name.name, type_name.nav);
const struct_type = ip.loadStructType(wip_ty.index);
@ -21994,7 +22023,8 @@ fn reifyStruct(
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
}
try sema.declareDependency(.{ .interned = wip_ty.index });
try sema.addTypeReferenceEntry(src, wip_ty.index);
@ -35022,7 +35052,7 @@ pub fn resolveUnionAlignment(
union_type.setAlignment(ip, max_align);
}
/// This logic must be kept in sync with `Zcu.getUnionLayout`.
/// This logic must be kept in sync with `Type.getUnionLayout`.
pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
const pt = sema.pt;
const ip = &pt.zcu.intern_pool;
@ -35056,24 +35086,32 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
var max_align: Alignment = .@"1";
for (0..union_type.field_types.len) |field_index| {
const field_ty: Type = .fromInterned(union_type.field_types.get(ip)[field_index]);
if (field_ty.isNoReturn(pt.zcu)) continue;
if (try field_ty.comptimeOnlySema(pt) or field_ty.zigTypeTag(pt.zcu) == .noreturn) continue; // TODO: should this affect alignment?
// We need to call `hasRuntimeBits` before calling `abiSize` to prevent reachable `unreachable`s,
// but `hasRuntimeBits` only resolves field types and so may infinite recurse on a layout wip type,
// so we must resolve the layout manually first, instead of waiting for `abiSize` to do it for us.
// This is arguably just hacking around bugs in both `abiSize` for not allowing arbitrary types to
// be queried, enabling failures to be handled with the emission of a compile error, and also in
// `hasRuntimeBits` for ever being able to infinite recurse in the first place.
try field_ty.resolveLayout(pt);
max_size = @max(max_size, field_ty.abiSizeSema(pt) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
try sema.addFieldErrNote(ty, field_index, msg, "while checking this field", .{});
return err;
},
else => return err,
});
if (try field_ty.hasRuntimeBitsSema(pt)) {
max_size = @max(max_size, field_ty.abiSizeSema(pt) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
try sema.addFieldErrNote(ty, field_index, msg, "while checking this field", .{});
return err;
},
else => return err,
});
}
const explicit_align = union_type.fieldAlign(ip, field_index);
const field_align = if (explicit_align != .none)
explicit_align
else
try field_ty.abiAlignmentSema(pt);
max_align = max_align.max(field_align);
}

View File

@ -157,13 +157,14 @@ fn lowerExprAnonResTy(self: *LowerZon, node: Zoir.Node.Index) CompileError!Inter
)) {
.wip => |wip| ty: {
errdefer wip.cancel(ip, pt.tid);
wip.setName(ip, try self.sema.createTypeName(
const type_name = try self.sema.createTypeName(
self.block,
.anon,
"struct",
self.base_node_inst.resolve(ip),
wip.index,
));
);
wip.setName(ip, type_name.name, type_name.nav);
const struct_type = ip.loadStructType(wip.index);
@ -194,7 +195,8 @@ fn lowerExprAnonResTy(self: *LowerZon, node: Zoir.Node.Index) CompileError!Inter
codegen_type: {
if (pt.zcu.comp.config.use_llvm) break :codegen_type;
if (self.block.ownerModule().strip) break :codegen_type;
try pt.zcu.comp.queueJob(.{ .codegen_type = wip.index });
pt.zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
try pt.zcu.comp.queueJob(.{ .link_type = wip.index });
}
break :ty wip.finish(ip, new_namespace_index);
},

View File

@ -1,72 +0,0 @@
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
/// A mutex-guarded producer/consumer queue of `T` with a single consumer
/// ("worker") thread. Producers batch items into `shared`; the worker
/// periodically steals the whole batch via `check`. The `state` field tracks
/// whether the worker is currently running so that producers know when they
/// are responsible for (re)starting it.
pub fn ThreadSafeQueue(comptime T: type) type {
    return struct {
        /// Scratch storage owned exclusively by the worker thread; holds the
        /// batch most recently taken out of `shared`.
        worker_owned: std.ArrayListUnmanaged(T),
        /// Protected by `mutex`.
        shared: std.ArrayListUnmanaged(T),
        mutex: std.Thread.Mutex,
        state: State,

        const Self = @This();

        pub const State = enum { wait, run };

        pub const empty: Self = .{
            .worker_owned = .empty,
            .shared = .empty,
            .mutex = .{},
            .state = .wait,
        };

        pub fn deinit(self: *Self, gpa: Allocator) void {
            self.worker_owned.deinit(gpa);
            self.shared.deinit(gpa);
            self.* = undefined;
        }

        /// Must be called from the worker thread.
        /// Returns the next batch of items to process, or `null` if the queue
        /// is empty, in which case the worker transitions to `wait`.
        pub fn check(self: *Self) ?[]T {
            assert(self.worker_owned.items.len == 0);
            {
                self.mutex.lock();
                defer self.mutex.unlock();
                assert(self.state == .run);
                if (self.shared.items.len == 0) {
                    self.state = .wait;
                    return null;
                }
                // Steal the entire shared batch by exchanging list storage;
                // no element copies are performed.
                const scratch = self.worker_owned;
                self.worker_owned = self.shared;
                self.shared = scratch;
            }
            // Capture the slice before resetting the length so the caller
            // still sees the batch; capacity is retained for reuse.
            const batch = self.worker_owned.items;
            self.worker_owned.clearRetainingCapacity();
            return batch;
        }

        /// Adds items to the queue, returning true if and only if the worker
        /// thread is waiting. Thread-safe.
        /// Not safe to call from the worker thread.
        pub fn enqueue(self: *Self, gpa: Allocator, items: []const T) error{OutOfMemory}!bool {
            self.mutex.lock();
            defer self.mutex.unlock();
            try self.shared.appendSlice(gpa, items);
            if (self.state == .run) return false;
            // Worker was waiting; mark it running — the caller must wake it.
            self.state = .run;
            return true;
        }

        /// Safe only to call exactly once when initially starting the worker.
        /// Returns whether there is pending work (and hence whether the worker
        /// should actually be started).
        pub fn start(self: *Self) bool {
            assert(self.state == .wait);
            const has_work = self.shared.items.len != 0;
            if (has_work) self.state = .run;
            return has_work;
        }
    };
}

View File

@ -177,6 +177,7 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.undef => return writer.writeAll("@as(type, undefined)"),
.int_type => |int_type| {
const sign_char: u8 = switch (int_type.signedness) {
.signed => 'i',
@ -398,7 +399,6 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error
},
// values, not types
.undef,
.simple_value,
.variable,
.@"extern",
@ -3915,29 +3915,32 @@ fn resolveUnionInner(
pub fn getUnionLayout(loaded_union: InternPool.LoadedUnionType, zcu: *const Zcu) Zcu.UnionLayout {
const ip = &zcu.intern_pool;
assert(loaded_union.haveLayout(ip));
var most_aligned_field: u32 = undefined;
var most_aligned_field_size: u64 = undefined;
var biggest_field: u32 = undefined;
var most_aligned_field: u32 = 0;
var most_aligned_field_align: InternPool.Alignment = .@"1";
var most_aligned_field_size: u64 = 0;
var biggest_field: u32 = 0;
var payload_size: u64 = 0;
var payload_align: InternPool.Alignment = .@"1";
for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| {
if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(zcu)) continue;
for (loaded_union.field_types.get(ip), 0..) |field_ty_ip_index, field_index| {
const field_ty: Type = .fromInterned(field_ty_ip_index);
if (field_ty.isNoReturn(zcu)) continue;
const explicit_align = loaded_union.fieldAlign(ip, field_index);
const field_align = if (explicit_align != .none)
explicit_align
else
Type.fromInterned(field_ty).abiAlignment(zcu);
const field_size = Type.fromInterned(field_ty).abiSize(zcu);
field_ty.abiAlignment(zcu);
const field_size = field_ty.abiSize(zcu);
if (field_size > payload_size) {
payload_size = field_size;
biggest_field = @intCast(field_index);
}
if (field_align.compare(.gte, payload_align)) {
payload_align = field_align;
if (field_size > 0 and field_align.compare(.gte, most_aligned_field_align)) {
most_aligned_field = @intCast(field_index);
most_aligned_field_align = field_align;
most_aligned_field_size = field_size;
}
payload_align = payload_align.max(field_align);
}
const have_tag = loaded_union.flagsUnordered(ip).runtime_tag.hasTag();
if (!have_tag or !Type.fromInterned(loaded_union.enum_tag_ty).hasRuntimeBits(zcu)) {

View File

@ -56,9 +56,8 @@ comptime {
/// General-purpose allocator. Used for both temporary and long-term storage.
gpa: Allocator,
comp: *Compilation,
/// Usually, the LlvmObject is managed by linker code, however, in the case
/// that -fno-emit-bin is specified, the linker code never executes, so we
/// store the LlvmObject here.
/// If the ZCU is emitting an LLVM object (i.e. we are using the LLVM backend), then this is the
/// `LlvmObject` we are emitting to.
llvm_object: ?LlvmObject.Ptr,
/// Pointer to externally managed resource.
@ -67,8 +66,18 @@ root_mod: *Package.Module,
/// `root_mod` is the test runner, and `main_mod` is the user's source file which has the tests.
main_mod: *Package.Module,
std_mod: *Package.Module,
sema_prog_node: std.Progress.Node = std.Progress.Node.none,
codegen_prog_node: std.Progress.Node = std.Progress.Node.none,
sema_prog_node: std.Progress.Node = .none,
codegen_prog_node: std.Progress.Node = .none,
/// The number of codegen jobs which are pending or in-progress. Whichever thread drops this value
/// to 0 is responsible for ending `codegen_prog_node`. While semantic analysis is happening, this
/// value bottoms out at 1 instead of 0, to ensure that it can only drop to 0 after analysis is
/// completed (since semantic analysis could trigger more codegen work).
pending_codegen_jobs: std.atomic.Value(u32) = .init(0),
/// This is the progress node *under* `sema_prog_node` which is currently running.
/// When we have to pause to analyze something else, we just temporarily rename this node.
/// Eventually, when we thread semantic analysis, we will want one of these per thread.
cur_sema_prog_node: std.Progress.Node = .none,
/// Used by AstGen worker to load and store ZIR cache.
global_zir_cache: Cache.Directory,
@ -172,6 +181,8 @@ transitive_failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .emp
/// This `Nav` succeeded analysis, but failed codegen.
/// This may be a simple "value" `Nav`, or it may be a function.
/// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator.
/// While multiple threads are active (most of the time!), this is guarded by `zcu.comp.mutex`, as
/// codegen and linking run on a separate thread.
failed_codegen: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, *ErrorMsg) = .empty,
failed_types: std.AutoArrayHashMapUnmanaged(InternPool.Index, *ErrorMsg) = .empty,
/// Keep track of `@compileLog`s per `AnalUnit`.
@ -267,16 +278,6 @@ resolved_references: ?std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) = n
/// Reset to `false` at the start of each update in `Compilation.update`.
skip_analysis_this_update: bool = false,
stage1_flags: packed struct {
have_winmain: bool = false,
have_wwinmain: bool = false,
have_winmain_crt_startup: bool = false,
have_wwinmain_crt_startup: bool = false,
have_dllmain_crt_startup: bool = false,
have_c_main: bool = false,
reserved: u2 = 0,
} = .{},
test_functions: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void) = .empty,
global_assembly: std.AutoArrayHashMapUnmanaged(AnalUnit, []u8) = .empty,
@ -3828,7 +3829,36 @@ pub const Feature = enum {
is_named_enum_value,
error_set_has_value,
field_reordering,
/// If the backend supports running from another thread.
/// In theory, backends are supposed to work like this:
///
/// * The AIR emitted by `Sema` is converted into MIR by `codegen.generateFunction`. This pass
/// is "pure", in that it does not depend on or modify any external mutable state.
///
/// * That MIR is sent to the linker, which calls `codegen.emitFunction` to convert the MIR to
/// finalized machine code. This process is permitted to query and modify linker state.
///
/// * The linker stores the resulting machine code in the binary as needed.
///
/// The first stage described above can run in parallel to the rest of the compiler, and even to
/// other code generation work; we can run as many codegen threads as we want in parallel because
/// of the fact that this pass is pure. Emit and link must be single-threaded, but are generally
/// very fast, so that isn't a problem.
///
/// Unfortunately, some code generation implementations currently query and/or mutate linker state
/// or even (in the case of the LLVM backend) semantic analysis state. Such backends cannot be run
/// in parallel with each other, with linking, or (potentially) with semantic analysis.
///
/// Additionally, some backends continue to need the AIR in the "emit" stage, despite this pass
/// operating on MIR. This complicates memory management under the threading model above.
///
/// These are both **bugs** in backend implementations, left over from legacy code. However, they
/// are difficult to fix. So, this `Feature` currently guards correct threading of code generation:
///
/// * With this feature enabled, the backend is threaded as described above. The "emit" stage does
/// not have access to AIR (it will be `undefined`; see `codegen.emitFunction`).
///
/// * With this feature disabled, semantic analysis, code generation, and linking all occur on the
/// same thread, and the "emit" stage has access to AIR.
separate_thread,
};
@ -4577,22 +4607,29 @@ pub fn codegenFail(
comptime format: []const u8,
args: anytype,
) CodegenFailError {
const gpa = zcu.gpa;
try zcu.failed_codegen.ensureUnusedCapacity(gpa, 1);
const msg = try Zcu.ErrorMsg.create(gpa, zcu.navSrcLoc(nav_index), format, args);
zcu.failed_codegen.putAssumeCapacityNoClobber(nav_index, msg);
return error.CodegenFail;
const msg = try Zcu.ErrorMsg.create(zcu.gpa, zcu.navSrcLoc(nav_index), format, args);
return zcu.codegenFailMsg(nav_index, msg);
}
/// Takes ownership of `msg`, even on OOM.
/// Records `msg` under `nav_index` in `zcu.failed_codegen` (guarded by
/// `zcu.comp.mutex`, since codegen/link run on other threads) and returns
/// `error.CodegenFail`.
pub fn codegenFailMsg(zcu: *Zcu, nav_index: InternPool.Nav.Index, msg: *ErrorMsg) CodegenFailError {
    const gpa = zcu.gpa;
    zcu.comp.mutex.lock();
    defer zcu.comp.mutex.unlock();
    // If recording the error itself fails, we still own `msg` and must free it.
    zcu.failed_codegen.putNoClobber(gpa, nav_index, msg) catch |err| {
        msg.deinit(gpa);
        return err;
    };
    return error.CodegenFail;
}
/// Asserts that `zcu.failed_codegen` contains the key `nav`, with the necessary lock held.
pub fn assertCodegenFailed(zcu: *Zcu, nav: InternPool.Nav.Index) void {
    zcu.comp.mutex.lock();
    // Query under the lock; the assertion itself needs no lock.
    const found = zcu.failed_codegen.contains(nav);
    zcu.comp.mutex.unlock();
    assert(found);
}
pub fn codegenFailType(
zcu: *Zcu,
ty_index: InternPool.Index,
@ -4726,3 +4763,27 @@ fn explainWhyFileIsInModule(
import = importer_ref.import;
}
}
/// Handle returned by `startSemaProgNode`; call `end` when the analysis unit
/// finishes to either destroy the progress node we created or restore the name
/// of the node we temporarily renamed.
const SemaProgNode = struct {
    /// `null` means we created the node, so should end it.
    old_name: ?[std.Progress.Node.max_name_len]u8,

    pub fn end(spn: SemaProgNode, zcu: *Zcu) void {
        const restore_name = spn.old_name orelse {
            // We created `cur_sema_prog_node`, so we are responsible for ending it.
            zcu.cur_sema_prog_node.end();
            zcu.cur_sema_prog_node = .none;
            return;
        };
        // We only renamed the already-running node; put its old name back.
        zcu.sema_prog_node.completeOne(); // we're just renaming, but it's effectively completion
        zcu.cur_sema_prog_node.setName(&restore_name);
    }
};
/// Begin reporting progress for one semantic-analysis unit named `name`.
/// If a unit is already being reported, the running node is temporarily
/// renamed instead of nesting a new one; the returned handle undoes either
/// action via `SemaProgNode.end`.
pub fn startSemaProgNode(zcu: *Zcu, name: []const u8) SemaProgNode {
    if (zcu.cur_sema_prog_node.index == .none) {
        // Nothing running yet: create a fresh child of `sema_prog_node`.
        zcu.cur_sema_prog_node = zcu.sema_prog_node.start(name, 0);
        return .{ .old_name = null };
    }
    // A unit is already running: remember its name so `end` can restore it.
    const prev_name = zcu.cur_sema_prog_node.getName();
    zcu.cur_sema_prog_node.setName(name);
    return .{ .old_name = prev_name };
}

View File

@ -27,6 +27,7 @@ const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const Zcu = @import("../Zcu.zig");
const Compilation = @import("../Compilation.zig");
const codegen = @import("../codegen.zig");
const Zir = std.zig.Zir;
const Zoir = std.zig.Zoir;
const ZonGen = std.zig.ZonGen;
@ -795,8 +796,8 @@ pub fn ensureComptimeUnitUpToDate(pt: Zcu.PerThread, cu_id: InternPool.ComptimeU
info.deps.clearRetainingCapacity();
}
const unit_prog_node = zcu.sema_prog_node.start("comptime", 0);
defer unit_prog_node.end();
const unit_prog_node = zcu.startSemaProgNode("comptime");
defer unit_prog_node.end(zcu);
return pt.analyzeComptimeUnit(cu_id) catch |err| switch (err) {
error.AnalysisFail => {
@ -975,8 +976,8 @@ pub fn ensureNavValUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu
info.deps.clearRetainingCapacity();
}
const unit_prog_node = zcu.sema_prog_node.start(nav.fqn.toSlice(ip), 0);
defer unit_prog_node.end();
const unit_prog_node = zcu.startSemaProgNode(nav.fqn.toSlice(ip));
defer unit_prog_node.end(zcu);
const invalidate_value: bool, const new_failed: bool = if (pt.analyzeNavVal(nav_id)) |result| res: {
break :res .{
@ -1320,7 +1321,8 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr
}
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_nav = nav_id });
zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
try zcu.comp.queueJob(.{ .link_nav = nav_id });
}
switch (old_nav.status) {
@ -1395,8 +1397,8 @@ pub fn ensureNavTypeUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zc
info.deps.clearRetainingCapacity();
}
const unit_prog_node = zcu.sema_prog_node.start(nav.fqn.toSlice(ip), 0);
defer unit_prog_node.end();
const unit_prog_node = zcu.startSemaProgNode(nav.fqn.toSlice(ip));
defer unit_prog_node.end(zcu);
const invalidate_type: bool, const new_failed: bool = if (pt.analyzeNavType(nav_id)) |result| res: {
break :res .{
@ -1616,8 +1618,8 @@ pub fn ensureFuncBodyUpToDate(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
info.deps.clearRetainingCapacity();
}
const func_prog_node = zcu.sema_prog_node.start(ip.getNav(func.owner_nav).fqn.toSlice(ip), 0);
defer func_prog_node.end();
const func_prog_node = zcu.startSemaProgNode(ip.getNav(func.owner_nav).fqn.toSlice(ip));
defer func_prog_node.end(zcu);
const ies_outdated, const new_failed = if (pt.analyzeFuncBody(func_index)) |result|
.{ prev_failed or result.ies_outdated, false }
@ -1716,6 +1718,8 @@ fn analyzeFuncBody(
}
// This job depends on any resolve_type_fully jobs queued up before it.
zcu.codegen_prog_node.increaseEstimatedTotalItems(1);
comp.link_prog_node.increaseEstimatedTotalItems(1);
try comp.queueJob(.{ .codegen_func = .{
.func = func_index,
.air = air,
@ -1724,87 +1728,6 @@ fn analyzeFuncBody(
return .{ .ies_outdated = ies_outdated };
}
/// Takes ownership of `air`, even on error.
/// If any types referenced by `air` are unresolved, marks the codegen as failed.
/// Legalizes and liveness-analyzes the function's AIR, then hands it to the
/// active backend: the real linker (`comp.bin_file`) if one exists, otherwise
/// the LLVM object (`zcu.llvm_object`) if present.
/// Errors: only `error.OutOfMemory` propagates; backend codegen failures are
/// recorded in `zcu.failed_codegen` instead.
pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) Allocator.Error!void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;
    const comp = zcu.comp;
    const func = zcu.funcInfo(func_index);
    const nav_index = func.owner_nav;
    const nav = ip.getNav(nav_index);
    // Report progress under the function's fully-qualified name.
    const codegen_prog_node = zcu.codegen_prog_node.start(nav.fqn.toSlice(ip), 0);
    defer codegen_prog_node.end();
    if (!air.typesFullyResolved(zcu)) {
        // A type we depend on failed to resolve. This is a transitive failure.
        // Correcting this failure will involve changing a type this function
        // depends on, hence triggering re-analysis of this function, so this
        // interacts correctly with incremental compilation.
        return;
    }
    // Rewrite AIR into a form the backend supports; skipped entirely when the
    // backend declares no legalization features.
    legalize: {
        try air.legalize(pt, @import("../codegen.zig").legalizeFeatures(pt, nav_index) orelse break :legalize);
    }
    var liveness = try Air.Liveness.analyze(zcu, air.*, ip);
    defer liveness.deinit(gpa);
    if (build_options.enable_debug_extensions and comp.verbose_air) {
        std.debug.print("# Begin Function AIR: {}:\n", .{nav.fqn.fmt(ip)});
        air.dump(pt, liveness);
        std.debug.print("# End Function AIR: {}\n\n", .{nav.fqn.fmt(ip)});
    }
    // In safe builds, validate the liveness data before trusting it in codegen.
    if (std.debug.runtime_safety) {
        var verify: Air.Liveness.Verify = .{
            .gpa = gpa,
            .zcu = zcu,
            .air = air.*,
            .liveness = liveness,
            .intern_pool = ip,
        };
        defer verify.deinit();
        verify.verify() catch |err| switch (err) {
            error.OutOfMemory => return error.OutOfMemory,
            // Any other verification error becomes a recorded codegen failure
            // rather than propagating.
            else => {
                try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create(
                    gpa,
                    zcu.navSrcLoc(nav_index),
                    "invalid liveness: {s}",
                    .{@errorName(err)},
                ));
                return;
            },
        };
    }
    // Dispatch to the backend. `CodegenFail` implies the failure was already
    // recorded; `Overflow`/`RelocationNotByteAligned` are recorded here.
    if (comp.bin_file) |lf| {
        lf.updateFunc(pt, func_index, air.*, liveness) catch |err| switch (err) {
            error.OutOfMemory => return error.OutOfMemory,
            error.CodegenFail => assert(zcu.failed_codegen.contains(nav_index)),
            error.Overflow, error.RelocationNotByteAligned => {
                try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create(
                    gpa,
                    zcu.navSrcLoc(nav_index),
                    "unable to codegen: {s}",
                    .{@errorName(err)},
                ));
                // Not a retryable failure.
            },
        };
    } else if (zcu.llvm_object) |llvm_object| {
        llvm_object.updateFunc(pt, func_index, air.*, liveness) catch |err| switch (err) {
            error.OutOfMemory => return error.OutOfMemory,
        };
    }
}
pub fn semaMod(pt: Zcu.PerThread, mod: *Module) !void {
dev.check(.sema);
const file_index = pt.zcu.module_roots.get(mod).?.unwrap().?;
@ -1867,7 +1790,7 @@ fn createFileRootStruct(
};
errdefer wip_ty.cancel(ip, pt.tid);
wip_ty.setName(ip, try file.internFullyQualifiedName(pt));
wip_ty.setName(ip, try file.internFullyQualifiedName(pt), .none);
ip.namespacePtr(namespace_index).owner_type = wip_ty.index;
if (zcu.comp.incremental) {
@ -1877,10 +1800,10 @@ fn createFileRootStruct(
try pt.scanNamespace(namespace_index, decls);
try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (file.mod.?.strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
}
zcu.setFileRootType(file_index, wip_ty.index);
if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip_ty.index);
@ -2574,7 +2497,7 @@ fn newEmbedFile(
cache: {
const whole = switch (zcu.comp.cache_use) {
.whole => |whole| whole,
.incremental => break :cache,
.incremental, .none => break :cache,
};
const man = whole.cache_manifest orelse break :cache;
const ip_str = opt_ip_str orelse break :cache; // this will be a compile error
@ -2974,17 +2897,10 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
runtime_params_len;
var runtime_param_index: usize = 0;
for (fn_info.param_body[0..src_params_len]) |inst| {
for (fn_info.param_body[0..src_params_len], 0..) |inst, zir_param_index| {
const gop = sema.inst_map.getOrPutAssumeCapacity(inst);
if (gop.found_existing) continue; // provided above by comptime arg
const param_inst_info = sema.code.instructions.get(@intFromEnum(inst));
const param_name: Zir.NullTerminatedString = switch (param_inst_info.tag) {
.param_anytype => param_inst_info.data.str_tok.start,
.param => sema.code.extraData(Zir.Inst.Param, param_inst_info.data.pl_tok.payload_index).data.name,
else => unreachable,
};
const param_ty = fn_ty_info.param_types.get(ip)[runtime_param_index];
runtime_param_index += 1;
@ -3004,10 +2920,7 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
.tag = .arg,
.data = .{ .arg = .{
.ty = Air.internedToRef(param_ty),
.name = if (inner_block.ownerModule().strip)
.none
else
try sema.appendAirString(sema.code.nullTerminatedString(param_name)),
.zir_param_index = @intCast(zir_param_index),
} },
});
}
@ -3189,7 +3102,9 @@ pub fn processExports(pt: Zcu.PerThread) !void {
// This export might already have been sent to the linker on a previous update, in which case we need to delete it.
// The linker export API should be modified to eliminate this call. #23616
if (zcu.comp.bin_file) |lf| {
lf.deleteExport(exp.exported, exp.opts.name);
if (zcu.llvm_object == null) {
lf.deleteExport(exp.exported, exp.opts.name);
}
}
continue;
}
@ -3213,8 +3128,10 @@ pub fn processExports(pt: Zcu.PerThread) !void {
// This export might already have been sent to the linker on a previous update, in which case we need to delete it.
// The linker export API should be modified to eliminate this loop. #23616
if (zcu.comp.bin_file) |lf| {
for (exports) |exp| {
lf.deleteExport(exp.exported, exp.opts.name);
if (zcu.llvm_object == null) {
for (exports) |exp| {
lf.deleteExport(exp.exported, exp.opts.name);
}
}
}
continue;
@ -3309,46 +3226,49 @@ fn processExportsInner(
.uav => {},
}
if (zcu.comp.bin_file) |lf| {
try zcu.handleUpdateExports(export_indices, lf.updateExports(pt, exported, export_indices));
} else if (zcu.llvm_object) |llvm_object| {
if (zcu.llvm_object) |llvm_object| {
try zcu.handleUpdateExports(export_indices, llvm_object.updateExports(pt, exported, export_indices));
} else if (zcu.comp.bin_file) |lf| {
try zcu.handleUpdateExports(export_indices, lf.updateExports(pt, exported, export_indices));
}
}
pub fn populateTestFunctions(
pt: Zcu.PerThread,
main_progress_node: std.Progress.Node,
) Allocator.Error!void {
pub fn populateTestFunctions(pt: Zcu.PerThread) Allocator.Error!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
// Our job is to correctly set the value of the `test_functions` declaration if it has been
// analyzed and sent to codegen, It usually will have been, because the test runner will
// reference it, and `std.builtin` shouldn't have type errors. However, if it hasn't been
// analyzed, we will just terminate early, since clearly the test runner hasn't referenced
// `test_functions` so there's no point populating it. More to the the point, we potentially
// *can't* populate it without doing some type resolution, and... let's try to leave Sema in
// the past here.
const builtin_mod = zcu.builtin_modules.get(zcu.root_mod.getBuiltinOptions(zcu.comp.config).hash()).?;
const builtin_file_index = zcu.module_roots.get(builtin_mod).?.unwrap().?;
pt.ensureFileAnalyzed(builtin_file_index) catch |err| switch (err) {
error.AnalysisFail => unreachable, // builtin module is generated so cannot be corrupt
error.OutOfMemory => |e| return e,
};
const builtin_root_type = Type.fromInterned(zcu.fileRootType(builtin_file_index));
const builtin_namespace = builtin_root_type.getNamespace(zcu).unwrap().?;
const builtin_root_type = zcu.fileRootType(builtin_file_index);
if (builtin_root_type == .none) return; // `@import("builtin")` never analyzed
const builtin_namespace = Type.fromInterned(builtin_root_type).getNamespace(zcu).unwrap().?;
// We know that the namespace has a `test_functions`...
const nav_index = zcu.namespacePtr(builtin_namespace).pub_decls.getKeyAdapted(
try ip.getOrPutString(gpa, pt.tid, "test_functions", .no_embedded_nulls),
Zcu.Namespace.NameAdapter{ .zcu = zcu },
).?;
// ...but it might not be populated, so let's check that!
if (zcu.failed_analysis.contains(.wrap(.{ .nav_val = nav_index })) or
zcu.transitive_failed_analysis.contains(.wrap(.{ .nav_val = nav_index })) or
ip.getNav(nav_index).status != .fully_resolved)
{
// We have to call `ensureNavValUpToDate` here in case `builtin.test_functions`
// was not referenced by start code.
zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
defer {
zcu.sema_prog_node.end();
zcu.sema_prog_node = std.Progress.Node.none;
}
pt.ensureNavValUpToDate(nav_index) catch |err| switch (err) {
error.AnalysisFail => return,
error.OutOfMemory => return error.OutOfMemory,
};
// The value of `builtin.test_functions` was either never referenced, or failed analysis.
// Either way, we don't need to do anything.
return;
}
// Okay, `builtin.test_functions` is (potentially) referenced and valid. Our job now is to swap
// its placeholder `&.{}` value for the actual list of all test functions.
const test_fns_val = zcu.navValue(nav_index);
const test_fn_ty = test_fns_val.typeOf(zcu).slicePtrFieldType(zcu).childType(zcu);
@ -3450,81 +3370,8 @@ pub fn populateTestFunctions(
} });
ip.mutateVarInit(test_fns_val.toIntern(), new_init);
}
{
zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0);
defer {
zcu.codegen_prog_node.end();
zcu.codegen_prog_node = std.Progress.Node.none;
}
try pt.linkerUpdateNav(nav_index);
}
}
/// Sends the (already analyzed) value of `nav_index` to the active backend:
/// the real linker (`comp.bin_file`) if one exists, otherwise the LLVM object.
/// Only `error.OutOfMemory` propagates; backend failures are recorded in
/// `zcu.failed_codegen`.
pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error{OutOfMemory}!void {
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;
    const nav = zcu.intern_pool.getNav(nav_index);
    // Report progress under the nav's fully-qualified name.
    const codegen_prog_node = zcu.codegen_prog_node.start(nav.fqn.toSlice(ip), 0);
    defer codegen_prog_node.end();
    if (!Air.valFullyResolved(zcu.navValue(nav_index), zcu)) {
        // The value of this nav failed to resolve. This is a transitive failure.
        // TODO: do we need to mark this failure anywhere? I don't think so, since compilation
        // will fail due to the type error anyway.
    } else if (comp.bin_file) |lf| {
        lf.updateNav(pt, nav_index) catch |err| switch (err) {
            error.OutOfMemory => return error.OutOfMemory,
            // `CodegenFail` means the backend already recorded the error.
            error.CodegenFail => assert(zcu.failed_codegen.contains(nav_index)),
            error.Overflow, error.RelocationNotByteAligned => {
                try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create(
                    gpa,
                    zcu.navSrcLoc(nav_index),
                    "unable to codegen: {s}",
                    .{@errorName(err)},
                ));
                // Not a retryable failure.
            },
        };
    } else if (zcu.llvm_object) |llvm_object| {
        llvm_object.updateNav(pt, nav_index) catch |err| switch (err) {
            error.OutOfMemory => return error.OutOfMemory,
        };
    }
}
/// Sends a resolved container type to the linker (`comp.bin_file`), clearing
/// any failure previously recorded for it in `zcu.failed_types`.
/// Only `error.OutOfMemory` propagates; `TypeFailureReported` means the linker
/// already recorded the failure.
pub fn linkerUpdateContainerType(pt: Zcu.PerThread, ty: InternPool.Index) error{OutOfMemory}!void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const comp = zcu.comp;
    const ip = &zcu.intern_pool;
    // Report progress under the type's container name.
    const codegen_prog_node = zcu.codegen_prog_node.start(Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), 0);
    defer codegen_prog_node.end();
    // Drop any stale failure from a previous update of this type.
    if (zcu.failed_types.fetchSwapRemove(ty)) |*entry| entry.value.deinit(gpa);
    if (!Air.typeFullyResolved(Type.fromInterned(ty), zcu)) {
        // This type failed to resolve. This is a transitive failure.
        return;
    }
    if (comp.bin_file) |lf| lf.updateContainerType(pt, ty) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.TypeFailureReported => assert(zcu.failed_types.contains(ty)),
    };
}
pub fn linkerUpdateLineNumber(pt: Zcu.PerThread, ti: InternPool.TrackedInst.Index) !void {
if (pt.zcu.comp.bin_file) |lf| {
lf.updateLineNumber(pt, ti) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| log.err("update line number failed: {s}", .{@errorName(e)}),
};
}
// The linker thread is not running, so we actually need to dispatch this task directly.
@import("../link.zig").linkTestFunctionsNav(pt, nav_index);
}
/// Stores an error in `pt.zcu.failed_files` for this file, and sets the file
@ -3984,7 +3831,8 @@ pub fn getExtern(pt: Zcu.PerThread, key: InternPool.Key.Extern) Allocator.Error!
const result = try pt.zcu.intern_pool.getExtern(pt.zcu.gpa, pt.tid, key);
if (result.new_nav.unwrap()) |nav| {
// This job depends on any resolve_type_fully jobs queued up before it.
try pt.zcu.comp.queueJob(.{ .codegen_nav = nav });
pt.zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
try pt.zcu.comp.queueJob(.{ .link_nav = nav });
if (pt.zcu.comp.debugIncremental()) try pt.zcu.incremental_debug_state.newNav(pt.zcu, nav);
}
return result.index;
@ -4122,17 +3970,17 @@ fn recreateStructType(
};
errdefer wip_ty.cancel(ip, pt.tid);
wip_ty.setName(ip, struct_obj.name);
wip_ty.setName(ip, struct_obj.name, struct_obj.name_nav);
try pt.addDependency(.wrap(.{ .type = wip_ty.index }), .{ .src_hash = key.zir_index });
zcu.namespacePtr(struct_obj.namespace).owner_type = wip_ty.index;
// No need to re-scan the namespace -- `zirStructDecl` will ultimately do that if the type is still alive.
try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (file.mod.?.strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
}
if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip_ty.index);
@ -4215,17 +4063,17 @@ fn recreateUnionType(
};
errdefer wip_ty.cancel(ip, pt.tid);
wip_ty.setName(ip, union_obj.name);
wip_ty.setName(ip, union_obj.name, union_obj.name_nav);
try pt.addDependency(.wrap(.{ .type = wip_ty.index }), .{ .src_hash = key.zir_index });
zcu.namespacePtr(namespace_index).owner_type = wip_ty.index;
// No need to re-scan the namespace -- `zirUnionDecl` will ultimately do that if the type is still alive.
try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (file.mod.?.strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
}
if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip_ty.index);
@ -4325,7 +4173,7 @@ fn recreateEnumType(
var done = true;
errdefer if (!done) wip_ty.cancel(ip, pt.tid);
wip_ty.setName(ip, enum_obj.name);
wip_ty.setName(ip, enum_obj.name, enum_obj.name_nav);
zcu.namespacePtr(namespace_index).owner_type = wip_ty.index;
// No need to re-scan the namespace -- `zirEnumDecl` will ultimately do that if the type is still alive.
@ -4518,3 +4366,128 @@ pub fn addDependency(pt: Zcu.PerThread, unit: AnalUnit, dependee: InternPool.Dep
try info.deps.append(gpa, dependee);
}
}
/// Performs code generation, which comes after `Sema` but before `link` in the pipeline.
/// This part of the pipeline is self-contained/"pure", so can be run in parallel with most
/// other code. This function is currently run either on the main thread, or on a separate
/// codegen thread, depending on whether the backend supports `Zcu.Feature.separate_thread`.
///
/// Regardless of success or failure, this function always:
/// * publishes a terminal state (`.ready` or `.failed`) to `out.status`,
/// * notifies the link task queue via `mirReady` so the pending `link_func` task can run,
/// * decrements `zcu.pending_codegen_jobs`, ending the codegen progress node at zero.
pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air, out: *@import("../link.zig").ZcuTask.LinkFunc.SharedMir) void {
    const zcu = pt.zcu;
    if (runCodegenInner(pt, func_index, air)) |mir| {
        // Write the MIR *before* the `.release` store so that a consumer which
        // loads `.ready` (with acquire semantics) sees a fully-written value.
        out.value = mir;
        out.status.store(.ready, .release);
    } else |err| switch (err) {
        error.OutOfMemory => {
            // Record the OOM on the Compilation; the linker only needs to know
            // this function's MIR will never arrive.
            zcu.comp.setAllocFailure();
            out.status.store(.failed, .monotonic);
        },
        error.CodegenFail => {
            // The backend is responsible for having already registered a
            // compile error for this nav; this just asserts that it did.
            zcu.assertCodegenFailed(zcu.funcInfo(func_index).owner_nav);
            out.status.store(.failed, .monotonic);
        },
        error.NoLinkFile => {
            assert(zcu.comp.bin_file == null);
            out.status.store(.failed, .monotonic);
        },
        error.BackendDoesNotProduceMir => {
            // Only these backends are allowed to skip MIR production (they do
            // their own emission); anything else hitting this path is a bug.
            const backend = target_util.zigBackend(zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
            switch (backend) {
                else => unreachable, // assertion failure
                .stage2_spirv64,
                .stage2_llvm,
                => {},
            }
            out.status.store(.failed, .monotonic);
        },
    }
    // Notify the linker thread in all cases -- even on failure -- so the
    // queued link task for this function is not stalled forever.
    zcu.comp.link_task_queue.mirReady(zcu.comp, out);
    if (zcu.pending_codegen_jobs.rmw(.Sub, 1, .monotonic) == 1) {
        // Decremented to 0, so all done.
        zcu.codegen_prog_node.end();
        zcu.codegen_prog_node = .none;
    }
}
/// Inner implementation of `runCodegen`: legalizes and analyzes the AIR, then
/// dispatches to the appropriate backend to produce MIR.
///
/// Errors:
/// * `OutOfMemory` -- allocation failure.
/// * `CodegenFail` -- the backend (or liveness verification) reported a compile
///   error for this function's nav.
/// * `NoLinkFile` -- the compilation has no `bin_file` to generate code for.
/// * `BackendDoesNotProduceMir` -- the LLVM or SPIR-V path was taken; those
///   backends perform their own emission and never yield `AnyMir`.
fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) error{
    OutOfMemory,
    CodegenFail,
    NoLinkFile,
    BackendDoesNotProduceMir,
}!codegen.AnyMir {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;
    const comp = zcu.comp;

    const nav = zcu.funcInfo(func_index).owner_nav;
    const fqn = ip.getNav(nav).fqn;

    // Per-function progress reporting under the codegen progress node.
    const codegen_prog_node = zcu.codegen_prog_node.start(fqn.toSlice(ip), 0);
    defer codegen_prog_node.end();

    // Rewrite AIR in place to satisfy backend-specific legalization features,
    // if this backend requests any.
    if (codegen.legalizeFeatures(pt, nav)) |features| {
        try air.legalize(pt, features);
    }

    // Liveness must be computed *after* legalization, on the final AIR.
    var liveness: Air.Liveness = try .analyze(zcu, air.*, ip);
    defer liveness.deinit(gpa);

    if (build_options.enable_debug_extensions and comp.verbose_air) {
        // Lock stderr so concurrent codegen threads don't interleave dumps.
        std.debug.lockStdErr();
        defer std.debug.unlockStdErr();
        const stderr = std.io.getStdErr().writer();
        stderr.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)}) catch {};
        air.write(stderr, pt, liveness);
        stderr.print("# End Function AIR: {}\n\n", .{fqn.fmt(ip)}) catch {};
    }

    if (std.debug.runtime_safety) {
        // Sanity-check the liveness analysis before handing it to a backend;
        // an inconsistency is surfaced as a codegen failure rather than UB.
        var verify: Air.Liveness.Verify = .{
            .gpa = gpa,
            .zcu = zcu,
            .air = air.*,
            .liveness = liveness,
            .intern_pool = ip,
        };
        defer verify.deinit();
        verify.verify() catch |err| switch (err) {
            error.OutOfMemory => return error.OutOfMemory,
            else => return zcu.codegenFail(nav, "invalid liveness: {s}", .{@errorName(err)}),
        };
    }

    // The LLVM backend is special, because we only need to do codegen. There is no equivalent to the
    // "emit" step because LLVM does not support incremental linking. Our linker (LLD or self-hosted)
    // will just see the ZCU object file which LLVM ultimately emits.
    if (zcu.llvm_object) |llvm_object| {
        assert(pt.tid == .main); // LLVM has a lot of shared state
        try llvm_object.updateFunc(pt, func_index, air, &liveness);
        return error.BackendDoesNotProduceMir;
    }

    const lf = comp.bin_file orelse return error.NoLinkFile;

    // TODO: self-hosted codegen should always have a type of MIR; codegen should produce that MIR,
    // and the linker should consume it. However, our SPIR-V backend is currently tightly coupled
    // with our SPIR-V linker, so needs to work more like the LLVM backend. This should be fixed to
    // unblock threaded codegen for SPIR-V.
    if (lf.cast(.spirv)) |spirv_file| {
        assert(pt.tid == .main); // SPIR-V has a lot of shared state
        spirv_file.object.updateFunc(pt, func_index, air, &liveness) catch |err| {
            switch (err) {
                error.OutOfMemory => comp.link_diags.setAllocFailure(),
            }
            return error.CodegenFail;
        };
        return error.BackendDoesNotProduceMir;
    }

    // Normal self-hosted path: the backend produces MIR which the linker will
    // later consume in its "emit" step.
    return codegen.generateFunction(lf, pt, zcu.navSrcLoc(nav), func_index, air, &liveness) catch |err| switch (err) {
        error.OutOfMemory,
        error.CodegenFail,
        => |e| return e,
        error.Overflow,
        error.RelocationNotByteAligned,
        => return zcu.codegenFail(nav, "unable to codegen: {s}", .{@errorName(err)}),
    };
}

View File

@ -49,7 +49,6 @@ pt: Zcu.PerThread,
air: Air,
liveness: Air.Liveness,
bin_file: *link.File,
debug_output: link.File.DebugInfoOutput,
target: *const std.Target,
func_index: InternPool.Index,
owner_nav: InternPool.Nav.Index,
@ -185,6 +184,9 @@ const DbgInfoReloc = struct {
}
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
// TODO: Add a pseudo-instruction or something to defer this work until Emit.
// We aren't allowed to interact with linker state here.
if (true) return;
switch (function.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.Loc = switch (reloc.mcv) {
@ -213,6 +215,9 @@ const DbgInfoReloc = struct {
}
fn genVarDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
// TODO: Add a pseudo-instruction or something to defer this work until Emit.
// We aren't allowed to interact with linker state here.
if (true) return;
switch (function.debug_output) {
.dwarf => |dwarf| {
const loc: link.File.Dwarf.Loc = switch (reloc.mcv) {
@ -326,11 +331,9 @@ pub fn generate(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!void {
air: *const Air,
liveness: *const Air.Liveness,
) CodeGenError!Mir {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
@ -349,9 +352,8 @@ pub fn generate(
var function: Self = .{
.gpa = gpa,
.pt = pt,
.air = air,
.liveness = liveness,
.debug_output = debug_output,
.air = air.*,
.liveness = liveness.*,
.target = target,
.bin_file = lf,
.func_index = func_index,
@ -395,29 +397,13 @@ pub fn generate(
var mir: Mir = .{
.instructions = function.mir_instructions.toOwnedSlice(),
.extra = try function.mir_extra.toOwnedSlice(gpa),
};
defer mir.deinit(gpa);
var emit: Emit = .{
.mir = mir,
.bin_file = lf,
.debug_output = debug_output,
.target = target,
.src_loc = src_loc,
.code = code,
.prev_di_pc = 0,
.prev_di_line = func.lbrace_line,
.prev_di_column = func.lbrace_column,
.stack_size = function.max_end_stack,
.extra = &.{}, // fallible, so assign after errdefer
.max_end_stack = function.max_end_stack,
.saved_regs_stack_space = function.saved_regs_stack_space,
};
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
error.EmitFail => return function.failMsg(emit.err_msg.?),
else => |e| return e,
};
errdefer mir.deinit(gpa);
mir.extra = try function.mir_extra.toOwnedSlice(gpa);
return mir;
}
fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
@ -4222,15 +4208,22 @@ fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!void {
while (self.args[arg_index] == .none) arg_index += 1;
self.arg_index = arg_index + 1;
const ty = self.typeOfIndex(inst);
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const name = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.name;
if (name != .none) try self.dbg_info_relocs.append(self.gpa, .{
.tag = tag,
.ty = ty,
.name = name.toSlice(self.air),
.mcv = self.args[arg_index],
});
const zcu = self.pt.zcu;
const func_zir = zcu.funcInfo(self.func_index).zir_body_inst.resolveFull(&zcu.intern_pool).?;
const file = zcu.fileByIndex(func_zir.file);
if (!file.mod.?.strip) {
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const arg = self.air.instructions.items(.data)[@intFromEnum(inst)].arg;
const ty = self.typeOfIndex(inst);
const zir = &file.zir.?;
const name = zir.nullTerminatedString(zir.getParamName(zir.getParamBody(func_zir.inst)[arg.zir_param_index]).?);
try self.dbg_info_relocs.append(self.gpa, .{
.tag = tag,
.ty = ty,
.name = name,
.mcv = self.args[arg_index],
});
}
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else self.args[arg_index];
return self.finishAir(inst, result, .{ .none, .none, .none });

View File

@ -13,6 +13,14 @@ const assert = std.debug.assert;
const bits = @import("bits.zig");
const Register = bits.Register;
const InternPool = @import("../../InternPool.zig");
const Emit = @import("Emit.zig");
const codegen = @import("../../codegen.zig");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
max_end_stack: u32,
saved_regs_stack_space: u32,
instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
@ -498,6 +506,39 @@ pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.* = undefined;
}
/// Lowers this function's MIR to machine code, appending the encoded bytes to
/// `code` via an `Emit` pass. On `error.EmitFail`, the emitter's error message
/// is attached to the function's owner nav as a codegen failure.
pub fn emit(
    mir: Mir,
    lf: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    func_index: InternPool.Index,
    code: *std.ArrayListUnmanaged(u8),
    debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
    const zcu = pt.zcu;
    const func = zcu.funcInfo(func_index);
    const nav = func.owner_nav;
    const mod = zcu.navFileScope(nav).mod.?;
    // The emitter carries both the frame layout captured in the MIR and the
    // debug-line state, seeded from the function's opening brace location.
    var emitter: Emit = .{
        .mir = mir,
        .bin_file = lf,
        .debug_output = debug_output,
        .target = &mod.resolved_target.result,
        .src_loc = src_loc,
        .code = code,
        .prev_di_pc = 0,
        .prev_di_line = func.lbrace_line,
        .prev_di_column = func.lbrace_column,
        .stack_size = mir.max_end_stack,
        .saved_regs_stack_space = mir.saved_regs_stack_space,
    };
    defer emitter.deinit();
    emitter.emitMir() catch |emit_err| switch (emit_err) {
        error.EmitFail => return zcu.codegenFailMsg(nav, emitter.err_msg.?),
        else => |other| return other,
    };
}
/// Returns the requested data, as well as the new index which is at the start of the
/// trailers for the object.
pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end: usize } {

View File

@ -50,7 +50,6 @@ pt: Zcu.PerThread,
air: Air,
liveness: Air.Liveness,
bin_file: *link.File,
debug_output: link.File.DebugInfoOutput,
target: *const std.Target,
func_index: InternPool.Index,
err_msg: ?*ErrorMsg,
@ -264,6 +263,9 @@ const DbgInfoReloc = struct {
}
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
// TODO: Add a pseudo-instruction or something to defer this work until Emit.
// We aren't allowed to interact with linker state here.
if (true) return;
switch (function.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.Loc = switch (reloc.mcv) {
@ -292,6 +294,9 @@ const DbgInfoReloc = struct {
}
fn genVarDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
// TODO: Add a pseudo-instruction or something to defer this work until Emit.
// We aren't allowed to interact with linker state here.
if (true) return;
switch (function.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.Loc = switch (reloc.mcv) {
@ -335,11 +340,9 @@ pub fn generate(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!void {
air: *const Air,
liveness: *const Air.Liveness,
) CodeGenError!Mir {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
@ -358,11 +361,10 @@ pub fn generate(
var function: Self = .{
.gpa = gpa,
.pt = pt,
.air = air,
.liveness = liveness,
.air = air.*,
.liveness = liveness.*,
.target = target,
.bin_file = lf,
.debug_output = debug_output,
.func_index = func_index,
.err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
@ -402,31 +404,15 @@ pub fn generate(
return function.fail("failed to generate debug info: {s}", .{@errorName(err)});
}
var mir = Mir{
var mir: Mir = .{
.instructions = function.mir_instructions.toOwnedSlice(),
.extra = try function.mir_extra.toOwnedSlice(gpa),
};
defer mir.deinit(gpa);
var emit = Emit{
.mir = mir,
.bin_file = lf,
.debug_output = debug_output,
.target = target,
.src_loc = src_loc,
.code = code,
.prev_di_pc = 0,
.prev_di_line = func.lbrace_line,
.prev_di_column = func.lbrace_column,
.stack_size = function.max_end_stack,
.extra = &.{}, // fallible, so assign after errdefer
.max_end_stack = function.max_end_stack,
.saved_regs_stack_space = function.saved_regs_stack_space,
};
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
error.EmitFail => return function.failMsg(emit.err_msg.?),
else => |e| return e,
};
errdefer mir.deinit(gpa);
mir.extra = try function.mir_extra.toOwnedSlice(gpa);
return mir;
}
fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
@ -4205,16 +4191,22 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
while (self.args[arg_index] == .none) arg_index += 1;
self.arg_index = arg_index + 1;
const ty = self.typeOfIndex(inst);
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const name = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.name;
if (name != .none) try self.dbg_info_relocs.append(self.gpa, .{
.tag = tag,
.ty = ty,
.name = name.toSlice(self.air),
.mcv = self.args[arg_index],
});
const zcu = self.pt.zcu;
const func_zir = zcu.funcInfo(self.func_index).zir_body_inst.resolveFull(&zcu.intern_pool).?;
const file = zcu.fileByIndex(func_zir.file);
if (!file.mod.?.strip) {
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const arg = self.air.instructions.items(.data)[@intFromEnum(inst)].arg;
const ty = self.typeOfIndex(inst);
const zir = &file.zir.?;
const name = zir.nullTerminatedString(zir.getParamName(zir.getParamBody(func_zir.inst)[arg.zir_param_index]).?);
try self.dbg_info_relocs.append(self.gpa, .{
.tag = tag,
.ty = ty,
.name = name,
.mcv = self.args[arg_index],
});
}
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else self.args[arg_index];
return self.finishAir(inst, result, .{ .none, .none, .none });

View File

@ -13,6 +13,14 @@ const assert = std.debug.assert;
const bits = @import("bits.zig");
const Register = bits.Register;
const InternPool = @import("../../InternPool.zig");
const Emit = @import("Emit.zig");
const codegen = @import("../../codegen.zig");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
max_end_stack: u32,
saved_regs_stack_space: u32,
instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
@ -278,6 +286,39 @@ pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.* = undefined;
}
/// Runs the `Emit` pass over this MIR, encoding machine code into `code`.
/// An `error.EmitFail` from the emitter is converted into a codegen failure
/// recorded against the owning nav; all other errors propagate unchanged.
pub fn emit(
    mir: Mir,
    lf: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    func_index: InternPool.Index,
    code: *std.ArrayListUnmanaged(u8),
    debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
    const zcu = pt.zcu;
    const func = zcu.funcInfo(func_index);
    const nav = func.owner_nav;
    const mod = zcu.navFileScope(nav).mod.?;
    // Resolve the target from the owning module rather than threading it
    // through the call; the MIR itself already carries the frame sizes.
    const target = &mod.resolved_target.result;
    var emitter: Emit = .{
        .mir = mir,
        .bin_file = lf,
        .debug_output = debug_output,
        .target = target,
        .src_loc = src_loc,
        .code = code,
        .prev_di_pc = 0,
        .prev_di_line = func.lbrace_line,
        .prev_di_column = func.lbrace_column,
        .stack_size = mir.max_end_stack,
        .saved_regs_stack_space = mir.saved_regs_stack_space,
    };
    defer emitter.deinit();
    emitter.emitMir() catch |emit_err| switch (emit_err) {
        error.EmitFail => return zcu.codegenFailMsg(nav, emitter.err_msg.?),
        else => |other| return other,
    };
}
/// Returns the requested data, as well as the new index which is at the start of the
/// trailers for the object.
pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end: usize } {

View File

@ -19,19 +19,15 @@ pub fn generate(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
air: *const Air,
liveness: *const Air.Liveness,
) codegen.CodeGenError!noreturn {
_ = bin_file;
_ = pt;
_ = src_loc;
_ = func_index;
_ = air;
_ = liveness;
_ = code;
_ = debug_output;
unreachable;
}

View File

@ -68,9 +68,9 @@ gpa: Allocator,
mod: *Package.Module,
target: *const std.Target,
debug_output: link.File.DebugInfoOutput,
args: []MCValue,
ret_mcv: InstTracking,
func_index: InternPool.Index,
fn_type: Type,
arg_index: usize,
src_loc: Zcu.LazySrcLoc,
@ -746,13 +746,10 @@ pub fn generate(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!void {
air: *const Air,
liveness: *const Air.Liveness,
) CodeGenError!Mir {
const zcu = pt.zcu;
const comp = zcu.comp;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const func = zcu.funcInfo(func_index);
@ -769,16 +766,16 @@ pub fn generate(
var function: Func = .{
.gpa = gpa,
.air = air,
.air = air.*,
.pt = pt,
.mod = mod,
.bin_file = bin_file,
.liveness = liveness,
.liveness = liveness.*,
.target = &mod.resolved_target.result,
.debug_output = debug_output,
.owner = .{ .nav_index = func.owner_nav },
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
.func_index = func_index,
.fn_type = fn_type,
.arg_index = 0,
.branch_stack = &branch_stack,
@ -855,33 +852,8 @@ pub fn generate(
.instructions = function.mir_instructions.toOwnedSlice(),
.frame_locs = function.frame_locs.toOwnedSlice(),
};
defer mir.deinit(gpa);
var emit: Emit = .{
.lower = .{
.pt = pt,
.allocator = gpa,
.mir = mir,
.cc = fn_info.cc,
.src_loc = src_loc,
.output_mode = comp.config.output_mode,
.link_mode = comp.config.link_mode,
.pic = mod.pic,
},
.bin_file = bin_file,
.debug_output = debug_output,
.code = code,
.prev_di_pc = 0,
.prev_di_line = func.lbrace_line,
.prev_di_column = func.lbrace_column,
};
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?),
error.InvalidInstruction => |e| return function.fail("emit MIR failed: {s} (Zig compiler bug)", .{@errorName(e)}),
else => |e| return e,
};
errdefer mir.deinit(gpa);
return mir;
}
pub fn generateLazy(
@ -904,10 +876,10 @@ pub fn generateLazy(
.bin_file = bin_file,
.liveness = undefined,
.target = &mod.resolved_target.result,
.debug_output = debug_output,
.owner = .{ .lazy_sym = lazy_sym },
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
.func_index = undefined,
.fn_type = undefined,
.arg_index = 0,
.branch_stack = undefined,
@ -3631,9 +3603,7 @@ fn airRuntimeNavPtr(func: *Func, inst: Air.Inst.Index) !void {
const tlv_sym_index = if (func.bin_file.cast(.elf)) |elf_file| sym: {
const zo = elf_file.zigObjectPtr().?;
if (nav.getExtern(ip)) |e| {
const sym = try elf_file.getGlobalSymbol(nav.name.toSlice(ip), e.lib_name.toSlice(ip));
zo.symbol(sym).flags.is_extern_ptr = true;
break :sym sym;
break :sym try elf_file.getGlobalSymbol(nav.name.toSlice(ip), e.lib_name.toSlice(ip));
}
break :sym try zo.getOrCreateMetadataForNav(zcu, ty_nav.nav);
} else return func.fail("TODO runtime_nav_ptr on {}", .{func.bin_file.tag});
@ -4755,16 +4725,17 @@ fn airFieldParentPtr(func: *Func, inst: Air.Inst.Index) !void {
return func.fail("TODO implement codegen airFieldParentPtr", .{});
}
fn genArgDbgInfo(func: *const Func, inst: Air.Inst.Index, mcv: MCValue) InnerError!void {
const arg = func.air.instructions.items(.data)[@intFromEnum(inst)].arg;
const ty = arg.ty.toType();
if (arg.name == .none) return;
fn genArgDbgInfo(func: *const Func, name: []const u8, ty: Type, mcv: MCValue) InnerError!void {
assert(!func.mod.strip);
// TODO: Add a pseudo-instruction or something to defer this work until Emit.
// We aren't allowed to interact with linker state here.
if (true) return;
switch (func.debug_output) {
.dwarf => |dw| switch (mcv) {
.register => |reg| dw.genLocalDebugInfo(
.local_arg,
arg.name.toSlice(func.air),
name,
ty,
.{ .reg = reg.dwarfNum() },
) catch |err| return func.fail("failed to generate debug info: {s}", .{@errorName(err)}),
@ -4777,6 +4748,8 @@ fn genArgDbgInfo(func: *const Func, inst: Air.Inst.Index, mcv: MCValue) InnerErr
}
fn airArg(func: *Func, inst: Air.Inst.Index) InnerError!void {
const zcu = func.pt.zcu;
var arg_index = func.arg_index;
// we skip over args that have no bits
@ -4793,7 +4766,14 @@ fn airArg(func: *Func, inst: Air.Inst.Index) InnerError!void {
try func.genCopy(arg_ty, dst_mcv, src_mcv);
try func.genArgDbgInfo(inst, src_mcv);
const arg = func.air.instructions.items(.data)[@intFromEnum(inst)].arg;
// can delete `func.func_index` if this logic is moved to emit
const func_zir = zcu.funcInfo(func.func_index).zir_body_inst.resolveFull(&zcu.intern_pool).?;
const file = zcu.fileByIndex(func_zir.file);
const zir = &file.zir.?;
const name = zir.nullTerminatedString(zir.getParamName(zir.getParamBody(func_zir.inst)[arg.zir_param_index]).?);
try func.genArgDbgInfo(name, arg_ty, src_mcv);
break :result dst_mcv;
};
@ -5273,6 +5253,9 @@ fn genVarDbgInfo(
mcv: MCValue,
name: []const u8,
) !void {
// TODO: Add a pseudo-instruction or something to defer this work until Emit.
// We aren't allowed to interact with linker state here.
if (true) return;
switch (func.debug_output) {
.dwarf => |dwarf| {
const loc: link.File.Dwarf.Loc = switch (mcv) {

View File

@ -50,8 +50,8 @@ pub fn emitMir(emit: *Emit) Error!void {
const atom_ptr = zo.symbol(symbol.atom_index).atom(elf_file).?;
const sym = zo.symbol(symbol.sym_index);
if (sym.flags.is_extern_ptr and emit.lower.pic) {
return emit.fail("emit GOT relocation for symbol '{s}'", .{sym.name(elf_file)});
if (emit.lower.pic) {
return emit.fail("know when to emit GOT relocation for symbol '{s}'", .{sym.name(elf_file)});
}
const hi_r_type: u32 = @intFromEnum(std.elf.R_RISCV.HI20);

View File

@ -109,6 +109,48 @@ pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.* = undefined;
}
/// Lowers this function's MIR to machine code, appending encoded bytes to
/// `code`. Unlike simpler backends, this emitter goes through a `Lower` stage,
/// which needs the calling convention, output/link mode, and PIC setting.
///
/// Errors: `LowerFail`/`EmitFail` become a codegen failure carrying the
/// lowerer's error message; `InvalidInstruction` is reported as a compiler
/// bug; everything else propagates.
pub fn emit(
    mir: Mir,
    lf: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    func_index: InternPool.Index,
    code: *std.ArrayListUnmanaged(u8),
    debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const gpa = comp.gpa;
    const func = zcu.funcInfo(func_index);
    // Needed for the calling convention, which affects instruction lowering.
    const fn_info = zcu.typeToFunc(.fromInterned(func.ty)).?;
    const nav = func.owner_nav;
    const mod = zcu.navFileScope(nav).mod.?;
    var e: Emit = .{
        .lower = .{
            .pt = pt,
            .allocator = gpa,
            .mir = mir,
            .cc = fn_info.cc,
            .src_loc = src_loc,
            .output_mode = comp.config.output_mode,
            .link_mode = comp.config.link_mode,
            .pic = mod.pic,
        },
        .bin_file = lf,
        .debug_output = debug_output,
        .code = code,
        // Debug-line state starts at the function's opening brace.
        .prev_di_pc = 0,
        .prev_di_line = func.lbrace_line,
        .prev_di_column = func.lbrace_column,
    };
    defer e.deinit();
    e.emitMir() catch |err| switch (err) {
        error.LowerFail, error.EmitFail => return zcu.codegenFailMsg(nav, e.lower.err_msg.?),
        error.InvalidInstruction => return zcu.codegenFail(nav, "emit MIR failed: {s} (Zig compiler bug)", .{@errorName(err)}),
        else => |err1| return err1,
    };
}
pub const FrameLoc = struct {
base: Register,
disp: i32,
@ -202,3 +244,9 @@ const FrameIndex = bits.FrameIndex;
const FrameAddr = @import("CodeGen.zig").FrameAddr;
const IntegerBitSet = std.bit_set.IntegerBitSet;
const Mnemonic = @import("mnem.zig").Mnemonic;
const InternPool = @import("../../InternPool.zig");
const Emit = @import("Emit.zig");
const codegen = @import("../../codegen.zig");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");

View File

@ -57,8 +57,6 @@ liveness: Air.Liveness,
bin_file: *link.File,
target: *const std.Target,
func_index: InternPool.Index,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
err_msg: ?*ErrorMsg,
args: []MCValue,
ret_mcv: MCValue,
@ -268,11 +266,9 @@ pub fn generate(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!void {
air: *const Air,
liveness: *const Air.Liveness,
) CodeGenError!Mir {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
@ -291,13 +287,11 @@ pub fn generate(
var function: Self = .{
.gpa = gpa,
.pt = pt,
.air = air,
.liveness = liveness,
.air = air.*,
.liveness = liveness.*,
.target = target,
.bin_file = lf,
.func_index = func_index,
.code = code,
.debug_output = debug_output,
.err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
@ -330,29 +324,13 @@ pub fn generate(
else => |e| return e,
};
var mir = Mir{
var mir: Mir = .{
.instructions = function.mir_instructions.toOwnedSlice(),
.extra = try function.mir_extra.toOwnedSlice(gpa),
};
defer mir.deinit(gpa);
var emit: Emit = .{
.mir = mir,
.bin_file = lf,
.debug_output = debug_output,
.target = target,
.src_loc = src_loc,
.code = code,
.prev_di_pc = 0,
.prev_di_line = func.lbrace_line,
.prev_di_column = func.lbrace_column,
};
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
error.EmitFail => return function.failMsg(emit.err_msg.?),
else => |e| return e,
.extra = &.{}, // fallible, so populated after errdefer
};
errdefer mir.deinit(gpa);
mir.extra = try function.mir_extra.toOwnedSlice(gpa);
return mir;
}
fn gen(self: *Self) !void {
@ -1017,23 +995,29 @@ fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!void {
self.arg_index += 1;
const ty = self.typeOfIndex(inst);
const arg = self.args[arg_index];
const mcv = blk: {
switch (arg) {
const mcv: MCValue = blk: {
switch (self.args[arg_index]) {
.stack_offset => |off| {
const abi_size = math.cast(u32, ty.abiSize(zcu)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(pt)});
};
const offset = off + abi_size;
break :blk MCValue{ .stack_offset = offset };
break :blk .{ .stack_offset = offset };
},
else => break :blk arg,
else => |mcv| break :blk mcv,
}
};
self.genArgDbgInfo(inst, mcv) catch |err|
return self.fail("failed to generate debug info for parameter: {s}", .{@errorName(err)});
const func_zir = zcu.funcInfo(self.func_index).zir_body_inst.resolveFull(&zcu.intern_pool).?;
const file = zcu.fileByIndex(func_zir.file);
if (!file.mod.?.strip) {
const arg = self.air.instructions.items(.data)[@intFromEnum(inst)].arg;
const zir = &file.zir.?;
const name = zir.nullTerminatedString(zir.getParamName(zir.getParamBody(func_zir.inst)[arg.zir_param_index]).?);
self.genArgDbgInfo(name, ty, mcv) catch |err|
return self.fail("failed to generate debug info for parameter: {s}", .{@errorName(err)});
}
if (self.liveness.isUnused(inst))
return self.finishAirBookkeeping();
@ -3561,16 +3545,15 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Air.
self.finishAirBookkeeping();
}
fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
const arg = self.air.instructions.items(.data)[@intFromEnum(inst)].arg;
const ty = arg.ty.toType();
if (arg.name == .none) return;
fn genArgDbgInfo(self: Self, name: []const u8, ty: Type, mcv: MCValue) !void {
// TODO: Add a pseudo-instruction or something to defer this work until Emit.
// We aren't allowed to interact with linker state here.
if (true) return;
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
.register => |reg| try dw.genLocalDebugInfo(
.local_arg,
arg.name.toSlice(self.air),
name,
ty,
.{ .reg = reg.dwarfNum() },
),

View File

@ -12,7 +12,11 @@ const assert = std.debug.assert;
const Mir = @This();
const bits = @import("bits.zig");
const Air = @import("../../Air.zig");
const InternPool = @import("../../InternPool.zig");
const Emit = @import("Emit.zig");
const codegen = @import("../../codegen.zig");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
const Instruction = bits.Instruction;
const ASI = bits.Instruction.ASI;
@ -370,6 +374,37 @@ pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.* = undefined;
}
/// Encodes this MIR into machine code, appending to `code` through an `Emit`
/// pass. An `error.EmitFail` is reported as a codegen failure on the owning
/// nav using the emitter's recorded message; other errors propagate as-is.
pub fn emit(
    mir: Mir,
    lf: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    func_index: InternPool.Index,
    code: *std.ArrayListUnmanaged(u8),
    debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
    const zcu = pt.zcu;
    const func = zcu.funcInfo(func_index);
    const nav = func.owner_nav;
    const mod = zcu.navFileScope(nav).mod.?;
    // Target comes from the module that owns this function's file scope.
    const target = &mod.resolved_target.result;
    var emitter: Emit = .{
        .mir = mir,
        .bin_file = lf,
        .debug_output = debug_output,
        .target = target,
        .src_loc = src_loc,
        .code = code,
        .prev_di_pc = 0,
        .prev_di_line = func.lbrace_line,
        .prev_di_column = func.lbrace_column,
    };
    defer emitter.deinit();
    emitter.emitMir() catch |emit_err| switch (emit_err) {
        error.EmitFail => return zcu.codegenFailMsg(nav, emitter.err_msg.?),
        else => |other| return other,
    };
}
/// Returns the requested data, as well as the new index which is at the start of the
/// trailers for the object.
pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end: usize } {

View File

@ -3,7 +3,6 @@ const builtin = @import("builtin");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const testing = std.testing;
const leb = std.leb;
const mem = std.mem;
const log = std.log.scoped(.codegen);
@ -18,12 +17,10 @@ const Compilation = @import("../../Compilation.zig");
const link = @import("../../link.zig");
const Air = @import("../../Air.zig");
const Mir = @import("Mir.zig");
const Emit = @import("Emit.zig");
const abi = @import("abi.zig");
const Alignment = InternPool.Alignment;
const errUnionPayloadOffset = codegen.errUnionPayloadOffset;
const errUnionErrorOffset = codegen.errUnionErrorOffset;
const Wasm = link.File.Wasm;
const target_util = @import("../../target.zig");
const libcFloatPrefix = target_util.libcFloatPrefix;
@ -78,17 +75,24 @@ simd_immediates: std.ArrayListUnmanaged([16]u8) = .empty,
/// The Target we're emitting (used to call intInfo)
target: *const std.Target,
ptr_size: enum { wasm32, wasm64 },
wasm: *link.File.Wasm,
pt: Zcu.PerThread,
/// List of MIR Instructions
mir_instructions: *std.MultiArrayList(Mir.Inst),
mir_instructions: std.MultiArrayList(Mir.Inst),
/// Contains extra data for MIR
mir_extra: *std.ArrayListUnmanaged(u32),
start_mir_extra_off: u32,
start_locals_off: u32,
mir_extra: std.ArrayListUnmanaged(u32),
/// List of all locals' types generated throughout this declaration
/// used to emit locals count at start of 'code' section.
locals: *std.ArrayListUnmanaged(std.wasm.Valtype),
mir_locals: std.ArrayListUnmanaged(std.wasm.Valtype),
/// Set of all UAVs referenced by this function. Key is the UAV value, value is the alignment.
/// `.none` means naturally aligned. An explicit alignment is never less than the natural alignment.
mir_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment),
/// Set of all functions whose address this function has taken and which therefore might be called
/// via a `call_indirect` function.
mir_indirect_function_set: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void),
/// Set of all function types used by this function. These must be interned by the linker.
mir_func_tys: std.AutoArrayHashMapUnmanaged(InternPool.Index, void),
/// The number of `error_name_table_ref` instructions emitted.
error_name_table_ref_count: u32,
/// When a function is executing, we store the the current stack pointer's value within this local.
/// This value is then used to restore the stack pointer to the original value at the return of the function.
initial_stack_value: WValue = .none,
@ -219,7 +223,7 @@ const WValue = union(enum) {
if (local_value < reserved + 2) return; // reserved locals may never be re-used. Also accounts for 2 stack locals.
const index = local_value - reserved;
const valtype = gen.locals.items[gen.start_locals_off + index];
const valtype = gen.mir_locals.items[index];
switch (valtype) {
.i32 => gen.free_locals_i32.append(gen.gpa, local_value) catch return, // It's ok to fail any of those, a new local can be allocated instead
.i64 => gen.free_locals_i64.append(gen.gpa, local_value) catch return,
@ -716,6 +720,12 @@ pub fn deinit(cg: *CodeGen) void {
cg.free_locals_f32.deinit(gpa);
cg.free_locals_f64.deinit(gpa);
cg.free_locals_v128.deinit(gpa);
cg.mir_instructions.deinit(gpa);
cg.mir_extra.deinit(gpa);
cg.mir_locals.deinit(gpa);
cg.mir_uavs.deinit(gpa);
cg.mir_indirect_function_set.deinit(gpa);
cg.mir_func_tys.deinit(gpa);
cg.* = undefined;
}
@ -876,7 +886,7 @@ fn addTag(cg: *CodeGen, tag: Mir.Inst.Tag) error{OutOfMemory}!void {
}
fn addExtended(cg: *CodeGen, opcode: std.wasm.MiscOpcode) error{OutOfMemory}!void {
const extra_index = cg.extraLen();
const extra_index: u32 = @intCast(cg.mir_extra.items.len);
try cg.mir_extra.append(cg.gpa, @intFromEnum(opcode));
try cg.addInst(.{ .tag = .misc_prefix, .data = .{ .payload = extra_index } });
}
@ -889,10 +899,6 @@ fn addLocal(cg: *CodeGen, tag: Mir.Inst.Tag, local: u32) error{OutOfMemory}!void
try cg.addInst(.{ .tag = tag, .data = .{ .local = local } });
}
fn addFuncTy(cg: *CodeGen, tag: Mir.Inst.Tag, i: Wasm.FunctionType.Index) error{OutOfMemory}!void {
try cg.addInst(.{ .tag = tag, .data = .{ .func_ty = i } });
}
/// Accepts an unsigned 32bit integer rather than a signed integer to
/// prevent us from having to bitcast multiple times as most values
/// within codegen are represented as unsigned rather than signed.
@ -911,7 +917,7 @@ fn addImm64(cg: *CodeGen, imm: u64) error{OutOfMemory}!void {
/// Accepts the index into the list of 128bit-immediates
fn addImm128(cg: *CodeGen, index: u32) error{OutOfMemory}!void {
const simd_values = cg.simd_immediates.items[index];
const extra_index = cg.extraLen();
const extra_index: u32 = @intCast(cg.mir_extra.items.len);
// tag + 128bit value
try cg.mir_extra.ensureUnusedCapacity(cg.gpa, 5);
cg.mir_extra.appendAssumeCapacity(@intFromEnum(std.wasm.SimdOpcode.v128_const));
@ -956,15 +962,13 @@ fn addExtra(cg: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
/// Returns the index into `mir_extra`
fn addExtraAssumeCapacity(cg: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
const fields = std.meta.fields(@TypeOf(extra));
const result = cg.extraLen();
const result: u32 = @intCast(cg.mir_extra.items.len);
inline for (fields) |field| {
cg.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
i32 => @bitCast(@field(extra, field.name)),
InternPool.Index,
InternPool.Nav.Index,
Wasm.UavsObjIndex,
Wasm.UavsExeIndex,
=> @intFromEnum(@field(extra, field.name)),
else => |field_type| @compileError("Unsupported field type " ++ @typeName(field_type)),
});
@ -1034,18 +1038,12 @@ fn emitWValue(cg: *CodeGen, value: WValue) InnerError!void {
.float32 => |val| try cg.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }),
.float64 => |val| try cg.addFloat64(val),
.nav_ref => |nav_ref| {
const wasm = cg.wasm;
const comp = wasm.base.comp;
const zcu = comp.zcu.?;
const zcu = cg.pt.zcu;
const ip = &zcu.intern_pool;
if (ip.getNav(nav_ref.nav_index).isFn(ip)) {
assert(nav_ref.offset == 0);
const gop = try wasm.zcu_indirect_function_set.getOrPut(comp.gpa, nav_ref.nav_index);
if (!gop.found_existing) gop.value_ptr.* = {};
try cg.addInst(.{
.tag = .func_ref,
.data = .{ .indirect_function_table_index = @enumFromInt(gop.index) },
});
try cg.mir_indirect_function_set.put(cg.gpa, nav_ref.nav_index, {});
try cg.addInst(.{ .tag = .func_ref, .data = .{ .nav_index = nav_ref.nav_index } });
} else if (nav_ref.offset == 0) {
try cg.addInst(.{ .tag = .nav_ref, .data = .{ .nav_index = nav_ref.nav_index } });
} else {
@ -1061,41 +1059,37 @@ fn emitWValue(cg: *CodeGen, value: WValue) InnerError!void {
}
},
.uav_ref => |uav| {
const wasm = cg.wasm;
const comp = wasm.base.comp;
const is_obj = comp.config.output_mode == .Obj;
const zcu = comp.zcu.?;
const zcu = cg.pt.zcu;
const ip = &zcu.intern_pool;
if (ip.isFunctionType(ip.typeOf(uav.ip_index))) {
assert(uav.offset == 0);
const owner_nav = ip.toFunc(uav.ip_index).owner_nav;
const gop = try wasm.zcu_indirect_function_set.getOrPut(comp.gpa, owner_nav);
if (!gop.found_existing) gop.value_ptr.* = {};
try cg.addInst(.{
.tag = .func_ref,
.data = .{ .indirect_function_table_index = @enumFromInt(gop.index) },
});
} else if (uav.offset == 0) {
assert(!ip.isFunctionType(ip.typeOf(uav.ip_index)));
const gop = try cg.mir_uavs.getOrPut(cg.gpa, uav.ip_index);
const this_align: Alignment = a: {
if (uav.orig_ptr_ty == .none) break :a .none;
const ptr_type = ip.indexToKey(uav.orig_ptr_ty).ptr_type;
const this_align = ptr_type.flags.alignment;
if (this_align == .none) break :a .none;
const abi_align = Type.fromInterned(ptr_type.child).abiAlignment(zcu);
if (this_align.compare(.lte, abi_align)) break :a .none;
break :a this_align;
};
if (!gop.found_existing or
gop.value_ptr.* == .none or
(this_align != .none and this_align.compare(.gt, gop.value_ptr.*)))
{
gop.value_ptr.* = this_align;
}
if (uav.offset == 0) {
try cg.addInst(.{
.tag = .uav_ref,
.data = if (is_obj) .{
.uav_obj = try wasm.refUavObj(uav.ip_index, uav.orig_ptr_ty),
} else .{
.uav_exe = try wasm.refUavExe(uav.ip_index, uav.orig_ptr_ty),
},
.data = .{ .ip_index = uav.ip_index },
});
} else {
try cg.addInst(.{
.tag = .uav_ref_off,
.data = .{
.payload = if (is_obj) try cg.addExtra(Mir.UavRefOffObj{
.uav_obj = try wasm.refUavObj(uav.ip_index, uav.orig_ptr_ty),
.offset = uav.offset,
}) else try cg.addExtra(Mir.UavRefOffExe{
.uav_exe = try wasm.refUavExe(uav.ip_index, uav.orig_ptr_ty),
.offset = uav.offset,
}),
},
.data = .{ .payload = try cg.addExtra(@as(Mir.UavRefOff, .{
.value = uav.ip_index,
.offset = uav.offset,
})) },
});
}
},
@ -1157,106 +1151,12 @@ fn allocLocal(cg: *CodeGen, ty: Type) InnerError!WValue {
/// to use a zero-initialized local.
fn ensureAllocLocal(cg: *CodeGen, ty: Type) InnerError!WValue {
const zcu = cg.pt.zcu;
try cg.locals.append(cg.gpa, typeToValtype(ty, zcu, cg.target));
try cg.mir_locals.append(cg.gpa, typeToValtype(ty, zcu, cg.target));
const initial_index = cg.local_index;
cg.local_index += 1;
return .{ .local = .{ .value = initial_index, .references = 1 } };
}
pub const Function = extern struct {
/// Index into `Wasm.mir_instructions`.
mir_off: u32,
/// This is unused except for as a safety slice bound and could be removed.
mir_len: u32,
/// Index into `Wasm.mir_extra`.
mir_extra_off: u32,
/// This is unused except for as a safety slice bound and could be removed.
mir_extra_len: u32,
locals_off: u32,
locals_len: u32,
prologue: Prologue,
pub const Prologue = extern struct {
flags: Flags,
sp_local: u32,
stack_size: u32,
bottom_stack_local: u32,
pub const Flags = packed struct(u32) {
stack_alignment: Alignment,
padding: u26 = 0,
};
pub const none: Prologue = .{
.sp_local = 0,
.flags = .{ .stack_alignment = .none },
.stack_size = 0,
.bottom_stack_local = 0,
};
pub fn isNone(p: *const Prologue) bool {
return p.flags.stack_alignment != .none;
}
};
pub fn lower(f: *Function, wasm: *Wasm, code: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
const gpa = wasm.base.comp.gpa;
// Write the locals in the prologue of the function body.
const locals = wasm.all_zcu_locals.items[f.locals_off..][0..f.locals_len];
try code.ensureUnusedCapacity(gpa, 5 + locals.len * 6 + 38);
std.leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(locals.len))) catch unreachable;
for (locals) |local| {
std.leb.writeUleb128(code.fixedWriter(), @as(u32, 1)) catch unreachable;
code.appendAssumeCapacity(@intFromEnum(local));
}
// Stack management section of function prologue.
const stack_alignment = f.prologue.flags.stack_alignment;
if (stack_alignment.toByteUnits()) |align_bytes| {
const sp_global: Wasm.GlobalIndex = .stack_pointer;
// load stack pointer
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_get));
std.leb.writeULEB128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
// store stack pointer so we can restore it when we return from the function
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
leb.writeUleb128(code.fixedWriter(), f.prologue.sp_local) catch unreachable;
// get the total stack size
const aligned_stack: i32 = @intCast(stack_alignment.forward(f.prologue.stack_size));
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
leb.writeIleb128(code.fixedWriter(), aligned_stack) catch unreachable;
// subtract it from the current stack pointer
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_sub));
// Get negative stack alignment
const neg_stack_align = @as(i32, @intCast(align_bytes)) * -1;
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
leb.writeIleb128(code.fixedWriter(), neg_stack_align) catch unreachable;
// Bitwise-and the value to get the new stack pointer to ensure the
// pointers are aligned with the abi alignment.
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_and));
// The bottom will be used to calculate all stack pointer offsets.
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
leb.writeUleb128(code.fixedWriter(), f.prologue.bottom_stack_local) catch unreachable;
// Store the current stack pointer value into the global stack pointer so other function calls will
// start from this value instead and not overwrite the current stack.
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
std.leb.writeULEB128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
}
var emit: Emit = .{
.mir = .{
.instruction_tags = wasm.mir_instructions.items(.tag)[f.mir_off..][0..f.mir_len],
.instruction_datas = wasm.mir_instructions.items(.data)[f.mir_off..][0..f.mir_len],
.extra = wasm.mir_extra.items[f.mir_extra_off..][0..f.mir_extra_len],
},
.wasm = wasm,
.code = code,
};
try emit.lowerToCode();
}
};
pub const Error = error{
OutOfMemory,
/// Compiler was asked to operate on a number larger than supported.
@ -1265,13 +1165,16 @@ pub const Error = error{
CodegenFail,
};
pub fn function(
wasm: *Wasm,
pub fn generate(
bin_file: *link.File,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
) Error!Function {
air: *const Air,
liveness: *const Air.Liveness,
) Error!Mir {
_ = src_loc;
_ = bin_file;
const zcu = pt.zcu;
const gpa = zcu.gpa;
const cg = zcu.funcInfo(func_index);
@ -1279,10 +1182,8 @@ pub fn function(
const target = &file_scope.mod.?.resolved_target.result;
const fn_ty = zcu.navValue(cg.owner_nav).typeOf(zcu);
const fn_info = zcu.typeToFunc(fn_ty).?;
const ip = &zcu.intern_pool;
const fn_ty_index = try wasm.internFunctionType(fn_info.cc, fn_info.param_types.get(ip), .fromInterned(fn_info.return_type), target);
const returns = fn_ty_index.ptr(wasm).returns.slice(wasm);
const any_returns = returns.len != 0;
const ret_ty: Type = .fromInterned(fn_info.return_type);
const any_returns = !firstParamSRet(fn_info.cc, ret_ty, zcu, target) and ret_ty.hasRuntimeBitsIgnoreComptime(zcu);
var cc_result = try resolveCallingConventionValues(zcu, fn_ty, target);
defer cc_result.deinit(gpa);
@ -1290,8 +1191,8 @@ pub fn function(
var code_gen: CodeGen = .{
.gpa = gpa,
.pt = pt,
.air = air,
.liveness = liveness,
.air = air.*,
.liveness = liveness.*,
.owner_nav = cg.owner_nav,
.target = target,
.ptr_size = switch (target.cpu.arch) {
@ -1299,31 +1200,33 @@ pub fn function(
.wasm64 => .wasm64,
else => unreachable,
},
.wasm = wasm,
.func_index = func_index,
.args = cc_result.args,
.return_value = cc_result.return_value,
.local_index = cc_result.local_index,
.mir_instructions = &wasm.mir_instructions,
.mir_extra = &wasm.mir_extra,
.locals = &wasm.all_zcu_locals,
.start_mir_extra_off = @intCast(wasm.mir_extra.items.len),
.start_locals_off = @intCast(wasm.all_zcu_locals.items.len),
.mir_instructions = .empty,
.mir_extra = .empty,
.mir_locals = .empty,
.mir_uavs = .empty,
.mir_indirect_function_set = .empty,
.mir_func_tys = .empty,
.error_name_table_ref_count = 0,
};
defer code_gen.deinit();
return functionInner(&code_gen, any_returns) catch |err| switch (err) {
error.CodegenFail => return error.CodegenFail,
try code_gen.mir_func_tys.putNoClobber(gpa, fn_ty.toIntern(), {});
return generateInner(&code_gen, any_returns) catch |err| switch (err) {
error.CodegenFail,
error.OutOfMemory,
error.Overflow,
=> |e| return e,
else => |e| return code_gen.fail("failed to generate function: {s}", .{@errorName(e)}),
};
}
fn functionInner(cg: *CodeGen, any_returns: bool) InnerError!Function {
const wasm = cg.wasm;
fn generateInner(cg: *CodeGen, any_returns: bool) InnerError!Mir {
const zcu = cg.pt.zcu;
const start_mir_off: u32 = @intCast(wasm.mir_instructions.len);
try cg.branches.append(cg.gpa, .{});
// clean up outer branch
defer {
@ -1347,20 +1250,25 @@ fn functionInner(cg: *CodeGen, any_returns: bool) InnerError!Function {
try cg.addTag(.end);
try cg.addTag(.dbg_epilogue_begin);
return .{
.mir_off = start_mir_off,
.mir_len = @intCast(wasm.mir_instructions.len - start_mir_off),
.mir_extra_off = cg.start_mir_extra_off,
.mir_extra_len = cg.extraLen(),
.locals_off = cg.start_locals_off,
.locals_len = @intCast(wasm.all_zcu_locals.items.len - cg.start_locals_off),
var mir: Mir = .{
.instructions = cg.mir_instructions.toOwnedSlice(),
.extra = &.{}, // fallible so assigned after errdefer
.locals = &.{}, // fallible so assigned after errdefer
.prologue = if (cg.initial_stack_value == .none) .none else .{
.sp_local = cg.initial_stack_value.local.value,
.flags = .{ .stack_alignment = cg.stack_alignment },
.stack_size = cg.stack_size,
.bottom_stack_local = cg.bottom_stack_value.local.value,
},
.uavs = cg.mir_uavs.move(),
.indirect_function_set = cg.mir_indirect_function_set.move(),
.func_tys = cg.mir_func_tys.move(),
.error_name_table_ref_count = cg.error_name_table_ref_count,
};
errdefer mir.deinit(cg.gpa);
mir.extra = try cg.mir_extra.toOwnedSlice(cg.gpa);
mir.locals = try cg.mir_locals.toOwnedSlice(cg.gpa);
return mir;
}
const CallWValues = struct {
@ -1969,7 +1877,7 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.dbg_inline_block => cg.airDbgInlineBlock(inst),
.dbg_var_ptr => cg.airDbgVar(inst, .local_var, true),
.dbg_var_val => cg.airDbgVar(inst, .local_var, false),
.dbg_arg_inline => cg.airDbgVar(inst, .local_arg, false),
.dbg_arg_inline => cg.airDbgVar(inst, .arg, false),
.call => cg.airCall(inst, .auto),
.call_always_tail => cg.airCall(inst, .always_tail),
@ -2220,7 +2128,6 @@ fn airRetLoad(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airCall(cg: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) InnerError!void {
const wasm = cg.wasm;
if (modifier == .always_tail) return cg.fail("TODO implement tail calls for wasm", .{});
const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = cg.air.extraData(Air.Call, pl_op.payload);
@ -2277,8 +2184,11 @@ fn airCall(cg: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifie
const operand = try cg.resolveInst(pl_op.operand);
try cg.emitWValue(operand);
const fn_type_index = try wasm.internFunctionType(fn_info.cc, fn_info.param_types.get(ip), .fromInterned(fn_info.return_type), cg.target);
try cg.addFuncTy(.call_indirect, fn_type_index);
try cg.mir_func_tys.put(cg.gpa, fn_ty.toIntern(), {});
try cg.addInst(.{
.tag = .call_indirect,
.data = .{ .ip_index = fn_ty.toIntern() },
});
}
const result_value = result_value: {
@ -2449,7 +2359,7 @@ fn store(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErr
try cg.emitWValue(lhs);
try cg.lowerToStack(rhs);
// TODO: Add helper functions for simd opcodes
const extra_index = cg.extraLen();
const extra_index: u32 = @intCast(cg.mir_extra.items.len);
// stores as := opcode, offset, alignment (opcode::memarg)
try cg.mir_extra.appendSlice(cg.gpa, &[_]u32{
@intFromEnum(std.wasm.SimdOpcode.v128_store),
@ -2574,7 +2484,7 @@ fn load(cg: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue
if (ty.zigTypeTag(zcu) == .vector) {
// TODO: Add helper functions for simd opcodes
const extra_index = cg.extraLen();
const extra_index: u32 = @intCast(cg.mir_extra.items.len);
// stores as := opcode, offset, alignment (opcode::memarg)
try cg.mir_extra.appendSlice(cg.gpa, &[_]u32{
@intFromEnum(std.wasm.SimdOpcode.v128_load),
@ -4971,7 +4881,7 @@ fn airArrayElemVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try cg.emitWValue(array);
const extra_index = cg.extraLen();
const extra_index: u32 = @intCast(cg.mir_extra.items.len);
try cg.mir_extra.appendSlice(cg.gpa, &operands);
try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
@ -5123,7 +5033,7 @@ fn airSplat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => break :blk, // Cannot make use of simd-instructions
};
try cg.emitWValue(operand);
const extra_index: u32 = cg.extraLen();
const extra_index: u32 = @intCast(cg.mir_extra.items.len);
// stores as := opcode, offset, alignment (opcode::memarg)
try cg.mir_extra.appendSlice(cg.gpa, &[_]u32{
opcode,
@ -5142,7 +5052,7 @@ fn airSplat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => break :blk, // Cannot make use of simd-instructions
};
try cg.emitWValue(operand);
const extra_index = cg.extraLen();
const extra_index: u32 = @intCast(cg.mir_extra.items.len);
try cg.mir_extra.append(cg.gpa, opcode);
try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
return cg.finishAir(inst, .stack, &.{ty_op.operand});
@ -5246,7 +5156,7 @@ fn airShuffleTwo(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
try cg.emitWValue(operand_a);
try cg.emitWValue(operand_b);
const extra_index = cg.extraLen();
const extra_index: u32 = @intCast(cg.mir_extra.items.len);
try cg.mir_extra.appendSlice(cg.gpa, &.{
@intFromEnum(std.wasm.SimdOpcode.i8x16_shuffle),
@bitCast(lane_map[0..4].*),
@ -6016,9 +5926,8 @@ fn airErrorName(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const name_ty = Type.slice_const_u8_sentinel_0;
const abi_size = name_ty.abiSize(pt.zcu);
cg.wasm.error_name_table_ref_count += 1;
// Lowers to a i32.const or i64.const with the error table memory address.
cg.error_name_table_ref_count += 1;
try cg.addTag(.error_name_table_ref);
try cg.emitWValue(operand);
switch (cg.ptr_size) {
@ -6046,7 +5955,7 @@ fn airPtrSliceFieldPtr(cg: *CodeGen, inst: Air.Inst.Index, offset: u32) InnerErr
/// NOTE: Allocates place for result on virtual stack, when integer size > 64 bits
fn intZeroValue(cg: *CodeGen, ty: Type) InnerError!WValue {
const zcu = cg.wasm.base.comp.zcu.?;
const zcu = cg.pt.zcu;
const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return cg.fail("TODO: Implement intZeroValue for integer bitsize: {d}", .{int_info.bits});
@ -6518,7 +6427,7 @@ fn airDbgInlineBlock(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airDbgVar(
cg: *CodeGen,
inst: Air.Inst.Index,
local_tag: link.File.Dwarf.WipNav.LocalTag,
local_tag: link.File.Dwarf.WipNav.LocalVarTag,
is_ptr: bool,
) InnerError!void {
_ = is_ptr;
@ -7673,7 +7582,3 @@ fn floatCmpIntrinsic(op: std.math.CompareOperator, bits: u16) Mir.Intrinsic {
},
};
}
fn extraLen(cg: *const CodeGen) u32 {
return @intCast(cg.mir_extra.items.len - cg.start_mir_extra_off);
}

View File

@ -31,8 +31,8 @@ pub fn lowerToCode(emit: *Emit) Error!void {
const target = &comp.root_mod.resolved_target.result;
const is_wasm32 = target.cpu.arch == .wasm32;
const tags = mir.instruction_tags;
const datas = mir.instruction_datas;
const tags = mir.instructions.items(.tag);
const datas = mir.instructions.items(.data);
var inst: u32 = 0;
loop: switch (tags[inst]) {
@ -50,18 +50,19 @@ pub fn lowerToCode(emit: *Emit) Error!void {
},
.uav_ref => {
if (is_obj) {
try uavRefOffObj(wasm, code, .{ .uav_obj = datas[inst].uav_obj, .offset = 0 }, is_wasm32);
try uavRefObj(wasm, code, datas[inst].ip_index, 0, is_wasm32);
} else {
try uavRefOffExe(wasm, code, .{ .uav_exe = datas[inst].uav_exe, .offset = 0 }, is_wasm32);
try uavRefExe(wasm, code, datas[inst].ip_index, 0, is_wasm32);
}
inst += 1;
continue :loop tags[inst];
},
.uav_ref_off => {
const extra = mir.extraData(Mir.UavRefOff, datas[inst].payload).data;
if (is_obj) {
try uavRefOffObj(wasm, code, mir.extraData(Mir.UavRefOffObj, datas[inst].payload).data, is_wasm32);
try uavRefObj(wasm, code, extra.value, extra.offset, is_wasm32);
} else {
try uavRefOffExe(wasm, code, mir.extraData(Mir.UavRefOffExe, datas[inst].payload).data, is_wasm32);
try uavRefExe(wasm, code, extra.value, extra.offset, is_wasm32);
}
inst += 1;
continue :loop tags[inst];
@ -77,11 +78,14 @@ pub fn lowerToCode(emit: *Emit) Error!void {
continue :loop tags[inst];
},
.func_ref => {
const indirect_func_idx: Wasm.ZcuIndirectFunctionSetIndex = @enumFromInt(
wasm.zcu_indirect_function_set.getIndex(datas[inst].nav_index).?,
);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
if (is_obj) {
@panic("TODO");
} else {
leb.writeUleb128(code.fixedWriter(), 1 + @intFromEnum(datas[inst].indirect_function_table_index)) catch unreachable;
leb.writeUleb128(code.fixedWriter(), 1 + @intFromEnum(indirect_func_idx)) catch unreachable;
}
inst += 1;
continue :loop tags[inst];
@ -101,6 +105,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
continue :loop tags[inst];
},
.error_name_table_ref => {
wasm.error_name_table_ref_count += 1;
try code.ensureUnusedCapacity(gpa, 11);
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
code.appendAssumeCapacity(@intFromEnum(opcode));
@ -176,7 +181,13 @@ pub fn lowerToCode(emit: *Emit) Error!void {
.call_indirect => {
try code.ensureUnusedCapacity(gpa, 11);
const func_ty_index = datas[inst].func_ty;
const fn_info = comp.zcu.?.typeToFunc(.fromInterned(datas[inst].ip_index)).?;
const func_ty_index = wasm.getExistingFunctionType(
fn_info.cc,
fn_info.param_types.get(&comp.zcu.?.intern_pool),
.fromInterned(fn_info.return_type),
target,
).?;
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call_indirect));
if (is_obj) {
try wasm.out_relocs.append(gpa, .{
@ -912,7 +923,7 @@ fn encodeMemArg(code: *std.ArrayListUnmanaged(u8), mem_arg: Mir.MemArg) void {
leb.writeUleb128(code.fixedWriter(), mem_arg.offset) catch unreachable;
}
fn uavRefOffObj(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.UavRefOffObj, is_wasm32: bool) !void {
fn uavRefObj(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
const comp = wasm.base.comp;
const gpa = comp.gpa;
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
@ -922,14 +933,14 @@ fn uavRefOffObj(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.UavRef
try wasm.out_relocs.append(gpa, .{
.offset = @intCast(code.items.len),
.pointee = .{ .symbol_index = try wasm.uavSymbolIndex(data.uav_obj.key(wasm).*) },
.pointee = .{ .symbol_index = try wasm.uavSymbolIndex(value) },
.tag = if (is_wasm32) .memory_addr_leb else .memory_addr_leb64,
.addend = data.offset,
.addend = offset,
});
code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
}
fn uavRefOffExe(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.UavRefOffExe, is_wasm32: bool) !void {
fn uavRefExe(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
const comp = wasm.base.comp;
const gpa = comp.gpa;
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
@ -937,8 +948,8 @@ fn uavRefOffExe(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.UavRef
try code.ensureUnusedCapacity(gpa, 11);
code.appendAssumeCapacity(@intFromEnum(opcode));
const addr = wasm.uavAddr(data.uav_exe);
leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + data.offset))) catch unreachable;
const addr = wasm.uavAddr(value);
leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + offset))) catch unreachable;
}
fn navRefOff(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.NavRefOff, is_wasm32: bool) !void {

View File

@ -9,16 +9,53 @@
const Mir = @This();
const InternPool = @import("../../InternPool.zig");
const Wasm = @import("../../link/Wasm.zig");
const Emit = @import("Emit.zig");
const Alignment = InternPool.Alignment;
const builtin = @import("builtin");
const std = @import("std");
const assert = std.debug.assert;
const leb = std.leb;
instruction_tags: []const Inst.Tag,
instruction_datas: []const Inst.Data,
instructions: std.MultiArrayList(Inst).Slice,
/// A slice of indexes where the meaning of the data is determined by the
/// `Inst.Tag` value.
extra: []const u32,
locals: []const std.wasm.Valtype,
prologue: Prologue,
/// Not directly used by `Emit`, but the linker needs this to merge it with a global set.
/// Value is the explicit alignment if greater than natural alignment, `.none` otherwise.
uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment),
/// Not directly used by `Emit`, but the linker needs this to merge it with a global set.
indirect_function_set: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void),
/// Not directly used by `Emit`, but the linker needs this to ensure these types are interned.
func_tys: std.AutoArrayHashMapUnmanaged(InternPool.Index, void),
/// Not directly used by `Emit`, but the linker needs this to add it to its own refcount.
error_name_table_ref_count: u32,
pub const Prologue = extern struct {
flags: Flags,
sp_local: u32,
stack_size: u32,
bottom_stack_local: u32,
pub const Flags = packed struct(u32) {
stack_alignment: Alignment,
padding: u26 = 0,
};
pub const none: Prologue = .{
.sp_local = 0,
.flags = .{ .stack_alignment = .none },
.stack_size = 0,
.bottom_stack_local = 0,
};
pub fn isNone(p: *const Prologue) bool {
return p.flags.stack_alignment != .none;
}
};
pub const Inst = struct {
/// The opcode that represents this instruction
@ -80,7 +117,7 @@ pub const Inst = struct {
/// Lowers to an i32_const which is the index of the function in the
/// table section.
///
/// Uses `indirect_function_table_index`.
/// Uses `nav_index`.
func_ref,
/// Inserts debug information about the current line and column
/// of the source code
@ -123,7 +160,7 @@ pub const Inst = struct {
/// Calls a function pointer by its function signature
/// and index into the function table.
///
/// Uses `func_ty`
/// Uses `ip_index`; the `InternPool.Index` is the function type.
call_indirect,
/// Calls a function by its index.
///
@ -611,11 +648,7 @@ pub const Inst = struct {
ip_index: InternPool.Index,
nav_index: InternPool.Nav.Index,
func_ty: Wasm.FunctionType.Index,
intrinsic: Intrinsic,
uav_obj: Wasm.UavsObjIndex,
uav_exe: Wasm.UavsExeIndex,
indirect_function_table_index: Wasm.ZcuIndirectFunctionSetIndex,
comptime {
switch (builtin.mode) {
@ -626,10 +659,66 @@ pub const Inst = struct {
};
};
pub fn deinit(self: *Mir, gpa: std.mem.Allocator) void {
self.instructions.deinit(gpa);
gpa.free(self.extra);
self.* = undefined;
pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.instructions.deinit(gpa);
gpa.free(mir.extra);
gpa.free(mir.locals);
mir.uavs.deinit(gpa);
mir.indirect_function_set.deinit(gpa);
mir.func_tys.deinit(gpa);
mir.* = undefined;
}
pub fn lower(mir: *const Mir, wasm: *Wasm, code: *std.ArrayListUnmanaged(u8)) std.mem.Allocator.Error!void {
const gpa = wasm.base.comp.gpa;
// Write the locals in the prologue of the function body.
try code.ensureUnusedCapacity(gpa, 5 + mir.locals.len * 6 + 38);
std.leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(mir.locals.len))) catch unreachable;
for (mir.locals) |local| {
std.leb.writeUleb128(code.fixedWriter(), @as(u32, 1)) catch unreachable;
code.appendAssumeCapacity(@intFromEnum(local));
}
// Stack management section of function prologue.
const stack_alignment = mir.prologue.flags.stack_alignment;
if (stack_alignment.toByteUnits()) |align_bytes| {
const sp_global: Wasm.GlobalIndex = .stack_pointer;
// load stack pointer
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_get));
std.leb.writeULEB128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
// store stack pointer so we can restore it when we return from the function
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
leb.writeUleb128(code.fixedWriter(), mir.prologue.sp_local) catch unreachable;
// get the total stack size
const aligned_stack: i32 = @intCast(stack_alignment.forward(mir.prologue.stack_size));
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
leb.writeIleb128(code.fixedWriter(), aligned_stack) catch unreachable;
// subtract it from the current stack pointer
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_sub));
// Get negative stack alignment
const neg_stack_align = @as(i32, @intCast(align_bytes)) * -1;
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
leb.writeIleb128(code.fixedWriter(), neg_stack_align) catch unreachable;
// Bitwise-and the value to get the new stack pointer to ensure the
// pointers are aligned with the abi alignment.
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_and));
// The bottom will be used to calculate all stack pointer offsets.
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
leb.writeUleb128(code.fixedWriter(), mir.prologue.bottom_stack_local) catch unreachable;
// Store the current stack pointer value into the global stack pointer so other function calls will
// start from this value instead and not overwrite the current stack.
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
std.leb.writeULEB128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
}
var emit: Emit = .{
.mir = mir.*,
.wasm = wasm,
.code = code,
};
try emit.lowerToCode();
}
pub fn extraData(self: *const Mir, comptime T: type, index: usize) struct { data: T, end: usize } {
@ -643,6 +732,7 @@ pub fn extraData(self: *const Mir, comptime T: type, index: usize) struct { data
Wasm.UavsObjIndex,
Wasm.UavsExeIndex,
InternPool.Nav.Index,
InternPool.Index,
=> @enumFromInt(self.extra[i]),
else => |field_type| @compileError("Unsupported field type " ++ @typeName(field_type)),
};
@ -695,13 +785,8 @@ pub const MemArg = struct {
alignment: u32,
};
pub const UavRefOffObj = struct {
uav_obj: Wasm.UavsObjIndex,
offset: i32,
};
pub const UavRefOffExe = struct {
uav_exe: Wasm.UavsExeIndex,
pub const UavRefOff = struct {
value: InternPool.Index,
offset: i32,
};

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,10 +1,6 @@
//! This file contains the functionality for lowering x86_64 MIR to Instructions
bin_file: *link.File,
target: *const std.Target,
output_mode: std.builtin.OutputMode,
link_mode: std.builtin.LinkMode,
pic: bool,
allocator: std.mem.Allocator,
mir: Mir,
cc: std.builtin.CallingConvention,
@ -17,7 +13,6 @@ result_relocs: [max_result_relocs]Reloc = undefined,
const max_result_insts = @max(
1, // non-pseudo instructions
3, // (ELF only) TLS local dynamic (LD) sequence in PIC mode
2, // cmovcc: cmovcc \ cmovcc
3, // setcc: setcc \ setcc \ logicop
2, // jcc: jcc \ jcc
@ -25,6 +20,7 @@ const max_result_insts = @max(
pseudo_probe_adjust_unrolled_max_insts,
pseudo_probe_adjust_setup_insts,
pseudo_probe_adjust_loop_insts,
abi.zigcc.callee_preserved_regs.len * 2, // push_regs/pop_regs
abi.Win64.callee_preserved_regs.len * 2, // push_regs/pop_regs
abi.SysV.callee_preserved_regs.len * 2, // push_regs/pop_regs
);
@ -33,14 +29,13 @@ const max_result_relocs = @max(
2, // jcc: jcc \ jcc
2, // test \ jcc \ probe \ sub \ jmp
1, // probe \ sub \ jcc
3, // (ELF only) TLS local dynamic (LD) sequence in PIC mode
);
const ResultInstIndex = std.math.IntFittingRange(0, max_result_insts - 1);
const ResultRelocIndex = std.math.IntFittingRange(0, max_result_relocs - 1);
const InstOpIndex = std.math.IntFittingRange(
const ResultInstIndex = std.math.IntFittingRange(0, max_result_insts);
const ResultRelocIndex = std.math.IntFittingRange(0, max_result_relocs);
pub const InstOpIndex = std.math.IntFittingRange(
0,
@typeInfo(@FieldType(Instruction, "ops")).array.len - 1,
@typeInfo(@FieldType(Instruction, "ops")).array.len,
);
pub const pseudo_probe_align_insts = 5; // test \ jcc \ probe \ sub \ jmp
@ -54,7 +49,8 @@ pub const Error = error{
LowerFail,
InvalidInstruction,
CannotEncode,
};
CodegenFail,
} || codegen.GenerateSymbolError;
pub const Reloc = struct {
lowered_inst_index: ResultInstIndex,
@ -65,14 +61,10 @@ pub const Reloc = struct {
const Target = union(enum) {
inst: Mir.Inst.Index,
table,
linker_reloc: u32,
linker_pcrel: u32,
linker_tlsld: u32,
linker_dtpoff: u32,
linker_extern_fn: u32,
linker_got: u32,
linker_direct: u32,
linker_import: u32,
nav: InternPool.Nav.Index,
uav: InternPool.Key.Ptr.BaseAddr.Uav,
lazy_sym: link.File.LazySymbol,
extern_func: Mir.NullTerminatedString,
};
};
@ -80,7 +72,7 @@ const Options = struct { allow_frame_locs: bool };
/// The returned slice is overwritten by the next call to lowerMir.
pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
insts: []const Instruction,
insts: []Instruction,
relocs: []const Reloc,
} {
lower.result_insts = undefined;
@ -98,130 +90,130 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.pseudo => switch (inst.ops) {
.pseudo_cmov_z_and_np_rr => {
assert(inst.data.rr.fixes == ._);
try lower.emit(.none, .cmovnz, &.{
try lower.encode(.none, .cmovnz, &.{
.{ .reg = inst.data.rr.r2 },
.{ .reg = inst.data.rr.r1 },
});
try lower.emit(.none, .cmovnp, &.{
try lower.encode(.none, .cmovnp, &.{
.{ .reg = inst.data.rr.r1 },
.{ .reg = inst.data.rr.r2 },
});
},
.pseudo_cmov_nz_or_p_rr => {
assert(inst.data.rr.fixes == ._);
try lower.emit(.none, .cmovnz, &.{
try lower.encode(.none, .cmovnz, &.{
.{ .reg = inst.data.rr.r1 },
.{ .reg = inst.data.rr.r2 },
});
try lower.emit(.none, .cmovp, &.{
try lower.encode(.none, .cmovp, &.{
.{ .reg = inst.data.rr.r1 },
.{ .reg = inst.data.rr.r2 },
});
},
.pseudo_cmov_nz_or_p_rm => {
assert(inst.data.rx.fixes == ._);
try lower.emit(.none, .cmovnz, &.{
try lower.encode(.none, .cmovnz, &.{
.{ .reg = inst.data.rx.r1 },
.{ .mem = lower.mem(1, inst.data.rx.payload) },
});
try lower.emit(.none, .cmovp, &.{
try lower.encode(.none, .cmovp, &.{
.{ .reg = inst.data.rx.r1 },
.{ .mem = lower.mem(1, inst.data.rx.payload) },
});
},
.pseudo_set_z_and_np_r => {
assert(inst.data.rr.fixes == ._);
try lower.emit(.none, .setz, &.{
try lower.encode(.none, .setz, &.{
.{ .reg = inst.data.rr.r1 },
});
try lower.emit(.none, .setnp, &.{
try lower.encode(.none, .setnp, &.{
.{ .reg = inst.data.rr.r2 },
});
try lower.emit(.none, .@"and", &.{
try lower.encode(.none, .@"and", &.{
.{ .reg = inst.data.rr.r1 },
.{ .reg = inst.data.rr.r2 },
});
},
.pseudo_set_z_and_np_m => {
assert(inst.data.rx.fixes == ._);
try lower.emit(.none, .setz, &.{
try lower.encode(.none, .setz, &.{
.{ .mem = lower.mem(0, inst.data.rx.payload) },
});
try lower.emit(.none, .setnp, &.{
try lower.encode(.none, .setnp, &.{
.{ .reg = inst.data.rx.r1 },
});
try lower.emit(.none, .@"and", &.{
try lower.encode(.none, .@"and", &.{
.{ .mem = lower.mem(0, inst.data.rx.payload) },
.{ .reg = inst.data.rx.r1 },
});
},
.pseudo_set_nz_or_p_r => {
assert(inst.data.rr.fixes == ._);
try lower.emit(.none, .setnz, &.{
try lower.encode(.none, .setnz, &.{
.{ .reg = inst.data.rr.r1 },
});
try lower.emit(.none, .setp, &.{
try lower.encode(.none, .setp, &.{
.{ .reg = inst.data.rr.r2 },
});
try lower.emit(.none, .@"or", &.{
try lower.encode(.none, .@"or", &.{
.{ .reg = inst.data.rr.r1 },
.{ .reg = inst.data.rr.r2 },
});
},
.pseudo_set_nz_or_p_m => {
assert(inst.data.rx.fixes == ._);
try lower.emit(.none, .setnz, &.{
try lower.encode(.none, .setnz, &.{
.{ .mem = lower.mem(0, inst.data.rx.payload) },
});
try lower.emit(.none, .setp, &.{
try lower.encode(.none, .setp, &.{
.{ .reg = inst.data.rx.r1 },
});
try lower.emit(.none, .@"or", &.{
try lower.encode(.none, .@"or", &.{
.{ .mem = lower.mem(0, inst.data.rx.payload) },
.{ .reg = inst.data.rx.r1 },
});
},
.pseudo_j_z_and_np_inst => {
assert(inst.data.inst.fixes == ._);
try lower.emit(.none, .jnz, &.{
try lower.encode(.none, .jnz, &.{
.{ .imm = lower.reloc(0, .{ .inst = index + 1 }, 0) },
});
try lower.emit(.none, .jnp, &.{
try lower.encode(.none, .jnp, &.{
.{ .imm = lower.reloc(0, .{ .inst = inst.data.inst.inst }, 0) },
});
},
.pseudo_j_nz_or_p_inst => {
assert(inst.data.inst.fixes == ._);
try lower.emit(.none, .jnz, &.{
try lower.encode(.none, .jnz, &.{
.{ .imm = lower.reloc(0, .{ .inst = inst.data.inst.inst }, 0) },
});
try lower.emit(.none, .jp, &.{
try lower.encode(.none, .jp, &.{
.{ .imm = lower.reloc(0, .{ .inst = inst.data.inst.inst }, 0) },
});
},
.pseudo_probe_align_ri_s => {
try lower.emit(.none, .@"test", &.{
try lower.encode(.none, .@"test", &.{
.{ .reg = inst.data.ri.r1 },
.{ .imm = .s(@bitCast(inst.data.ri.i)) },
});
try lower.emit(.none, .jz, &.{
try lower.encode(.none, .jz, &.{
.{ .imm = lower.reloc(0, .{ .inst = index + 1 }, 0) },
});
try lower.emit(.none, .lea, &.{
try lower.encode(.none, .lea, &.{
.{ .reg = inst.data.ri.r1 },
.{ .mem = Memory.initSib(.qword, .{
.base = .{ .reg = inst.data.ri.r1 },
.disp = -page_size,
}) },
});
try lower.emit(.none, .@"test", &.{
try lower.encode(.none, .@"test", &.{
.{ .mem = Memory.initSib(.dword, .{
.base = .{ .reg = inst.data.ri.r1 },
}) },
.{ .reg = inst.data.ri.r1.to32() },
});
try lower.emit(.none, .jmp, &.{
try lower.encode(.none, .jmp, &.{
.{ .imm = lower.reloc(0, .{ .inst = index }, 0) },
});
assert(lower.result_insts_len == pseudo_probe_align_insts);
@ -229,7 +221,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.pseudo_probe_adjust_unrolled_ri_s => {
var offset = page_size;
while (offset < @as(i32, @bitCast(inst.data.ri.i))) : (offset += page_size) {
try lower.emit(.none, .@"test", &.{
try lower.encode(.none, .@"test", &.{
.{ .mem = Memory.initSib(.dword, .{
.base = .{ .reg = inst.data.ri.r1 },
.disp = -offset,
@ -237,25 +229,25 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.{ .reg = inst.data.ri.r1.to32() },
});
}
try lower.emit(.none, .sub, &.{
try lower.encode(.none, .sub, &.{
.{ .reg = inst.data.ri.r1 },
.{ .imm = .s(@bitCast(inst.data.ri.i)) },
});
assert(lower.result_insts_len <= pseudo_probe_adjust_unrolled_max_insts);
},
.pseudo_probe_adjust_setup_rri_s => {
try lower.emit(.none, .mov, &.{
try lower.encode(.none, .mov, &.{
.{ .reg = inst.data.rri.r2.to32() },
.{ .imm = .s(@bitCast(inst.data.rri.i)) },
});
try lower.emit(.none, .sub, &.{
try lower.encode(.none, .sub, &.{
.{ .reg = inst.data.rri.r1 },
.{ .reg = inst.data.rri.r2 },
});
assert(lower.result_insts_len == pseudo_probe_adjust_setup_insts);
},
.pseudo_probe_adjust_loop_rr => {
try lower.emit(.none, .@"test", &.{
try lower.encode(.none, .@"test", &.{
.{ .mem = Memory.initSib(.dword, .{
.base = .{ .reg = inst.data.rr.r1 },
.scale_index = .{ .scale = 1, .index = inst.data.rr.r2 },
@ -263,11 +255,11 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
}) },
.{ .reg = inst.data.rr.r1.to32() },
});
try lower.emit(.none, .sub, &.{
try lower.encode(.none, .sub, &.{
.{ .reg = inst.data.rr.r2 },
.{ .imm = .s(page_size) },
});
try lower.emit(.none, .jae, &.{
try lower.encode(.none, .jae, &.{
.{ .imm = lower.reloc(0, .{ .inst = index }, 0) },
});
assert(lower.result_insts_len == pseudo_probe_adjust_loop_insts);
@ -275,47 +267,47 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.pseudo_push_reg_list => try lower.pushPopRegList(.push, inst),
.pseudo_pop_reg_list => try lower.pushPopRegList(.pop, inst),
.pseudo_cfi_def_cfa_ri_s => try lower.emit(.directive, .@".cfi_def_cfa", &.{
.pseudo_cfi_def_cfa_ri_s => try lower.encode(.directive, .@".cfi_def_cfa", &.{
.{ .reg = inst.data.ri.r1 },
.{ .imm = lower.imm(.ri_s, inst.data.ri.i) },
}),
.pseudo_cfi_def_cfa_register_r => try lower.emit(.directive, .@".cfi_def_cfa_register", &.{
.pseudo_cfi_def_cfa_register_r => try lower.encode(.directive, .@".cfi_def_cfa_register", &.{
.{ .reg = inst.data.r.r1 },
}),
.pseudo_cfi_def_cfa_offset_i_s => try lower.emit(.directive, .@".cfi_def_cfa_offset", &.{
.pseudo_cfi_def_cfa_offset_i_s => try lower.encode(.directive, .@".cfi_def_cfa_offset", &.{
.{ .imm = lower.imm(.i_s, inst.data.i.i) },
}),
.pseudo_cfi_adjust_cfa_offset_i_s => try lower.emit(.directive, .@".cfi_adjust_cfa_offset", &.{
.pseudo_cfi_adjust_cfa_offset_i_s => try lower.encode(.directive, .@".cfi_adjust_cfa_offset", &.{
.{ .imm = lower.imm(.i_s, inst.data.i.i) },
}),
.pseudo_cfi_offset_ri_s => try lower.emit(.directive, .@".cfi_offset", &.{
.pseudo_cfi_offset_ri_s => try lower.encode(.directive, .@".cfi_offset", &.{
.{ .reg = inst.data.ri.r1 },
.{ .imm = lower.imm(.ri_s, inst.data.ri.i) },
}),
.pseudo_cfi_val_offset_ri_s => try lower.emit(.directive, .@".cfi_val_offset", &.{
.pseudo_cfi_val_offset_ri_s => try lower.encode(.directive, .@".cfi_val_offset", &.{
.{ .reg = inst.data.ri.r1 },
.{ .imm = lower.imm(.ri_s, inst.data.ri.i) },
}),
.pseudo_cfi_rel_offset_ri_s => try lower.emit(.directive, .@".cfi_rel_offset", &.{
.pseudo_cfi_rel_offset_ri_s => try lower.encode(.directive, .@".cfi_rel_offset", &.{
.{ .reg = inst.data.ri.r1 },
.{ .imm = lower.imm(.ri_s, inst.data.ri.i) },
}),
.pseudo_cfi_register_rr => try lower.emit(.directive, .@".cfi_register", &.{
.pseudo_cfi_register_rr => try lower.encode(.directive, .@".cfi_register", &.{
.{ .reg = inst.data.rr.r1 },
.{ .reg = inst.data.rr.r2 },
}),
.pseudo_cfi_restore_r => try lower.emit(.directive, .@".cfi_restore", &.{
.pseudo_cfi_restore_r => try lower.encode(.directive, .@".cfi_restore", &.{
.{ .reg = inst.data.r.r1 },
}),
.pseudo_cfi_undefined_r => try lower.emit(.directive, .@".cfi_undefined", &.{
.pseudo_cfi_undefined_r => try lower.encode(.directive, .@".cfi_undefined", &.{
.{ .reg = inst.data.r.r1 },
}),
.pseudo_cfi_same_value_r => try lower.emit(.directive, .@".cfi_same_value", &.{
.pseudo_cfi_same_value_r => try lower.encode(.directive, .@".cfi_same_value", &.{
.{ .reg = inst.data.r.r1 },
}),
.pseudo_cfi_remember_state_none => try lower.emit(.directive, .@".cfi_remember_state", &.{}),
.pseudo_cfi_restore_state_none => try lower.emit(.directive, .@".cfi_restore_state", &.{}),
.pseudo_cfi_escape_bytes => try lower.emit(.directive, .@".cfi_escape", &.{
.pseudo_cfi_remember_state_none => try lower.encode(.directive, .@".cfi_remember_state", &.{}),
.pseudo_cfi_restore_state_none => try lower.encode(.directive, .@".cfi_restore_state", &.{}),
.pseudo_cfi_escape_bytes => try lower.encode(.directive, .@".cfi_escape", &.{
.{ .bytes = inst.data.bytes.get(lower.mir) },
}),
@ -327,16 +319,23 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.pseudo_dbg_leave_block_none,
.pseudo_dbg_enter_inline_func,
.pseudo_dbg_leave_inline_func,
.pseudo_dbg_local_a,
.pseudo_dbg_local_ai_s,
.pseudo_dbg_local_ai_u,
.pseudo_dbg_local_ai_64,
.pseudo_dbg_local_as,
.pseudo_dbg_local_aso,
.pseudo_dbg_local_aro,
.pseudo_dbg_local_af,
.pseudo_dbg_local_am,
.pseudo_dbg_arg_none,
.pseudo_dbg_arg_i_s,
.pseudo_dbg_arg_i_u,
.pseudo_dbg_arg_i_64,
.pseudo_dbg_arg_ro,
.pseudo_dbg_arg_fa,
.pseudo_dbg_arg_m,
.pseudo_dbg_arg_val,
.pseudo_dbg_var_args_none,
.pseudo_dbg_var_none,
.pseudo_dbg_var_i_s,
.pseudo_dbg_var_i_u,
.pseudo_dbg_var_i_64,
.pseudo_dbg_var_ro,
.pseudo_dbg_var_fa,
.pseudo_dbg_var_m,
.pseudo_dbg_var_val,
.pseudo_dead_none,
=> {},
@ -353,7 +352,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error {
@branchHint(.cold);
assert(lower.err_msg == null);
lower.err_msg = try Zcu.ErrorMsg.create(lower.allocator, lower.src_loc, format, args);
lower.err_msg = try .create(lower.allocator, lower.src_loc, format, args);
return error.LowerFail;
}
@ -364,7 +363,8 @@ pub fn imm(lower: *const Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
.i_s,
.mi_s,
.rmi_s,
.pseudo_dbg_local_ai_s,
.pseudo_dbg_arg_i_s,
.pseudo_dbg_var_i_s,
=> .s(@bitCast(i)),
.ii,
@ -379,24 +379,32 @@ pub fn imm(lower: *const Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
.mri,
.rrm,
.rrmi,
.pseudo_dbg_local_ai_u,
.pseudo_dbg_arg_i_u,
.pseudo_dbg_var_i_u,
=> .u(i),
.ri_64,
.pseudo_dbg_local_ai_64,
=> .u(lower.mir.extraData(Mir.Imm64, i).data.decode()),
.pseudo_dbg_arg_i_64,
.pseudo_dbg_var_i_64,
=> unreachable,
else => unreachable,
};
}
pub fn mem(lower: *Lower, op_index: InstOpIndex, payload: u32) Memory {
var m = lower.mir.resolveFrameLoc(lower.mir.extraData(Mir.Memory, payload).data).decode();
fn mem(lower: *Lower, op_index: InstOpIndex, payload: u32) Memory {
var m = lower.mir.resolveMemoryExtra(payload).decode();
switch (m) {
.sib => |*sib| switch (sib.base) {
else => {},
.none, .reg, .frame => {},
.table => sib.disp = lower.reloc(op_index, .table, sib.disp).signed,
.rip_inst => |inst_index| sib.disp = lower.reloc(op_index, .{ .inst = inst_index }, sib.disp).signed,
.nav => |nav| sib.disp = lower.reloc(op_index, .{ .nav = nav }, sib.disp).signed,
.uav => |uav| sib.disp = lower.reloc(op_index, .{ .uav = uav }, sib.disp).signed,
.lazy_sym => |lazy_sym| sib.disp = lower.reloc(op_index, .{ .lazy_sym = lazy_sym }, sib.disp).signed,
.extern_func => |extern_func| sib.disp = lower.reloc(op_index, .{ .extern_func = extern_func }, sib.disp).signed,
},
else => {},
}
@ -414,177 +422,40 @@ fn reloc(lower: *Lower, op_index: InstOpIndex, target: Reloc.Target, off: i32) I
return .s(0);
}
fn emit(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) Error!void {
const emit_prefix = prefix;
var emit_mnemonic = mnemonic;
var emit_ops_storage: [4]Operand = undefined;
const emit_ops = emit_ops_storage[0..ops.len];
for (emit_ops, ops, 0..) |*emit_op, op, op_index| {
emit_op.* = switch (op) {
else => op,
.mem => |mem_op| op: switch (mem_op.base()) {
else => op,
.reloc => |sym_index| {
assert(prefix == .none);
assert(mem_op.sib.disp == 0);
assert(mem_op.sib.scale_index.scale == 0);
if (lower.bin_file.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
const elf_sym = zo.symbol(sym_index);
if (elf_sym.flags.is_tls) {
// TODO handle extern TLS vars, i.e., emit GD model
if (lower.pic) {
// Here, we currently assume local dynamic TLS vars, and so
// we emit LD model.
_ = lower.reloc(1, .{ .linker_tlsld = sym_index }, 0);
lower.result_insts[lower.result_insts_len] = try .new(.none, .lea, &.{
.{ .reg = .rdi },
.{ .mem = Memory.initRip(.none, 0) },
}, lower.target);
lower.result_insts_len += 1;
_ = lower.reloc(0, .{
.linker_extern_fn = try elf_file.getGlobalSymbol("__tls_get_addr", null),
}, 0);
lower.result_insts[lower.result_insts_len] = try .new(.none, .call, &.{
.{ .imm = .s(0) },
}, lower.target);
lower.result_insts_len += 1;
_ = lower.reloc(@intCast(op_index), .{ .linker_dtpoff = sym_index }, 0);
emit_mnemonic = .lea;
break :op .{ .mem = Memory.initSib(.none, .{
.base = .{ .reg = .rax },
.disp = std.math.minInt(i32),
}) };
} else {
// Since we are linking statically, we emit LE model directly.
lower.result_insts[lower.result_insts_len] = try .new(.none, .mov, &.{
.{ .reg = .rax },
.{ .mem = Memory.initSib(.qword, .{ .base = .{ .reg = .fs } }) },
}, lower.target);
lower.result_insts_len += 1;
_ = lower.reloc(@intCast(op_index), .{ .linker_reloc = sym_index }, 0);
emit_mnemonic = .lea;
break :op .{ .mem = Memory.initSib(.none, .{
.base = .{ .reg = .rax },
.disp = std.math.minInt(i32),
}) };
}
}
if (lower.pic) switch (mnemonic) {
.lea => {
_ = lower.reloc(@intCast(op_index), .{ .linker_reloc = sym_index }, 0);
if (!elf_sym.flags.is_extern_ptr) break :op .{ .mem = Memory.initRip(.none, 0) };
emit_mnemonic = .mov;
break :op .{ .mem = Memory.initRip(.ptr, 0) };
},
.mov => {
if (elf_sym.flags.is_extern_ptr) {
const reg = ops[0].reg;
_ = lower.reloc(1, .{ .linker_reloc = sym_index }, 0);
lower.result_insts[lower.result_insts_len] = try .new(.none, .mov, &.{
.{ .reg = reg.to64() },
.{ .mem = Memory.initRip(.qword, 0) },
}, lower.target);
lower.result_insts_len += 1;
break :op .{ .mem = Memory.initSib(mem_op.sib.ptr_size, .{ .base = .{
.reg = reg.to64(),
} }) };
}
_ = lower.reloc(@intCast(op_index), .{ .linker_reloc = sym_index }, 0);
break :op .{ .mem = Memory.initRip(mem_op.sib.ptr_size, 0) };
},
else => unreachable,
};
_ = lower.reloc(@intCast(op_index), .{ .linker_reloc = sym_index }, 0);
switch (mnemonic) {
.call => break :op .{ .mem = Memory.initSib(mem_op.sib.ptr_size, .{
.base = .{ .reg = .ds },
}) },
.lea => {
emit_mnemonic = .mov;
break :op .{ .imm = .s(0) };
},
.mov => break :op .{ .mem = Memory.initSib(mem_op.sib.ptr_size, .{
.base = .{ .reg = .ds },
}) },
else => unreachable,
}
} else if (lower.bin_file.cast(.macho)) |macho_file| {
const zo = macho_file.getZigObject().?;
const macho_sym = zo.symbols.items[sym_index];
if (macho_sym.flags.tlv) {
_ = lower.reloc(1, .{ .linker_reloc = sym_index }, 0);
lower.result_insts[lower.result_insts_len] = try .new(.none, .mov, &.{
.{ .reg = .rdi },
.{ .mem = Memory.initRip(.ptr, 0) },
}, lower.target);
lower.result_insts_len += 1;
lower.result_insts[lower.result_insts_len] = try .new(.none, .call, &.{
.{ .mem = Memory.initSib(.qword, .{ .base = .{ .reg = .rdi } }) },
}, lower.target);
lower.result_insts_len += 1;
emit_mnemonic = .mov;
break :op .{ .reg = .rax };
}
break :op switch (mnemonic) {
.lea => {
_ = lower.reloc(@intCast(op_index), .{ .linker_reloc = sym_index }, 0);
if (!macho_sym.flags.is_extern_ptr) break :op .{ .mem = Memory.initRip(.none, 0) };
emit_mnemonic = .mov;
break :op .{ .mem = Memory.initRip(.ptr, 0) };
},
.mov => {
if (macho_sym.flags.is_extern_ptr) {
const reg = ops[0].reg;
_ = lower.reloc(1, .{ .linker_reloc = sym_index }, 0);
lower.result_insts[lower.result_insts_len] = try .new(.none, .mov, &.{
.{ .reg = reg.to64() },
.{ .mem = Memory.initRip(.qword, 0) },
}, lower.target);
lower.result_insts_len += 1;
break :op .{ .mem = Memory.initSib(mem_op.sib.ptr_size, .{ .base = .{
.reg = reg.to64(),
} }) };
}
_ = lower.reloc(@intCast(op_index), .{ .linker_reloc = sym_index }, 0);
break :op .{ .mem = Memory.initRip(mem_op.sib.ptr_size, 0) };
},
else => unreachable,
};
} else {
return lower.fail("TODO: bin format '{s}'", .{@tagName(lower.bin_file.tag)});
}
},
.pcrel => |sym_index| {
assert(prefix == .none);
assert(mem_op.sib.disp == 0);
assert(mem_op.sib.scale_index.scale == 0);
_ = lower.reloc(@intCast(op_index), .{ .linker_pcrel = sym_index }, 0);
break :op switch (lower.bin_file.tag) {
.elf => op,
.macho => switch (mnemonic) {
.lea => .{ .mem = Memory.initRip(.none, 0) },
.mov => .{ .mem = Memory.initRip(mem_op.sib.ptr_size, 0) },
else => unreachable,
},
else => |tag| return lower.fail("TODO: bin format '{s}'", .{@tagName(tag)}),
};
},
},
};
}
lower.result_insts[lower.result_insts_len] = try .new(emit_prefix, emit_mnemonic, emit_ops, lower.target);
fn encode(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) Error!void {
lower.result_insts[lower.result_insts_len] = try .new(prefix, mnemonic, ops, lower.target);
lower.result_insts_len += 1;
}
const inst_tags_len = @typeInfo(Mir.Inst.Tag).@"enum".fields.len;
const inst_fixes_len = @typeInfo(Mir.Inst.Fixes).@"enum".fields.len;
/// Lookup table, indexed by `@intFromEnum(inst.tag) * inst_fixes_len + @intFromEnum(fixes)`.
/// The value is the resulting `Mnemonic`, or `null` if the combination is not valid.
const mnemonic_table: [inst_tags_len * inst_fixes_len]?Mnemonic = table: {
@setEvalBranchQuota(80_000);
var table: [inst_tags_len * inst_fixes_len]?Mnemonic = undefined;
for (0..inst_fixes_len) |fixes_i| {
const fixes: Mir.Inst.Fixes = @enumFromInt(fixes_i);
const prefix, const suffix = affix: {
const pattern = if (std.mem.indexOfScalar(u8, @tagName(fixes), ' ')) |i|
@tagName(fixes)[i + 1 ..]
else
@tagName(fixes);
const wildcard_idx = std.mem.indexOfScalar(u8, pattern, '_').?;
break :affix .{ pattern[0..wildcard_idx], pattern[wildcard_idx + 1 ..] };
};
for (0..inst_tags_len) |inst_tag_i| {
const inst_tag: Mir.Inst.Tag = @enumFromInt(inst_tag_i);
const name = prefix ++ @tagName(inst_tag) ++ suffix;
const idx = inst_tag_i * inst_fixes_len + fixes_i;
table[idx] = if (@hasField(Mnemonic, name)) @field(Mnemonic, name) else null;
}
}
break :table table;
};
fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
@setEvalBranchQuota(2_800);
@setEvalBranchQuota(2_000);
const fixes = switch (inst.ops) {
.none => inst.data.none.fixes,
.inst => inst.data.inst.fixes,
@ -604,28 +475,27 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
.rrmi => inst.data.rrix.fixes,
.mi_u, .mi_s => inst.data.x.fixes,
.m => inst.data.x.fixes,
.extern_fn_reloc, .got_reloc, .direct_reloc, .import_reloc, .tlv_reloc, .rel => ._,
.nav, .uav, .lazy_sym, .extern_func => ._,
else => return lower.fail("TODO lower .{s}", .{@tagName(inst.ops)}),
};
try lower.emit(switch (fixes) {
try lower.encode(switch (fixes) {
inline else => |tag| comptime if (std.mem.indexOfScalar(u8, @tagName(tag), ' ')) |space|
@field(Prefix, @tagName(tag)[0..space])
else
.none,
}, mnemonic: {
comptime var max_len = 0;
inline for (@typeInfo(Mnemonic).@"enum".fields) |field| max_len = @max(field.name.len, max_len);
var buf: [max_len]u8 = undefined;
if (mnemonic_table[@intFromEnum(inst.tag) * inst_fixes_len + @intFromEnum(fixes)]) |mnemonic| {
break :mnemonic mnemonic;
}
// This combination is invalid; make the theoretical mnemonic name and emit an error with it.
const fixes_name = @tagName(fixes);
const pattern = fixes_name[if (std.mem.indexOfScalar(u8, fixes_name, ' ')) |i| i + " ".len else 0..];
const wildcard_index = std.mem.indexOfScalar(u8, pattern, '_').?;
const parts = .{ pattern[0..wildcard_index], @tagName(inst.tag), pattern[wildcard_index + "_".len ..] };
const err_msg = "unsupported mnemonic: ";
const mnemonic = std.fmt.bufPrint(&buf, "{s}{s}{s}", parts) catch
return lower.fail(err_msg ++ "'{s}{s}{s}'", parts);
break :mnemonic std.meta.stringToEnum(Mnemonic, mnemonic) orelse
return lower.fail(err_msg ++ "'{s}'", .{mnemonic});
return lower.fail("unsupported mnemonic: '{s}{s}{s}'", .{
pattern[0..wildcard_index],
@tagName(inst.tag),
pattern[wildcard_index + "_".len ..],
});
}, switch (inst.ops) {
.none => &.{},
.inst => &.{
@ -738,22 +608,17 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
.{ .mem = lower.mem(2, inst.data.rrix.payload) },
.{ .imm = lower.imm(inst.ops, inst.data.rrix.i) },
},
.extern_fn_reloc, .rel => &.{
.{ .imm = lower.reloc(0, .{ .linker_extern_fn = inst.data.reloc.sym_index }, inst.data.reloc.off) },
.nav => &.{
.{ .imm = lower.reloc(0, .{ .nav = inst.data.nav.index }, inst.data.nav.off) },
},
.got_reloc, .direct_reloc, .import_reloc => ops: {
const reg = inst.data.rx.r1;
const extra = lower.mir.extraData(bits.SymbolOffset, inst.data.rx.payload).data;
_ = lower.reloc(1, switch (inst.ops) {
.got_reloc => .{ .linker_got = extra.sym_index },
.direct_reloc => .{ .linker_direct = extra.sym_index },
.import_reloc => .{ .linker_import = extra.sym_index },
else => unreachable,
}, extra.off);
break :ops &.{
.{ .reg = reg },
.{ .mem = Memory.initRip(Memory.PtrSize.fromBitSize(reg.bitSize()), 0) },
};
.uav => &.{
.{ .imm = lower.reloc(0, .{ .uav = inst.data.uav }, 0) },
},
.lazy_sym => &.{
.{ .imm = lower.reloc(0, .{ .lazy_sym = inst.data.lazy_sym }, 0) },
},
.extern_func => &.{
.{ .imm = lower.reloc(0, .{ .extern_func = inst.data.extern_func }, 0) },
},
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
});
@ -773,7 +638,7 @@ fn pushPopRegList(lower: *Lower, comptime mnemonic: Mnemonic, inst: Mir.Inst) Er
else => unreachable,
} });
while (it.next()) |i| {
try lower.emit(.none, mnemonic, &.{.{
try lower.encode(.none, mnemonic, &.{.{
.reg = callee_preserved_regs[i],
}});
switch (mnemonic) {
@ -787,7 +652,7 @@ fn pushPopRegList(lower: *Lower, comptime mnemonic: Mnemonic, inst: Mir.Inst) Er
.push => {
var it = inst.data.reg_list.iterator(.{});
while (it.next()) |i| {
try lower.emit(.directive, .@".cfi_rel_offset", &.{
try lower.encode(.directive, .@".cfi_rel_offset", &.{
.{ .reg = callee_preserved_regs[i] },
.{ .imm = .s(off) },
});
@ -805,12 +670,14 @@ const page_size: i32 = 1 << 12;
const abi = @import("abi.zig");
const assert = std.debug.assert;
const bits = @import("bits.zig");
const codegen = @import("../../codegen.zig");
const encoder = @import("encoder.zig");
const link = @import("../../link.zig");
const std = @import("std");
const Immediate = Instruction.Immediate;
const Instruction = encoder.Instruction;
const InternPool = @import("../../InternPool.zig");
const Lower = @This();
const Memory = Instruction.Memory;
const Mir = @import("Mir.zig");
@ -819,3 +686,4 @@ const Zcu = @import("../../Zcu.zig");
const Operand = Instruction.Operand;
const Prefix = Instruction.Prefix;
const Register = bits.Register;
const Type = @import("../../Type.zig");

View File

@ -9,6 +9,8 @@
instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
extra: []const u32,
string_bytes: []const u8,
locals: []const Local,
table: []const Inst.Index,
frame_locs: std.MultiArrayList(FrameLoc).Slice,
@ -1361,9 +1363,6 @@ pub const Inst = struct {
/// Immediate (byte), register operands.
/// Uses `ri` payload.
ir,
/// Relative displacement operand.
/// Uses `reloc` payload.
rel,
/// Register, memory operands.
/// Uses `rx` payload with extra data of type `Memory`.
rm,
@ -1409,21 +1408,18 @@ pub const Inst = struct {
/// References another Mir instruction directly.
/// Uses `inst` payload.
inst,
/// Linker relocation - external function.
/// Uses `reloc` payload.
extern_fn_reloc,
/// Linker relocation - GOT indirection.
/// Uses `rx` payload with extra data of type `bits.SymbolOffset`.
got_reloc,
/// Linker relocation - direct reference.
/// Uses `rx` payload with extra data of type `bits.SymbolOffset`.
direct_reloc,
/// Linker relocation - imports table indirection (binding).
/// Uses `rx` payload with extra data of type `bits.SymbolOffset`.
import_reloc,
/// Linker relocation - threadlocal variable via GOT indirection.
/// Uses `rx` payload with extra data of type `bits.SymbolOffset`.
tlv_reloc,
/// References a nav.
/// Uses `nav` payload.
nav,
/// References an uav.
/// Uses `uav` payload.
uav,
/// References a lazy symbol.
/// Uses `lazy_sym` payload.
lazy_sym,
/// References an external symbol.
/// Uses `extern_func` payload.
extern_func,
// Pseudo instructions:
@ -1522,6 +1518,7 @@ pub const Inst = struct {
pseudo_cfi_escape_bytes,
/// End of prologue
/// Uses `none` payload.
pseudo_dbg_prologue_end_none,
/// Update debug line with is_stmt register set
/// Uses `line_column` payload.
@ -1530,44 +1527,70 @@ pub const Inst = struct {
/// Uses `line_column` payload.
pseudo_dbg_line_line_column,
/// Start of epilogue
/// Uses `none` payload.
pseudo_dbg_epilogue_begin_none,
/// Start of lexical block
/// Uses `none` payload.
pseudo_dbg_enter_block_none,
/// End of lexical block
/// Uses `none` payload.
pseudo_dbg_leave_block_none,
/// Start of inline function
/// Uses `ip_index` payload.
pseudo_dbg_enter_inline_func,
/// End of inline function
/// Uses `ip_index` payload.
pseudo_dbg_leave_inline_func,
/// Local argument or variable.
/// Uses `a` payload.
pseudo_dbg_local_a,
/// Local argument or variable.
/// Uses `ai` payload.
pseudo_dbg_local_ai_s,
/// Local argument or variable.
/// Uses `ai` payload.
pseudo_dbg_local_ai_u,
/// Local argument or variable.
/// Uses `ai` payload with extra data of type `Imm64`.
pseudo_dbg_local_ai_64,
/// Local argument or variable.
/// Uses `as` payload.
pseudo_dbg_local_as,
/// Local argument or variable.
/// Uses `ax` payload with extra data of type `bits.SymbolOffset`.
pseudo_dbg_local_aso,
/// Local argument or variable.
/// Uses `rx` payload with extra data of type `AirOffset`.
pseudo_dbg_local_aro,
/// Local argument or variable.
/// Uses `ax` payload with extra data of type `bits.FrameAddr`.
pseudo_dbg_local_af,
/// Local argument or variable.
/// Uses `ax` payload with extra data of type `Memory`.
pseudo_dbg_local_am,
/// Local argument.
/// Uses `none` payload.
pseudo_dbg_arg_none,
/// Local argument.
/// Uses `i` payload.
pseudo_dbg_arg_i_s,
/// Local argument.
/// Uses `i` payload.
pseudo_dbg_arg_i_u,
/// Local argument.
/// Uses `i64` payload.
pseudo_dbg_arg_i_64,
/// Local argument.
/// Uses `ro` payload.
pseudo_dbg_arg_ro,
/// Local argument.
/// Uses `fa` payload.
pseudo_dbg_arg_fa,
/// Local argument.
/// Uses `x` payload with extra data of type `Memory`.
pseudo_dbg_arg_m,
/// Local argument.
/// Uses `ip_index` payload.
pseudo_dbg_arg_val,
/// Remaining arguments are varargs.
pseudo_dbg_var_args_none,
/// Local variable.
/// Uses `none` payload.
pseudo_dbg_var_none,
/// Local variable.
/// Uses `i` payload.
pseudo_dbg_var_i_s,
/// Local variable.
/// Uses `i` payload.
pseudo_dbg_var_i_u,
/// Local variable.
/// Uses `i64` payload.
pseudo_dbg_var_i_64,
/// Local variable.
/// Uses `ro` payload.
pseudo_dbg_var_ro,
/// Local variable.
/// Uses `fa` payload.
pseudo_dbg_var_fa,
/// Local variable.
/// Uses `x` payload with extra data of type `Memory`.
pseudo_dbg_var_m,
/// Local variable.
/// Uses `ip_index` payload.
pseudo_dbg_var_val,
/// Tombstone
/// Emitter should skip this instruction.
@ -1584,6 +1607,7 @@ pub const Inst = struct {
inst: Index,
},
/// A 32-bit immediate value.
i64: u64,
i: struct {
fixes: Fixes = ._,
i: u32,
@ -1683,31 +1707,18 @@ pub const Inst = struct {
return std.mem.sliceAsBytes(mir.extra[bytes.payload..])[0..bytes.len];
}
},
a: struct {
air_inst: Air.Inst.Index,
},
ai: struct {
air_inst: Air.Inst.Index,
i: u32,
},
as: struct {
air_inst: Air.Inst.Index,
sym_index: u32,
},
ax: struct {
air_inst: Air.Inst.Index,
payload: u32,
},
/// Relocation for the linker where:
/// * `sym_index` is the index of the target
/// * `off` is the offset from the target
reloc: bits.SymbolOffset,
fa: bits.FrameAddr,
ro: bits.RegisterOffset,
nav: bits.NavOffset,
uav: InternPool.Key.Ptr.BaseAddr.Uav,
lazy_sym: link.File.LazySymbol,
extern_func: Mir.NullTerminatedString,
/// Debug line and column position
line_column: struct {
line: u32,
column: u32,
},
func: InternPool.Index,
ip_index: InternPool.Index,
/// Register list
reg_list: RegisterList,
};
@ -1760,13 +1771,11 @@ pub const Inst = struct {
}
};
pub const AirOffset = struct { air_inst: Air.Inst.Index, off: i32 };
/// Used in conjunction with payload to transfer a list of used registers in a compact manner.
pub const RegisterList = struct {
bitset: BitSet,
const BitSet = IntegerBitSet(32);
const BitSet = std.bit_set.IntegerBitSet(32);
const Self = @This();
pub const empty: RegisterList = .{ .bitset = .initEmpty() };
@ -1805,6 +1814,22 @@ pub const RegisterList = struct {
}
};
pub const NullTerminatedString = enum(u32) {
none = std.math.maxInt(u32),
_,
pub fn toSlice(nts: NullTerminatedString, mir: *const Mir) ?[:0]const u8 {
if (nts == .none) return null;
const string_bytes = mir.string_bytes[@intFromEnum(nts)..];
return string_bytes[0..std.mem.indexOfScalar(u8, string_bytes, 0).? :0];
}
};
pub const Local = struct {
name: NullTerminatedString,
type: InternPool.Index,
};
pub const Imm32 = struct {
imm: u32,
};
@ -1840,11 +1865,10 @@ pub const Memory = struct {
size: bits.Memory.Size,
index: Register,
scale: bits.Memory.Scale,
_: u14 = undefined,
_: u13 = undefined,
};
pub fn encode(mem: bits.Memory) Memory {
assert(mem.base != .reloc or mem.mod != .off);
return .{
.info = .{
.base = mem.base,
@ -1866,17 +1890,27 @@ pub const Memory = struct {
.none, .table => undefined,
.reg => |reg| @intFromEnum(reg),
.frame => |frame_index| @intFromEnum(frame_index),
.reloc, .pcrel => |sym_index| sym_index,
.rip_inst => |inst_index| inst_index,
.nav => |nav| @intFromEnum(nav),
.uav => |uav| @intFromEnum(uav.val),
.lazy_sym => |lazy_sym| @intFromEnum(lazy_sym.ty),
.extern_func => |extern_func| @intFromEnum(extern_func),
},
.off = switch (mem.mod) {
.rm => |rm| @bitCast(rm.disp),
.off => |off| @truncate(off),
},
.extra = if (mem.mod == .off)
@intCast(mem.mod.off >> 32)
else
undefined,
.extra = switch (mem.mod) {
.rm => switch (mem.base) {
else => undefined,
.uav => |uav| @intFromEnum(uav.orig_ty),
.lazy_sym => |lazy_sym| @intFromEnum(lazy_sym.kind),
},
.off => switch (mem.base) {
.reg => @intCast(mem.mod.off >> 32),
else => unreachable,
},
},
};
}
@ -1894,9 +1928,11 @@ pub const Memory = struct {
.reg => .{ .reg = @enumFromInt(mem.base) },
.frame => .{ .frame = @enumFromInt(mem.base) },
.table => .table,
.reloc => .{ .reloc = mem.base },
.pcrel => .{ .pcrel = mem.base },
.rip_inst => .{ .rip_inst = mem.base },
.nav => .{ .nav = @enumFromInt(mem.base) },
.uav => .{ .uav = .{ .val = @enumFromInt(mem.base), .orig_ty = @enumFromInt(mem.extra) } },
.lazy_sym => .{ .lazy_sym = .{ .kind = @enumFromInt(mem.extra), .ty = @enumFromInt(mem.base) } },
.extern_func => .{ .extern_func = @enumFromInt(mem.base) },
},
.scale_index = switch (mem.info.index) {
.none => null,
@ -1924,11 +1960,132 @@ pub const Memory = struct {
/// Releases every allocation owned by this `Mir`, then poisons it with
/// `undefined` so accidental use after deinit is caught in safe builds.
pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
    mir.frame_locs.deinit(gpa);
    gpa.free(mir.table);
    gpa.free(mir.locals);
    gpa.free(mir.string_bytes);
    gpa.free(mir.extra);
    mir.instructions.deinit(gpa);
    mir.* = undefined;
}
/// Lowers this function's MIR to finalized machine code, appending it to
/// `code` and emitting relocations/debug info through the linker file `lf`.
/// Lowering failures are converted into codegen errors attached to the
/// function's owner nav.
pub fn emit(
    mir: Mir,
    lf: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    func_index: InternPool.Index,
    code: *std.ArrayListUnmanaged(u8),
    debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
    const zcu = pt.zcu;
    const gpa = zcu.comp.gpa;
    const func = zcu.funcInfo(func_index);
    const nav = func.owner_nav;
    const fn_info = zcu.typeToFunc(.fromInterned(func.ty)).?;
    const mod = zcu.navFileScope(nav).mod.?;
    var emit_state: Emit = .{
        .lower = .{
            .target = &mod.resolved_target.result,
            .allocator = gpa,
            .mir = mir,
            .cc = fn_info.cc,
            .src_loc = src_loc,
        },
        .bin_file = lf,
        .pt = pt,
        .pic = mod.pic,
        // Resolve (or create) the linker atom/symbol that will hold this
        // function's code; the accessor depends on the active linker backend.
        .atom_index = atom: {
            if (lf.cast(.elf)) |elf_file| break :atom try elf_file.zigObjectPtr().?.getOrCreateMetadataForNav(zcu, nav);
            if (lf.cast(.macho)) |macho_file| break :atom try macho_file.getZigObject().?.getOrCreateMetadataForNav(macho_file, nav);
            if (lf.cast(.coff)) |coff_file| {
                const atom_index = try coff_file.getOrCreateAtomForNav(nav);
                break :atom coff_file.getAtom(atom_index).getSymbolIndex().?;
            }
            if (lf.cast(.plan9)) |plan9_file| break :atom try plan9_file.seeNav(pt, nav);
            unreachable;
        },
        .debug_output = debug_output,
        .code = code,
        // Seed the debug-line state machine at the function's opening brace.
        .prev_di_loc = .{
            .line = func.lbrace_line,
            .column = func.lbrace_column,
            .is_stmt = switch (debug_output) {
                .dwarf => |dw| dw.dwarf.debug_line.header.default_is_stmt,
                .plan9, .none => undefined,
            },
        },
        .prev_di_pc = 0,
        .code_offset_mapping = .empty,
        .relocs = .empty,
        .table_relocs = .empty,
    };
    defer emit_state.deinit();
    emit_state.emitMir() catch |err| switch (err) {
        error.LowerFail, error.EmitFail => return zcu.codegenFailMsg(nav, emit_state.lower.err_msg.?),
        error.InvalidInstruction, error.CannotEncode => return zcu.codegenFail(nav, "emit MIR failed: {s} (Zig compiler bug)", .{@errorName(err)}),
        else => return zcu.codegenFail(nav, "emit MIR failed: {s}", .{@errorName(err)}),
    };
}
/// Like `emit`, but for a lazy symbol rather than a source-level function.
/// Lowering failures are converted into codegen errors attached to
/// `lazy_sym.ty`.
pub fn emitLazy(
    mir: Mir,
    lf: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    lazy_sym: link.File.LazySymbol,
    code: *std.ArrayListUnmanaged(u8),
    debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const gpa = comp.gpa;
    // Unlike `emit`, there is no owning nav whose file scope supplies a
    // module, so the root module's target/pic settings are used.
    const mod = comp.root_mod;
    var e: Emit = .{
        .lower = .{
            .target = &mod.resolved_target.result,
            .allocator = gpa,
            .mir = mir,
            // No source-level calling convention applies to a lazy symbol.
            .cc = .auto,
            .src_loc = src_loc,
        },
        .bin_file = lf,
        .pt = pt,
        .pic = mod.pic,
        // Resolve (or create) the linker symbol for this lazy symbol; the
        // accessor depends on the active linker backend. Failures here are
        // reported immediately rather than via `try`, since these calls
        // return linker errors outside `CodeGenError`.
        .atom_index = sym: {
            if (lf.cast(.elf)) |ef| break :sym ef.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(ef, pt, lazy_sym) catch |err|
                return zcu.codegenFailType(lazy_sym.ty, "{s} creating lazy symbol", .{@errorName(err)});
            if (lf.cast(.macho)) |mf| break :sym mf.getZigObject().?.getOrCreateMetadataForLazySymbol(mf, pt, lazy_sym) catch |err|
                return zcu.codegenFailType(lazy_sym.ty, "{s} creating lazy symbol", .{@errorName(err)});
            if (lf.cast(.coff)) |cf| {
                const atom = cf.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err|
                    return zcu.codegenFailType(lazy_sym.ty, "{s} creating lazy symbol", .{@errorName(err)});
                break :sym cf.getAtom(atom).getSymbolIndex().?;
            }
            if (lf.cast(.plan9)) |p9f| break :sym p9f.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err|
                return zcu.codegenFailType(lazy_sym.ty, "{s} creating lazy symbol", .{@errorName(err)});
            unreachable;
        },
        .debug_output = debug_output,
        .code = code,
        // NOTE(review): debug line state is left undefined here, unlike in
        // `emit` — presumably lazy symbols emit no source line info; confirm
        // against `Emit.emitMir`.
        .prev_di_loc = undefined,
        .prev_di_pc = undefined,
        .code_offset_mapping = .empty,
        .relocs = .empty,
        .table_relocs = .empty,
    };
    defer e.deinit();
    e.emitMir() catch |err| switch (err) {
        error.LowerFail, error.EmitFail => return zcu.codegenFailTypeMsg(lazy_sym.ty, e.lower.err_msg.?),
        error.InvalidInstruction, error.CannotEncode => return zcu.codegenFailType(lazy_sym.ty, "emit MIR failed: {s} (Zig compiler bug)", .{@errorName(err)}),
        else => return zcu.codegenFailType(lazy_sym.ty, "emit MIR failed: {s}", .{@errorName(err)}),
    };
}
pub fn extraData(mir: Mir, comptime T: type, index: u32) struct { data: T, end: u32 } {
const fields = std.meta.fields(T);
var i: u32 = index;
@ -1937,7 +2094,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: u32) struct { data: T, end:
@field(result, field.name) = switch (field.type) {
u32 => mir.extra[i],
i32, Memory.Info => @bitCast(mir.extra[i]),
bits.FrameIndex, Air.Inst.Index => @enumFromInt(mir.extra[i]),
bits.FrameIndex => @enumFromInt(mir.extra[i]),
else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
};
i += 1;
@ -1958,9 +2115,10 @@ pub fn resolveFrameAddr(mir: Mir, frame_addr: bits.FrameAddr) bits.RegisterOffse
return .{ .reg = frame_loc.base, .off = frame_loc.disp + frame_addr.off };
}
pub fn resolveFrameLoc(mir: Mir, mem: Memory) Memory {
pub fn resolveMemoryExtra(mir: Mir, payload: u32) Memory {
const mem = mir.extraData(Mir.Memory, payload).data;
return switch (mem.info.base) {
.none, .reg, .table, .reloc, .pcrel, .rip_inst => mem,
.none, .reg, .table, .rip_inst, .nav, .uav, .lazy_sym, .extern_func => mem,
.frame => if (mir.frame_locs.len > 0) .{
.info = .{
.base = .reg,
@ -1982,8 +2140,10 @@ const builtin = @import("builtin");
const encoder = @import("encoder.zig");
const std = @import("std");
const Air = @import("../../Air.zig");
const IntegerBitSet = std.bit_set.IntegerBitSet;
const InternPool = @import("../../InternPool.zig");
const Mir = @This();
const Register = bits.Register;
const Emit = @import("Emit.zig");
const codegen = @import("../../codegen.zig");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");

View File

@ -4,6 +4,8 @@ const expect = std.testing.expect;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const InternPool = @import("../../InternPool.zig");
const link = @import("../../link.zig");
const Mir = @import("Mir.zig");
/// EFLAGS condition codes
@ -684,8 +686,6 @@ test "Register id - different classes" {
try expect(Register.xmm0.id() == Register.ymm0.id());
try expect(Register.xmm0.id() != Register.mm0.id());
try expect(Register.mm0.id() != Register.st0.id());
try expect(Register.es.id() == 0b110000);
}
test "Register enc - different classes" {
@ -750,20 +750,22 @@ pub const FrameAddr = struct { index: FrameIndex, off: i32 = 0 };
pub const RegisterOffset = struct { reg: Register, off: i32 = 0 };
pub const SymbolOffset = struct { sym_index: u32, off: i32 = 0 };
pub const NavOffset = struct { index: InternPool.Nav.Index, off: i32 = 0 };
pub const Memory = struct {
base: Base = .none,
mod: Mod = .{ .rm = .{} },
pub const Base = union(enum(u3)) {
pub const Base = union(enum(u4)) {
none,
reg: Register,
frame: FrameIndex,
table,
reloc: u32,
pcrel: u32,
rip_inst: Mir.Inst.Index,
nav: InternPool.Nav.Index,
uav: InternPool.Key.Ptr.BaseAddr.Uav,
lazy_sym: link.File.LazySymbol,
extern_func: Mir.NullTerminatedString,
pub const Tag = @typeInfo(Base).@"union".tag_type.?;
};
@ -899,7 +901,10 @@ pub const Memory = struct {
pub const Immediate = union(enum) {
signed: i32,
unsigned: u64,
reloc: SymbolOffset,
nav: NavOffset,
uav: InternPool.Key.Ptr.BaseAddr.Uav,
lazy_sym: link.File.LazySymbol,
extern_func: Mir.NullTerminatedString,
pub fn u(x: u64) Immediate {
return .{ .unsigned = x };
@ -909,10 +914,6 @@ pub const Immediate = union(enum) {
return .{ .signed = x };
}
pub fn rel(sym_off: SymbolOffset) Immediate {
return .{ .reloc = sym_off };
}
pub fn format(
imm: Immediate,
comptime _: []const u8,
@ -921,7 +922,10 @@ pub const Immediate = union(enum) {
) @TypeOf(writer).Error!void {
switch (imm) {
inline else => |int| try writer.print("{d}", .{int}),
.reloc => |sym_off| try writer.print("Symbol({[sym_index]d}) + {[off]d}", sym_off),
.nav => |nav_off| try writer.print("Nav({d}) + {d}", .{ @intFromEnum(nav_off.nav), nav_off.off }),
.uav => |uav| try writer.print("Uav({d})", .{@intFromEnum(uav.val)}),
.lazy_sym => |lazy_sym| try writer.print("LazySym({s}, {d})", .{ @tagName(lazy_sym.kind), @intFromEnum(lazy_sym.ty) }),
.extern_func => |extern_func| try writer.print("ExternFunc({d})", .{@intFromEnum(extern_func)}),
}
}
};

View File

@ -138,7 +138,7 @@ pub const Instruction = struct {
.moffs => true,
.rip => false,
.sib => |s| switch (s.base) {
.none, .frame, .table, .reloc, .pcrel, .rip_inst => false,
.none, .frame, .table, .rip_inst, .nav, .uav, .lazy_sym, .extern_func => false,
.reg => |reg| reg.isClass(.segment),
},
};
@ -211,7 +211,7 @@ pub const Instruction = struct {
.none, .imm => 0b00,
.reg => |reg| @truncate(reg.enc() >> 3),
.mem => |mem| switch (mem.base()) {
.none, .frame, .table, .reloc, .pcrel, .rip_inst => 0b00, // rsp, rbp, and rip are not extended
.none, .frame, .table, .rip_inst, .nav, .uav, .lazy_sym, .extern_func => 0b00, // rsp, rbp, and rip are not extended
.reg => |reg| @truncate(reg.enc() >> 3),
},
.bytes => unreachable,
@ -281,9 +281,14 @@ pub const Instruction = struct {
.reg => |reg| try writer.print("{s}", .{@tagName(reg)}),
.frame => |frame_index| try writer.print("{}", .{frame_index}),
.table => try writer.print("Table", .{}),
.reloc => |sym_index| try writer.print("Symbol({d})", .{sym_index}),
.pcrel => |sym_index| try writer.print("PcRelSymbol({d})", .{sym_index}),
.rip_inst => |inst_index| try writer.print("RipInst({d})", .{inst_index}),
.nav => |nav| try writer.print("Nav({d})", .{@intFromEnum(nav)}),
.uav => |uav| try writer.print("Uav({d})", .{@intFromEnum(uav.val)}),
.lazy_sym => |lazy_sym| try writer.print("LazySym({s}, {d})", .{
@tagName(lazy_sym.kind),
@intFromEnum(lazy_sym.ty),
}),
.extern_func => |extern_func| try writer.print("ExternFunc({d})", .{@intFromEnum(extern_func)}),
}
if (mem.scaleIndex()) |si| {
if (any) try writer.writeAll(" + ");
@ -718,11 +723,11 @@ pub const Instruction = struct {
try encoder.modRm_indirectDisp32(operand_enc, 0);
try encoder.disp32(undefined);
} else return error.CannotEncode,
.reloc => if (@TypeOf(encoder).options.allow_symbols) {
.nav, .uav, .lazy_sym, .extern_func => if (@TypeOf(encoder).options.allow_symbols) {
try encoder.modRm_indirectDisp32(operand_enc, 0);
try encoder.disp32(undefined);
} else return error.CannotEncode,
.pcrel, .rip_inst => {
.rip_inst => {
try encoder.modRm_RIPDisp32(operand_enc);
try encoder.disp32(sib.disp);
},

View File

@ -85,13 +85,99 @@ pub fn legalizeFeatures(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) ?*co
}
}
/// Every code generation backend has a different MIR representation. However, we want to pass
/// MIR from codegen to the linker *regardless* of which backend is in use. So, we use this: a
/// union of all MIR types. The active tag is known from the backend in use; see `AnyMir.tag`.
pub const AnyMir = union {
    aarch64: @import("arch/aarch64/Mir.zig"),
    arm: @import("arch/arm/Mir.zig"),
    // powerpc MIR is not yet implemented; `noreturn` makes the field uninstantiable.
    powerpc: noreturn, //@import("arch/powerpc/Mir.zig"),
    riscv64: @import("arch/riscv64/Mir.zig"),
    sparc64: @import("arch/sparc64/Mir.zig"),
    x86_64: @import("arch/x86_64/Mir.zig"),
    wasm: @import("arch/wasm/Mir.zig"),
    c: @import("codegen/c.zig").Mir,

    /// Returns the name of the `AnyMir` field corresponding to `backend`.
    /// `backend` must be one of the self-hosted backends with a field above;
    /// any other backend is unreachable.
    pub inline fn tag(comptime backend: std.builtin.CompilerBackend) []const u8 {
        return switch (backend) {
            .stage2_aarch64 => "aarch64",
            .stage2_arm => "arm",
            .stage2_powerpc => "powerpc",
            .stage2_riscv64 => "riscv64",
            .stage2_sparc64 => "sparc64",
            .stage2_x86_64 => "x86_64",
            .stage2_wasm => "wasm",
            .stage2_c => "c",
            else => unreachable,
        };
    }

    /// Frees the active payload. The active field is recomputed from the
    /// target of `zcu.root_mod`, so this must only be called on a value
    /// produced for that same backend.
    pub fn deinit(mir: *AnyMir, zcu: *const Zcu) void {
        const gpa = zcu.gpa;
        const backend = target_util.zigBackend(zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
        switch (backend) {
            else => unreachable,
            // Comptime-unrolled so `@field` gets a comptime-known field name
            // for each backend.
            inline .stage2_aarch64,
            .stage2_arm,
            .stage2_powerpc,
            .stage2_riscv64,
            .stage2_sparc64,
            .stage2_x86_64,
            .stage2_wasm,
            .stage2_c,
            => |backend_ct| @field(mir, tag(backend_ct)).deinit(gpa),
        }
    }
};
/// Runs code generation for a function. This process converts the `Air` emitted by `Sema`,
/// alongside annotated `Liveness` data, to machine code in the form of MIR (see `AnyMir`).
///
/// This is supposed to be a "pure" process, but some backends are currently buggy; see
/// `Zcu.Feature.separate_thread` for details.
pub fn generateFunction(
lf: *link.File,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
air: *const Air,
liveness: *const Air.Liveness,
) CodeGenError!AnyMir {
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
const target = zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
switch (target_util.zigBackend(target, false)) {
else => unreachable,
inline .stage2_aarch64,
.stage2_arm,
.stage2_powerpc,
.stage2_riscv64,
.stage2_sparc64,
.stage2_x86_64,
.stage2_wasm,
.stage2_c,
=> |backend| {
dev.check(devFeatureForBackend(backend));
const CodeGen = importBackend(backend);
const mir = try CodeGen.generate(lf, pt, src_loc, func_index, air, liveness);
return @unionInit(AnyMir, AnyMir.tag(backend), mir);
},
}
}
/// Converts the MIR returned by `generateFunction` to finalized machine code to be placed in
/// the output binary. This is called from linker implementations, and may query linker state.
///
/// This function is not called for the C backend, as `link.C` directly understands its MIR.
///
/// The `air` parameter is not supposed to exist, but some backends are currently buggy; see
/// `Zcu.Feature.separate_thread` for details.
pub fn emitFunction(
lf: *link.File,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
any_mir: *const AnyMir,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!void {
@ -108,7 +194,8 @@ pub fn generateFunction(
.stage2_x86_64,
=> |backend| {
dev.check(devFeatureForBackend(backend));
return importBackend(backend).generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output);
const mir = &@field(any_mir, AnyMir.tag(backend));
return mir.emit(lf, pt, src_loc, func_index, code, debug_output);
},
}
}
@ -695,7 +782,6 @@ fn lowerUavRef(
const comp = lf.comp;
const target = &comp.root_mod.resolved_target.result;
const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
const is_obj = comp.config.output_mode == .Obj;
const uav_val = uav.val;
const uav_ty = Type.fromInterned(ip.typeOf(uav_val));
const is_fn_body = uav_ty.zigTypeTag(zcu) == .@"fn";
@ -715,21 +801,7 @@ fn lowerUavRef(
dev.check(link.File.Tag.wasm.devFeature());
const wasm = lf.cast(.wasm).?;
assert(reloc_parent == .none);
if (is_obj) {
try wasm.out_relocs.append(gpa, .{
.offset = @intCast(code.items.len),
.pointee = .{ .symbol_index = try wasm.uavSymbolIndex(uav.val) },
.tag = if (ptr_width_bytes == 4) .memory_addr_i32 else .memory_addr_i64,
.addend = @intCast(offset),
});
} else {
try wasm.uav_fixups.ensureUnusedCapacity(gpa, 1);
wasm.uav_fixups.appendAssumeCapacity(.{
.uavs_exe_index = try wasm.refUavExe(uav.val, uav.orig_ty),
.offset = @intCast(code.items.len),
.addend = @intCast(offset),
});
}
try wasm.addUavReloc(code.items.len, uav.val, uav.orig_ty, @intCast(offset));
code.appendNTimesAssumeCapacity(0, ptr_width_bytes);
return;
},
@ -879,73 +951,39 @@ pub const GenResult = union(enum) {
};
};
fn genNavRef(
pub fn genNavRef(
lf: *link.File,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
val: Value,
nav_index: InternPool.Nav.Index,
target: std.Target,
) CodeGenError!GenResult {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ty = val.typeOf(zcu);
log.debug("genNavRef: val = {}", .{val.fmtValue(pt)});
if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
const imm: u64 = switch (@divExact(target.ptrBitWidth(), 8)) {
1 => 0xaa,
2 => 0xaaaa,
4 => 0xaaaaaaaa,
8 => 0xaaaaaaaaaaaaaaaa,
else => unreachable,
};
return .{ .mcv = .{ .immediate = imm } };
}
const comp = lf.comp;
const gpa = comp.gpa;
// TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
if (ty.castPtrToFn(zcu)) |fn_ty| {
if (zcu.typeToFunc(fn_ty).?.is_generic) {
return .{ .mcv = .{ .immediate = fn_ty.abiAlignment(zcu).toByteUnits().? } };
}
} else if (ty.zigTypeTag(zcu) == .pointer) {
const elem_ty = ty.elemType2(zcu);
if (!elem_ty.hasRuntimeBits(zcu)) {
return .{ .mcv = .{ .immediate = elem_ty.abiAlignment(zcu).toByteUnits().? } };
}
}
const nav = ip.getNav(nav_index);
assert(!nav.isThreadlocal(ip));
log.debug("genNavRef({})", .{nav.fqn.fmt(ip)});
const lib_name, const linkage, const visibility = if (nav.getExtern(ip)) |e|
.{ e.lib_name, e.linkage, e.visibility }
const lib_name, const linkage, const is_threadlocal = if (nav.getExtern(ip)) |e|
.{ e.lib_name, e.linkage, e.is_threadlocal and zcu.comp.config.any_non_single_threaded }
else
.{ .none, .internal, .default };
const name = nav.name;
.{ .none, .internal, false };
if (lf.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
switch (linkage) {
.internal => {
const sym_index = try zo.getOrCreateMetadataForNav(zcu, nav_index);
if (is_threadlocal) zo.symbol(sym_index).flags.is_tls = true;
return .{ .mcv = .{ .lea_symbol = sym_index } };
},
.strong, .weak => {
const sym_index = try elf_file.getGlobalSymbol(name.toSlice(ip), lib_name.toSlice(ip));
const sym_index = try elf_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
switch (linkage) {
.internal => unreachable,
.strong => {},
.weak => zo.symbol(sym_index).flags.weak = true,
.link_once => unreachable,
}
switch (visibility) {
.default => zo.symbol(sym_index).flags.is_extern_ptr = true,
.hidden, .protected => {},
}
if (is_threadlocal) zo.symbol(sym_index).flags.is_tls = true;
return .{ .mcv = .{ .lea_symbol = sym_index } };
},
.link_once => unreachable,
@ -955,21 +993,18 @@ fn genNavRef(
switch (linkage) {
.internal => {
const sym_index = try zo.getOrCreateMetadataForNav(macho_file, nav_index);
const sym = zo.symbols.items[sym_index];
return .{ .mcv = .{ .lea_symbol = sym.nlist_idx } };
if (is_threadlocal) zo.symbols.items[sym_index].flags.tlv = true;
return .{ .mcv = .{ .lea_symbol = sym_index } };
},
.strong, .weak => {
const sym_index = try macho_file.getGlobalSymbol(name.toSlice(ip), lib_name.toSlice(ip));
const sym_index = try macho_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
switch (linkage) {
.internal => unreachable,
.strong => {},
.weak => zo.symbols.items[sym_index].flags.weak = true,
.link_once => unreachable,
}
switch (visibility) {
.default => zo.symbols.items[sym_index].flags.is_extern_ptr = true,
.hidden, .protected => {},
}
if (is_threadlocal) zo.symbols.items[sym_index].flags.tlv = true;
return .{ .mcv = .{ .lea_symbol = sym_index } };
},
.link_once => unreachable,
@ -980,12 +1015,12 @@ fn genNavRef(
.internal => {
const atom_index = try coff_file.getOrCreateAtomForNav(nav_index);
const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
return .{ .mcv = .{ .load_got = sym_index } };
return .{ .mcv = .{ .lea_symbol = sym_index } };
},
.strong, .weak => {
const global_index = try coff_file.getGlobalSymbol(name.toSlice(ip), lib_name.toSlice(ip));
try coff_file.need_got_table.put(gpa, global_index, {}); // needs GOT
return .{ .mcv = .{ .load_got = link.File.Coff.global_symbol_bit | global_index } };
const global_index = try coff_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
try coff_file.need_got_table.put(zcu.gpa, global_index, {}); // needs GOT
return .{ .mcv = .{ .lea_symbol = global_index } };
},
.link_once => unreachable,
}
@ -994,11 +1029,12 @@ fn genNavRef(
const atom = p9.getAtom(atom_index);
return .{ .mcv = .{ .memory = atom.getOffsetTableAddress(p9) } };
} else {
const msg = try ErrorMsg.create(gpa, src_loc, "TODO genNavRef for target {}", .{target});
const msg = try ErrorMsg.create(zcu.gpa, src_loc, "TODO genNavRef for target {}", .{target});
return .{ .fail = msg };
}
}
/// deprecated legacy code path
pub fn genTypedValue(
lf: *link.File,
pt: Zcu.PerThread,
@ -1006,45 +1042,96 @@ pub fn genTypedValue(
val: Value,
target: std.Target,
) CodeGenError!GenResult {
return switch (try lowerValue(pt, val, &target)) {
.none => .{ .mcv = .none },
.undef => .{ .mcv = .undef },
.immediate => |imm| .{ .mcv = .{ .immediate = imm } },
.lea_nav => |nav| genNavRef(lf, pt, src_loc, nav, target),
.lea_uav => |uav| switch (try lf.lowerUav(
pt,
uav.val,
Type.fromInterned(uav.orig_ty).ptrAlignment(pt.zcu),
src_loc,
)) {
.mcv => |mcv| .{ .mcv = switch (mcv) {
else => unreachable,
.load_direct => |sym_index| .{ .lea_direct = sym_index },
.load_symbol => |sym_index| .{ .lea_symbol = sym_index },
} },
.fail => |em| .{ .fail = em },
},
.load_uav => |uav| lf.lowerUav(
pt,
uav.val,
Type.fromInterned(uav.orig_ty).ptrAlignment(pt.zcu),
src_loc,
),
};
}
const LowerResult = union(enum) {
none,
undef,
/// The bit-width of the immediate may be smaller than `u64`. For example, on 32-bit targets
/// such as ARM, the immediate will never exceed 32-bits.
immediate: u64,
lea_nav: InternPool.Nav.Index,
lea_uav: InternPool.Key.Ptr.BaseAddr.Uav,
load_uav: InternPool.Key.Ptr.BaseAddr.Uav,
};
pub fn lowerValue(pt: Zcu.PerThread, val: Value, target: *const std.Target) Allocator.Error!LowerResult {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ty = val.typeOf(zcu);
log.debug("genTypedValue: val = {}", .{val.fmtValue(pt)});
log.debug("lowerValue(@as({}, {}))", .{ ty.fmt(pt), val.fmtValue(pt) });
if (val.isUndef(zcu)) return .{ .mcv = .undef };
if (val.isUndef(zcu)) return .undef;
switch (ty.zigTypeTag(zcu)) {
.void => return .{ .mcv = .none },
.void => return .none,
.pointer => switch (ty.ptrSize(zcu)) {
.slice => {},
else => switch (val.toIntern()) {
.null_value => {
return .{ .mcv = .{ .immediate = 0 } };
return .{ .immediate = 0 };
},
else => switch (ip.indexToKey(val.toIntern())) {
.int => {
return .{ .mcv = .{ .immediate = val.toUnsignedInt(zcu) } };
return .{ .immediate = val.toUnsignedInt(zcu) };
},
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.nav => |nav| return genNavRef(lf, pt, src_loc, val, nav, target),
.uav => |uav| if (Value.fromInterned(uav.val).typeOf(zcu).hasRuntimeBits(zcu))
return switch (try lf.lowerUav(
pt,
uav.val,
Type.fromInterned(uav.orig_ty).ptrAlignment(zcu),
src_loc,
)) {
.mcv => |mcv| return .{ .mcv = switch (mcv) {
.load_direct => |sym_index| .{ .lea_direct = sym_index },
.load_symbol => |sym_index| .{ .lea_symbol = sym_index },
.nav => |nav| {
if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
const imm: u64 = switch (@divExact(target.ptrBitWidth(), 8)) {
1 => 0xaa,
2 => 0xaaaa,
4 => 0xaaaaaaaa,
8 => 0xaaaaaaaaaaaaaaaa,
else => unreachable,
} },
.fail => |em| return .{ .fail = em },
};
return .{ .immediate = imm };
}
if (ty.castPtrToFn(zcu)) |fn_ty| {
if (zcu.typeToFunc(fn_ty).?.is_generic) {
return .{ .immediate = fn_ty.abiAlignment(zcu).toByteUnits().? };
}
} else if (ty.zigTypeTag(zcu) == .pointer) {
const elem_ty = ty.elemType2(zcu);
if (!elem_ty.hasRuntimeBits(zcu)) {
return .{ .immediate = elem_ty.abiAlignment(zcu).toByteUnits().? };
}
}
return .{ .lea_nav = nav };
},
.uav => |uav| if (Value.fromInterned(uav.val).typeOf(zcu).hasRuntimeBits(zcu))
return .{ .lea_uav = uav }
else
return .{ .mcv = .{ .immediate = Type.fromInterned(uav.orig_ty).ptrAlignment(zcu)
.forward(@intCast((@as(u66, 1) << @intCast(target.ptrBitWidth() | 1)) / 3)) } },
return .{ .immediate = Type.fromInterned(uav.orig_ty).ptrAlignment(zcu)
.forward(@intCast((@as(u66, 1) << @intCast(target.ptrBitWidth() | 1)) / 3)) },
else => {},
},
else => {},
@ -1058,39 +1145,35 @@ pub fn genTypedValue(
.signed => @bitCast(val.toSignedInt(zcu)),
.unsigned => val.toUnsignedInt(zcu),
};
return .{ .mcv = .{ .immediate = unsigned } };
return .{ .immediate = unsigned };
}
},
.bool => {
return .{ .mcv = .{ .immediate = @intFromBool(val.toBool()) } };
return .{ .immediate = @intFromBool(val.toBool()) };
},
.optional => {
if (ty.isPtrLikeOptional(zcu)) {
return genTypedValue(
lf,
return lowerValue(
pt,
src_loc,
val.optionalValue(zcu) orelse return .{ .mcv = .{ .immediate = 0 } },
val.optionalValue(zcu) orelse return .{ .immediate = 0 },
target,
);
} else if (ty.abiSize(zcu) == 1) {
return .{ .mcv = .{ .immediate = @intFromBool(!val.isNull(zcu)) } };
return .{ .immediate = @intFromBool(!val.isNull(zcu)) };
}
},
.@"enum" => {
const enum_tag = ip.indexToKey(val.toIntern()).enum_tag;
return genTypedValue(
lf,
return lowerValue(
pt,
src_loc,
Value.fromInterned(enum_tag.int),
target,
);
},
.error_set => {
const err_name = ip.indexToKey(val.toIntern()).err.name;
const error_index = try pt.getErrorValue(err_name);
return .{ .mcv = .{ .immediate = error_index } };
const error_index = ip.getErrorValueIfExists(err_name).?;
return .{ .immediate = error_index };
},
.error_union => {
const err_type = ty.errorUnionSet(zcu);
@ -1099,20 +1182,16 @@ pub fn genTypedValue(
// We use the error type directly as the type.
const err_int_ty = try pt.errorIntType();
switch (ip.indexToKey(val.toIntern()).error_union.val) {
.err_name => |err_name| return genTypedValue(
lf,
.err_name => |err_name| return lowerValue(
pt,
src_loc,
Value.fromInterned(try pt.intern(.{ .err = .{
.ty = err_type.toIntern(),
.name = err_name,
} })),
target,
),
.payload => return genTypedValue(
lf,
.payload => return lowerValue(
pt,
src_loc,
try pt.intValue(err_int_ty, 0),
target,
),
@ -1132,7 +1211,10 @@ pub fn genTypedValue(
else => {},
}
return lf.lowerUav(pt, val.toIntern(), .none, src_loc);
return .{ .load_uav = .{
.val = val.toIntern(),
.orig_ty = (try pt.singleConstPtrType(ty)).toIntern(),
} };
}
pub fn errUnionPayloadOffset(payload_ty: Type, zcu: *Zcu) u64 {

View File

@ -3,6 +3,7 @@ const builtin = @import("builtin");
const assert = std.debug.assert;
const mem = std.mem;
const log = std.log.scoped(.c);
const Allocator = mem.Allocator;
const dev = @import("../dev.zig");
const link = @import("../link.zig");
@ -30,6 +31,35 @@ pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features {
}) else null; // we don't currently ask zig1 to use safe optimization modes
}
/// For most backends, MIR is basically a sequence of machine code instructions, perhaps with some
/// "pseudo instructions" thrown in. For the C backend, it is instead the generated C code for a
/// single function. We also need to track some information to get merged into the global `link.C`
/// state, including:
/// * The UAVs used, so declarations can be emitted in `flush`
/// * The types used, so declarations can be emitted in `flush`
/// * The lazy functions used, so definitions can be emitted in `flush`
pub const Mir = struct {
    /// This map contains all the UAVs we saw generating this function.
    /// `link.C` will merge them into its `uavs`/`aligned_uavs` fields.
    /// Key is the value of the UAV; value is the UAV's alignment, or
    /// `.none` for natural alignment. The specified alignment is never
    /// less than the natural alignment.
    uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment),

    // These remaining fields are essentially just an owned version of `link.C.AvBlock`.
    code: []u8,
    fwd_decl: []u8,
    ctype_pool: CType.Pool,
    lazy_fns: LazyFnMap,

    /// Frees all memory owned by this `Mir`. Poisons `mir` afterwards so any
    /// use after deinit is caught in safe builds, matching the convention of
    /// the other backends' `Mir.deinit` implementations.
    pub fn deinit(mir: *Mir, gpa: Allocator) void {
        mir.uavs.deinit(gpa);
        gpa.free(mir.code);
        gpa.free(mir.fwd_decl);
        mir.ctype_pool.deinit(gpa);
        mir.lazy_fns.deinit(gpa);
        mir.* = undefined;
    }
};
pub const CType = @import("c/Type.zig");
pub const CValue = union(enum) {
@ -671,7 +701,7 @@ pub const Object = struct {
/// This data is available both when outputting .c code and when outputting an .h file.
pub const DeclGen = struct {
gpa: mem.Allocator,
gpa: Allocator,
pt: Zcu.PerThread,
mod: *Module,
pass: Pass,
@ -682,10 +712,12 @@ pub const DeclGen = struct {
error_msg: ?*Zcu.ErrorMsg,
ctype_pool: CType.Pool,
scratch: std.ArrayListUnmanaged(u32),
/// Keeps track of anonymous decls that need to be rendered before this
/// (named) Decl in the output C code.
uav_deps: std.AutoArrayHashMapUnmanaged(InternPool.Index, C.AvBlock),
aligned_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment),
/// This map contains all the UAVs we saw generating this function.
/// `link.C` will merge them into its `uavs`/`aligned_uavs` fields.
/// Key is the value of the UAV; value is the UAV's alignment, or
/// `.none` for natural alignment. The specified alignment is never
/// less than the natural alignment.
uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment),
pub const Pass = union(enum) {
nav: InternPool.Nav.Index,
@ -753,21 +785,17 @@ pub const DeclGen = struct {
// Indicate that the anon decl should be rendered to the output so that
// our reference above is not undefined.
const ptr_type = ip.indexToKey(uav.orig_ty).ptr_type;
const gop = try dg.uav_deps.getOrPut(dg.gpa, uav.val);
if (!gop.found_existing) gop.value_ptr.* = .{};
// Only insert an alignment entry if the alignment is greater than ABI
// alignment. If there is already an entry, keep the greater alignment.
const explicit_alignment = ptr_type.flags.alignment;
if (explicit_alignment != .none) {
const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(zcu);
if (explicit_alignment.order(abi_alignment).compare(.gt)) {
const aligned_gop = try dg.aligned_uavs.getOrPut(dg.gpa, uav.val);
aligned_gop.value_ptr.* = if (aligned_gop.found_existing)
aligned_gop.value_ptr.maxStrict(explicit_alignment)
else
explicit_alignment;
}
const gop = try dg.uavs.getOrPut(dg.gpa, uav.val);
if (!gop.found_existing) gop.value_ptr.* = .none;
// If there is an explicit alignment, greater than the current one, use it.
// Note that we intentionally start at `.none`, so `gop.value_ptr.*` is never
// underaligned, so we don't need to worry about the `.none` case here.
if (ptr_type.flags.alignment != .none) {
// Resolve the current alignment so we can choose the bigger one.
const cur_alignment: Alignment = if (gop.value_ptr.* == .none) abi: {
break :abi Type.fromInterned(ptr_type.child).abiAlignment(zcu);
} else gop.value_ptr.*;
gop.value_ptr.* = cur_alignment.maxStrict(ptr_type.flags.alignment);
}
}
@ -2895,7 +2923,79 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn
}
}
pub fn genFunc(f: *Function) !void {
/// Runs C code generation for the function `func_index`, converting its `Air`
/// (plus `liveness` annotations) into a `Mir` holding the rendered C code,
/// forward declarations, ctype pool, lazy functions, and UAVs that `link.C`
/// merges during `flush`. On success the caller owns the returned `Mir` and
/// must call `Mir.deinit` on it.
pub fn generate(
    lf: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    func_index: InternPool.Index,
    air: *const Air,
    liveness: *const Air.Liveness,
) @import("../codegen.zig").CodeGenError!Mir {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    _ = src_loc; // Unused: errors are attached via `dg.error_msg` instead.
    assert(lf.tag == .c);
    const func = zcu.funcInfo(func_index);
    var function: Function = .{
        .value_map = .init(gpa),
        .air = air.*,
        .liveness = liveness.*,
        .func_index = func_index,
        .object = .{
            .dg = .{
                .gpa = gpa,
                .pt = pt,
                .mod = zcu.navFileScope(func.owner_nav).mod.?,
                .error_msg = null,
                .pass = .{ .nav = func.owner_nav },
                .is_naked_fn = Type.fromInterned(func.ty).fnCallingConvention(zcu) == .naked,
                .expected_block = null,
                .fwd_decl = .init(gpa),
                .ctype_pool = .empty,
                .scratch = .empty,
                .uavs = .empty,
            },
            .code = .init(gpa),
            .indent_writer = undefined, // set later so we can get a pointer to object.code
        },
        .lazy_fns = .empty,
    };
    // Deinit everything unconditionally: the pieces moved into `mir` below are
    // reset to empty by `move()`/`toOwnedSlice()`, so deiniting them again
    // here is a harmless no-op; on error paths this frees the intermediates.
    defer {
        function.object.code.deinit();
        function.object.dg.fwd_decl.deinit();
        function.object.dg.ctype_pool.deinit(gpa);
        function.object.dg.scratch.deinit(gpa);
        function.object.dg.uavs.deinit(gpa);
        function.deinit();
    }

    try function.object.dg.ctype_pool.init(gpa);
    // Must be set after construction so it points at the final location of
    // `object.code`.
    function.object.indent_writer = .{ .underlying_writer = function.object.code.writer() };

    genFunc(&function) catch |err| switch (err) {
        error.AnalysisFail => return zcu.codegenFailMsg(func.owner_nav, function.object.dg.error_msg.?),
        error.OutOfMemory => |e| return e,
    };

    // Transfer ownership of the generated state into the returned `Mir`,
    // field by field; `errdefer` frees any already-transferred piece if a
    // later `toOwnedSlice` fails.
    var mir: Mir = .{
        .uavs = .empty,
        .code = &.{},
        .fwd_decl = &.{},
        .ctype_pool = .empty,
        .lazy_fns = .empty,
    };
    errdefer mir.deinit(gpa);
    mir.uavs = function.object.dg.uavs.move();
    mir.code = try function.object.code.toOwnedSlice();
    mir.fwd_decl = try function.object.dg.fwd_decl.toOwnedSlice();
    mir.ctype_pool = function.object.dg.ctype_pool.move();
    mir.lazy_fns = function.lazy_fns.move();
    return mir;
}
fn genFunc(f: *Function) !void {
const tracy = trace(@src());
defer tracy.end();
@ -8482,7 +8582,7 @@ fn iterateBigTomb(f: *Function, inst: Air.Inst.Index) BigTomb {
/// A naive clone of this map would create copies of the ArrayList which is
/// stored in the values. This function additionally clones the values.
fn cloneFreeLocalsMap(gpa: mem.Allocator, map: *LocalsMap) !LocalsMap {
fn cloneFreeLocalsMap(gpa: Allocator, map: *LocalsMap) !LocalsMap {
var cloned = try map.clone(gpa);
const values = cloned.values();
var i: usize = 0;
@ -8499,7 +8599,7 @@ fn cloneFreeLocalsMap(gpa: mem.Allocator, map: *LocalsMap) !LocalsMap {
return cloned;
}
fn deinitFreeLocalsMap(gpa: mem.Allocator, map: *LocalsMap) void {
fn deinitFreeLocalsMap(gpa: Allocator, map: *LocalsMap) void {
for (map.values()) |*value| {
value.deinit(gpa);
}

View File

@ -1121,8 +1121,8 @@ pub const Object = struct {
o: *Object,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
air: *const Air,
liveness: *const Air.Liveness,
) !void {
assert(std.meta.eql(pt, o.pt));
const zcu = pt.zcu;
@ -1479,8 +1479,8 @@ pub const Object = struct {
var fg: FuncGen = .{
.gpa = gpa,
.air = air,
.liveness = liveness,
.air = air.*,
.liveness = liveness.*,
.ng = &ng,
.wip = wip,
.is_naked = fn_info.cc == .naked,
@ -1506,10 +1506,9 @@ pub const Object = struct {
deinit_wip = false;
fg.genBody(air.getMainBody(), .poi) catch |err| switch (err) {
error.CodegenFail => {
try zcu.failed_codegen.put(gpa, func.owner_nav, ng.err_msg.?);
ng.err_msg = null;
return;
error.CodegenFail => switch (zcu.codegenFailMsg(func.owner_nav, ng.err_msg.?)) {
error.CodegenFail => return,
error.OutOfMemory => |e| return e,
},
else => |e| return e,
};
@ -1561,10 +1560,9 @@ pub const Object = struct {
.err_msg = null,
};
ng.genDecl() catch |err| switch (err) {
error.CodegenFail => {
try pt.zcu.failed_codegen.put(pt.zcu.gpa, nav_index, ng.err_msg.?);
ng.err_msg = null;
return;
error.CodegenFail => switch (pt.zcu.codegenFailMsg(nav_index, ng.err_msg.?)) {
error.CodegenFail => return,
error.OutOfMemory => |e| return e,
},
else => |e| return e,
};
@ -1586,6 +1584,27 @@ pub const Object = struct {
const global_index = self.nav_map.get(nav_index).?;
const comp = zcu.comp;
// If we're on COFF and linking with LLD, the linker cares about our exports to determine the subsystem in use.
coff_export_flags: {
const lf = comp.bin_file orelse break :coff_export_flags;
const lld = lf.cast(.lld) orelse break :coff_export_flags;
const coff = switch (lld.ofmt) {
.elf, .wasm => break :coff_export_flags,
.coff => |*coff| coff,
};
if (!ip.isFunctionType(ip.getNav(nav_index).typeOf(ip))) break :coff_export_flags;
const flags = &coff.lld_export_flags;
for (export_indices) |export_index| {
const name = export_index.ptr(zcu).opts.name;
if (name.eqlSlice("main", ip)) flags.c_main = true;
if (name.eqlSlice("WinMain", ip)) flags.winmain = true;
if (name.eqlSlice("wWinMain", ip)) flags.wwinmain = true;
if (name.eqlSlice("WinMainCRTStartup", ip)) flags.winmain_crt_startup = true;
if (name.eqlSlice("wWinMainCRTStartup", ip)) flags.wwinmain_crt_startup = true;
if (name.eqlSlice("DllMainCRTStartup", ip)) flags.dllmain_crt_startup = true;
}
}
if (export_indices.len != 0) {
return updateExportedGlobal(self, zcu, global_index, export_indices);
} else {
@ -9490,15 +9509,21 @@ pub const FuncGen = struct {
const inst_ty = self.typeOfIndex(inst);
const name = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.name;
if (name == .none) return arg_val;
const func = zcu.funcInfo(zcu.navValue(self.ng.nav_index).toIntern());
const func_zir = func.zir_body_inst.resolveFull(&zcu.intern_pool).?;
const file = zcu.fileByIndex(func_zir.file);
const mod = file.mod.?;
if (mod.strip) return arg_val;
const arg = self.air.instructions.items(.data)[@intFromEnum(inst)].arg;
const zir = &file.zir.?;
const name = zir.nullTerminatedString(zir.getParamName(zir.getParamBody(func_zir.inst)[arg.zir_param_index]).?);
const lbrace_line = zcu.navSrcLine(func.owner_nav) + func.lbrace_line + 1;
const lbrace_col = func.lbrace_column + 1;
const debug_parameter = try o.builder.debugParameter(
try o.builder.metadataString(name.toSlice(self.air)),
try o.builder.metadataString(name),
self.file,
self.scope,
lbrace_line,
@ -9516,7 +9541,6 @@ pub const FuncGen = struct {
},
};
const mod = self.ng.ownerModule();
if (isByRef(inst_ty, zcu)) {
_ = try self.wip.callIntrinsic(
.normal,

View File

@ -230,8 +230,9 @@ pub const Object = struct {
defer nav_gen.deinit();
nav_gen.genNav(do_codegen) catch |err| switch (err) {
error.CodegenFail => {
try zcu.failed_codegen.put(gpa, nav_index, nav_gen.error_msg.?);
error.CodegenFail => switch (zcu.codegenFailMsg(nav_index, nav_gen.error_msg.?)) {
error.CodegenFail => {},
error.OutOfMemory => |e| return e,
},
else => |other| {
// There might be an error that happened *after* self.error_msg
@ -249,12 +250,12 @@ pub const Object = struct {
self: *Object,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
air: *const Air,
liveness: *const Air.Liveness,
) !void {
const nav = pt.zcu.funcInfo(func_index).owner_nav;
// TODO: Separate types for generating decls and functions?
try self.genNav(pt, nav, air, liveness, true);
try self.genNav(pt, nav, air.*, liveness.*, true);
}
pub fn updateNav(

View File

@ -386,8 +386,6 @@ test "SPIR-V Section emit() - string" {
}
test "SPIR-V Section emit() - extended mask" {
if (@import("builtin").zig_backend == .stage1) return error.SkipZigTest;
var section = Section{};
defer section.deinit(std.testing.allocator);

View File

@ -25,6 +25,9 @@ pub const Env = enum {
/// - `zig build-* -fno-emit-bin`
sema,
/// - `zig build-* -ofmt=c`
cbe,
/// - sema
/// - `zig build-* -fincremental -fno-llvm -fno-lld -target x86_64-linux --listen=-`
@"x86_64-linux",
@ -144,6 +147,12 @@ pub const Env = enum {
=> true,
else => Env.ast_gen.supports(feature),
},
.cbe => switch (feature) {
.c_backend,
.c_linker,
=> true,
else => Env.sema.supports(feature),
},
.@"x86_64-linux" => switch (feature) {
.build_command,
.stdio_listen,

View File

@ -985,7 +985,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
assert(comp.freebsd_so_files == null);
comp.freebsd_so_files = so_files;
var task_buffer: [libs.len]link.Task = undefined;
var task_buffer: [libs.len]link.PrelinkTask = undefined;
var task_buffer_i: usize = 0;
{
@ -1004,7 +1004,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
}
}
comp.queueLinkTasks(task_buffer[0..task_buffer_i]);
comp.queuePrelinkTasks(task_buffer[0..task_buffer_i]);
}
fn buildSharedLib(
@ -1019,10 +1019,6 @@ fn buildSharedLib(
defer tracy.end();
const basename = try std.fmt.allocPrint(arena, "lib{s}.so.{d}", .{ lib.name, lib.sover });
const emit_bin = Compilation.EmitLoc{
.directory = bin_directory,
.basename = basename,
};
const version: Version = .{ .major = lib.sover, .minor = 0, .patch = 0 };
const ld_basename = path.basename(comp.getTarget().standardDynamicLinkerPath().get().?);
const soname = if (mem.eql(u8, lib.name, "ld")) ld_basename else basename;
@ -1077,13 +1073,14 @@ fn buildSharedLib(
.dirs = comp.dirs.withoutLocalCache(),
.thread_pool = comp.thread_pool,
.self_exe_path = comp.self_exe_path,
.cache_mode = .incremental,
// Because we manually cache the whole set of objects, we don't cache the individual objects
// within it. In fact, we *can't* do that, because we need `emit_bin` to specify the path.
.cache_mode = .none,
.config = config,
.root_mod = root_mod,
.root_name = lib.name,
.libc_installation = comp.libc_installation,
.emit_bin = emit_bin,
.emit_h = null,
.emit_bin = .{ .yes_path = try bin_directory.join(arena, &.{basename}) },
.verbose_cc = comp.verbose_cc,
.verbose_link = comp.verbose_link,
.verbose_air = comp.verbose_air,

View File

@ -1148,7 +1148,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
assert(comp.glibc_so_files == null);
comp.glibc_so_files = so_files;
var task_buffer: [libs.len]link.Task = undefined;
var task_buffer: [libs.len]link.PrelinkTask = undefined;
var task_buffer_i: usize = 0;
{
@ -1170,7 +1170,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
}
}
comp.queueLinkTasks(task_buffer[0..task_buffer_i]);
comp.queuePrelinkTasks(task_buffer[0..task_buffer_i]);
}
fn buildSharedLib(
@ -1185,10 +1185,6 @@ fn buildSharedLib(
defer tracy.end();
const basename = try std.fmt.allocPrint(arena, "lib{s}.so.{d}", .{ lib.name, lib.sover });
const emit_bin = Compilation.EmitLoc{
.directory = bin_directory,
.basename = basename,
};
const version: Version = .{ .major = lib.sover, .minor = 0, .patch = 0 };
const ld_basename = path.basename(comp.getTarget().standardDynamicLinkerPath().get().?);
const soname = if (mem.eql(u8, lib.name, "ld")) ld_basename else basename;
@ -1243,13 +1239,14 @@ fn buildSharedLib(
.dirs = comp.dirs.withoutLocalCache(),
.thread_pool = comp.thread_pool,
.self_exe_path = comp.self_exe_path,
.cache_mode = .incremental,
// Because we manually cache the whole set of objects, we don't cache the individual objects
// within it. In fact, we *can't* do that, because we need `emit_bin` to specify the path.
.cache_mode = .none,
.config = config,
.root_mod = root_mod,
.root_name = lib.name,
.libc_installation = comp.libc_installation,
.emit_bin = emit_bin,
.emit_h = null,
.emit_bin = .{ .yes_path = try bin_directory.join(arena, &.{basename}) },
.verbose_cc = comp.verbose_cc,
.verbose_link = comp.verbose_link,
.verbose_air = comp.verbose_air,

View File

@ -122,17 +122,6 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
const output_mode = .Lib;
const link_mode = .static;
const target = comp.root_mod.resolved_target.result;
const basename = try std.zig.binNameAlloc(arena, .{
.root_name = root_name,
.target = target,
.output_mode = output_mode,
.link_mode = link_mode,
});
const emit_bin = Compilation.EmitLoc{
.directory = null, // Put it in the cache directory.
.basename = basename,
};
const cxxabi_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxxabi", "include" });
const cxx_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxx", "include" });
@ -271,8 +260,7 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
.root_name = root_name,
.thread_pool = comp.thread_pool,
.libc_installation = comp.libc_installation,
.emit_bin = emit_bin,
.emit_h = null,
.emit_bin = .yes_cache,
.c_source_files = c_source_files.items,
.verbose_cc = comp.verbose_cc,
.verbose_link = comp.verbose_link,
@ -308,7 +296,7 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
assert(comp.libcxx_static_lib == null);
const crt_file = try sub_compilation.toCrtFile();
comp.libcxx_static_lib = crt_file;
comp.queueLinkTaskMode(crt_file.full_object_path, &config);
comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
}
pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildError!void {
@ -327,17 +315,6 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
const output_mode = .Lib;
const link_mode = .static;
const target = comp.root_mod.resolved_target.result;
const basename = try std.zig.binNameAlloc(arena, .{
.root_name = root_name,
.target = target,
.output_mode = output_mode,
.link_mode = link_mode,
});
const emit_bin = Compilation.EmitLoc{
.directory = null, // Put it in the cache directory.
.basename = basename,
};
const cxxabi_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxxabi", "include" });
const cxx_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxx", "include" });
@ -467,8 +444,7 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
.root_name = root_name,
.thread_pool = comp.thread_pool,
.libc_installation = comp.libc_installation,
.emit_bin = emit_bin,
.emit_h = null,
.emit_bin = .yes_cache,
.c_source_files = c_source_files.items,
.verbose_cc = comp.verbose_cc,
.verbose_link = comp.verbose_link,
@ -504,7 +480,7 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
assert(comp.libcxxabi_static_lib == null);
const crt_file = try sub_compilation.toCrtFile();
comp.libcxxabi_static_lib = crt_file;
comp.queueLinkTaskMode(crt_file.full_object_path, &config);
comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
}
pub fn addCxxArgs(

View File

@ -45,11 +45,6 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
.link_mode = link_mode,
});
const emit_bin = Compilation.EmitLoc{
.directory = null, // Put it in the cache directory.
.basename = basename,
};
const optimize_mode = comp.compilerRtOptMode();
const strip = comp.compilerRtStrip();
const unwind_tables: std.builtin.UnwindTables =
@ -287,8 +282,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
.root_mod = root_mod,
.root_name = root_name,
.libc_installation = comp.libc_installation,
.emit_bin = emit_bin,
.emit_h = null,
.emit_bin = .yes_cache,
.c_source_files = c_source_files.items,
.verbose_cc = comp.verbose_cc,
.verbose_link = comp.verbose_link,
@ -325,7 +319,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
};
const crt_file = try sub_compilation.toCrtFile();
comp.queueLinkTaskMode(crt_file.full_object_path, &config);
comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
assert(comp.tsan_lib == null);
comp.tsan_lib = crt_file;
}

View File

@ -31,7 +31,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
const unwind_tables: std.builtin.UnwindTables =
if (target.cpu.arch == .x86 and target.os.tag == .windows) .none else .@"async";
const config = Compilation.Config.resolve(.{
.output_mode = .Lib,
.output_mode = output_mode,
.resolved_target = comp.root_mod.resolved_target,
.is_test = false,
.have_zcu = false,
@ -85,17 +85,6 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
};
const root_name = "unwind";
const link_mode = .static;
const basename = try std.zig.binNameAlloc(arena, .{
.root_name = root_name,
.target = target,
.output_mode = output_mode,
.link_mode = link_mode,
});
const emit_bin = Compilation.EmitLoc{
.directory = null, // Put it in the cache directory.
.basename = basename,
};
var c_source_files: [unwind_src_list.len]Compilation.CSourceFile = undefined;
for (unwind_src_list, 0..) |unwind_src, i| {
var cflags = std.ArrayList([]const u8).init(arena);
@ -160,7 +149,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
.main_mod = null,
.thread_pool = comp.thread_pool,
.libc_installation = comp.libc_installation,
.emit_bin = emit_bin,
.emit_bin = .yes_cache,
.function_sections = comp.function_sections,
.c_source_files = &c_source_files,
.verbose_cc = comp.verbose_cc,
@ -195,7 +184,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
};
const crt_file = try sub_compilation.toCrtFile();
comp.queueLinkTaskMode(crt_file.full_object_path, &config);
comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
assert(comp.libunwind_static_lib == null);
comp.libunwind_static_lib = crt_file;
}

View File

@ -252,8 +252,7 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
.thread_pool = comp.thread_pool,
.root_name = "c",
.libc_installation = comp.libc_installation,
.emit_bin = .{ .directory = null, .basename = "libc.so" },
.emit_h = null,
.emit_bin = .yes_cache,
.verbose_cc = comp.verbose_cc,
.verbose_link = comp.verbose_link,
.verbose_air = comp.verbose_air,
@ -278,7 +277,7 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
errdefer comp.gpa.free(basename);
const crt_file = try sub_compilation.toCrtFile();
comp.queueLinkTaskMode(crt_file.full_object_path, &config);
comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
{
comp.mutex.lock();
defer comp.mutex.unlock();

View File

@ -650,7 +650,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
assert(comp.netbsd_so_files == null);
comp.netbsd_so_files = so_files;
var task_buffer: [libs.len]link.Task = undefined;
var task_buffer: [libs.len]link.PrelinkTask = undefined;
var task_buffer_i: usize = 0;
{
@ -669,7 +669,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
}
}
comp.queueLinkTasks(task_buffer[0..task_buffer_i]);
comp.queuePrelinkTasks(task_buffer[0..task_buffer_i]);
}
fn buildSharedLib(
@ -684,10 +684,6 @@ fn buildSharedLib(
defer tracy.end();
const basename = try std.fmt.allocPrint(arena, "lib{s}.so.{d}", .{ lib.name, lib.sover });
const emit_bin = Compilation.EmitLoc{
.directory = bin_directory,
.basename = basename,
};
const version: Version = .{ .major = lib.sover, .minor = 0, .patch = 0 };
const ld_basename = path.basename(comp.getTarget().standardDynamicLinkerPath().get().?);
const soname = if (mem.eql(u8, lib.name, "ld")) ld_basename else basename;
@ -741,13 +737,14 @@ fn buildSharedLib(
.dirs = comp.dirs.withoutLocalCache(),
.thread_pool = comp.thread_pool,
.self_exe_path = comp.self_exe_path,
.cache_mode = .incremental,
// Because we manually cache the whole set of objects, we don't cache the individual objects
// within it. In fact, we *can't* do that, because we need `emit_bin` to specify the path.
.cache_mode = .none,
.config = config,
.root_mod = root_mod,
.root_name = lib.name,
.libc_installation = comp.libc_installation,
.emit_bin = emit_bin,
.emit_h = null,
.emit_bin = .{ .yes_path = try bin_directory.join(arena, &.{basename}) },
.verbose_cc = comp.verbose_cc,
.verbose_link = comp.verbose_link,
.verbose_air = comp.verbose_air,

File diff suppressed because it is too large Load Diff

View File

@ -17,7 +17,7 @@ const link = @import("../link.zig");
const trace = @import("../tracy.zig").trace;
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const Air = @import("../Air.zig");
const AnyMir = @import("../codegen.zig").AnyMir;
pub const zig_h = "#include \"zig.h\"\n";
@ -145,7 +145,6 @@ pub fn createEmpty(
.stack_size = options.stack_size orelse 16777216,
.allow_shlib_undefined = options.allow_shlib_undefined orelse false,
.file = file,
.disable_lld_caching = options.disable_lld_caching,
.build_id = options.build_id,
},
};
@ -167,6 +166,9 @@ pub fn deinit(self: *C) void {
self.uavs.deinit(gpa);
self.aligned_uavs.deinit(gpa);
self.exported_navs.deinit(gpa);
self.exported_uavs.deinit(gpa);
self.string_bytes.deinit(gpa);
self.fwd_decl_buf.deinit(gpa);
self.code_buf.deinit(gpa);
@ -178,73 +180,23 @@ pub fn updateFunc(
self: *C,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
mir: *AnyMir,
) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
const gop = try self.navs.getOrPut(gpa, func.owner_nav);
if (!gop.found_existing) gop.value_ptr.* = .{};
const ctype_pool = &gop.value_ptr.ctype_pool;
const lazy_fns = &gop.value_ptr.lazy_fns;
const fwd_decl = &self.fwd_decl_buf;
const code = &self.code_buf;
try ctype_pool.init(gpa);
ctype_pool.clearRetainingCapacity();
lazy_fns.clearRetainingCapacity();
fwd_decl.clearRetainingCapacity();
code.clearRetainingCapacity();
var function: codegen.Function = .{
.value_map = codegen.CValueMap.init(gpa),
.air = air,
.liveness = liveness,
.func_index = func_index,
.object = .{
.dg = .{
.gpa = gpa,
.pt = pt,
.mod = zcu.navFileScope(func.owner_nav).mod.?,
.error_msg = null,
.pass = .{ .nav = func.owner_nav },
.is_naked_fn = Type.fromInterned(func.ty).fnCallingConvention(zcu) == .naked,
.expected_block = null,
.fwd_decl = fwd_decl.toManaged(gpa),
.ctype_pool = ctype_pool.*,
.scratch = .{},
.uav_deps = self.uavs,
.aligned_uavs = self.aligned_uavs,
},
.code = code.toManaged(gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
},
.lazy_fns = lazy_fns.*,
if (gop.found_existing) gop.value_ptr.deinit(gpa);
gop.value_ptr.* = .{
.code = .empty,
.fwd_decl = .empty,
.ctype_pool = mir.c.ctype_pool.move(),
.lazy_fns = mir.c.lazy_fns.move(),
};
function.object.indent_writer = .{ .underlying_writer = function.object.code.writer() };
defer {
self.uavs = function.object.dg.uav_deps;
self.aligned_uavs = function.object.dg.aligned_uavs;
fwd_decl.* = function.object.dg.fwd_decl.moveToUnmanaged();
ctype_pool.* = function.object.dg.ctype_pool.move();
ctype_pool.freeUnusedCapacity(gpa);
function.object.dg.scratch.deinit(gpa);
lazy_fns.* = function.lazy_fns.move();
lazy_fns.shrinkAndFree(gpa, lazy_fns.count());
code.* = function.object.code.moveToUnmanaged();
function.deinit();
}
try zcu.failed_codegen.ensureUnusedCapacity(gpa, 1);
codegen.genFunc(&function) catch |err| switch (err) {
error.AnalysisFail => {
zcu.failed_codegen.putAssumeCapacityNoClobber(func.owner_nav, function.object.dg.error_msg.?);
return;
},
else => |e| return e,
};
gop.value_ptr.fwd_decl = try self.addString(function.object.dg.fwd_decl.items);
gop.value_ptr.code = try self.addString(function.object.code.items);
gop.value_ptr.code = try self.addString(mir.c.code);
gop.value_ptr.fwd_decl = try self.addString(mir.c.fwd_decl);
try self.addUavsFromCodegen(&mir.c.uavs);
}
fn updateUav(self: *C, pt: Zcu.PerThread, i: usize) !void {
@ -268,16 +220,14 @@ fn updateUav(self: *C, pt: Zcu.PerThread, i: usize) !void {
.fwd_decl = fwd_decl.toManaged(gpa),
.ctype_pool = codegen.CType.Pool.empty,
.scratch = .{},
.uav_deps = self.uavs,
.aligned_uavs = self.aligned_uavs,
.uavs = .empty,
},
.code = code.toManaged(gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
};
object.indent_writer = .{ .underlying_writer = object.code.writer() };
defer {
self.uavs = object.dg.uav_deps;
self.aligned_uavs = object.dg.aligned_uavs;
object.dg.uavs.deinit(gpa);
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
object.dg.ctype_pool.deinit(object.dg.gpa);
object.dg.scratch.deinit(gpa);
@ -296,8 +246,10 @@ fn updateUav(self: *C, pt: Zcu.PerThread, i: usize) !void {
else => |e| return e,
};
try self.addUavsFromCodegen(&object.dg.uavs);
object.dg.ctype_pool.freeUnusedCapacity(gpa);
object.dg.uav_deps.values()[i] = .{
self.uavs.values()[i] = .{
.code = try self.addString(object.code.items),
.fwd_decl = try self.addString(object.dg.fwd_decl.items),
.ctype_pool = object.dg.ctype_pool.move(),
@ -344,16 +296,14 @@ pub fn updateNav(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) l
.fwd_decl = fwd_decl.toManaged(gpa),
.ctype_pool = ctype_pool.*,
.scratch = .{},
.uav_deps = self.uavs,
.aligned_uavs = self.aligned_uavs,
.uavs = .empty,
},
.code = code.toManaged(gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
};
object.indent_writer = .{ .underlying_writer = object.code.writer() };
defer {
self.uavs = object.dg.uav_deps;
self.aligned_uavs = object.dg.aligned_uavs;
object.dg.uavs.deinit(gpa);
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
ctype_pool.* = object.dg.ctype_pool.move();
ctype_pool.freeUnusedCapacity(gpa);
@ -361,16 +311,16 @@ pub fn updateNav(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) l
code.* = object.code.moveToUnmanaged();
}
try zcu.failed_codegen.ensureUnusedCapacity(gpa, 1);
codegen.genDecl(&object) catch |err| switch (err) {
error.AnalysisFail => {
zcu.failed_codegen.putAssumeCapacityNoClobber(nav_index, object.dg.error_msg.?);
return;
error.AnalysisFail => switch (zcu.codegenFailMsg(nav_index, object.dg.error_msg.?)) {
error.CodegenFail => return,
error.OutOfMemory => |e| return e,
},
else => |e| return e,
};
gop.value_ptr.code = try self.addString(object.code.items);
gop.value_ptr.fwd_decl = try self.addString(object.dg.fwd_decl.items);
try self.addUavsFromCodegen(&object.dg.uavs);
}
pub fn updateLineNumber(self: *C, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void {
@ -381,10 +331,6 @@ pub fn updateLineNumber(self: *C, pt: Zcu.PerThread, ti_id: InternPool.TrackedIn
_ = ti_id;
}
pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
return self.flushModule(arena, tid, prog_node);
}
fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) {
const gpa = self.base.comp.gpa;
var defines = std.ArrayList(u8).init(gpa);
@ -400,7 +346,7 @@ fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) {
return defines;
}
pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
_ = arena; // Has the same lifetime as the call to Compilation.update.
const tracy = trace(@src());
@ -676,16 +622,14 @@ fn flushErrDecls(self: *C, pt: Zcu.PerThread, ctype_pool: *codegen.CType.Pool) F
.fwd_decl = fwd_decl.toManaged(gpa),
.ctype_pool = ctype_pool.*,
.scratch = .{},
.uav_deps = self.uavs,
.aligned_uavs = self.aligned_uavs,
.uavs = .empty,
},
.code = code.toManaged(gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
};
object.indent_writer = .{ .underlying_writer = object.code.writer() };
defer {
self.uavs = object.dg.uav_deps;
self.aligned_uavs = object.dg.aligned_uavs;
object.dg.uavs.deinit(gpa);
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
ctype_pool.* = object.dg.ctype_pool.move();
ctype_pool.freeUnusedCapacity(gpa);
@ -697,6 +641,8 @@ fn flushErrDecls(self: *C, pt: Zcu.PerThread, ctype_pool: *codegen.CType.Pool) F
error.AnalysisFail => unreachable,
else => |e| return e,
};
try self.addUavsFromCodegen(&object.dg.uavs);
}
fn flushLazyFn(
@ -724,8 +670,7 @@ fn flushLazyFn(
.fwd_decl = fwd_decl.toManaged(gpa),
.ctype_pool = ctype_pool.*,
.scratch = .{},
.uav_deps = .{},
.aligned_uavs = .{},
.uavs = .empty,
},
.code = code.toManaged(gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
@ -734,8 +679,7 @@ fn flushLazyFn(
defer {
// If this assert trips just handle the anon_decl_deps the same as
// `updateFunc()` does.
assert(object.dg.uav_deps.count() == 0);
assert(object.dg.aligned_uavs.count() == 0);
assert(object.dg.uavs.count() == 0);
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
ctype_pool.* = object.dg.ctype_pool.move();
ctype_pool.freeUnusedCapacity(gpa);
@ -871,12 +815,10 @@ pub fn updateExports(
.fwd_decl = fwd_decl.toManaged(gpa),
.ctype_pool = decl_block.ctype_pool,
.scratch = .{},
.uav_deps = .{},
.aligned_uavs = .{},
.uavs = .empty,
};
defer {
assert(dg.uav_deps.count() == 0);
assert(dg.aligned_uavs.count() == 0);
assert(dg.uavs.count() == 0);
fwd_decl.* = dg.fwd_decl.moveToUnmanaged();
ctype_pool.* = dg.ctype_pool.move();
ctype_pool.freeUnusedCapacity(gpa);
@ -896,3 +838,21 @@ pub fn deleteExport(
.uav => |uav| _ = self.exported_uavs.swapRemove(uav),
}
}
fn addUavsFromCodegen(c: *C, uavs: *const std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment)) Allocator.Error!void {
const gpa = c.base.comp.gpa;
try c.uavs.ensureUnusedCapacity(gpa, uavs.count());
try c.aligned_uavs.ensureUnusedCapacity(gpa, uavs.count());
for (uavs.keys(), uavs.values()) |uav_val, uav_align| {
{
const gop = c.uavs.getOrPutAssumeCapacity(uav_val);
if (!gop.found_existing) gop.value_ptr.* = .{};
}
if (uav_align != .none) {
const gop = c.aligned_uavs.getOrPutAssumeCapacity(uav_val);
gop.value_ptr.* = if (gop.found_existing) max: {
break :max gop.value_ptr.*.maxStrict(uav_align);
} else uav_align;
}
}
}

View File

@ -1,26 +1,14 @@
//! The main driver of the COFF linker.
//! Currently uses our own implementation for the incremental linker, and falls back to
//! LLD for traditional linking (linking relocatable object files).
//! LLD is also the default linker for LLVM.
/// If this is not null, an object file is created by LLVM and emitted to zcu_object_sub_path.
llvm_object: ?LlvmObject.Ptr = null,
//! The main driver of the self-hosted COFF linker.
base: link.File,
image_base: u64,
subsystem: ?std.Target.SubSystem,
tsaware: bool,
nxcompat: bool,
dynamicbase: bool,
/// TODO this and minor_subsystem_version should be combined into one property and left as
/// default or populated together. They should not be separate fields.
major_subsystem_version: u16,
minor_subsystem_version: u16,
lib_directories: []const Directory,
entry: link.File.OpenOptions.Entry,
entry_addr: ?u32,
module_definition_file: ?[]const u8,
pdb_out_path: ?[]const u8,
repro: bool,
ptr_width: PtrWidth,
@ -226,7 +214,6 @@ pub fn createEmpty(
const output_mode = comp.config.output_mode;
const link_mode = comp.config.link_mode;
const use_llvm = comp.config.use_llvm;
const use_lld = build_options.have_llvm and comp.config.use_lld;
const ptr_width: PtrWidth = switch (target.ptrBitWidth()) {
0...32 => .p32,
@ -237,29 +224,21 @@ pub fn createEmpty(
else => 0x1000,
};
// If using LLD to link, this code should produce an object file so that it
// can be passed to LLD.
// If using LLVM to generate the object file for the zig compilation unit,
// we need a place to put the object file so that it can be subsequently
// handled.
const zcu_object_sub_path = if (!use_lld and !use_llvm)
null
else
try allocPrint(arena, "{s}.obj", .{emit.sub_path});
const coff = try arena.create(Coff);
coff.* = .{
.base = .{
.tag = .coff,
.comp = comp,
.emit = emit,
.zcu_object_sub_path = zcu_object_sub_path,
.zcu_object_basename = if (use_llvm)
try std.fmt.allocPrint(arena, "{s}_zcu.obj", .{fs.path.stem(emit.sub_path)})
else
null,
.stack_size = options.stack_size orelse 16777216,
.gc_sections = options.gc_sections orelse (optimize_mode != .Debug),
.print_gc_sections = options.print_gc_sections,
.allow_shlib_undefined = options.allow_shlib_undefined orelse false,
.file = null,
.disable_lld_caching = options.disable_lld_caching,
.build_id = options.build_id,
},
.ptr_width = ptr_width,
@ -284,45 +263,23 @@ pub fn createEmpty(
.Obj => 0,
},
// Subsystem depends on the set of public symbol names from linked objects.
// See LinkerDriver::inferSubsystem from the LLD project for the flow chart.
.subsystem = options.subsystem,
.entry = options.entry,
.tsaware = options.tsaware,
.nxcompat = options.nxcompat,
.dynamicbase = options.dynamicbase,
.major_subsystem_version = options.major_subsystem_version orelse 6,
.minor_subsystem_version = options.minor_subsystem_version orelse 0,
.lib_directories = options.lib_directories,
.entry_addr = math.cast(u32, options.entry_addr orelse 0) orelse
return error.EntryAddressTooBig,
.module_definition_file = options.module_definition_file,
.pdb_out_path = options.pdb_out_path,
.repro = options.repro,
};
if (use_llvm and comp.config.have_zcu) {
coff.llvm_object = try LlvmObject.create(arena, comp);
}
errdefer coff.base.destroy();
if (use_lld and (use_llvm or !comp.config.have_zcu)) {
// LLVM emits the object file (if any); LLD links it into the final product.
return coff;
}
// What path should this COFF linker code output to?
// If using LLD to link, this code should produce an object file so that it
// can be passed to LLD.
const sub_path = if (use_lld) zcu_object_sub_path.? else emit.sub_path;
coff.base.file = try emit.root_dir.handle.createFile(sub_path, .{
coff.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{
.truncate = true,
.read = true,
.mode = link.File.determineMode(use_lld, output_mode, link_mode),
.mode = link.File.determineMode(output_mode, link_mode),
});
assert(coff.llvm_object == null);
const gpa = comp.gpa;
try coff.strtab.buffer.ensureUnusedCapacity(gpa, @sizeOf(u32));
@ -428,8 +385,6 @@ pub fn open(
pub fn deinit(coff: *Coff) void {
const gpa = coff.base.comp.gpa;
if (coff.llvm_object) |llvm_object| llvm_object.deinit();
for (coff.sections.items(.free_list)) |*free_list| {
free_list.deinit(gpa);
}
@ -1097,15 +1052,11 @@ pub fn updateFunc(
coff: *Coff,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
mir: *const codegen.AnyMir,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .coff) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (coff.llvm_object) |llvm_object| {
return llvm_object.updateFunc(pt, func_index, air, liveness);
}
const tracy = trace(@src());
defer tracy.end();
@ -1122,29 +1073,15 @@ pub fn updateFunc(
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
defer code_buffer.deinit(gpa);
codegen.generateFunction(
try codegen.emitFunction(
&coff.base,
pt,
zcu.navSrcLoc(nav_index),
func_index,
air,
liveness,
mir,
&code_buffer,
.none,
) catch |err| switch (err) {
error.CodegenFail => return error.CodegenFail,
error.OutOfMemory => return error.OutOfMemory,
error.Overflow, error.RelocationNotByteAligned => |e| {
try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create(
gpa,
zcu.navSrcLoc(nav_index),
"unable to codegen: {s}",
.{@errorName(e)},
));
try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .func = func_index }));
return error.CodegenFail;
},
};
);
try coff.updateNavCode(pt, nav_index, code_buffer.items, .FUNCTION);
@ -1205,7 +1142,6 @@ pub fn updateNav(
if (build_options.skip_non_native and builtin.object_format != .coff) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (coff.llvm_object) |llvm_object| return llvm_object.updateNav(pt, nav_index);
const tracy = trace(@src());
defer tracy.end();
@ -1330,7 +1266,7 @@ pub fn getOrCreateAtomForLazySymbol(
}
state_ptr.* = .pending_flush;
const atom = atom_ptr.*;
// anyerror needs to be deferred until flushModule
// anyerror needs to be deferred until flush
if (lazy_sym.ty != .anyerror_type) try coff.updateLazySymbolAtom(pt, lazy_sym, atom, switch (lazy_sym.kind) {
.code => coff.text_section_index.?,
.const_data => coff.rdata_section_index.?,
@ -1463,8 +1399,6 @@ fn updateNavCode(
}
pub fn freeNav(coff: *Coff, nav_index: InternPool.NavIndex) void {
if (coff.llvm_object) |llvm_object| return llvm_object.freeNav(nav_index);
const gpa = coff.base.comp.gpa;
if (coff.decls.fetchOrderedRemove(nav_index)) |const_kv| {
@ -1485,50 +1419,7 @@ pub fn updateExports(
}
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const comp = coff.base.comp;
const target = comp.root_mod.resolved_target.result;
if (comp.config.use_llvm) {
// Even in the case of LLVM, we need to notice certain exported symbols in order to
// detect the default subsystem.
for (export_indices) |export_idx| {
const exp = export_idx.ptr(zcu);
const exported_nav_index = switch (exp.exported) {
.nav => |nav| nav,
.uav => continue,
};
const exported_nav = ip.getNav(exported_nav_index);
const exported_ty = exported_nav.typeOf(ip);
if (!ip.isFunctionType(exported_ty)) continue;
const c_cc = target.cCallingConvention().?;
const winapi_cc: std.builtin.CallingConvention = switch (target.cpu.arch) {
.x86 => .{ .x86_stdcall = .{} },
else => c_cc,
};
const exported_cc = Type.fromInterned(exported_ty).fnCallingConvention(zcu);
const CcTag = std.builtin.CallingConvention.Tag;
if (@as(CcTag, exported_cc) == @as(CcTag, c_cc) and exp.opts.name.eqlSlice("main", ip) and comp.config.link_libc) {
zcu.stage1_flags.have_c_main = true;
} else if (@as(CcTag, exported_cc) == @as(CcTag, winapi_cc) and target.os.tag == .windows) {
if (exp.opts.name.eqlSlice("WinMain", ip)) {
zcu.stage1_flags.have_winmain = true;
} else if (exp.opts.name.eqlSlice("wWinMain", ip)) {
zcu.stage1_flags.have_wwinmain = true;
} else if (exp.opts.name.eqlSlice("WinMainCRTStartup", ip)) {
zcu.stage1_flags.have_winmain_crt_startup = true;
} else if (exp.opts.name.eqlSlice("wWinMainCRTStartup", ip)) {
zcu.stage1_flags.have_wwinmain_crt_startup = true;
} else if (exp.opts.name.eqlSlice("DllMainCRTStartup", ip)) {
zcu.stage1_flags.have_dllmain_crt_startup = true;
}
}
}
}
if (coff.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, export_indices);
const gpa = comp.gpa;
const gpa = zcu.gpa;
const metadata = switch (exported) {
.nav => |nav| blk: {
@ -1621,7 +1512,6 @@ pub fn deleteExport(
exported: Zcu.Exported,
name: InternPool.NullTerminatedString,
) void {
if (coff.llvm_object) |_| return;
const metadata = switch (exported) {
.nav => |nav| coff.navs.getPtr(nav),
.uav => |uav| coff.uavs.getPtr(uav),
@ -1680,571 +1570,7 @@ fn resolveGlobalSymbol(coff: *Coff, current: SymbolWithLoc) !void {
gop.value_ptr.* = current;
}
/// Entry point for producing the final linked output. When the compilation is
/// configured to use LLD (requires an LLVM-enabled compiler build), delegates
/// the whole link to `linkWithLLD`; otherwise flushes the self-hosted COFF
/// linker's state via `flushModule`. Writing `.lib` outputs is not yet
/// implemented in the self-hosted path.
pub fn flush(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
    const comp = coff.base.comp;
    // LLD is only available when this compiler binary was built with LLVM.
    const use_lld = build_options.have_llvm and comp.config.use_lld;
    const diags = &comp.link_diags;
    if (use_lld) {
        // OOM and LinkFailure keep their identity for callers; every other
        // error is folded into a single link diagnostic.
        return coff.linkWithLLD(arena, tid, prog_node) catch |err| switch (err) {
            error.OutOfMemory => return error.OutOfMemory,
            error.LinkFailure => return error.LinkFailure,
            else => |e| return diags.fail("failed to link with LLD: {s}", .{@errorName(e)}),
        };
    }
    switch (comp.config.output_mode) {
        .Exe, .Obj => return coff.flushModule(arena, tid, prog_node),
        .Lib => return diags.fail("writing lib files not yet implemented for COFF", .{}),
    }
}
/// Produces the final PE/COFF artifact by driving LLD's COFF driver
/// (`lld-link`), invoked as a child process. Steps, in order:
///   1. Flush the Zig module (if any) to an object file for the link line.
///   2. Consult the cache manifest; on a digest match, reuse the existing
///      output and return without invoking LLD.
///   3. For `-ofmt=obj` output, copy the single input object (LLD's COFF
///      driver has no `-r` equivalent); otherwise build an `lld-link` argv
///      and spawn it.
///   4. Record the new digest and hold the cache lock on success.
fn linkWithLLD(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
    dev.check(.lld_linker);

    const tracy = trace(@src());
    defer tracy.end();

    const comp = coff.base.comp;
    const gpa = comp.gpa;

    const directory = coff.base.emit.root_dir; // Just an alias to make it shorter to type.
    const full_out_path = try directory.join(arena, &[_][]const u8{coff.base.emit.sub_path});

    // If there is no Zig code to compile, then we should skip flushing the output file because it
    // will not be part of the linker line anyway.
    const module_obj_path: ?[]const u8 = if (comp.zcu != null) blk: {
        try coff.flushModule(arena, tid, prog_node);

        if (fs.path.dirname(full_out_path)) |dirname| {
            break :blk try fs.path.join(arena, &.{ dirname, coff.base.zcu_object_sub_path.? });
        } else {
            break :blk coff.base.zcu_object_sub_path.?;
        }
    } else null;

    const sub_prog_node = prog_node.start("LLD Link", 0);
    defer sub_prog_node.end();

    const is_lib = comp.config.output_mode == .Lib;
    const is_dyn_lib = comp.config.link_mode == .dynamic and is_lib;
    const is_exe_or_dyn_lib = is_dyn_lib or comp.config.output_mode == .Exe;
    const link_in_crt = comp.config.link_libc and is_exe_or_dyn_lib;
    const target = comp.root_mod.resolved_target.result;
    const optimize_mode = comp.root_mod.optimize_mode;
    const entry_name: ?[]const u8 = switch (coff.entry) {
        // This logic isn't quite right for disabled or enabled. No point in fixing it
        // when the goal is to eliminate dependency on LLD anyway.
        // https://github.com/ziglang/zig/issues/17751
        .disabled, .default, .enabled => null,
        .named => |name| name,
    };

    // See link/Elf.zig for comments on how this mechanism works.
    const id_symlink_basename = "lld.id";

    var man: Cache.Manifest = undefined;
    defer if (!coff.base.disable_lld_caching) man.deinit();

    var digest: [Cache.hex_digest_len]u8 = undefined;

    if (!coff.base.disable_lld_caching) {
        man = comp.cache_parent.obtain();

        coff.base.releaseLock();

        comptime assert(Compilation.link_hash_implementation_version == 14);

        // Everything that can affect the linker output must be hashed here,
        // in a stable order, so that the digest faithfully keys the cache.
        try link.hashInputs(&man, comp.link_inputs);
        for (comp.c_object_table.keys()) |key| {
            _ = try man.addFilePath(key.status.success.object_path, null);
        }
        for (comp.win32_resource_table.keys()) |key| {
            _ = try man.addFile(key.status.success.res_path, null);
        }
        try man.addOptionalFile(module_obj_path);
        man.hash.addOptionalBytes(entry_name);
        man.hash.add(coff.base.stack_size);
        man.hash.add(coff.image_base);
        man.hash.add(coff.base.build_id);
        {
            // TODO remove this, libraries must instead be resolved by the frontend.
            for (coff.lib_directories) |lib_directory| man.hash.addOptionalBytes(lib_directory.path);
        }
        man.hash.add(comp.skip_linker_dependencies);
        if (comp.config.link_libc) {
            man.hash.add(comp.libc_installation != null);
            if (comp.libc_installation) |libc_installation| {
                man.hash.addBytes(libc_installation.crt_dir.?);
                if (target.abi == .msvc or target.abi == .itanium) {
                    man.hash.addBytes(libc_installation.msvc_lib_dir.?);
                    man.hash.addBytes(libc_installation.kernel32_lib_dir.?);
                }
            }
        }
        man.hash.addListOfBytes(comp.windows_libs.keys());
        man.hash.addListOfBytes(comp.force_undefined_symbols.keys());
        man.hash.addOptional(coff.subsystem);
        man.hash.add(comp.config.is_test);
        man.hash.add(coff.tsaware);
        man.hash.add(coff.nxcompat);
        man.hash.add(coff.dynamicbase);
        man.hash.add(coff.base.allow_shlib_undefined);
        // strip does not need to go into the linker hash because it is part of the hash namespace
        man.hash.add(coff.major_subsystem_version);
        man.hash.add(coff.minor_subsystem_version);
        man.hash.add(coff.repro);
        man.hash.addOptional(comp.version);
        try man.addOptionalFile(coff.module_definition_file);

        // We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
        _ = try man.hit();
        digest = man.final();

        var prev_digest_buf: [digest.len]u8 = undefined;
        const prev_digest: []u8 = Cache.readSmallFile(
            directory.handle,
            id_symlink_basename,
            &prev_digest_buf,
        ) catch |err| blk: {
            log.debug("COFF LLD new_digest={s} error: {s}", .{ std.fmt.fmtSliceHexLower(&digest), @errorName(err) });
            // Handle this as a cache miss.
            break :blk prev_digest_buf[0..0];
        };
        if (mem.eql(u8, prev_digest, &digest)) {
            log.debug("COFF LLD digest={s} match - skipping invocation", .{std.fmt.fmtSliceHexLower(&digest)});
            // Hot diggity dog! The output binary is already there.
            coff.base.lock = man.toOwnedLock();
            return;
        }
        log.debug("COFF LLD prev_digest={s} new_digest={s}", .{ std.fmt.fmtSliceHexLower(prev_digest), std.fmt.fmtSliceHexLower(&digest) });

        // We are about to change the output file to be different, so we invalidate the build hash now.
        directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
            error.FileNotFound => {},
            else => |e| return e,
        };
    }

    if (comp.config.output_mode == .Obj) {
        // LLD's COFF driver does not support the equivalent of `-r` so we do a simple file copy
        // here. TODO: think carefully about how we can avoid this redundant operation when doing
        // build-obj. See also the corresponding TODO in linkAsArchive.
        const the_object_path = blk: {
            if (link.firstObjectInput(comp.link_inputs)) |obj| break :blk obj.path;

            if (comp.c_object_table.count() != 0)
                break :blk comp.c_object_table.keys()[0].status.success.object_path;

            if (module_obj_path) |p|
                break :blk Path.initCwd(p);

            // TODO I think this is unreachable. Audit this situation when solving the above TODO
            // regarding eliding redundant object -> object transformations.
            return error.NoObjectsToLink;
        };
        try std.fs.Dir.copyFile(
            the_object_path.root_dir.handle,
            the_object_path.sub_path,
            directory.handle,
            coff.base.emit.sub_path,
            .{},
        );
    } else {
        // Create an LLD command line and invoke it.
        var argv = std.ArrayList([]const u8).init(gpa);
        defer argv.deinit();

        // We will invoke ourselves as a child process to gain access to LLD.
        // This is necessary because LLD does not behave properly as a library -
        // it calls exit() and does not reset all global data between invocations.
        const linker_command = "lld-link";
        try argv.appendSlice(&[_][]const u8{ comp.self_exe_path.?, linker_command });

        if (target.isMinGW()) {
            try argv.append("-lldmingw");
        }

        try argv.append("-ERRORLIMIT:0");
        try argv.append("-NOLOGO");
        if (comp.config.debug_format != .strip) {
            try argv.append("-DEBUG");

            // Default the PDB next to the output file, replacing its extension.
            const out_ext = std.fs.path.extension(full_out_path);
            const out_pdb = coff.pdb_out_path orelse try allocPrint(arena, "{s}.pdb", .{
                full_out_path[0 .. full_out_path.len - out_ext.len],
            });
            const out_pdb_basename = std.fs.path.basename(out_pdb);

            try argv.append(try allocPrint(arena, "-PDB:{s}", .{out_pdb}));
            try argv.append(try allocPrint(arena, "-PDBALTPATH:{s}", .{out_pdb_basename}));
        }
        if (comp.version) |version| {
            try argv.append(try allocPrint(arena, "-VERSION:{}.{}", .{ version.major, version.minor }));
        }

        if (target_util.llvmMachineAbi(target)) |mabi| {
            try argv.append(try allocPrint(arena, "-MLLVM:-target-abi={s}", .{mabi}));
        }

        try argv.append(try allocPrint(arena, "-MLLVM:-float-abi={s}", .{if (target.abi.float() == .hard) "hard" else "soft"}));

        if (comp.config.lto != .none) {
            switch (optimize_mode) {
                .Debug => {},
                .ReleaseSmall => try argv.append("-OPT:lldlto=2"),
                .ReleaseFast, .ReleaseSafe => try argv.append("-OPT:lldlto=3"),
            }
        }
        if (comp.config.output_mode == .Exe) {
            try argv.append(try allocPrint(arena, "-STACK:{d}", .{coff.base.stack_size}));
        }
        try argv.append(try allocPrint(arena, "-BASE:{d}", .{coff.image_base}));
        switch (coff.base.build_id) {
            .none => try argv.append("-BUILD-ID:NO"),
            .fast => try argv.append("-BUILD-ID"),
            .uuid, .sha1, .md5, .hexstring => {},
        }

        if (target.cpu.arch == .x86) {
            try argv.append("-MACHINE:X86");
        } else if (target.cpu.arch == .x86_64) {
            try argv.append("-MACHINE:X64");
        } else if (target.cpu.arch == .thumb) {
            try argv.append("-MACHINE:ARM");
        } else if (target.cpu.arch == .aarch64) {
            try argv.append("-MACHINE:ARM64");
        }

        for (comp.force_undefined_symbols.keys()) |symbol| {
            try argv.append(try allocPrint(arena, "-INCLUDE:{s}", .{symbol}));
        }

        if (is_dyn_lib) {
            try argv.append("-DLL");
        }

        if (entry_name) |name| {
            try argv.append(try allocPrint(arena, "-ENTRY:{s}", .{name}));
        }

        if (coff.repro) {
            try argv.append("-BREPRO");
        }

        if (coff.tsaware) {
            try argv.append("-tsaware");
        }
        if (coff.nxcompat) {
            try argv.append("-nxcompat");
        }
        if (!coff.dynamicbase) {
            try argv.append("-dynamicbase:NO");
        }

        if (coff.base.allow_shlib_undefined) {
            try argv.append("-FORCE:UNRESOLVED");
        }

        try argv.append(try allocPrint(arena, "-OUT:{s}", .{full_out_path}));

        if (comp.implib_emit) |emit| {
            const implib_out_path = try emit.root_dir.join(arena, &[_][]const u8{emit.sub_path});
            try argv.append(try allocPrint(arena, "-IMPLIB:{s}", .{implib_out_path}));
        }

        if (comp.config.link_libc) {
            if (comp.libc_installation) |libc_installation| {
                try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{libc_installation.crt_dir.?}));

                if (target.abi == .msvc or target.abi == .itanium) {
                    try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{libc_installation.msvc_lib_dir.?}));
                    try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{libc_installation.kernel32_lib_dir.?}));
                }
            }
        }

        for (coff.lib_directories) |lib_directory| {
            try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{lib_directory.path orelse "."}));
        }

        // Positional inputs: link inputs, C objects, resources, the Zig module object.
        try argv.ensureUnusedCapacity(comp.link_inputs.len);
        for (comp.link_inputs) |link_input| switch (link_input) {
            .dso_exact => unreachable, // not applicable to PE/COFF
            inline .dso, .res => |x| {
                argv.appendAssumeCapacity(try x.path.toString(arena));
            },
            .object, .archive => |obj| {
                if (obj.must_link) {
                    argv.appendAssumeCapacity(try allocPrint(arena, "-WHOLEARCHIVE:{}", .{@as(Path, obj.path)}));
                } else {
                    argv.appendAssumeCapacity(try obj.path.toString(arena));
                }
            },
        };

        for (comp.c_object_table.keys()) |key| {
            try argv.append(try key.status.success.object_path.toString(arena));
        }

        for (comp.win32_resource_table.keys()) |key| {
            try argv.append(key.status.success.res_path);
        }

        if (module_obj_path) |p| {
            try argv.append(p);
        }

        if (coff.module_definition_file) |def| {
            try argv.append(try allocPrint(arena, "-DEF:{s}", .{def}));
        }

        // Infer the subsystem when not given explicitly, from exported entry
        // points noticed during compilation (stage1_flags) and the target OS.
        const resolved_subsystem: ?std.Target.SubSystem = blk: {
            if (coff.subsystem) |explicit| break :blk explicit;
            switch (target.os.tag) {
                .windows => {
                    if (comp.zcu) |module| {
                        if (module.stage1_flags.have_dllmain_crt_startup or is_dyn_lib)
                            break :blk null;
                        if (module.stage1_flags.have_c_main or comp.config.is_test or
                            module.stage1_flags.have_winmain_crt_startup or
                            module.stage1_flags.have_wwinmain_crt_startup)
                        {
                            break :blk .Console;
                        }
                        if (module.stage1_flags.have_winmain or module.stage1_flags.have_wwinmain)
                            break :blk .Windows;
                    }
                },
                .uefi => break :blk .EfiApplication,
                else => {},
            }
            break :blk null;
        };

        const Mode = enum { uefi, win32 };

        const mode: Mode = mode: {
            if (resolved_subsystem) |subsystem| {
                const subsystem_suffix = try allocPrint(arena, ",{d}.{d}", .{
                    coff.major_subsystem_version, coff.minor_subsystem_version,
                });

                switch (subsystem) {
                    .Console => {
                        try argv.append(try allocPrint(arena, "-SUBSYSTEM:console{s}", .{
                            subsystem_suffix,
                        }));
                        break :mode .win32;
                    },
                    .EfiApplication => {
                        try argv.append(try allocPrint(arena, "-SUBSYSTEM:efi_application{s}", .{
                            subsystem_suffix,
                        }));
                        break :mode .uefi;
                    },
                    .EfiBootServiceDriver => {
                        try argv.append(try allocPrint(arena, "-SUBSYSTEM:efi_boot_service_driver{s}", .{
                            subsystem_suffix,
                        }));
                        break :mode .uefi;
                    },
                    .EfiRom => {
                        try argv.append(try allocPrint(arena, "-SUBSYSTEM:efi_rom{s}", .{
                            subsystem_suffix,
                        }));
                        break :mode .uefi;
                    },
                    .EfiRuntimeDriver => {
                        try argv.append(try allocPrint(arena, "-SUBSYSTEM:efi_runtime_driver{s}", .{
                            subsystem_suffix,
                        }));
                        break :mode .uefi;
                    },
                    .Native => {
                        try argv.append(try allocPrint(arena, "-SUBSYSTEM:native{s}", .{
                            subsystem_suffix,
                        }));
                        break :mode .win32;
                    },
                    .Posix => {
                        try argv.append(try allocPrint(arena, "-SUBSYSTEM:posix{s}", .{
                            subsystem_suffix,
                        }));
                        break :mode .win32;
                    },
                    .Windows => {
                        try argv.append(try allocPrint(arena, "-SUBSYSTEM:windows{s}", .{
                            subsystem_suffix,
                        }));
                        break :mode .win32;
                    },
                }
            } else if (target.os.tag == .uefi) {
                break :mode .uefi;
            } else {
                break :mode .win32;
            }
        };

        switch (mode) {
            .uefi => try argv.appendSlice(&[_][]const u8{
                "-BASE:0",
                "-ENTRY:EfiMain",
                "-OPT:REF",
                "-SAFESEH:NO",
                "-MERGE:.rdata=.data",
                "-NODEFAULTLIB",
                "-SECTION:.xdata,D",
            }),
            .win32 => {
                if (link_in_crt) {
                    if (target.abi.isGnu()) {
                        if (target.cpu.arch == .x86) {
                            try argv.append("-ALTERNATENAME:__image_base__=___ImageBase");
                        } else {
                            try argv.append("-ALTERNATENAME:__image_base__=__ImageBase");
                        }

                        if (is_dyn_lib) {
                            try argv.append(try comp.crtFileAsString(arena, "dllcrt2.obj"));
                            if (target.cpu.arch == .x86) {
                                try argv.append("-ALTERNATENAME:__DllMainCRTStartup@12=_DllMainCRTStartup@12");
                            } else {
                                try argv.append("-ALTERNATENAME:_DllMainCRTStartup=DllMainCRTStartup");
                            }
                        } else {
                            try argv.append(try comp.crtFileAsString(arena, "crt2.obj"));
                        }

                        try argv.append(try comp.crtFileAsString(arena, "libmingw32.lib"));
                    } else {
                        try argv.append(switch (comp.config.link_mode) {
                            .static => "libcmt.lib",
                            .dynamic => "msvcrt.lib",
                        });

                        const lib_str = switch (comp.config.link_mode) {
                            .static => "lib",
                            .dynamic => "",
                        };

                        try argv.append(try allocPrint(arena, "{s}vcruntime.lib", .{lib_str}));
                        try argv.append(try allocPrint(arena, "{s}ucrt.lib", .{lib_str}));

                        //Visual C++ 2015 Conformance Changes
                        //https://msdn.microsoft.com/en-us/library/bb531344.aspx
                        try argv.append("legacy_stdio_definitions.lib");

                        // msvcrt depends on kernel32 and ntdll
                        try argv.append("kernel32.lib");
                        try argv.append("ntdll.lib");
                    }
                } else {
                    try argv.append("-NODEFAULTLIB");
                    if (!is_lib and entry_name == null) {
                        if (comp.zcu) |module| {
                            if (module.stage1_flags.have_winmain_crt_startup) {
                                try argv.append("-ENTRY:WinMainCRTStartup");
                            } else {
                                try argv.append("-ENTRY:wWinMainCRTStartup");
                            }
                        } else {
                            try argv.append("-ENTRY:wWinMainCRTStartup");
                        }
                    }
                }
            },
        }

        if (comp.config.link_libc and link_in_crt) {
            if (comp.zigc_static_lib) |zigc| {
                try argv.append(try zigc.full_object_path.toString(arena));
            }
        }

        // libc++ dep
        if (comp.config.link_libcpp) {
            try argv.append(try comp.libcxxabi_static_lib.?.full_object_path.toString(arena));
            try argv.append(try comp.libcxx_static_lib.?.full_object_path.toString(arena));
        }

        // libunwind dep
        if (comp.config.link_libunwind) {
            try argv.append(try comp.libunwind_static_lib.?.full_object_path.toString(arena));
        }

        if (comp.config.any_fuzz) {
            try argv.append(try comp.fuzzer_lib.?.full_object_path.toString(arena));
        }

        const ubsan_rt_path: ?Path = blk: {
            if (comp.ubsan_rt_lib) |x| break :blk x.full_object_path;
            if (comp.ubsan_rt_obj) |x| break :blk x.full_object_path;
            break :blk null;
        };

        if (ubsan_rt_path) |path| {
            try argv.append(try path.toString(arena));
        }

        if (is_exe_or_dyn_lib and !comp.skip_linker_dependencies) {
            // MSVC compiler_rt is missing some stuff, so we build it unconditionally but
            // and rely on weak linkage to allow MSVC compiler_rt functions to override ours.
            if (comp.compiler_rt_obj) |obj| try argv.append(try obj.full_object_path.toString(arena));
            if (comp.compiler_rt_lib) |lib| try argv.append(try lib.full_object_path.toString(arena));
        }

        // Resolve `-l` Windows libraries: prefer compiler-built CRT files,
        // then search lib directories (with a MinGW `lib*.dll.a` fallback),
        // then let MSVC-style link resolve the bare name; otherwise error.
        try argv.ensureUnusedCapacity(comp.windows_libs.count());
        for (comp.windows_libs.keys()) |key| {
            const lib_basename = try allocPrint(arena, "{s}.lib", .{key});
            if (comp.crt_files.get(lib_basename)) |crt_file| {
                argv.appendAssumeCapacity(try crt_file.full_object_path.toString(arena));
                continue;
            }
            if (try findLib(arena, lib_basename, coff.lib_directories)) |full_path| {
                argv.appendAssumeCapacity(full_path);
                continue;
            }
            if (target.abi.isGnu()) {
                const fallback_name = try allocPrint(arena, "lib{s}.dll.a", .{key});
                if (try findLib(arena, fallback_name, coff.lib_directories)) |full_path| {
                    argv.appendAssumeCapacity(full_path);
                    continue;
                }
            }
            if (target.abi == .msvc or target.abi == .itanium) {
                argv.appendAssumeCapacity(lib_basename);
                continue;
            }

            log.err("DLL import library for -l{s} not found", .{key});
            return error.DllImportLibraryNotFound;
        }

        try link.spawnLld(comp, arena, argv.items);
    }

    if (!coff.base.disable_lld_caching) {
        // Update the file with the digest. If it fails we can continue; it only
        // means that the next invocation will have an unnecessary cache miss.
        Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
            log.warn("failed to save linking hash digest file: {s}", .{@errorName(err)});
        };
        // Again failure here only means an unnecessary cache miss.
        man.writeManifest() catch |err| {
            log.warn("failed to write cache manifest when linking: {s}", .{@errorName(err)});
        };
        // We hang on to this lock so that the output file path can be used without
        // other processes clobbering it.
        coff.base.lock = man.toOwnedLock();
    }
}
/// Searches `lib_directories` in order for a file named `name`.
/// Returns the joined path (allocated in `arena`) of the first directory
/// containing it, or `null` if no directory does. Any filesystem error
/// other than `FileNotFound` is propagated.
fn findLib(arena: Allocator, name: []const u8, lib_directories: []const Directory) !?[]const u8 {
    for (lib_directories) |dir| {
        if (dir.handle.access(name, .{})) {
            // Found: hand back the full path within this directory.
            return try dir.join(arena, &.{name});
        } else |err| switch (err) {
            error.FileNotFound => {}, // keep scanning the remaining directories
            else => |e| return e,
        }
    }
    return null;
}
pub fn flushModule(
pub fn flush(
coff: *Coff,
arena: Allocator,
tid: Zcu.PerThread.Id,
@ -2256,22 +1582,22 @@ pub fn flushModule(
const comp = coff.base.comp;
const diags = &comp.link_diags;
if (coff.llvm_object) |llvm_object| {
try coff.base.emitLlvmObject(arena, llvm_object, prog_node);
return;
switch (coff.base.comp.config.output_mode) {
.Exe, .Obj => {},
.Lib => return diags.fail("writing lib files not yet implemented for COFF", .{}),
}
const sub_prog_node = prog_node.start("COFF Flush", 0);
defer sub_prog_node.end();
return flushModuleInner(coff, arena, tid) catch |err| switch (err) {
return flushInner(coff, arena, tid) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("COFF flush failed: {s}", .{@errorName(e)}),
};
}
fn flushModuleInner(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id) !void {
fn flushInner(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id) !void {
_ = arena;
const comp = coff.base.comp;
@ -2397,7 +1723,6 @@ pub fn getNavVAddr(
nav_index: InternPool.Nav.Index,
reloc_info: link.File.RelocInfo,
) !u64 {
assert(coff.llvm_object == null);
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
@ -2442,7 +1767,7 @@ pub fn lowerUav(
const atom = coff.getAtom(metadata.atom);
const existing_addr = atom.getSymbol(coff).value;
if (uav_alignment.check(existing_addr))
return .{ .mcv = .{ .load_direct = atom.getSymbolIndex().? } };
return .{ .mcv = .{ .load_symbol = atom.getSymbolIndex().? } };
}
var name_buf: [32]u8 = undefined;
@ -2474,7 +1799,7 @@ pub fn lowerUav(
.section = coff.rdata_section_index.?,
});
return .{ .mcv = .{
.load_direct = coff.getAtom(atom_index).getSymbolIndex().?,
.load_symbol = coff.getAtom(atom_index).getSymbolIndex().?,
} };
}
@ -2483,8 +1808,6 @@ pub fn getUavVAddr(
uav: InternPool.Index,
reloc_info: link.File.RelocInfo,
) !u64 {
assert(coff.llvm_object == null);
const this_atom_index = coff.uavs.get(uav).?.atom;
const sym_index = coff.getAtom(this_atom_index).getSymbolIndex().?;
const atom_index = coff.getAtomIndexForSymbol(.{
@ -3796,9 +3119,7 @@ const link = @import("../link.zig");
const target_util = @import("../target.zig");
const trace = @import("../tracy.zig").trace;
const Air = @import("../Air.zig");
const Compilation = @import("../Compilation.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const Zcu = @import("../Zcu.zig");
const InternPool = @import("../InternPool.zig");
const TableSection = @import("table_section.zig").TableSection;

View File

@ -1474,24 +1474,59 @@ pub const WipNav = struct {
try cfa.write(wip_nav);
}
pub const LocalTag = enum { local_arg, local_var };
pub fn genLocalDebugInfo(
pub const LocalVarTag = enum { arg, local_var };
pub fn genLocalVarDebugInfo(
wip_nav: *WipNav,
tag: LocalTag,
name: []const u8,
tag: LocalVarTag,
opt_name: ?[]const u8,
ty: Type,
loc: Loc,
) UpdateError!void {
assert(wip_nav.func != .none);
try wip_nav.abbrevCode(switch (tag) {
inline else => |ct_tag| @field(AbbrevCode, @tagName(ct_tag)),
.arg => if (opt_name) |_| .arg else .unnamed_arg,
.local_var => if (opt_name) |_| .local_var else unreachable,
});
try wip_nav.strp(name);
if (opt_name) |name| try wip_nav.strp(name);
try wip_nav.refType(ty);
try wip_nav.infoExprLoc(loc);
wip_nav.any_children = true;
}
/// Distinguishes comptime-known function arguments from local constants when
/// selecting the DWARF abbreviation in `genLocalConstDebugInfo`.
pub const LocalConstTag = enum { comptime_arg, local_const };
/// Emits DWARF debug info for a comptime-known local: either a comptime
/// function argument or a local constant.
///
/// The abbreviation code is chosen along three independent axes:
///   * runtime bits: when the value's type has runtime bits, the value's
///     bytes are emitted via `blockValue`;
///   * comptime state: when the type is comptime-only and has more than one
///     possible value, the value is referenced via `refValue`;
///   * name: `opt_name == null` is only legal for `.comptime_arg` (the
///     `unnamed_*` abbreviations); an unnamed `.local_const` is unreachable.
///
/// Asserts a function is currently being emitted (`wip_nav.func != .none`).
pub fn genLocalConstDebugInfo(
    wip_nav: *WipNav,
    src_loc: Zcu.LazySrcLoc,
    tag: LocalConstTag,
    opt_name: ?[]const u8,
    val: Value,
) UpdateError!void {
    assert(wip_nav.func != .none);
    const pt = wip_nav.pt;
    const zcu = pt.zcu;
    const ty = val.typeOf(zcu);
    const has_runtime_bits = ty.hasRuntimeBits(zcu);
    const has_comptime_state = ty.comptimeOnly(zcu) and try ty.onePossibleValue(pt) == null;
    try wip_nav.abbrevCode(if (has_runtime_bits and has_comptime_state) switch (tag) {
        .comptime_arg => if (opt_name) |_| .comptime_arg_runtime_bits_comptime_state else .unnamed_comptime_arg_runtime_bits_comptime_state,
        .local_const => if (opt_name) |_| .local_const_runtime_bits_comptime_state else unreachable,
    } else if (has_comptime_state) switch (tag) {
        .comptime_arg => if (opt_name) |_| .comptime_arg_comptime_state else .unnamed_comptime_arg_comptime_state,
        .local_const => if (opt_name) |_| .local_const_comptime_state else unreachable,
    } else if (has_runtime_bits) switch (tag) {
        .comptime_arg => if (opt_name) |_| .comptime_arg_runtime_bits else .unnamed_comptime_arg_runtime_bits,
        .local_const => if (opt_name) |_| .local_const_runtime_bits else unreachable,
    } else switch (tag) {
        .comptime_arg => if (opt_name) |_| .comptime_arg else .unnamed_comptime_arg,
        .local_const => if (opt_name) |_| .local_const else unreachable,
    });
    // Name (when present), then type, then the value encodings implied by
    // the axes above — order must match the abbreviation's attribute list.
    if (opt_name) |name| try wip_nav.strp(name);
    try wip_nav.refType(ty);
    if (has_runtime_bits) try wip_nav.blockValue(src_loc, val);
    if (has_comptime_state) try wip_nav.refValue(val);
    wip_nav.any_children = true;
}
pub fn genVarArgsDebugInfo(wip_nav: *WipNav) UpdateError!void {
assert(wip_nav.func != .none);
try wip_nav.abbrevCode(.is_var_args);
@ -1825,7 +1860,8 @@ pub const WipNav = struct {
fn getNavEntry(wip_nav: *WipNav, nav_index: InternPool.Nav.Index) UpdateError!struct { Unit.Index, Entry.Index } {
const zcu = wip_nav.pt.zcu;
const ip = &zcu.intern_pool;
const unit = try wip_nav.dwarf.getUnit(zcu.fileByIndex(ip.getNav(nav_index).srcInst(ip).resolveFile(ip)).mod.?);
const nav = ip.getNav(nav_index);
const unit = try wip_nav.dwarf.getUnit(zcu.fileByIndex(nav.srcInst(ip).resolveFile(ip)).mod.?);
const gop = try wip_nav.dwarf.navs.getOrPut(wip_nav.dwarf.gpa, nav_index);
if (gop.found_existing) return .{ unit, gop.value_ptr.* };
const entry = try wip_nav.dwarf.addCommonEntry(unit);
@ -1842,10 +1878,16 @@ pub const WipNav = struct {
const zcu = wip_nav.pt.zcu;
const ip = &zcu.intern_pool;
const maybe_inst_index = ty.typeDeclInst(zcu);
const unit = if (maybe_inst_index) |inst_index|
try wip_nav.dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFile(ip)).mod.?)
else
.main;
const unit = if (maybe_inst_index) |inst_index| switch (switch (ip.indexToKey(ty.toIntern())) {
else => unreachable,
.struct_type => ip.loadStructType(ty.toIntern()).name_nav,
.union_type => ip.loadUnionType(ty.toIntern()).name_nav,
.enum_type => ip.loadEnumType(ty.toIntern()).name_nav,
.opaque_type => ip.loadOpaqueType(ty.toIntern()).name_nav,
}) {
.none => try wip_nav.dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFile(ip)).mod.?),
else => |name_nav| return wip_nav.getNavEntry(name_nav.unwrap().?),
} else .main;
const gop = try wip_nav.dwarf.types.getOrPut(wip_nav.dwarf.gpa, ty.toIntern());
if (gop.found_existing) return .{ unit, gop.value_ptr.* };
const entry = try wip_nav.dwarf.addCommonEntry(unit);
@ -1864,10 +1906,8 @@ pub const WipNav = struct {
const ip = &zcu.intern_pool;
const ty = value.typeOf(zcu);
if (std.debug.runtime_safety) assert(ty.comptimeOnly(zcu) and try ty.onePossibleValue(wip_nav.pt) == null);
if (!value.isUndef(zcu)) {
if (ty.toIntern() == .type_type) return wip_nav.getTypeEntry(value.toType());
if (ip.isFunctionType(ty.toIntern())) return wip_nav.getNavEntry(zcu.funcInfo(value.toIntern()).owner_nav);
}
if (ty.toIntern() == .type_type) return wip_nav.getTypeEntry(value.toType());
if (ip.isFunctionType(ty.toIntern()) and !value.isUndef(zcu)) return wip_nav.getNavEntry(zcu.funcInfo(value.toIntern()).owner_nav);
const gop = try wip_nav.dwarf.values.getOrPut(wip_nav.dwarf.gpa, value.toIntern());
const unit: Unit.Index = .main;
if (gop.found_existing) return .{ unit, gop.value_ptr.* };
@ -1916,7 +1956,10 @@ pub const WipNav = struct {
&wip_nav.debug_info,
.{ .debug_output = .{ .dwarf = wip_nav } },
);
assert(old_len + bytes == wip_nav.debug_info.items.len);
if (old_len + bytes != wip_nav.debug_info.items.len) {
std.debug.print("{} [{}]: {} != {}\n", .{ ty.fmt(wip_nav.pt), ty.toIntern(), bytes, wip_nav.debug_info.items.len - old_len });
unreachable;
}
}
const AbbrevCodeForForm = struct {
@ -2788,6 +2831,7 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern());
if (type_gop.found_existing) {
if (dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(type_gop.value_ptr.*).len > 0) break :tag .decl_alias;
assert(!nav_gop.found_existing);
nav_gop.value_ptr.* = type_gop.value_ptr.*;
} else {
if (nav_gop.found_existing)
@ -2890,6 +2934,7 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern());
if (type_gop.found_existing) {
if (dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(type_gop.value_ptr.*).len > 0) break :tag .decl_alias;
assert(!nav_gop.found_existing);
nav_gop.value_ptr.* = type_gop.value_ptr.*;
} else {
if (nav_gop.found_existing)
@ -2928,6 +2973,7 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern());
if (type_gop.found_existing) {
if (dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(type_gop.value_ptr.*).len > 0) break :tag .decl_alias;
assert(!nav_gop.found_existing);
nav_gop.value_ptr.* = type_gop.value_ptr.*;
} else {
if (nav_gop.found_existing)
@ -2998,6 +3044,7 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern());
if (type_gop.found_existing) {
if (dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(type_gop.value_ptr.*).len > 0) break :tag .decl_alias;
assert(!nav_gop.found_existing);
nav_gop.value_ptr.* = type_gop.value_ptr.*;
} else {
if (nav_gop.found_existing)
@ -3164,6 +3211,7 @@ fn updateLazyType(
) UpdateError!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
assert(ip.typeOf(type_index) == .type_type);
const ty: Type = .fromInterned(type_index);
switch (type_index) {
.generic_poison_type => log.debug("updateLazyType({s})", .{"anytype"}),
@ -3200,6 +3248,10 @@ fn updateLazyType(
defer dwarf.gpa.free(name);
switch (ip.indexToKey(type_index)) {
.undef => {
try wip_nav.abbrevCode(.undefined_comptime_value);
try wip_nav.refType(.type);
},
.int_type => |int_type| {
try wip_nav.abbrevCode(.numeric_type);
try wip_nav.strp(name);
@ -3633,7 +3685,6 @@ fn updateLazyType(
},
// values, not types
.undef,
.simple_value,
.variable,
.@"extern",
@ -3666,7 +3717,11 @@ fn updateLazyValue(
) UpdateError!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
log.debug("updateLazyValue({})", .{Value.fromInterned(value_index).fmtValue(pt)});
assert(ip.typeOf(value_index) != .type_type);
log.debug("updateLazyValue(@as({}, {}))", .{
Value.fromInterned(value_index).typeOf(zcu).fmt(pt),
Value.fromInterned(value_index).fmtValue(pt),
});
var wip_nav: WipNav = .{
.dwarf = dwarf,
.pt = pt,
@ -3710,9 +3765,8 @@ fn updateLazyValue(
.inferred_error_set_type,
=> unreachable, // already handled
.undef => |ty| {
try wip_nav.abbrevCode(.aggregate_comptime_value);
try wip_nav.abbrevCode(.undefined_comptime_value);
try wip_nav.refType(.fromInterned(ty));
try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
.simple_value => unreachable, // opv state
.variable, .@"extern" => unreachable, // not a value
@ -4391,7 +4445,7 @@ fn refAbbrevCode(dwarf: *Dwarf, abbrev_code: AbbrevCode) UpdateError!@typeInfo(A
return @intFromEnum(abbrev_code);
}
pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
pub fn flush(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
@ -4890,8 +4944,22 @@ const AbbrevCode = enum {
block,
empty_inlined_func,
inlined_func,
local_arg,
arg,
unnamed_arg,
comptime_arg,
unnamed_comptime_arg,
comptime_arg_runtime_bits,
unnamed_comptime_arg_runtime_bits,
comptime_arg_comptime_state,
unnamed_comptime_arg_comptime_state,
comptime_arg_runtime_bits_comptime_state,
unnamed_comptime_arg_runtime_bits_comptime_state,
local_var,
local_const,
local_const_runtime_bits,
local_const_comptime_state,
local_const_runtime_bits_comptime_state,
undefined_comptime_value,
data2_comptime_value,
data4_comptime_value,
data8_comptime_value,
@ -5663,7 +5731,7 @@ const AbbrevCode = enum {
.{ .high_pc, .data4 },
},
},
.local_arg = .{
.arg = .{
.tag = .formal_parameter,
.attrs = &.{
.{ .name, .strp },
@ -5671,6 +5739,81 @@ const AbbrevCode = enum {
.{ .location, .exprloc },
},
},
.unnamed_arg = .{
.tag = .formal_parameter,
.attrs = &.{
.{ .type, .ref_addr },
.{ .location, .exprloc },
},
},
.comptime_arg = .{
.tag = .formal_parameter,
.attrs = &.{
.{ .const_expr, .flag_present },
.{ .name, .strp },
.{ .type, .ref_addr },
},
},
.unnamed_comptime_arg = .{
.tag = .formal_parameter,
.attrs = &.{
.{ .const_expr, .flag_present },
.{ .type, .ref_addr },
},
},
.comptime_arg_runtime_bits = .{
.tag = .formal_parameter,
.attrs = &.{
.{ .const_expr, .flag_present },
.{ .name, .strp },
.{ .type, .ref_addr },
.{ .const_value, .block },
},
},
.unnamed_comptime_arg_runtime_bits = .{
.tag = .formal_parameter,
.attrs = &.{
.{ .const_expr, .flag_present },
.{ .type, .ref_addr },
.{ .const_value, .block },
},
},
.comptime_arg_comptime_state = .{
.tag = .formal_parameter,
.attrs = &.{
.{ .const_expr, .flag_present },
.{ .name, .strp },
.{ .type, .ref_addr },
.{ .ZIG_comptime_value, .ref_addr },
},
},
.unnamed_comptime_arg_comptime_state = .{
.tag = .formal_parameter,
.attrs = &.{
.{ .const_expr, .flag_present },
.{ .type, .ref_addr },
.{ .ZIG_comptime_value, .ref_addr },
},
},
.comptime_arg_runtime_bits_comptime_state = .{
.tag = .formal_parameter,
.attrs = &.{
.{ .const_expr, .flag_present },
.{ .name, .strp },
.{ .type, .ref_addr },
.{ .const_value, .block },
.{ .ZIG_comptime_value, .ref_addr },
},
},
.unnamed_comptime_arg_runtime_bits_comptime_state = .{
.tag = .formal_parameter,
.attrs = &.{
.{ .const_expr, .flag_present },
.{ .type, .ref_addr },
.{ .const_value, .block },
.{ .ZIG_comptime_value, .ref_addr },
},
},
.local_var = .{
.tag = .variable,
.attrs = &.{
@ -5679,6 +5822,44 @@ const AbbrevCode = enum {
.{ .location, .exprloc },
},
},
.local_const = .{
.tag = .constant,
.attrs = &.{
.{ .name, .strp },
.{ .type, .ref_addr },
},
},
.local_const_runtime_bits = .{
.tag = .constant,
.attrs = &.{
.{ .name, .strp },
.{ .type, .ref_addr },
.{ .const_value, .block },
},
},
.local_const_comptime_state = .{
.tag = .constant,
.attrs = &.{
.{ .name, .strp },
.{ .type, .ref_addr },
.{ .ZIG_comptime_value, .ref_addr },
},
},
.local_const_runtime_bits_comptime_state = .{
.tag = .constant,
.attrs = &.{
.{ .name, .strp },
.{ .type, .ref_addr },
.{ .const_value, .block },
.{ .ZIG_comptime_value, .ref_addr },
},
},
.undefined_comptime_value = .{
.tag = .ZIG_comptime_value,
.attrs = &.{
.{ .type, .ref_addr },
},
},
.data2_comptime_value = .{
.tag = .ZIG_comptime_value,
.attrs = &.{

File diff suppressed because it is too large Load Diff

View File

@ -462,9 +462,6 @@ pub const Flags = packed struct {
/// Whether the symbol is a TLS variable.
is_tls: bool = false,
/// Whether the symbol is an extern pointer (as opposed to function).
is_extern_ptr: bool = false,
};
pub const Extra = struct {

View File

@ -310,7 +310,7 @@ pub fn flush(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void {
if (self.dwarf) |*dwarf| {
const pt: Zcu.PerThread = .activate(elf_file.base.comp.zcu.?, tid);
defer pt.deactivate();
try dwarf.flushModule(pt);
try dwarf.flush(pt);
const gpa = elf_file.base.comp.gpa;
const cpu_arch = elf_file.getTarget().cpu.arch;
@ -481,7 +481,7 @@ pub fn flush(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void {
self.debug_str_section_dirty = false;
}
// The point of flushModule() is to commit changes, so in theory, nothing should
// The point of flush() is to commit changes, so in theory, nothing should
// be dirty after this. However, it is possible for some things to remain
// dirty because they fail to be written in the event of compile errors,
// such as debug_line_header_dirty and debug_info_header_dirty.
@ -661,7 +661,7 @@ pub fn scanRelocs(self: *ZigObject, elf_file: *Elf, undefs: anytype) !void {
if (shdr.sh_type == elf.SHT_NOBITS) continue;
if (atom_ptr.scanRelocsRequiresCode(elf_file)) {
// TODO ideally we don't have to fetch the code here.
// Perhaps it would make sense to save the code until flushModule where we
// Perhaps it would make sense to save the code until flush where we
// would free all of generated code?
const code = try self.codeAlloc(elf_file, atom_index);
defer gpa.free(code);
@ -1075,7 +1075,7 @@ pub fn getOrCreateMetadataForLazySymbol(
}
state_ptr.* = .pending_flush;
const symbol_index = symbol_index_ptr.*;
// anyerror needs to be deferred until flushModule
// anyerror needs to be deferred until flush
if (lazy_sym.ty != .anyerror_type) try self.updateLazySymbol(elf_file, pt, lazy_sym, symbol_index);
return symbol_index;
}
@ -1142,7 +1142,6 @@ fn getNavShdrIndex(
const gpa = elf_file.base.comp.gpa;
const ptr_size = elf_file.ptrWidthBytes();
const ip = &zcu.intern_pool;
const any_non_single_threaded = elf_file.base.comp.config.any_non_single_threaded;
const nav_val = zcu.navValue(nav_index);
if (ip.isFunctionType(nav_val.typeOf(zcu).toIntern())) {
if (self.text_index) |symbol_index|
@ -1162,7 +1161,7 @@ fn getNavShdrIndex(
else => .{ true, false, nav_val.toIntern() },
};
const has_relocs = self.symbol(sym_index).atom(elf_file).?.relocs(elf_file).len > 0;
if (any_non_single_threaded and is_threadlocal) {
if (is_threadlocal and elf_file.base.comp.config.any_non_single_threaded) {
const is_bss = !has_relocs and for (code) |byte| {
if (byte != 0) break false;
} else true;
@ -1416,8 +1415,7 @@ pub fn updateFunc(
elf_file: *Elf,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
mir: *const codegen.AnyMir,
) link.File.UpdateNavError!void {
const tracy = trace(@src());
defer tracy.end();
@ -1438,13 +1436,12 @@ pub fn updateFunc(
var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, func.owner_nav, sym_index) else null;
defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();
try codegen.generateFunction(
try codegen.emitFunction(
&elf_file.base,
pt,
zcu.navSrcLoc(func.owner_nav),
func_index,
air,
liveness,
mir,
&code_buffer,
if (debug_wip_nav) |*dn| .{ .dwarf = dn } else .none,
);
@ -1544,11 +1541,7 @@ pub fn updateNav(
nav.name.toSlice(ip),
@"extern".lib_name.toSlice(ip),
);
if (!ip.isFunctionType(@"extern".ty)) {
const sym = self.symbol(sym_index);
sym.flags.is_extern_ptr = true;
if (@"extern".is_threadlocal) sym.flags.is_tls = true;
}
if (@"extern".is_threadlocal and elf_file.base.comp.config.any_non_single_threaded) self.symbol(sym_index).flags.is_tls = true;
if (self.dwarf) |*dwarf| dwarf: {
var debug_wip_nav = try dwarf.initWipNav(pt, nav_index, sym_index) orelse break :dwarf;
defer debug_wip_nav.deinit();
@ -2361,7 +2354,6 @@ const trace = @import("../../tracy.zig").trace;
const std = @import("std");
const Allocator = std.mem.Allocator;
const Air = @import("../../Air.zig");
const Archive = @import("Archive.zig");
const Atom = @import("Atom.zig");
const Dwarf = @import("../Dwarf.zig");

View File

@ -13,14 +13,12 @@ const Path = std.Build.Cache.Path;
const Zcu = @import("../Zcu.zig");
const InternPool = @import("../InternPool.zig");
const Compilation = @import("../Compilation.zig");
const codegen = @import("../codegen.zig");
const link = @import("../link.zig");
const trace = @import("../tracy.zig").trace;
const build_options = @import("build_options");
const Air = @import("../Air.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
base: link.File,
llvm_object: LlvmObject.Ptr,
pub fn createEmpty(
arena: Allocator,
@ -36,23 +34,20 @@ pub fn createEmpty(
assert(!use_lld); // Caught by Compilation.Config.resolve.
assert(target.os.tag == .zos); // Caught by Compilation.Config.resolve.
const llvm_object = try LlvmObject.create(arena, comp);
const goff = try arena.create(Goff);
goff.* = .{
.base = .{
.tag = .goff,
.comp = comp,
.emit = emit,
.zcu_object_sub_path = emit.sub_path,
.zcu_object_basename = emit.sub_path,
.gc_sections = options.gc_sections orelse false,
.print_gc_sections = options.print_gc_sections,
.stack_size = options.stack_size orelse 0,
.allow_shlib_undefined = options.allow_shlib_undefined orelse false,
.file = null,
.disable_lld_caching = options.disable_lld_caching,
.build_id = options.build_id,
},
.llvm_object = llvm_object,
};
return goff;
@ -70,27 +65,27 @@ pub fn open(
}
pub fn deinit(self: *Goff) void {
self.llvm_object.deinit();
_ = self;
}
pub fn updateFunc(
self: *Goff,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
mir: *const codegen.AnyMir,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .goff)
@panic("Attempted to compile for object format that was disabled by build configuration");
try self.llvm_object.updateFunc(pt, func_index, air, liveness);
_ = self;
_ = pt;
_ = func_index;
_ = mir;
unreachable; // we always use llvm
}
pub fn updateNav(self: *Goff, pt: Zcu.PerThread, nav: InternPool.Nav.Index) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .goff)
@panic("Attempted to compile for object format that was disabled by build configuration");
return self.llvm_object.updateNav(pt, nav);
_ = self;
_ = pt;
_ = nav;
unreachable; // we always use llvm
}
pub fn updateExports(
@ -99,21 +94,19 @@ pub fn updateExports(
exported: Zcu.Exported,
export_indices: []const Zcu.Export.Index,
) !void {
if (build_options.skip_non_native and builtin.object_format != .goff)
@panic("Attempted to compile for object format that was disabled by build configuration");
return self.llvm_object.updateExports(pt, exported, export_indices);
_ = self;
_ = pt;
_ = exported;
_ = export_indices;
unreachable; // we always use llvm
}
pub fn flush(self: *Goff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
return self.flushModule(arena, tid, prog_node);
}
pub fn flushModule(self: *Goff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
if (build_options.skip_non_native and builtin.object_format != .goff)
@panic("Attempted to compile for object format that was disabled by build configuration");
_ = self;
_ = arena;
_ = tid;
try self.base.emitLlvmObject(arena, self.llvm_object, prog_node);
_ = prog_node;
}

1757
src/link/Lld.zig Normal file

File diff suppressed because it is too large Load Diff

View File

@ -6,9 +6,6 @@ base: link.File,
rpath_list: []const []const u8,
/// If this is not null, an object file is created by LLVM and emitted to zcu_object_sub_path.
llvm_object: ?LlvmObject.Ptr = null,
/// Debug symbols bundle (or dSym).
d_sym: ?DebugSymbols = null,
@ -176,13 +173,6 @@ pub fn createEmpty(
const output_mode = comp.config.output_mode;
const link_mode = comp.config.link_mode;
// If using LLVM to generate the object file for the zig compilation unit,
// we need a place to put the object file so that it can be subsequently
// handled.
const zcu_object_sub_path = if (!use_llvm)
null
else
try std.fmt.allocPrint(arena, "{s}.o", .{emit.sub_path});
const allow_shlib_undefined = options.allow_shlib_undefined orelse false;
const self = try arena.create(MachO);
@ -191,13 +181,15 @@ pub fn createEmpty(
.tag = .macho,
.comp = comp,
.emit = emit,
.zcu_object_sub_path = zcu_object_sub_path,
.zcu_object_basename = if (use_llvm)
try std.fmt.allocPrint(arena, "{s}_zcu.o", .{fs.path.stem(emit.sub_path)})
else
null,
.gc_sections = options.gc_sections orelse (optimize_mode != .Debug),
.print_gc_sections = options.print_gc_sections,
.stack_size = options.stack_size orelse 16777216,
.allow_shlib_undefined = allow_shlib_undefined,
.file = null,
.disable_lld_caching = options.disable_lld_caching,
.build_id = options.build_id,
},
.rpath_list = options.rpath_list,
@ -225,15 +217,12 @@ pub fn createEmpty(
.force_load_objc = options.force_load_objc,
.discard_local_symbols = options.discard_local_symbols,
};
if (use_llvm and comp.config.have_zcu) {
self.llvm_object = try LlvmObject.create(arena, comp);
}
errdefer self.base.destroy();
self.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{
.truncate = true,
.read = true,
.mode = link.File.determineMode(false, output_mode, link_mode),
.mode = link.File.determineMode(output_mode, link_mode),
});
// Append null file
@ -280,8 +269,6 @@ pub fn open(
pub fn deinit(self: *MachO) void {
const gpa = self.base.comp.gpa;
if (self.llvm_object) |llvm_object| llvm_object.deinit();
if (self.d_sym) |*d_sym| {
d_sym.deinit();
}
@ -349,15 +336,6 @@ pub fn flush(
arena: Allocator,
tid: Zcu.PerThread.Id,
prog_node: std.Progress.Node,
) link.File.FlushError!void {
try self.flushModule(arena, tid, prog_node);
}
pub fn flushModule(
self: *MachO,
arena: Allocator,
tid: Zcu.PerThread.Id,
prog_node: std.Progress.Node,
) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();
@ -366,28 +344,19 @@ pub fn flushModule(
const gpa = comp.gpa;
const diags = &self.base.comp.link_diags;
if (self.llvm_object) |llvm_object| {
try self.base.emitLlvmObject(arena, llvm_object, prog_node);
}
const sub_prog_node = prog_node.start("MachO Flush", 0);
defer sub_prog_node.end();
const directory = self.base.emit.root_dir;
const module_obj_path: ?Path = if (self.base.zcu_object_sub_path) |path| .{
.root_dir = directory,
.sub_path = if (fs.path.dirname(self.base.emit.sub_path)) |dirname|
try fs.path.join(arena, &.{ dirname, path })
else
path,
const zcu_obj_path: ?Path = if (self.base.zcu_object_basename) |raw| p: {
break :p try comp.resolveEmitPathFlush(arena, .temp, raw);
} else null;
// --verbose-link
if (comp.verbose_link) try self.dumpArgv(comp);
if (self.getZigObject()) |zo| try zo.flushModule(self, tid);
if (self.base.isStaticLib()) return relocatable.flushStaticLib(self, comp, module_obj_path);
if (self.base.isObject()) return relocatable.flushObject(self, comp, module_obj_path);
if (self.getZigObject()) |zo| try zo.flush(self, tid);
if (self.base.isStaticLib()) return relocatable.flushStaticLib(self, comp, zcu_obj_path);
if (self.base.isObject()) return relocatable.flushObject(self, comp, zcu_obj_path);
var positionals = std.ArrayList(link.Input).init(gpa);
defer positionals.deinit();
@ -409,7 +378,7 @@ pub fn flushModule(
positionals.appendAssumeCapacity(try link.openObjectInput(diags, key.status.success.object_path));
}
if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
if (zcu_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
if (comp.config.any_sanitize_thread) {
try positionals.append(try link.openObjectInput(diags, comp.tsan_lib.?.full_object_path));
@ -629,7 +598,7 @@ pub fn flushModule(
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("failed to calculate and write uuid: {s}", .{@errorName(e)}),
};
if (self.getDebugSymbols()) |dsym| dsym.flushModule(self) catch |err| switch (err) {
if (self.getDebugSymbols()) |dsym| dsym.flush(self) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return diags.fail("failed to get debug symbols: {s}", .{@errorName(e)}),
};
@ -658,12 +627,9 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
const directory = self.base.emit.root_dir;
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.emit.sub_path});
const module_obj_path: ?[]const u8 = if (self.base.zcu_object_sub_path) |path| blk: {
if (fs.path.dirname(full_out_path)) |dirname| {
break :blk try fs.path.join(arena, &.{ dirname, path });
} else {
break :blk path;
}
const zcu_obj_path: ?[]const u8 = if (self.base.zcu_object_basename) |raw| p: {
const p = try comp.resolveEmitPathFlush(arena, .temp, raw);
break :p try p.toString(arena);
} else null;
var argv = std.ArrayList([]const u8).init(arena);
@ -692,7 +658,7 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
try argv.append(try key.status.success.object_path.toString(arena));
}
if (module_obj_path) |p| {
if (zcu_obj_path) |p| {
try argv.append(p);
}
} else {
@ -784,7 +750,7 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
try argv.append(try key.status.success.object_path.toString(arena));
}
if (module_obj_path) |p| {
if (zcu_obj_path) |p| {
try argv.append(p);
}
@ -3073,26 +3039,22 @@ pub fn updateFunc(
self: *MachO,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
mir: *const codegen.AnyMir,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(pt, func_index, air, liveness);
return self.getZigObject().?.updateFunc(self, pt, func_index, air, liveness);
return self.getZigObject().?.updateFunc(self, pt, func_index, mir);
}
pub fn updateNav(self: *MachO, pt: Zcu.PerThread, nav: InternPool.Nav.Index) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (self.llvm_object) |llvm_object| return llvm_object.updateNav(pt, nav);
return self.getZigObject().?.updateNav(self, pt, nav);
}
pub fn updateLineNumber(self: *MachO, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void {
if (self.llvm_object) |_| return;
return self.getZigObject().?.updateLineNumber(pt, ti_id);
}
@ -3105,7 +3067,6 @@ pub fn updateExports(
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (self.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, export_indices);
return self.getZigObject().?.updateExports(self, pt, exported, export_indices);
}
@ -3114,17 +3075,14 @@ pub fn deleteExport(
exported: Zcu.Exported,
name: InternPool.NullTerminatedString,
) void {
if (self.llvm_object) |_| return;
return self.getZigObject().?.deleteExport(self, exported, name);
}
pub fn freeNav(self: *MachO, nav: InternPool.Nav.Index) void {
if (self.llvm_object) |llvm_object| return llvm_object.freeNav(nav);
return self.getZigObject().?.freeNav(nav);
}
pub fn getNavVAddr(self: *MachO, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index, reloc_info: link.File.RelocInfo) !u64 {
assert(self.llvm_object == null);
return self.getZigObject().?.getNavVAddr(self, pt, nav_index, reloc_info);
}
@ -3139,7 +3097,6 @@ pub fn lowerUav(
}
pub fn getUavVAddr(self: *MachO, uav: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 {
assert(self.llvm_object == null);
return self.getZigObject().?.getUavVAddr(self, uav, reloc_info);
}
@ -5473,7 +5430,6 @@ const target_util = @import("../target.zig");
const trace = @import("../tracy.zig").trace;
const synthetic = @import("MachO/synthetic.zig");
const Air = @import("../Air.zig");
const Alignment = Atom.Alignment;
const Allocator = mem.Allocator;
const Archive = @import("MachO/Archive.zig");
@ -5496,7 +5452,6 @@ const ObjcStubsSection = synthetic.ObjcStubsSection;
const Object = @import("MachO/Object.zig");
const LazyBind = bind.LazyBind;
const LaSymbolPtrSection = synthetic.LaSymbolPtrSection;
const LlvmObject = @import("../codegen/llvm.zig").Object;
const Md5 = std.crypto.hash.Md5;
const Zcu = @import("../Zcu.zig");
const InternPool = @import("../InternPool.zig");

View File

@ -178,7 +178,7 @@ fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) !u64
return offset;
}
pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
pub fn flush(self: *DebugSymbols, macho_file: *MachO) !void {
const zo = macho_file.getZigObject().?;
for (self.relocs.items) |*reloc| {
const sym = zo.symbols.items[reloc.target];

View File

@ -389,9 +389,6 @@ pub const Flags = packed struct {
/// ZigObject specific flags
/// Whether the symbol has a trampoline
trampoline: bool = false,
/// Whether the symbol is an extern pointer (as opposed to function).
is_extern_ptr: bool = false,
};
pub const SectionFlags = packed struct(u8) {

View File

@ -550,7 +550,7 @@ pub fn getInputSection(self: ZigObject, atom: Atom, macho_file: *MachO) macho.se
return sect;
}
pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) link.File.FlushError!void {
pub fn flush(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) link.File.FlushError!void {
const diags = &macho_file.base.comp.link_diags;
// Handle any lazy symbols that were emitted by incremental compilation.
@ -589,7 +589,7 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id)
if (self.dwarf) |*dwarf| {
const pt: Zcu.PerThread = .activate(macho_file.base.comp.zcu.?, tid);
defer pt.deactivate();
dwarf.flushModule(pt) catch |err| switch (err) {
dwarf.flush(pt) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return diags.fail("failed to flush dwarf module: {s}", .{@errorName(e)}),
};
@ -599,7 +599,7 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id)
self.debug_strtab_dirty = false;
}
// The point of flushModule() is to commit changes, so in theory, nothing should
// The point of flush() is to commit changes, so in theory, nothing should
// be dirty after this. However, it is possible for some things to remain
// dirty because they fail to be written in the event of compile errors,
// such as debug_line_header_dirty and debug_info_header_dirty.
@ -777,8 +777,7 @@ pub fn updateFunc(
macho_file: *MachO,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
mir: *const codegen.AnyMir,
) link.File.UpdateNavError!void {
const tracy = trace(@src());
defer tracy.end();
@ -796,13 +795,12 @@ pub fn updateFunc(
var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, func.owner_nav, sym_index) else null;
defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();
try codegen.generateFunction(
try codegen.emitFunction(
&macho_file.base,
pt,
zcu.navSrcLoc(func.owner_nav),
func_index,
air,
liveness,
mir,
&code_buffer,
if (debug_wip_nav) |*wip_nav| .{ .dwarf = wip_nav } else .none,
);
@ -883,11 +881,7 @@ pub fn updateNav(
const name = @"extern".name.toSlice(ip);
const lib_name = @"extern".lib_name.toSlice(ip);
const sym_index = try self.getGlobalSymbol(macho_file, name, lib_name);
if (!ip.isFunctionType(@"extern".ty)) {
const sym = &self.symbols.items[sym_index];
sym.flags.is_extern_ptr = true;
if (@"extern".is_threadlocal) sym.flags.tlv = true;
}
if (@"extern".is_threadlocal and macho_file.base.comp.config.any_non_single_threaded) self.symbols.items[sym_index].flags.tlv = true;
if (self.dwarf) |*dwarf| dwarf: {
var debug_wip_nav = try dwarf.initWipNav(pt, nav_index, sym_index) orelse break :dwarf;
defer debug_wip_nav.deinit();
@ -1160,7 +1154,6 @@ fn getNavOutputSection(
) error{OutOfMemory}!u8 {
_ = self;
const ip = &zcu.intern_pool;
const any_non_single_threaded = macho_file.base.comp.config.any_non_single_threaded;
const nav_val = zcu.navValue(nav_index);
if (ip.isFunctionType(nav_val.typeOf(zcu).toIntern())) return macho_file.zig_text_sect_index.?;
const is_const, const is_threadlocal, const nav_init = switch (ip.indexToKey(nav_val.toIntern())) {
@ -1168,7 +1161,7 @@ fn getNavOutputSection(
.@"extern" => |@"extern"| .{ @"extern".is_const, @"extern".is_threadlocal, .none },
else => .{ true, false, nav_val.toIntern() },
};
if (any_non_single_threaded and is_threadlocal) {
if (is_threadlocal and macho_file.base.comp.config.any_non_single_threaded) {
for (code) |byte| {
if (byte != 0) break;
} else return macho_file.getSectionByName("__DATA", "__thread_bss") orelse try macho_file.addSection(
@ -1537,7 +1530,7 @@ pub fn getOrCreateMetadataForLazySymbol(
}
state_ptr.* = .pending_flush;
const symbol_index = symbol_index_ptr.*;
// anyerror needs to be deferred until flushModule
// anyerror needs to be deferred until flush
if (lazy_sym.ty != .anyerror_type) try self.updateLazySymbol(macho_file, pt, lazy_sym, symbol_index);
return symbol_index;
}
@ -1813,7 +1806,6 @@ const target_util = @import("../../target.zig");
const trace = @import("../../tracy.zig").trace;
const std = @import("std");
const Air = @import("../../Air.zig");
const Allocator = std.mem.Allocator;
const Archive = @import("Archive.zig");
const Atom = @import("Atom.zig");

View File

@ -301,7 +301,6 @@ pub fn createEmpty(
.stack_size = options.stack_size orelse 16777216,
.allow_shlib_undefined = options.allow_shlib_undefined orelse false,
.file = null,
.disable_lld_caching = options.disable_lld_caching,
.build_id = options.build_id,
},
.sixtyfour_bit = sixtyfour_bit,
@ -387,8 +386,7 @@ pub fn updateFunc(
self: *Plan9,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
mir: *const codegen.AnyMir,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .plan9) {
@panic("Attempted to compile for object format that was disabled by build configuration");
@ -413,13 +411,12 @@ pub fn updateFunc(
};
defer dbg_info_output.dbg_line.deinit();
try codegen.generateFunction(
try codegen.emitFunction(
&self.base,
pt,
zcu.navSrcLoc(func.owner_nav),
func_index,
air,
liveness,
mir,
&code_buffer,
.{ .plan9 = &dbg_info_output },
);
@ -494,7 +491,7 @@ fn updateFinish(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index
// write the symbol
// we already have the got index
const sym: aout.Sym = .{
.value = undefined, // the value of stuff gets filled in in flushModule
.value = undefined, // the value of stuff gets filled in in flush
.type = atom.type,
.name = try gpa.dupe(u8, nav.name.toSlice(ip)),
};
@ -527,25 +524,6 @@ fn allocateGotIndex(self: *Plan9) usize {
}
}
pub fn flush(
self: *Plan9,
arena: Allocator,
tid: Zcu.PerThread.Id,
prog_node: std.Progress.Node,
) link.File.FlushError!void {
const comp = self.base.comp;
const diags = &comp.link_diags;
const use_lld = build_options.have_llvm and comp.config.use_lld;
assert(!use_lld);
switch (link.File.effectiveOutputMode(use_lld, comp.config.output_mode)) {
.Exe => {},
.Obj => return diags.fail("writing plan9 object files unimplemented", .{}),
.Lib => return diags.fail("writing plan9 lib files unimplemented", .{}),
}
return self.flushModule(arena, tid, prog_node);
}
pub fn changeLine(l: *std.ArrayList(u8), delta_line: i32) !void {
if (delta_line > 0 and delta_line < 65) {
const toappend = @as(u8, @intCast(delta_line));
@ -586,7 +564,7 @@ fn atomCount(self: *Plan9) usize {
return data_nav_count + fn_nav_count + lazy_atom_count + extern_atom_count + uav_atom_count;
}
pub fn flushModule(
pub fn flush(
self: *Plan9,
arena: Allocator,
/// TODO: stop using this
@ -607,10 +585,16 @@ pub fn flushModule(
const gpa = comp.gpa;
const target = comp.root_mod.resolved_target.result;
switch (comp.config.output_mode) {
.Exe => {},
.Obj => return diags.fail("writing plan9 object files unimplemented", .{}),
.Lib => return diags.fail("writing plan9 lib files unimplemented", .{}),
}
const sub_prog_node = prog_node.start("Flush Module", 0);
defer sub_prog_node.end();
log.debug("flushModule", .{});
log.debug("flush", .{});
defer assert(self.hdr.entry != 0x0);
@ -1039,7 +1023,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *Plan9, pt: Zcu.PerThread, lazy_sym: F
const atom = atom_ptr.*;
_ = try self.getAtomPtr(atom).getOrCreateSymbolTableEntry(self);
_ = self.getAtomPtr(atom).getOrCreateOffsetTableEntry(self);
// anyerror needs to be deferred until flushModule
// anyerror needs to be deferred until flush
if (lazy_sym.ty != .anyerror_type) try self.updateLazySymbolAtom(pt, lazy_sym, atom);
return atom;
}
@ -1182,11 +1166,7 @@ pub fn open(
const file = try emit.root_dir.handle.createFile(emit.sub_path, .{
.read = true,
.mode = link.File.determineMode(
use_lld,
comp.config.output_mode,
comp.config.link_mode,
),
.mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode),
});
errdefer file.close();
self.base.file = file;

279
src/link/Queue.zig Normal file
View File

@ -0,0 +1,279 @@
//! Stores and manages the queue of link tasks. Each task is either a `PrelinkTask` or a `ZcuTask`.
//!
//! There must be at most one link thread (the thread processing these tasks) active at a time. If
//! `!comp.separateCodegenThreadOk()`, then ZCU tasks will be run on the main thread, bypassing this
//! queue entirely.
//!
//! All prelink tasks must be processed before any ZCU tasks are processed. After all prelink tasks
//! are run, but before any ZCU tasks are run, `prelink` must be called on the `link.File`.
//!
//! There will sometimes be a `ZcuTask` in the queue which is not yet ready because it depends on
//! MIR which has not yet been generated by any codegen thread. In this case, we must pause
//! processing of linker tasks until the MIR is ready. It would be incorrect to run any other link
//! tasks first, since this would make builds unreproducible.
/// Serializes access to every field below that is documented as "guarded by `mutex`".
mutex: std.Thread.Mutex,
/// Validates that only one `flushTaskQueue` thread is running at a time.
flush_safety: std.debug.SafetyLock,
/// This is the number of prelink tasks which are expected but have not yet been enqueued.
/// Guarded by `mutex`.
pending_prelink_tasks: u32,
/// Prelink tasks which have been enqueued and are not yet owned by the worker thread.
/// Allocated into `gpa`, guarded by `mutex`.
queued_prelink: std.ArrayListUnmanaged(PrelinkTask),
/// The worker thread moves items from `queued_prelink` into this array in order to process them.
/// Allocated into `gpa`, accessed only by the worker thread.
wip_prelink: std.ArrayListUnmanaged(PrelinkTask),
/// Like `queued_prelink`, but for ZCU tasks.
/// Allocated into `gpa`, guarded by `mutex`.
queued_zcu: std.ArrayListUnmanaged(ZcuTask),
/// Like `wip_prelink`, but for ZCU tasks.
/// Allocated into `gpa`, accessed only by the worker thread.
wip_zcu: std.ArrayListUnmanaged(ZcuTask),
/// When processing ZCU link tasks, we might have to block due to unpopulated MIR. When this
/// happens, some tasks in `wip_zcu` have been run, and some are still pending. This is the
/// index into `wip_zcu` which we have reached.
/// Accessed only by the worker thread.
wip_zcu_idx: usize,
/// The sum of all `air_bytes` for all currently-queued `ZcuTask.link_func` tasks. Because
/// MIR bytes are approximately proportional to AIR bytes, this acts to limit the amount of
/// AIR and MIR which is queued for codegen and link respectively, to prevent excessive
/// memory usage if analysis produces AIR faster than it can be processed by codegen/link.
/// The cap is `max_air_bytes_in_flight`.
/// Guarded by `mutex`.
air_bytes_in_flight: u32,
/// If nonzero, then a call to `enqueueZcu` is blocked waiting to add a `link_func` task, but
/// cannot until `air_bytes_in_flight` is no greater than this value.
/// Guarded by `mutex`.
air_bytes_waiting: u32,
/// After setting `air_bytes_waiting`, `enqueueZcu` will wait on this condition (with `mutex`).
/// When `air_bytes_waiting` many bytes can be queued, this condition should be signaled.
air_bytes_cond: std.Thread.Condition,
/// Tracks whether the link worker thread is running, parked, or blocked on MIR.
/// Guarded by `mutex`.
state: union(enum) {
    /// The link thread is currently running or queued to run.
    running,
    /// The link thread is not running or queued, because it has exhausted all immediately available
    /// tasks. It should be spawned when more tasks are enqueued. If `pending_prelink_tasks` is not
    /// zero, we are specifically waiting for prelink tasks.
    finished,
    /// The link thread is not running or queued, because it is waiting for this MIR to be populated.
    /// Once codegen completes, it must call `mirReady` which will restart the link thread.
    wait_for_mir: *ZcuTask.LinkFunc.SharedMir,
},
/// In the worst observed case, MIR is around 50 times as large as AIR. More typically, the ratio is
/// around 20. Going by that 50x multiplier, and assuming we want to consume no more than 500 MiB of
/// memory on AIR/MIR, we see a limit of around 10 MiB of AIR in-flight.
const max_air_bytes_in_flight = 10 * 1024 * 1024;
/// The initial `Queue` state, containing no tasks, expecting no prelink tasks, and with no running worker thread.
/// The `pending_prelink_tasks` and `queued_prelink` fields may be modified as needed before calling `start`.
pub const empty: Queue = .{
    .mutex = .{},
    .flush_safety = .{},
    .pending_prelink_tasks = 0,
    .queued_prelink = .empty,
    .wip_prelink = .empty,
    .queued_zcu = .empty,
    .wip_zcu = .empty,
    .wip_zcu_idx = 0,
    // No worker thread exists yet; `start` transitions to `.running` if there is work.
    .state = .finished,
    .air_bytes_in_flight = 0,
    .air_bytes_waiting = 0,
    .air_bytes_cond = .{},
};
/// Frees all memory owned by the queue, including any tasks which were enqueued but never run.
/// `comp` is needed to correctly deinit any pending `ZcuTask`s (their `deinit` requires the ZCU);
/// if any such task is pending, `comp.zcu` must be non-null.
pub fn deinit(q: *Queue, comp: *Compilation) void {
    const gpa = comp.gpa;
    // Pending ZCU tasks own per-task resources released via `ZcuTask.deinit`. Entries in
    // `wip_zcu` before `wip_zcu_idx` were already run and deinited by `flushTaskQueue`.
    for (q.queued_zcu.items) |t| t.deinit(comp.zcu.?);
    for (q.wip_zcu.items[q.wip_zcu_idx..]) |t| t.deinit(comp.zcu.?);
    q.queued_prelink.deinit(gpa);
    q.wip_prelink.deinit(gpa);
    q.queued_zcu.deinit(gpa);
    q.wip_zcu.deinit(gpa);
}
/// This is expected to be called exactly once, after which the caller must not directly access
/// `queued_prelink` or `pending_prelink_tasks` any longer. Spawns the link worker thread if
/// there is already work queued; otherwise leaves the queue parked in `.finished`.
pub fn start(q: *Queue, comp: *Compilation) void {
    assert(q.state == .finished);
    assert(q.queued_zcu.items.len == 0);
    // Nothing queued yet: a later enqueue will spawn the worker instead.
    if (q.queued_prelink.items.len == 0) return;
    q.state = .running;
    comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
}
/// Called by codegen workers after they have populated a `ZcuTask.LinkFunc.SharedMir`. If the link
/// thread was waiting for this MIR, it can resume.
pub fn mirReady(q: *Queue, comp: *Compilation, mir: *ZcuTask.LinkFunc.SharedMir) void {
    // We would like to assert that `mir` is not pending, but that would race with a worker thread
    // potentially freeing it.
    {
        q.mutex.lock();
        defer q.mutex.unlock();
        switch (q.state) {
            // Either the link thread is not blocked, or it is blocked on a different MIR.
            .finished, .running => return,
            .wait_for_mir => |wait_for| if (wait_for != mir) return,
        }
        // We were waiting for `mir`, so we will restart the linker thread.
        q.state = .running;
    }
    // This assert does not race: the link thread (which frees tasks) was parked on `mir`
    // until we flipped `state` above, so `mir` cannot have been freed yet.
    assert(mir.status.load(.monotonic) != .pending);
    comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
}
/// Enqueues all prelink tasks in `tasks`. Asserts that they were expected, i.e. that `tasks.len` is
/// less than or equal to `q.pending_prelink_tasks`. Also asserts that `tasks.len` is not 0.
/// Spawns the link worker thread if it was parked waiting for tasks.
pub fn enqueuePrelink(q: *Queue, comp: *Compilation, tasks: []const PrelinkTask) Allocator.Error!void {
    // The doc comment promises this assertion; previously it was not enforced at all.
    assert(tasks.len != 0);
    {
        q.mutex.lock();
        defer q.mutex.unlock();
        // Also promised by the doc comment; previously enforced only implicitly via the
        // checked integer underflow of the subtraction below (and not at all in ReleaseFast).
        assert(tasks.len <= q.pending_prelink_tasks);
        try q.queued_prelink.appendSlice(comp.gpa, tasks);
        q.pending_prelink_tasks -= @intCast(tasks.len);
        switch (q.state) {
            .wait_for_mir => unreachable, // we've not started zcu tasks yet
            .running => return,
            .finished => {},
        }
        // Restart the linker thread, because it was waiting for a task
        q.state = .running;
    }
    comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
}
/// Enqueues one ZCU task, spawning the link worker thread if it is parked and can make progress.
/// For `link_func` tasks, may block the calling thread until `air_bytes_in_flight` drops enough
/// for this task's `air_bytes` to fit under `max_air_bytes_in_flight` (backpressure on codegen).
pub fn enqueueZcu(q: *Queue, comp: *Compilation, task: ZcuTask) Allocator.Error!void {
    assert(comp.separateCodegenThreadOk());
    {
        q.mutex.lock();
        defer q.mutex.unlock();
        // If this is a `link_func` task, we might need to wait for `air_bytes_in_flight` to fall.
        if (task == .link_func) {
            // `-|` saturates at 0, so a single task larger than the whole cap is still
            // admitted once nothing else is in flight, rather than deadlocking.
            const max_in_flight = max_air_bytes_in_flight -| task.link_func.air_bytes;
            while (q.air_bytes_in_flight > max_in_flight) {
                // Publish how many bytes we need; the link thread signals when they fit.
                q.air_bytes_waiting = task.link_func.air_bytes;
                q.air_bytes_cond.wait(&q.mutex);
                q.air_bytes_waiting = 0;
            }
            q.air_bytes_in_flight += task.link_func.air_bytes;
        }
        try q.queued_zcu.append(comp.gpa, task);
        switch (q.state) {
            .running, .wait_for_mir => return,
            // Still expecting prelink tasks: the worker must not start ZCU work yet.
            .finished => if (q.pending_prelink_tasks != 0) return,
        }
        // Restart the linker thread, unless it would immediately be blocked
        if (task == .link_func and task.link_func.mir.status.load(.monotonic) == .pending) {
            q.state = .{ .wait_for_mir = task.link_func.mir };
            return;
        }
        q.state = .running;
    }
    comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
}
/// Link worker thread entry point. Drains all prelink tasks, runs `prelink` on the linker file
/// once, then drains ZCU tasks until either the queue is exhausted or a task's MIR is still
/// pending. Every return path updates `q.state` under `q.mutex` and unlocks `q.flush_safety`.
fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
    q.flush_safety.lock(); // every `return` site should unlock this before unlocking `q.mutex`
    if (std.debug.runtime_safety) {
        q.mutex.lock();
        defer q.mutex.unlock();
        assert(q.state == .running);
    }
    prelink: while (true) {
        assert(q.wip_prelink.items.len == 0);
        {
            q.mutex.lock();
            defer q.mutex.unlock();
            // Take ownership of the whole queued batch in O(1) by swapping the lists.
            std.mem.swap(std.ArrayListUnmanaged(PrelinkTask), &q.queued_prelink, &q.wip_prelink);
            if (q.wip_prelink.items.len == 0) {
                if (q.pending_prelink_tasks == 0) {
                    break :prelink; // prelink is done
                } else {
                    // We're expecting more prelink tasks so can't move on to ZCU tasks.
                    q.state = .finished;
                    q.flush_safety.unlock();
                    return;
                }
            }
        }
        // Run the batch without holding `mutex`, so enqueues can proceed concurrently.
        for (q.wip_prelink.items) |task| {
            link.doPrelinkTask(comp, task);
        }
        q.wip_prelink.clearRetainingCapacity();
    }
    // We've finished the prelink tasks, so run prelink if necessary.
    if (comp.bin_file) |lf| {
        if (!lf.post_prelink) {
            if (lf.prelink()) |_| {
                lf.post_prelink = true;
            } else |err| switch (err) {
                error.OutOfMemory => comp.link_diags.setAllocFailure(),
                // NOTE(review): presumably already reported via `comp.link_diags` — confirm.
                error.LinkFailure => {},
            }
        }
    }
    // Now we can run ZCU tasks.
    while (true) {
        if (q.wip_zcu.items.len == q.wip_zcu_idx) {
            // Current batch exhausted (or first iteration); try to grab another batch.
            q.wip_zcu.clearRetainingCapacity();
            q.wip_zcu_idx = 0;
            q.mutex.lock();
            defer q.mutex.unlock();
            std.mem.swap(std.ArrayListUnmanaged(ZcuTask), &q.queued_zcu, &q.wip_zcu);
            if (q.wip_zcu.items.len == 0) {
                // We've exhausted all available tasks.
                q.state = .finished;
                q.flush_safety.unlock();
                return;
            }
        }
        const task = q.wip_zcu.items[q.wip_zcu_idx];
        // If the task is a `link_func`, we might have to stop until its MIR is populated.
        pending: {
            if (task != .link_func) break :pending;
            const status_ptr = &task.link_func.mir.status;
            // First check without the mutex to optimize for the common case where MIR is ready.
            if (status_ptr.load(.monotonic) != .pending) break :pending;
            q.mutex.lock();
            defer q.mutex.unlock();
            // Re-check under the mutex so we cannot miss a concurrent `mirReady`.
            if (status_ptr.load(.monotonic) != .pending) break :pending;
            // We will stop for now, and get restarted once this MIR is ready.
            q.state = .{ .wait_for_mir = task.link_func.mir };
            q.flush_safety.unlock();
            return;
        }
        link.doZcuTask(comp, tid, task);
        task.deinit(comp.zcu.?);
        if (task == .link_func) {
            // Decrease `air_bytes_in_flight`, since we've finished processing this MIR.
            q.mutex.lock();
            defer q.mutex.unlock();
            q.air_bytes_in_flight -= task.link_func.air_bytes;
            // Wake a blocked `enqueueZcu` if its task now fits under the cap.
            if (q.air_bytes_waiting != 0 and
                q.air_bytes_in_flight <= max_air_bytes_in_flight -| q.air_bytes_waiting)
            {
                q.air_bytes_cond.signal();
            }
        }
        q.wip_zcu_idx += 1;
    }
}
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Compilation = @import("../Compilation.zig");
const link = @import("../link.zig");
const PrelinkTask = link.PrelinkTask;
const ZcuTask = link.ZcuTask;
const Queue = @This();

View File

@ -17,7 +17,7 @@
//! All regular functions.
// Because SPIR-V requires re-compilation anyway, and so hot swapping will not work
// anyway, we simply generate all the code in flushModule. This keeps
// anyway, we simply generate all the code in flush. This keeps
// things considerably simpler.
const SpirV = @This();
@ -83,7 +83,6 @@ pub fn createEmpty(
.stack_size = options.stack_size orelse 0,
.allow_shlib_undefined = options.allow_shlib_undefined orelse false,
.file = null,
.disable_lld_caching = options.disable_lld_caching,
.build_id = options.build_id,
},
.object = codegen.Object.init(gpa, comp.getTarget()),
@ -112,24 +111,6 @@ pub fn deinit(self: *SpirV) void {
self.object.deinit();
}
pub fn updateFunc(
self: *SpirV,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
const ip = &pt.zcu.intern_pool;
const func = pt.zcu.funcInfo(func_index);
log.debug("lowering function {}", .{ip.getNav(func.owner_nav).name.fmt(ip)});
try self.object.updateFunc(pt, func_index, air, liveness);
}
pub fn updateNav(self: *SpirV, pt: Zcu.PerThread, nav: InternPool.Nav.Index) link.File.UpdateNavError!void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
@ -193,18 +174,14 @@ pub fn updateExports(
// TODO: Export regular functions, variables, etc using Linkage attributes.
}
pub fn flush(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
return self.flushModule(arena, tid, prog_node);
}
pub fn flushModule(
pub fn flush(
self: *SpirV,
arena: Allocator,
tid: Zcu.PerThread.Id,
prog_node: std.Progress.Node,
) link.File.FlushError!void {
// The goal is to never use this because it's only needed if we need to
// write to InternPool, but flushModule is too late to be writing to the
// write to InternPool, but flush is too late to be writing to the
// InternPool.
_ = tid;

View File

@ -29,19 +29,16 @@ const leb = std.leb;
const log = std.log.scoped(.link);
const mem = std.mem;
const Air = @import("../Air.zig");
const Mir = @import("../arch/wasm/Mir.zig");
const CodeGen = @import("../arch/wasm/CodeGen.zig");
const abi = @import("../arch/wasm/abi.zig");
const Compilation = @import("../Compilation.zig");
const Dwarf = @import("Dwarf.zig");
const InternPool = @import("../InternPool.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const Zcu = @import("../Zcu.zig");
const codegen = @import("../codegen.zig");
const dev = @import("../dev.zig");
const link = @import("../link.zig");
const lldMain = @import("../main.zig").lldMain;
const trace = @import("../tracy.zig").trace;
const wasi_libc = @import("../libs/wasi_libc.zig");
const Value = @import("../Value.zig");
@ -75,14 +72,10 @@ global_base: ?u64,
initial_memory: ?u64,
/// When defined, sets the maximum memory size of the memory.
max_memory: ?u64,
/// When true, will import the function table from the host environment.
import_table: bool,
/// When true, will export the function table to the host environment.
export_table: bool,
/// Output name of the file
name: []const u8,
/// If this is not null, an object file is created by LLVM and linked with LLD afterwards.
llvm_object: ?LlvmObject.Ptr = null,
/// List of relocatable files to be linked into the final binary.
objects: std.ArrayListUnmanaged(Object) = .{},
@ -288,7 +281,7 @@ mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
/// Corresponds to `mir_instructions`.
mir_extra: std.ArrayListUnmanaged(u32) = .empty,
/// All local types for all Zcu functions.
all_zcu_locals: std.ArrayListUnmanaged(std.wasm.Valtype) = .empty,
mir_locals: std.ArrayListUnmanaged(std.wasm.Valtype) = .empty,
params_scratch: std.ArrayListUnmanaged(std.wasm.Valtype) = .empty,
returns_scratch: std.ArrayListUnmanaged(std.wasm.Valtype) = .empty,
@ -872,9 +865,24 @@ const ZcuDataStarts = struct {
};
pub const ZcuFunc = union {
function: CodeGen.Function,
function: Function,
tag_name: TagName,
pub const Function = extern struct {
/// Index into `Wasm.mir_instructions`.
instructions_off: u32,
/// This is unused except for as a safety slice bound and could be removed.
instructions_len: u32,
/// Index into `Wasm.mir_extra`.
extra_off: u32,
/// This is unused except for as a safety slice bound and could be removed.
extra_len: u32,
/// Index into `Wasm.mir_locals`.
locals_off: u32,
locals_len: u32,
prologue: Mir.Prologue,
};
pub const TagName = extern struct {
symbol_name: String,
type_index: FunctionType.Index,
@ -2938,28 +2946,20 @@ pub fn createEmpty(
const target = comp.root_mod.resolved_target.result;
assert(target.ofmt == .wasm);
const use_lld = build_options.have_llvm and comp.config.use_lld;
const use_llvm = comp.config.use_llvm;
const output_mode = comp.config.output_mode;
const wasi_exec_model = comp.config.wasi_exec_model;
// If using LLD to link, this code should produce an object file so that it
// can be passed to LLD.
// If using LLVM to generate the object file for the zig compilation unit,
// we need a place to put the object file so that it can be subsequently
// handled.
const zcu_object_sub_path = if (!use_lld and !use_llvm)
null
else
try std.fmt.allocPrint(arena, "{s}.o", .{emit.sub_path});
const wasm = try arena.create(Wasm);
wasm.* = .{
.base = .{
.tag = .wasm,
.comp = comp,
.emit = emit,
.zcu_object_sub_path = zcu_object_sub_path,
.zcu_object_basename = if (use_llvm)
try std.fmt.allocPrint(arena, "{s}_zcu.o", .{fs.path.stem(emit.sub_path)})
else
null,
// Garbage collection is so crucial to WebAssembly that we design
// the linker around the assumption that it will be on in the vast
// majority of cases, and therefore express "no garbage collection"
@ -2973,13 +2973,11 @@ pub fn createEmpty(
},
.allow_shlib_undefined = options.allow_shlib_undefined orelse false,
.file = null,
.disable_lld_caching = options.disable_lld_caching,
.build_id = options.build_id,
},
.name = undefined,
.string_table = .empty,
.string_bytes = .empty,
.import_table = options.import_table,
.export_table = options.export_table,
.import_symbols = options.import_symbols,
.export_symbol_names = options.export_symbol_names,
@ -2992,9 +2990,6 @@ pub fn createEmpty(
.object_host_name = .none,
.preloaded_strings = undefined,
};
if (use_llvm and comp.config.have_zcu) {
wasm.llvm_object = try LlvmObject.create(arena, comp);
}
errdefer wasm.base.destroy();
if (options.object_host_name) |name| wasm.object_host_name = (try wasm.internString(name)).toOptional();
@ -3010,17 +3005,7 @@ pub fn createEmpty(
.named => |name| (try wasm.internString(name)).toOptional(),
};
if (use_lld and (use_llvm or !comp.config.have_zcu)) {
// LLVM emits the object file (if any); LLD links it into the final product.
return wasm;
}
// What path should this Wasm linker code output to?
// If using LLD to link, this code should produce an object file so that it
// can be passed to LLD.
const sub_path = if (use_lld) zcu_object_sub_path.? else emit.sub_path;
wasm.base.file = try emit.root_dir.handle.createFile(sub_path, .{
wasm.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{
.truncate = true,
.read = true,
.mode = if (fs.has_executable_bit)
@ -3031,7 +3016,7 @@ pub fn createEmpty(
else
0,
});
wasm.name = sub_path;
wasm.name = emit.sub_path;
return wasm;
}
@ -3116,7 +3101,6 @@ fn parseArchive(wasm: *Wasm, obj: link.Input.Object) !void {
pub fn deinit(wasm: *Wasm) void {
const gpa = wasm.base.comp.gpa;
if (wasm.llvm_object) |llvm_object| llvm_object.deinit();
wasm.navs_exe.deinit(gpa);
wasm.navs_obj.deinit(gpa);
@ -3132,7 +3116,7 @@ pub fn deinit(wasm: *Wasm) void {
wasm.mir_instructions.deinit(gpa);
wasm.mir_extra.deinit(gpa);
wasm.all_zcu_locals.deinit(gpa);
wasm.mir_locals.deinit(gpa);
if (wasm.dwarf) |*dwarf| dwarf.deinit();
@ -3192,34 +3176,94 @@ pub fn deinit(wasm: *Wasm) void {
wasm.missing_exports.deinit(gpa);
}
pub fn updateFunc(wasm: *Wasm, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Air.Liveness) !void {
pub fn updateFunc(
wasm: *Wasm,
pt: Zcu.PerThread,
func_index: InternPool.Index,
any_mir: *const codegen.AnyMir,
) !void {
if (build_options.skip_non_native and builtin.object_format != .wasm) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(pt, func_index, air, liveness);
dev.check(.wasm_backend);
// This linker implementation only works with codegen backend `.stage2_wasm`.
const mir = &any_mir.wasm;
const zcu = pt.zcu;
const gpa = zcu.gpa;
try wasm.functions.ensureUnusedCapacity(gpa, 1);
try wasm.zcu_funcs.ensureUnusedCapacity(gpa, 1);
const ip = &zcu.intern_pool;
const is_obj = zcu.comp.config.output_mode == .Obj;
const target = &zcu.comp.root_mod.resolved_target.result;
const owner_nav = zcu.funcInfo(func_index).owner_nav;
log.debug("updateFunc {}", .{ip.getNav(owner_nav).fqn.fmt(ip)});
// For Wasm, we do not lower the MIR to code just yet. That lowering happens during `flush`,
// after garbage collection, which can affect function and global indexes, which affects the
// LEB integer encoding, which affects the output binary size.
// However, we do move the MIR into a more efficient in-memory representation, where the arrays
// for all functions are packed together rather than keeping them each in their own `Mir`.
const mir_instructions_off: u32 = @intCast(wasm.mir_instructions.len);
const mir_extra_off: u32 = @intCast(wasm.mir_extra.items.len);
const mir_locals_off: u32 = @intCast(wasm.mir_locals.items.len);
{
// Copying MultiArrayList data is a little non-trivial. Resize, then memcpy both slices.
const old_len = wasm.mir_instructions.len;
try wasm.mir_instructions.resize(gpa, old_len + mir.instructions.len);
const dest_slice = wasm.mir_instructions.slice().subslice(old_len, mir.instructions.len);
const src_slice = mir.instructions;
@memcpy(dest_slice.items(.tag), src_slice.items(.tag));
@memcpy(dest_slice.items(.data), src_slice.items(.data));
}
try wasm.mir_extra.appendSlice(gpa, mir.extra);
try wasm.mir_locals.appendSlice(gpa, mir.locals);
// We also need to populate some global state from `mir`.
try wasm.zcu_indirect_function_set.ensureUnusedCapacity(gpa, mir.indirect_function_set.count());
for (mir.indirect_function_set.keys()) |nav| wasm.zcu_indirect_function_set.putAssumeCapacity(nav, {});
for (mir.func_tys.keys()) |func_ty| {
const fn_info = zcu.typeToFunc(.fromInterned(func_ty)).?;
_ = try wasm.internFunctionType(fn_info.cc, fn_info.param_types.get(ip), .fromInterned(fn_info.return_type), target);
}
wasm.error_name_table_ref_count += mir.error_name_table_ref_count;
// We need to populate UAV data. In theory, we can lower the UAV values while we fill `mir.uavs`.
// However, lowering the data might cause *more* UAVs to be created, and mixing them up would be
// a headache. So instead, just write `undefined` placeholder code and use the `ZcuDataStarts`.
const zds: ZcuDataStarts = .init(wasm);
for (mir.uavs.keys(), mir.uavs.values()) |uav_val, uav_align| {
if (uav_align != .none) {
const gop = try wasm.overaligned_uavs.getOrPut(gpa, uav_val);
gop.value_ptr.* = if (gop.found_existing) gop.value_ptr.maxStrict(uav_align) else uav_align;
}
if (is_obj) {
const gop = try wasm.uavs_obj.getOrPut(gpa, uav_val);
if (!gop.found_existing) gop.value_ptr.* = undefined; // `zds` handles lowering
} else {
const gop = try wasm.uavs_exe.getOrPut(gpa, uav_val);
if (!gop.found_existing) gop.value_ptr.* = .{
.code = undefined, // `zds` handles lowering
.count = 0,
};
gop.value_ptr.count += 1;
}
}
try zds.finish(wasm, pt); // actually generates the UAVs
try wasm.functions.ensureUnusedCapacity(gpa, 1);
try wasm.zcu_funcs.ensureUnusedCapacity(gpa, 1);
// This converts AIR to MIR but does not yet lower to wasm code.
// That lowering happens during `flush`, after garbage collection, which
// can affect function and global indexes, which affects the LEB integer
// encoding, which affects the output binary size.
const function = try CodeGen.function(wasm, pt, func_index, air, liveness);
wasm.zcu_funcs.putAssumeCapacity(func_index, .{ .function = function });
wasm.zcu_funcs.putAssumeCapacity(func_index, .{ .function = .{
.instructions_off = mir_instructions_off,
.instructions_len = @intCast(mir.instructions.len),
.extra_off = mir_extra_off,
.extra_len = @intCast(mir.extra.len),
.locals_off = mir_locals_off,
.locals_len = @intCast(mir.locals.len),
.prologue = mir.prologue,
} });
wasm.functions.putAssumeCapacity(.pack(wasm, .{ .zcu_func = @enumFromInt(wasm.zcu_funcs.entries.len - 1) }), {});
try zds.finish(wasm, pt);
}
// Generate code for the "Nav", storing it in memory to be later written to
@ -3228,7 +3272,6 @@ pub fn updateNav(wasm: *Wasm, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index
if (build_options.skip_non_native and builtin.object_format != .wasm) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (wasm.llvm_object) |llvm_object| return llvm_object.updateNav(pt, nav_index);
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
@ -3308,8 +3351,6 @@ pub fn deleteExport(
exported: Zcu.Exported,
name: InternPool.NullTerminatedString,
) void {
if (wasm.llvm_object != null) return;
const zcu = wasm.base.comp.zcu.?;
const ip = &zcu.intern_pool;
const name_slice = name.toSlice(ip);
@ -3332,7 +3373,6 @@ pub fn updateExports(
if (build_options.skip_non_native and builtin.object_format != .wasm) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (wasm.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, export_indices);
const zcu = pt.zcu;
const gpa = zcu.gpa;
@ -3379,21 +3419,6 @@ pub fn loadInput(wasm: *Wasm, input: link.Input) !void {
}
}
pub fn flush(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const comp = wasm.base.comp;
const use_lld = build_options.have_llvm and comp.config.use_lld;
const diags = &comp.link_diags;
if (use_lld) {
return wasm.linkWithLLD(arena, tid, prog_node) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("failed to link with LLD: {s}", .{@errorName(e)}),
};
}
return wasm.flushModule(arena, tid, prog_node);
}
pub fn prelink(wasm: *Wasm, prog_node: std.Progress.Node) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();
@ -3785,37 +3810,25 @@ fn markTable(wasm: *Wasm, i: ObjectTableIndex) link.File.FlushError!void {
try wasm.tables.put(wasm.base.comp.gpa, .fromObjectTable(i), {});
}
pub fn flushModule(
pub fn flush(
wasm: *Wasm,
arena: Allocator,
tid: Zcu.PerThread.Id,
prog_node: std.Progress.Node,
) link.File.FlushError!void {
// The goal is to never use this because it's only needed if we need to
// write to InternPool, but flushModule is too late to be writing to the
// write to InternPool, but flush is too late to be writing to the
// InternPool.
_ = tid;
const comp = wasm.base.comp;
const use_lld = build_options.have_llvm and comp.config.use_lld;
const diags = &comp.link_diags;
const gpa = comp.gpa;
if (wasm.llvm_object) |llvm_object| {
try wasm.base.emitLlvmObject(arena, llvm_object, prog_node);
if (use_lld) return;
}
if (comp.verbose_link) Compilation.dump_argv(wasm.dump_argv_list.items);
if (wasm.base.zcu_object_sub_path) |path| {
const module_obj_path: Path = .{
.root_dir = wasm.base.emit.root_dir,
.sub_path = if (fs.path.dirname(wasm.base.emit.sub_path)) |dirname|
try fs.path.join(arena, &.{ dirname, path })
else
path,
};
openParseObjectReportingFailure(wasm, module_obj_path);
if (wasm.base.zcu_object_basename) |raw| {
const zcu_obj_path: Path = try comp.resolveEmitPathFlush(arena, .temp, raw);
openParseObjectReportingFailure(wasm, zcu_obj_path);
try prelink(wasm, prog_node);
}
@ -3850,432 +3863,6 @@ pub fn flushModule(
};
}
fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
dev.check(.lld_linker);
const tracy = trace(@src());
defer tracy.end();
const comp = wasm.base.comp;
const diags = &comp.link_diags;
const shared_memory = comp.config.shared_memory;
const export_memory = comp.config.export_memory;
const import_memory = comp.config.import_memory;
const target = comp.root_mod.resolved_target.result;
const gpa = comp.gpa;
const directory = wasm.base.emit.root_dir; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{wasm.base.emit.sub_path});
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (comp.zcu != null) blk: {
try wasm.flushModule(arena, tid, prog_node);
if (fs.path.dirname(full_out_path)) |dirname| {
break :blk try fs.path.join(arena, &.{ dirname, wasm.base.zcu_object_sub_path.? });
} else {
break :blk wasm.base.zcu_object_sub_path.?;
}
} else null;
const sub_prog_node = prog_node.start("LLD Link", 0);
defer sub_prog_node.end();
const is_obj = comp.config.output_mode == .Obj;
const compiler_rt_path: ?Path = blk: {
if (comp.compiler_rt_lib) |lib| break :blk lib.full_object_path;
if (comp.compiler_rt_obj) |obj| break :blk obj.full_object_path;
break :blk null;
};
const ubsan_rt_path: ?Path = blk: {
if (comp.ubsan_rt_lib) |lib| break :blk lib.full_object_path;
if (comp.ubsan_rt_obj) |obj| break :blk obj.full_object_path;
break :blk null;
};
const id_symlink_basename = "lld.id";
var man: Cache.Manifest = undefined;
defer if (!wasm.base.disable_lld_caching) man.deinit();
var digest: [Cache.hex_digest_len]u8 = undefined;
if (!wasm.base.disable_lld_caching) {
man = comp.cache_parent.obtain();
// We are about to obtain this lock, so here we give other processes a chance first.
wasm.base.releaseLock();
comptime assert(Compilation.link_hash_implementation_version == 14);
try link.hashInputs(&man, comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
_ = try man.addFilePath(key.status.success.object_path, null);
}
try man.addOptionalFile(module_obj_path);
try man.addOptionalFilePath(compiler_rt_path);
try man.addOptionalFilePath(ubsan_rt_path);
man.hash.addOptionalBytes(wasm.entry_name.slice(wasm));
man.hash.add(wasm.base.stack_size);
man.hash.add(wasm.base.build_id);
man.hash.add(import_memory);
man.hash.add(export_memory);
man.hash.add(wasm.import_table);
man.hash.add(wasm.export_table);
man.hash.addOptional(wasm.initial_memory);
man.hash.addOptional(wasm.max_memory);
man.hash.add(shared_memory);
man.hash.addOptional(wasm.global_base);
man.hash.addListOfBytes(wasm.export_symbol_names);
// strip does not need to go into the linker hash because it is part of the hash namespace
// We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
_ = try man.hit();
digest = man.final();
var prev_digest_buf: [digest.len]u8 = undefined;
const prev_digest: []u8 = Cache.readSmallFile(
directory.handle,
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
log.debug("WASM LLD new_digest={s} error: {s}", .{ std.fmt.fmtSliceHexLower(&digest), @errorName(err) });
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
if (mem.eql(u8, prev_digest, &digest)) {
log.debug("WASM LLD digest={s} match - skipping invocation", .{std.fmt.fmtSliceHexLower(&digest)});
// Hot diggity dog! The output binary is already there.
wasm.base.lock = man.toOwnedLock();
return;
}
log.debug("WASM LLD prev_digest={s} new_digest={s}", .{ std.fmt.fmtSliceHexLower(prev_digest), std.fmt.fmtSliceHexLower(&digest) });
// We are about to change the output file to be different, so we invalidate the build hash now.
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
error.FileNotFound => {},
else => |e| return e,
};
}
if (is_obj) {
// LLD's WASM driver does not support the equivalent of `-r` so we do a simple file copy
// here. TODO: think carefully about how we can avoid this redundant operation when doing
// build-obj. See also the corresponding TODO in linkAsArchive.
const the_object_path = blk: {
if (link.firstObjectInput(comp.link_inputs)) |obj| break :blk obj.path;
if (comp.c_object_table.count() != 0)
break :blk comp.c_object_table.keys()[0].status.success.object_path;
if (module_obj_path) |p|
break :blk Path.initCwd(p);
// TODO I think this is unreachable. Audit this situation when solving the above TODO
// regarding eliding redundant object -> object transformations.
return error.NoObjectsToLink;
};
try fs.Dir.copyFile(
the_object_path.root_dir.handle,
the_object_path.sub_path,
directory.handle,
wasm.base.emit.sub_path,
.{},
);
} else {
// Create an LLD command line and invoke it.
var argv = std.ArrayList([]const u8).init(gpa);
defer argv.deinit();
// We will invoke ourselves as a child process to gain access to LLD.
// This is necessary because LLD does not behave properly as a library -
// it calls exit() and does not reset all global data between invocations.
const linker_command = "wasm-ld";
try argv.appendSlice(&[_][]const u8{ comp.self_exe_path.?, linker_command });
try argv.append("--error-limit=0");
if (comp.config.lto != .none) {
switch (comp.root_mod.optimize_mode) {
.Debug => {},
.ReleaseSmall => try argv.append("-O2"),
.ReleaseFast, .ReleaseSafe => try argv.append("-O3"),
}
}
if (import_memory) {
try argv.append("--import-memory");
}
if (export_memory) {
try argv.append("--export-memory");
}
if (wasm.import_table) {
assert(!wasm.export_table);
try argv.append("--import-table");
}
if (wasm.export_table) {
assert(!wasm.import_table);
try argv.append("--export-table");
}
// For wasm-ld we only need to specify '--no-gc-sections' when the user explicitly
// specified it as garbage collection is enabled by default.
if (!wasm.base.gc_sections) {
try argv.append("--no-gc-sections");
}
if (comp.config.debug_format == .strip) {
try argv.append("-s");
}
if (wasm.initial_memory) |initial_memory| {
const arg = try std.fmt.allocPrint(arena, "--initial-memory={d}", .{initial_memory});
try argv.append(arg);
}
if (wasm.max_memory) |max_memory| {
const arg = try std.fmt.allocPrint(arena, "--max-memory={d}", .{max_memory});
try argv.append(arg);
}
if (shared_memory) {
try argv.append("--shared-memory");
}
if (wasm.global_base) |global_base| {
const arg = try std.fmt.allocPrint(arena, "--global-base={d}", .{global_base});
try argv.append(arg);
} else {
// We prepend it by default, so when a stack overflow happens the runtime will trap correctly,
// rather than silently overwrite all global declarations. See https://github.com/ziglang/zig/issues/4496
//
// The user can overwrite this behavior by setting the global-base
try argv.append("--stack-first");
}
// Users are allowed to specify which symbols they want to export to the wasm host.
for (wasm.export_symbol_names) |symbol_name| {
const arg = try std.fmt.allocPrint(arena, "--export={s}", .{symbol_name});
try argv.append(arg);
}
if (comp.config.rdynamic) {
try argv.append("--export-dynamic");
}
if (wasm.entry_name.slice(wasm)) |entry_name| {
try argv.appendSlice(&.{ "--entry", entry_name });
} else {
try argv.append("--no-entry");
}
try argv.appendSlice(&.{
"-z",
try std.fmt.allocPrint(arena, "stack-size={d}", .{wasm.base.stack_size}),
});
switch (wasm.base.build_id) {
.none => try argv.append("--build-id=none"),
.fast, .uuid, .sha1 => try argv.append(try std.fmt.allocPrint(arena, "--build-id={s}", .{
@tagName(wasm.base.build_id),
})),
.hexstring => |hs| try argv.append(try std.fmt.allocPrint(arena, "--build-id=0x{s}", .{
std.fmt.fmtSliceHexLower(hs.toSlice()),
})),
.md5 => {},
}
if (wasm.import_symbols) {
try argv.append("--allow-undefined");
}
if (comp.config.output_mode == .Lib and comp.config.link_mode == .dynamic) {
try argv.append("--shared");
}
if (comp.config.pie) {
try argv.append("--pie");
}
try argv.appendSlice(&.{ "-o", full_out_path });
if (target.cpu.arch == .wasm64) {
try argv.append("-mwasm64");
}
const is_exe_or_dyn_lib = comp.config.output_mode == .Exe or
(comp.config.output_mode == .Lib and comp.config.link_mode == .dynamic);
if (comp.config.link_libc and is_exe_or_dyn_lib) {
if (target.os.tag == .wasi) {
for (comp.wasi_emulated_libs) |crt_file| {
try argv.append(try comp.crtFileAsString(
arena,
wasi_libc.emulatedLibCRFileLibName(crt_file),
));
}
try argv.append(try comp.crtFileAsString(
arena,
wasi_libc.execModelCrtFileFullName(comp.config.wasi_exec_model),
));
try argv.append(try comp.crtFileAsString(arena, "libc.a"));
}
if (comp.zigc_static_lib) |zigc| {
try argv.append(try zigc.full_object_path.toString(arena));
}
if (comp.config.link_libcpp) {
try argv.append(try comp.libcxx_static_lib.?.full_object_path.toString(arena));
try argv.append(try comp.libcxxabi_static_lib.?.full_object_path.toString(arena));
}
}
// Positional arguments to the linker such as object files.
var whole_archive = false;
for (comp.link_inputs) |link_input| switch (link_input) {
.object, .archive => |obj| {
if (obj.must_link and !whole_archive) {
try argv.append("-whole-archive");
whole_archive = true;
} else if (!obj.must_link and whole_archive) {
try argv.append("-no-whole-archive");
whole_archive = false;
}
try argv.append(try obj.path.toString(arena));
},
.dso => |dso| {
try argv.append(try dso.path.toString(arena));
},
.dso_exact => unreachable,
.res => unreachable,
};
if (whole_archive) {
try argv.append("-no-whole-archive");
whole_archive = false;
}
for (comp.c_object_table.keys()) |key| {
try argv.append(try key.status.success.object_path.toString(arena));
}
if (module_obj_path) |p| {
try argv.append(p);
}
if (compiler_rt_path) |p| {
try argv.append(try p.toString(arena));
}
if (ubsan_rt_path) |p| {
try argv.append(try p.toStringZ(arena));
}
if (comp.verbose_link) {
// Skip over our own name so that the LLD linker name is the first argv item.
Compilation.dump_argv(argv.items[1..]);
}
if (std.process.can_spawn) {
// If possible, we run LLD as a child process because it does not always
// behave properly as a library, unfortunately.
// https://github.com/ziglang/zig/issues/3825
var child = std.process.Child.init(argv.items, arena);
if (comp.clang_passthrough_mode) {
child.stdin_behavior = .Inherit;
child.stdout_behavior = .Inherit;
child.stderr_behavior = .Inherit;
const term = child.spawnAndWait() catch |err| {
log.err("failed to spawn (passthrough mode) LLD {s}: {s}", .{ argv.items[0], @errorName(err) });
return error.UnableToSpawnWasm;
};
switch (term) {
.Exited => |code| {
if (code != 0) {
std.process.exit(code);
}
},
else => std.process.abort(),
}
} else {
child.stdin_behavior = .Ignore;
child.stdout_behavior = .Ignore;
child.stderr_behavior = .Pipe;
try child.spawn();
const stderr = try child.stderr.?.reader().readAllAlloc(arena, std.math.maxInt(usize));
const term = child.wait() catch |err| {
log.err("failed to spawn LLD {s}: {s}", .{ argv.items[0], @errorName(err) });
return error.UnableToSpawnWasm;
};
switch (term) {
.Exited => |code| {
if (code != 0) {
diags.lockAndParseLldStderr(linker_command, stderr);
return error.LinkFailure;
}
},
else => {
return diags.fail("{s} terminated with stderr:\n{s}", .{ argv.items[0], stderr });
},
}
if (stderr.len != 0) {
log.warn("unexpected LLD stderr:\n{s}", .{stderr});
}
}
} else {
const exit_code = try lldMain(arena, argv.items, false);
if (exit_code != 0) {
if (comp.clang_passthrough_mode) {
std.process.exit(exit_code);
} else {
return diags.fail("{s} returned exit code {d}:\n{s}", .{ argv.items[0], exit_code });
}
}
}
// Give +x to the .wasm file if it is an executable and the OS is WASI.
// Some systems may be configured to execute such binaries directly. Even if that
// is not the case, it means we will get "exec format error" when trying to run
// it, and then can react to that in the same way as trying to run an ELF file
// from a foreign CPU architecture.
if (fs.has_executable_bit and target.os.tag == .wasi and
comp.config.output_mode == .Exe)
{
// TODO: what's our strategy for reporting linker errors from this function?
// report a nice error here with the file path if it fails instead of
// just returning the error code.
// chmod does not interact with umask, so we use a conservative -rwxr--r-- here.
std.posix.fchmodat(fs.cwd().fd, full_out_path, 0o744, 0) catch |err| switch (err) {
error.OperationNotSupported => unreachable, // Not a symlink.
else => |e| return e,
};
}
}
if (!wasm.base.disable_lld_caching) {
// Update the file with the digest. If it fails we can continue; it only
// means that the next invocation will have an unnecessary cache miss.
Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
log.warn("failed to save linking hash digest symlink: {s}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.
man.writeManifest() catch |err| {
log.warn("failed to write cache manifest when linking: {s}", .{@errorName(err)});
};
// We hang on to this lock so that the output file path can be used without
// other processes clobbering it.
wasm.base.lock = man.toOwnedLock();
}
}
fn defaultEntrySymbolName(
preloaded_strings: *const PreloadedStrings,
wasi_exec_model: std.builtin.WasiExecModel,
@ -4465,58 +4052,54 @@ pub fn symbolNameIndex(wasm: *Wasm, name: String) Allocator.Error!SymbolTableInd
return @enumFromInt(gop.index);
}
pub fn refUavObj(wasm: *Wasm, ip_index: InternPool.Index, orig_ptr_ty: InternPool.Index) !UavsObjIndex {
pub fn addUavReloc(
wasm: *Wasm,
reloc_offset: usize,
uav_val: InternPool.Index,
orig_ptr_ty: InternPool.Index,
addend: u32,
) !void {
const comp = wasm.base.comp;
const zcu = comp.zcu.?;
const ip = &zcu.intern_pool;
const gpa = comp.gpa;
assert(comp.config.output_mode == .Obj);
if (orig_ptr_ty != .none) {
const abi_alignment = Zcu.Type.fromInterned(ip.typeOf(ip_index)).abiAlignment(zcu);
const explicit_alignment = ip.indexToKey(orig_ptr_ty).ptr_type.flags.alignment;
if (explicit_alignment.compare(.gt, abi_alignment)) {
const gop = try wasm.overaligned_uavs.getOrPut(gpa, ip_index);
gop.value_ptr.* = if (gop.found_existing) gop.value_ptr.maxStrict(explicit_alignment) else explicit_alignment;
}
@"align": {
const ptr_type = ip.indexToKey(orig_ptr_ty).ptr_type;
const this_align = ptr_type.flags.alignment;
if (this_align == .none) break :@"align";
const abi_align = Zcu.Type.fromInterned(ptr_type.child).abiAlignment(zcu);
if (this_align.compare(.lte, abi_align)) break :@"align";
const gop = try wasm.overaligned_uavs.getOrPut(gpa, uav_val);
gop.value_ptr.* = if (gop.found_existing) gop.value_ptr.maxStrict(this_align) else this_align;
}
const gop = try wasm.uavs_obj.getOrPut(gpa, ip_index);
if (!gop.found_existing) gop.value_ptr.* = .{
// Lowering the value is delayed to avoid recursion.
.code = undefined,
.relocs = undefined,
};
return @enumFromInt(gop.index);
}
pub fn refUavExe(wasm: *Wasm, ip_index: InternPool.Index, orig_ptr_ty: InternPool.Index) !UavsExeIndex {
const comp = wasm.base.comp;
const zcu = comp.zcu.?;
const ip = &zcu.intern_pool;
const gpa = comp.gpa;
assert(comp.config.output_mode != .Obj);
if (orig_ptr_ty != .none) {
const abi_alignment = Zcu.Type.fromInterned(ip.typeOf(ip_index)).abiAlignment(zcu);
const explicit_alignment = ip.indexToKey(orig_ptr_ty).ptr_type.flags.alignment;
if (explicit_alignment.compare(.gt, abi_alignment)) {
const gop = try wasm.overaligned_uavs.getOrPut(gpa, ip_index);
gop.value_ptr.* = if (gop.found_existing) gop.value_ptr.maxStrict(explicit_alignment) else explicit_alignment;
}
}
const gop = try wasm.uavs_exe.getOrPut(gpa, ip_index);
if (gop.found_existing) {
gop.value_ptr.count += 1;
if (comp.config.output_mode == .Obj) {
const gop = try wasm.uavs_obj.getOrPut(gpa, uav_val);
if (!gop.found_existing) gop.value_ptr.* = undefined; // to avoid recursion, `ZcuDataStarts` will lower the value later
try wasm.out_relocs.append(gpa, .{
.offset = @intCast(reloc_offset),
.pointee = .{ .symbol_index = try wasm.uavSymbolIndex(uav_val) },
.tag = switch (wasm.pointerSize()) {
32 => .memory_addr_i32,
64 => .memory_addr_i64,
else => unreachable,
},
.addend = @intCast(addend),
});
} else {
gop.value_ptr.* = .{
// Lowering the value is delayed to avoid recursion.
.code = undefined,
.count = 1,
const gop = try wasm.uavs_exe.getOrPut(gpa, uav_val);
if (!gop.found_existing) gop.value_ptr.* = .{
.code = undefined, // to avoid recursion, `ZcuDataStarts` will lower the value later
.count = 0,
};
gop.value_ptr.count += 1;
try wasm.uav_fixups.append(gpa, .{
.uavs_exe_index = @enumFromInt(gop.index),
.offset = @intCast(reloc_offset),
.addend = addend,
});
}
return @enumFromInt(gop.index);
}
pub fn refNavObj(wasm: *Wasm, nav_index: InternPool.Nav.Index) !NavsObjIndex {
@ -4550,10 +4133,11 @@ pub fn refNavExe(wasm: *Wasm, nav_index: InternPool.Nav.Index) !NavsExeIndex {
}
/// Asserts it is called after `Flush.data_segments` is fully populated and sorted.
pub fn uavAddr(wasm: *Wasm, uav_index: UavsExeIndex) u32 {
pub fn uavAddr(wasm: *Wasm, ip_index: InternPool.Index) u32 {
assert(wasm.flush_buffer.memory_layout_finished);
const comp = wasm.base.comp;
assert(comp.config.output_mode != .Obj);
const uav_index: UavsExeIndex = @enumFromInt(wasm.uavs_exe.getIndex(ip_index).?);
const ds_id: DataSegmentId = .pack(wasm, .{ .uav_exe = uav_index });
return wasm.flush_buffer.data_segments.get(ds_id).?;
}

View File

@ -9,6 +9,7 @@ const Alignment = Wasm.Alignment;
const String = Wasm.String;
const Relocation = Wasm.Relocation;
const InternPool = @import("../../InternPool.zig");
const Mir = @import("../../arch/wasm/Mir.zig");
const build_options = @import("build_options");
@ -868,7 +869,21 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
.enum_type => {
try emitTagNameFunction(wasm, binary_bytes, f.data_segments.get(.__zig_tag_name_table).?, i.value(wasm).tag_name.table_index, ip_index);
},
else => try i.value(wasm).function.lower(wasm, binary_bytes),
else => {
const func = i.value(wasm).function;
const mir: Mir = .{
.instructions = wasm.mir_instructions.slice().subslice(func.instructions_off, func.instructions_len),
.extra = wasm.mir_extra.items[func.extra_off..][0..func.extra_len],
.locals = wasm.mir_locals.items[func.locals_off..][0..func.locals_len],
.prologue = func.prologue,
// These fields are unused by `lower`.
.uavs = undefined,
.indirect_function_set = undefined,
.func_tys = undefined,
.error_name_table_ref_count = undefined,
};
try mir.lower(wasm, binary_bytes);
},
}
},
};

View File

@ -13,14 +13,12 @@ const Path = std.Build.Cache.Path;
const Zcu = @import("../Zcu.zig");
const InternPool = @import("../InternPool.zig");
const Compilation = @import("../Compilation.zig");
const codegen = @import("../codegen.zig");
const link = @import("../link.zig");
const trace = @import("../tracy.zig").trace;
const build_options = @import("build_options");
const Air = @import("../Air.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
base: link.File,
llvm_object: LlvmObject.Ptr,
pub fn createEmpty(
arena: Allocator,
@ -36,23 +34,20 @@ pub fn createEmpty(
assert(!use_lld); // Caught by Compilation.Config.resolve.
assert(target.os.tag == .aix); // Caught by Compilation.Config.resolve.
const llvm_object = try LlvmObject.create(arena, comp);
const xcoff = try arena.create(Xcoff);
xcoff.* = .{
.base = .{
.tag = .xcoff,
.comp = comp,
.emit = emit,
.zcu_object_sub_path = emit.sub_path,
.zcu_object_basename = emit.sub_path,
.gc_sections = options.gc_sections orelse false,
.print_gc_sections = options.print_gc_sections,
.stack_size = options.stack_size orelse 0,
.allow_shlib_undefined = options.allow_shlib_undefined orelse false,
.file = null,
.disable_lld_caching = options.disable_lld_caching,
.build_id = options.build_id,
},
.llvm_object = llvm_object,
};
return xcoff;
@ -70,27 +65,27 @@ pub fn open(
}
pub fn deinit(self: *Xcoff) void {
self.llvm_object.deinit();
_ = self;
}
pub fn updateFunc(
self: *Xcoff,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Air.Liveness,
mir: *const codegen.AnyMir,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .xcoff)
@panic("Attempted to compile for object format that was disabled by build configuration");
try self.llvm_object.updateFunc(pt, func_index, air, liveness);
_ = self;
_ = pt;
_ = func_index;
_ = mir;
unreachable; // we always use llvm
}
pub fn updateNav(self: *Xcoff, pt: Zcu.PerThread, nav: InternPool.Nav.Index) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .xcoff)
@panic("Attempted to compile for object format that was disabled by build configuration");
return self.llvm_object.updateNav(pt, nav);
_ = self;
_ = pt;
_ = nav;
unreachable; // we always use llvm
}
pub fn updateExports(
@ -99,21 +94,19 @@ pub fn updateExports(
exported: Zcu.Exported,
export_indices: []const Zcu.Export.Index,
) !void {
if (build_options.skip_non_native and builtin.object_format != .xcoff)
@panic("Attempted to compile for object format that was disabled by build configuration");
return self.llvm_object.updateExports(pt, exported, export_indices);
_ = self;
_ = pt;
_ = exported;
_ = export_indices;
unreachable; // we always use llvm
}
pub fn flush(self: *Xcoff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
return self.flushModule(arena, tid, prog_node);
}
pub fn flushModule(self: *Xcoff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
if (build_options.skip_non_native and builtin.object_format != .xcoff)
@panic("Attempted to compile for object format that was disabled by build configuration");
_ = self;
_ = arena;
_ = tid;
try self.base.emitLlvmObject(arena, self.llvm_object, prog_node);
_ = prog_node;
}

View File

@ -699,55 +699,30 @@ const Emit = union(enum) {
yes_default_path,
yes: []const u8,
const Resolved = struct {
data: ?Compilation.EmitLoc,
dir: ?fs.Dir,
fn deinit(self: *Resolved) void {
if (self.dir) |*dir| {
dir.close();
}
}
};
fn resolve(emit: Emit, default_basename: []const u8, output_to_cache: bool) !Resolved {
var resolved: Resolved = .{ .data = null, .dir = null };
errdefer resolved.deinit();
switch (emit) {
.no => {},
.yes_default_path => {
resolved.data = Compilation.EmitLoc{
.directory = if (output_to_cache) null else .{
.path = null,
.handle = fs.cwd(),
},
.basename = default_basename,
};
},
.yes => |full_path| {
const basename = fs.path.basename(full_path);
if (fs.path.dirname(full_path)) |dirname| {
const handle = try fs.cwd().openDir(dirname, .{});
resolved = .{
.dir = handle,
.data = Compilation.EmitLoc{
.basename = basename,
.directory = .{
.path = dirname,
.handle = handle,
},
},
};
} else {
resolved.data = Compilation.EmitLoc{
.basename = basename,
.directory = .{ .path = null, .handle = fs.cwd() },
};
const OutputToCacheReason = enum { listen, @"zig run", @"zig test" };
fn resolve(emit: Emit, default_basename: []const u8, output_to_cache: ?OutputToCacheReason) Compilation.CreateOptions.Emit {
return switch (emit) {
.no => .no,
.yes_default_path => if (output_to_cache != null) .yes_cache else .{ .yes_path = default_basename },
.yes => |path| if (output_to_cache) |reason| {
switch (reason) {
.listen => fatal("--listen incompatible with explicit output path '{s}'", .{path}),
.@"zig run", .@"zig test" => fatal(
"'{s}' with explicit output path '{s}' requires explicit '-femit-bin=path' or '-fno-emit-bin'",
.{ @tagName(reason), path },
),
}
} else e: {
// If there's a dirname, check that dir exists. This will give a more descriptive error than `Compilation` otherwise would.
if (fs.path.dirname(path)) |dir_path| {
var dir = fs.cwd().openDir(dir_path, .{}) catch |err| {
fatal("unable to open output directory '{s}': {s}", .{ dir_path, @errorName(err) });
};
dir.close();
}
break :e .{ .yes_path = path };
},
}
return resolved;
};
}
};
@ -867,9 +842,9 @@ fn buildOutputType(
var linker_allow_undefined_version: bool = false;
var linker_enable_new_dtags: ?bool = null;
var disable_c_depfile = false;
var linker_sort_section: ?link.File.Elf.SortSection = null;
var linker_sort_section: ?link.File.Lld.Elf.SortSection = null;
var linker_gc_sections: ?bool = null;
var linker_compress_debug_sections: ?link.File.Elf.CompressDebugSections = null;
var linker_compress_debug_sections: ?link.File.Lld.Elf.CompressDebugSections = null;
var linker_allow_shlib_undefined: ?bool = null;
var allow_so_scripts: bool = false;
var linker_bind_global_refs_locally: ?bool = null;
@ -921,7 +896,7 @@ fn buildOutputType(
var debug_compiler_runtime_libs = false;
var opt_incremental: ?bool = null;
var install_name: ?[]const u8 = null;
var hash_style: link.File.Elf.HashStyle = .both;
var hash_style: link.File.Lld.Elf.HashStyle = .both;
var entitlements: ?[]const u8 = null;
var pagezero_size: ?u64 = null;
var lib_search_strategy: link.UnresolvedInput.SearchStrategy = .paths_first;
@ -1196,11 +1171,11 @@ fn buildOutputType(
install_name = args_iter.nextOrFatal();
} else if (mem.startsWith(u8, arg, "--compress-debug-sections=")) {
const param = arg["--compress-debug-sections=".len..];
linker_compress_debug_sections = std.meta.stringToEnum(link.File.Elf.CompressDebugSections, param) orelse {
linker_compress_debug_sections = std.meta.stringToEnum(link.File.Lld.Elf.CompressDebugSections, param) orelse {
fatal("expected --compress-debug-sections=[none|zlib|zstd], found '{s}'", .{param});
};
} else if (mem.eql(u8, arg, "--compress-debug-sections")) {
linker_compress_debug_sections = link.File.Elf.CompressDebugSections.zlib;
linker_compress_debug_sections = link.File.Lld.Elf.CompressDebugSections.zlib;
} else if (mem.eql(u8, arg, "-pagezero_size")) {
const next_arg = args_iter.nextOrFatal();
pagezero_size = std.fmt.parseUnsigned(u64, eatIntPrefix(next_arg, 16), 16) catch |err| {
@ -2368,7 +2343,7 @@ fn buildOutputType(
if (it.only_arg.len == 0) {
linker_compress_debug_sections = .zlib;
} else {
linker_compress_debug_sections = std.meta.stringToEnum(link.File.Elf.CompressDebugSections, it.only_arg) orelse {
linker_compress_debug_sections = std.meta.stringToEnum(link.File.Lld.Elf.CompressDebugSections, it.only_arg) orelse {
fatal("expected [none|zlib|zstd] after --compress-debug-sections, found '{s}'", .{it.only_arg});
};
}
@ -2505,7 +2480,7 @@ fn buildOutputType(
linker_print_map = true;
} else if (mem.eql(u8, arg, "--sort-section")) {
const arg1 = linker_args_it.nextOrFatal();
linker_sort_section = std.meta.stringToEnum(link.File.Elf.SortSection, arg1) orelse {
linker_sort_section = std.meta.stringToEnum(link.File.Lld.Elf.SortSection, arg1) orelse {
fatal("expected [name|alignment] after --sort-section, found '{s}'", .{arg1});
};
} else if (mem.eql(u8, arg, "--allow-shlib-undefined") or
@ -2551,7 +2526,7 @@ fn buildOutputType(
try linker_export_symbol_names.append(arena, linker_args_it.nextOrFatal());
} else if (mem.eql(u8, arg, "--compress-debug-sections")) {
const arg1 = linker_args_it.nextOrFatal();
linker_compress_debug_sections = std.meta.stringToEnum(link.File.Elf.CompressDebugSections, arg1) orelse {
linker_compress_debug_sections = std.meta.stringToEnum(link.File.Lld.Elf.CompressDebugSections, arg1) orelse {
fatal("expected [none|zlib|zstd] after --compress-debug-sections, found '{s}'", .{arg1});
};
} else if (mem.startsWith(u8, arg, "-z")) {
@ -2764,7 +2739,7 @@ fn buildOutputType(
mem.eql(u8, arg, "--hash-style"))
{
const next_arg = linker_args_it.nextOrFatal();
hash_style = std.meta.stringToEnum(link.File.Elf.HashStyle, next_arg) orelse {
hash_style = std.meta.stringToEnum(link.File.Lld.Elf.HashStyle, next_arg) orelse {
fatal("expected [sysv|gnu|both] after --hash-style, found '{s}'", .{
next_arg,
});
@ -2830,7 +2805,7 @@ fn buildOutputType(
.link => {
create_module.opts.output_mode = if (is_shared_lib) .Lib else .Exe;
if (emit_bin != .no) {
emit_bin = if (out_path) |p| .{ .yes = p } else EmitBin.yes_a_out;
emit_bin = if (out_path) |p| .{ .yes = p } else .yes_a_out;
}
if (emit_llvm) {
fatal("-emit-llvm cannot be used when linking", .{});
@ -3208,7 +3183,17 @@ fn buildOutputType(
var cleanup_emit_bin_dir: ?fs.Dir = null;
defer if (cleanup_emit_bin_dir) |*dir| dir.close();
const output_to_cache = listen != .none;
// For `zig run` and `zig test`, we don't want to put the binary in the cwd by default. So, if
// the binary is requested with no explicit path (as is the default), we emit to the cache.
const output_to_cache: ?Emit.OutputToCacheReason = switch (listen) {
.stdio, .ip4 => .listen,
.none => if (arg_mode == .run and emit_bin == .yes_default_path)
.@"zig run"
else if (arg_mode == .zig_test and emit_bin == .yes_default_path)
.@"zig test"
else
null,
};
const optional_version = if (have_version) version else null;
const root_name = if (provided_name) |n| n else main_mod.fully_qualified_name;
@ -3225,150 +3210,57 @@ fn buildOutputType(
},
};
const a_out_basename = switch (target.ofmt) {
.coff => "a.exe",
else => "a.out",
};
const emit_bin_loc: ?Compilation.EmitLoc = switch (emit_bin) {
.no => null,
.yes_default_path => Compilation.EmitLoc{
.directory = blk: {
switch (arg_mode) {
.run, .zig_test => break :blk null,
.build, .cc, .cpp, .translate_c, .zig_test_obj => {
if (output_to_cache) {
break :blk null;
} else {
break :blk .{ .path = null, .handle = fs.cwd() };
}
},
}
},
.basename = if (clang_preprocessor_mode == .pch)
try std.fmt.allocPrint(arena, "{s}.pch", .{root_name})
else
try std.zig.binNameAlloc(arena, .{
const emit_bin_resolved: Compilation.CreateOptions.Emit = switch (emit_bin) {
.no => .no,
.yes_default_path => emit: {
if (output_to_cache != null) break :emit .yes_cache;
const name = switch (clang_preprocessor_mode) {
.pch => try std.fmt.allocPrint(arena, "{s}.pch", .{root_name}),
else => try std.zig.binNameAlloc(arena, .{
.root_name = root_name,
.target = target,
.output_mode = create_module.resolved_options.output_mode,
.link_mode = create_module.resolved_options.link_mode,
.version = optional_version,
}),
};
break :emit .{ .yes_path = name };
},
.yes => |full_path| b: {
const basename = fs.path.basename(full_path);
if (fs.path.dirname(full_path)) |dirname| {
const handle = fs.cwd().openDir(dirname, .{}) catch |err| {
fatal("unable to open output directory '{s}': {s}", .{ dirname, @errorName(err) });
};
cleanup_emit_bin_dir = handle;
break :b Compilation.EmitLoc{
.basename = basename,
.directory = .{
.path = dirname,
.handle = handle,
},
};
} else {
break :b Compilation.EmitLoc{
.basename = basename,
.directory = .{ .path = null, .handle = fs.cwd() },
.yes => |path| if (output_to_cache != null) {
assert(output_to_cache == .listen); // there was an explicit bin path
fatal("--listen incompatible with explicit output path '{s}'", .{path});
} else emit: {
// If there's a dirname, check that dir exists. This will give a more descriptive error than `Compilation` otherwise would.
if (fs.path.dirname(path)) |dir_path| {
var dir = fs.cwd().openDir(dir_path, .{}) catch |err| {
fatal("unable to open output directory '{s}': {s}", .{ dir_path, @errorName(err) });
};
dir.close();
}
break :emit .{ .yes_path = path };
},
.yes_a_out => Compilation.EmitLoc{
.directory = .{ .path = null, .handle = fs.cwd() },
.basename = a_out_basename,
.yes_a_out => emit: {
assert(output_to_cache == null);
break :emit .{ .yes_path = switch (target.ofmt) {
.coff => "a.exe",
else => "a.out",
} };
},
};
const default_h_basename = try std.fmt.allocPrint(arena, "{s}.h", .{root_name});
var emit_h_resolved = emit_h.resolve(default_h_basename, output_to_cache) catch |err| {
switch (emit_h) {
.yes => |p| {
fatal("unable to open directory from argument '-femit-h', '{s}': {s}", .{
p, @errorName(err),
});
},
.yes_default_path => {
fatal("unable to open directory from arguments '--name' or '-fsoname', '{s}': {s}", .{
default_h_basename, @errorName(err),
});
},
.no => unreachable,
}
};
defer emit_h_resolved.deinit();
const emit_h_resolved = emit_h.resolve(default_h_basename, output_to_cache);
const default_asm_basename = try std.fmt.allocPrint(arena, "{s}.s", .{root_name});
var emit_asm_resolved = emit_asm.resolve(default_asm_basename, output_to_cache) catch |err| {
switch (emit_asm) {
.yes => |p| {
fatal("unable to open directory from argument '-femit-asm', '{s}': {s}", .{
p, @errorName(err),
});
},
.yes_default_path => {
fatal("unable to open directory from arguments '--name' or '-fsoname', '{s}': {s}", .{
default_asm_basename, @errorName(err),
});
},
.no => unreachable,
}
};
defer emit_asm_resolved.deinit();
const emit_asm_resolved = emit_asm.resolve(default_asm_basename, output_to_cache);
const default_llvm_ir_basename = try std.fmt.allocPrint(arena, "{s}.ll", .{root_name});
var emit_llvm_ir_resolved = emit_llvm_ir.resolve(default_llvm_ir_basename, output_to_cache) catch |err| {
switch (emit_llvm_ir) {
.yes => |p| {
fatal("unable to open directory from argument '-femit-llvm-ir', '{s}': {s}", .{
p, @errorName(err),
});
},
.yes_default_path => {
fatal("unable to open directory from arguments '--name' or '-fsoname', '{s}': {s}", .{
default_llvm_ir_basename, @errorName(err),
});
},
.no => unreachable,
}
};
defer emit_llvm_ir_resolved.deinit();
const emit_llvm_ir_resolved = emit_llvm_ir.resolve(default_llvm_ir_basename, output_to_cache);
const default_llvm_bc_basename = try std.fmt.allocPrint(arena, "{s}.bc", .{root_name});
var emit_llvm_bc_resolved = emit_llvm_bc.resolve(default_llvm_bc_basename, output_to_cache) catch |err| {
switch (emit_llvm_bc) {
.yes => |p| {
fatal("unable to open directory from argument '-femit-llvm-bc', '{s}': {s}", .{
p, @errorName(err),
});
},
.yes_default_path => {
fatal("unable to open directory from arguments '--name' or '-fsoname', '{s}': {s}", .{
default_llvm_bc_basename, @errorName(err),
});
},
.no => unreachable,
}
};
defer emit_llvm_bc_resolved.deinit();
const emit_llvm_bc_resolved = emit_llvm_bc.resolve(default_llvm_bc_basename, output_to_cache);
var emit_docs_resolved = emit_docs.resolve("docs", output_to_cache) catch |err| {
switch (emit_docs) {
.yes => |p| {
fatal("unable to open directory from argument '-femit-docs', '{s}': {s}", .{
p, @errorName(err),
});
},
.yes_default_path => {
fatal("unable to open directory 'docs': {s}", .{@errorName(err)});
},
.no => unreachable,
}
};
defer emit_docs_resolved.deinit();
const emit_docs_resolved = emit_docs.resolve("docs", output_to_cache);
const is_exe_or_dyn_lib = switch (create_module.resolved_options.output_mode) {
.Obj => false,
@ -3378,7 +3270,7 @@ fn buildOutputType(
// Note that cmake when targeting Windows will try to execute
// zig cc to make an executable and output an implib too.
const implib_eligible = is_exe_or_dyn_lib and
emit_bin_loc != null and target.os.tag == .windows;
emit_bin_resolved != .no and target.os.tag == .windows;
if (!implib_eligible) {
if (!emit_implib_arg_provided) {
emit_implib = .no;
@ -3387,22 +3279,18 @@ fn buildOutputType(
}
}
const default_implib_basename = try std.fmt.allocPrint(arena, "{s}.lib", .{root_name});
var emit_implib_resolved = switch (emit_implib) {
.no => Emit.Resolved{ .data = null, .dir = null },
.yes => |p| emit_implib.resolve(default_implib_basename, output_to_cache) catch |err| {
fatal("unable to open directory from argument '-femit-implib', '{s}': {s}", .{
p, @errorName(err),
const emit_implib_resolved: Compilation.CreateOptions.Emit = switch (emit_implib) {
.no => .no,
.yes => emit_implib.resolve(default_implib_basename, output_to_cache),
.yes_default_path => emit: {
if (output_to_cache != null) break :emit .yes_cache;
const p = try fs.path.join(arena, &.{
fs.path.dirname(emit_bin_resolved.yes_path) orelse ".",
default_implib_basename,
});
},
.yes_default_path => Emit.Resolved{
.data = Compilation.EmitLoc{
.directory = emit_bin_loc.?.directory,
.basename = default_implib_basename,
},
.dir = null,
break :emit .{ .yes_path = p };
},
};
defer emit_implib_resolved.deinit();
var thread_pool: ThreadPool = undefined;
try thread_pool.init(.{
@ -3456,7 +3344,7 @@ fn buildOutputType(
src.src_path = try dirs.local_cache.join(arena, &.{sub_path});
}
if (build_options.have_llvm and emit_asm != .no) {
if (build_options.have_llvm and emit_asm_resolved != .no) {
// LLVM has no way to set this non-globally.
const argv = [_][*:0]const u8{ "zig (LLVM option parsing)", "--x86-asm-syntax=intel" };
@import("codegen/llvm/bindings.zig").ParseCommandLineOptions(argv.len, &argv);
@ -3472,23 +3360,11 @@ fn buildOutputType(
fatal("--debug-incremental requires -fincremental", .{});
}
const disable_lld_caching = !output_to_cache;
const cache_mode: Compilation.CacheMode = b: {
// Once incremental compilation is the default, we'll want some smarter logic here,
// considering things like the backend in use and whether there's a ZCU.
if (output_to_cache == null) break :b .none;
if (incremental) break :b .incremental;
if (disable_lld_caching) break :b .incremental;
if (!create_module.resolved_options.have_zcu) break :b .whole;
// TODO: once we support incremental compilation for the LLVM backend
// via saving the LLVM module into a bitcode file and restoring it,
// along with compiler state, this clause can be removed so that
// incremental cache mode is used for LLVM backend too.
if (create_module.resolved_options.use_llvm) break :b .whole;
// Eventually, this default should be `.incremental`. However, since incremental
// compilation is currently an opt-in feature, it makes a strictly worse default cache mode
// than `.whole`.
// https://github.com/ziglang/zig/issues/21165
break :b .whole;
};
@ -3510,13 +3386,13 @@ fn buildOutputType(
.main_mod = main_mod,
.root_mod = root_mod,
.std_mod = std_mod,
.emit_bin = emit_bin_loc,
.emit_h = emit_h_resolved.data,
.emit_asm = emit_asm_resolved.data,
.emit_llvm_ir = emit_llvm_ir_resolved.data,
.emit_llvm_bc = emit_llvm_bc_resolved.data,
.emit_docs = emit_docs_resolved.data,
.emit_implib = emit_implib_resolved.data,
.emit_bin = emit_bin_resolved,
.emit_h = emit_h_resolved,
.emit_asm = emit_asm_resolved,
.emit_llvm_ir = emit_llvm_ir_resolved,
.emit_llvm_bc = emit_llvm_bc_resolved,
.emit_docs = emit_docs_resolved,
.emit_implib = emit_implib_resolved,
.lib_directories = create_module.lib_directories.items,
.rpath_list = create_module.rpath_list.items,
.symbol_wrap_set = symbol_wrap_set,
@ -3599,7 +3475,6 @@ fn buildOutputType(
.test_filters = test_filters.items,
.test_name_prefix = test_name_prefix,
.test_runner_path = test_runner_path,
.disable_lld_caching = disable_lld_caching,
.cache_mode = cache_mode,
.subsystem = subsystem,
.debug_compile_errors = debug_compile_errors,
@ -3744,13 +3619,8 @@ fn buildOutputType(
}) {
dev.checkAny(&.{ .run_command, .test_command });
if (test_exec_args.items.len == 0 and target.ofmt == .c) default_exec_args: {
if (test_exec_args.items.len == 0 and target.ofmt == .c and emit_bin_resolved != .no) {
// Default to using `zig run` to execute the produced .c code from `zig test`.
const c_code_loc = emit_bin_loc orelse break :default_exec_args;
const c_code_directory = c_code_loc.directory orelse comp.bin_file.?.emit.root_dir;
const c_code_path = try fs.path.join(arena, &[_][]const u8{
c_code_directory.path orelse ".", c_code_loc.basename,
});
try test_exec_args.appendSlice(arena, &.{ self_exe_path, "run" });
if (dirs.zig_lib.path) |p| {
try test_exec_args.appendSlice(arena, &.{ "-I", p });
@ -3775,7 +3645,7 @@ fn buildOutputType(
if (create_module.dynamic_linker) |dl| {
try test_exec_args.appendSlice(arena, &.{ "--dynamic-linker", dl });
}
try test_exec_args.append(arena, c_code_path);
try test_exec_args.append(arena, null); // placeholder for the path of the emitted C source file
}
try runOrTest(
@ -4354,12 +4224,22 @@ fn runOrTest(
runtime_args_start: ?usize,
link_libc: bool,
) !void {
const lf = comp.bin_file orelse return;
// A naive `directory.join` here will indeed get the correct path to the binary,
// however, in the case of cwd, we actually want `./foo` so that the path can be executed.
const exe_path = try fs.path.join(arena, &[_][]const u8{
lf.emit.root_dir.path orelse ".", lf.emit.sub_path,
});
const raw_emit_bin = comp.emit_bin orelse return;
const exe_path = switch (comp.cache_use) {
.none => p: {
if (fs.path.isAbsolute(raw_emit_bin)) break :p raw_emit_bin;
// Use `fs.path.join` to make a file in the cwd is still executed properly.
break :p try fs.path.join(arena, &.{
".",
raw_emit_bin,
});
},
.whole, .incremental => try comp.dirs.local_cache.join(arena, &.{
"o",
&Cache.binToHex(comp.digest.?),
raw_emit_bin,
}),
};
var argv = std.ArrayList([]const u8).init(gpa);
defer argv.deinit();
@ -5087,16 +4967,6 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
};
};
const exe_basename = try std.zig.binNameAlloc(arena, .{
.root_name = "build",
.target = resolved_target.result,
.output_mode = .Exe,
});
const emit_bin: Compilation.EmitLoc = .{
.directory = null, // Use the local zig-cache.
.basename = exe_basename,
};
process.raiseFileDescriptorLimit();
const cwd_path = try introspect.getResolvedCwd(arena);
@ -5357,8 +5227,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
.config = config,
.root_mod = root_mod,
.main_mod = build_mod,
.emit_bin = emit_bin,
.emit_h = null,
.emit_bin = .yes_cache,
.self_exe_path = self_exe_path,
.thread_pool = &thread_pool,
.verbose_cc = verbose_cc,
@ -5386,8 +5255,11 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
// Since incremental compilation isn't done yet, we use cache_mode = whole
// above, and thus the output file is already closed.
//try comp.makeBinFileExecutable();
child_argv.items[argv_index_exe] =
try dirs.local_cache.join(arena, &.{comp.cache_use.whole.bin_sub_path.?});
child_argv.items[argv_index_exe] = try dirs.local_cache.join(arena, &.{
"o",
&Cache.binToHex(comp.digest.?),
comp.emit_bin.?,
});
}
if (process.can_spawn) {
@ -5504,16 +5376,6 @@ fn jitCmd(
.is_explicit_dynamic_linker = false,
};
const exe_basename = try std.zig.binNameAlloc(arena, .{
.root_name = options.cmd_name,
.target = resolved_target.result,
.output_mode = .Exe,
});
const emit_bin: Compilation.EmitLoc = .{
.directory = null, // Use the global zig-cache.
.basename = exe_basename,
};
const self_exe_path = fs.selfExePathAlloc(arena) catch |err| {
fatal("unable to find self exe path: {s}", .{@errorName(err)});
};
@ -5605,8 +5467,7 @@ fn jitCmd(
.config = config,
.root_mod = root_mod,
.main_mod = root_mod,
.emit_bin = emit_bin,
.emit_h = null,
.emit_bin = .yes_cache,
.self_exe_path = self_exe_path,
.thread_pool = &thread_pool,
.cache_mode = .whole,
@ -5637,7 +5498,11 @@ fn jitCmd(
};
}
const exe_path = try dirs.global_cache.join(arena, &.{comp.cache_use.whole.bin_sub_path.?});
const exe_path = try dirs.global_cache.join(arena, &.{
"o",
&Cache.binToHex(comp.digest.?),
comp.emit_bin.?,
});
child_argv.appendAssumeCapacity(exe_path);
}

View File

@ -739,7 +739,7 @@ pub fn functionPointerMask(target: std.Target) ?u64 {
pub fn supportsTailCall(target: std.Target, backend: std.builtin.CompilerBackend) bool {
switch (backend) {
.stage1, .stage2_llvm => return @import("codegen/llvm.zig").supportsTailCall(target),
.stage2_llvm => return @import("codegen/llvm.zig").supportsTailCall(target),
.stage2_c => return true,
else => return false,
}
@ -850,7 +850,9 @@ pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, compt
},
.separate_thread => switch (backend) {
.stage2_llvm => false,
else => true,
.stage2_c, .stage2_wasm, .stage2_x86_64 => true,
// TODO: most self-hosted backends should be able to support this without too much work.
else => false,
},
};
}

View File

@ -517,7 +517,7 @@ uint32_t wasi_snapshot_preview1_fd_read(uint32_t fd, uint32_t iovs, uint32_t iov
case wasi_filetype_character_device: break;
case wasi_filetype_regular_file: break;
case wasi_filetype_directory: return wasi_errno_inval;
default: panic("unimplemented");
default: panic("unimplemented: fd_read special file");
}
size_t size = 0;
@ -629,7 +629,7 @@ uint32_t wasi_snapshot_preview1_fd_pwrite(uint32_t fd, uint32_t iovs, uint32_t i
case wasi_filetype_character_device: break;
case wasi_filetype_regular_file: break;
case wasi_filetype_directory: return wasi_errno_inval;
default: panic("unimplemented");
default: panic("unimplemented: fd_pwrite special file");
}
fpos_t pos;
@ -679,7 +679,7 @@ uint32_t wasi_snapshot_preview1_fd_filestat_set_times(uint32_t fd, uint64_t atim
fprintf(stderr, "wasi_snapshot_preview1_fd_filestat_set_times(%u, %llu, %llu, 0x%X)\n", fd, (unsigned long long)atim, (unsigned long long)mtim, fst_flags);
#endif
panic("unimplemented");
panic("unimplemented: fd_filestat_set_times");
return wasi_errno_success;
}
@ -703,7 +703,7 @@ uint32_t wasi_snapshot_preview1_environ_get(uint32_t environ, uint32_t environ_b
fprintf(stderr, "wasi_snapshot_preview1_environ_get()\n");
#endif
panic("unimplemented");
panic("unimplemented: environ_get");
return wasi_errno_success;
}
@ -757,7 +757,7 @@ uint32_t wasi_snapshot_preview1_fd_readdir(uint32_t fd, uint32_t buf, uint32_t b
fprintf(stderr, "wasi_snapshot_preview1_fd_readdir(%u, 0x%X, %u, %llu)\n", fd, buf, buf_len, (unsigned long long)cookie);
#endif
panic("unimplemented");
panic("unimplemented: fd_readdir");
return wasi_errno_success;
}
@ -774,7 +774,7 @@ uint32_t wasi_snapshot_preview1_fd_write(uint32_t fd, uint32_t iovs, uint32_t io
case wasi_filetype_character_device: break;
case wasi_filetype_regular_file: break;
case wasi_filetype_directory: return wasi_errno_inval;
default: panic("unimplemented");
default: panic("unimplemented: fd_write special file");
}
size_t size = 0;
@ -825,7 +825,7 @@ uint32_t wasi_snapshot_preview1_path_open(uint32_t fd, uint32_t dirflags, uint32
fds[fd_len].fdflags = fdflags;
switch (des[de].filetype) {
case wasi_filetype_directory: fds[fd_len].stream = NULL; break;
default: panic("unimplemented");
default: panic("unimplemented: path_open non-directory DirEntry");
}
fds[fd_len].fs_rights_inheriting = fs_rights_inheriting;
@ -943,7 +943,7 @@ uint32_t wasi_snapshot_preview1_path_unlink_file(uint32_t fd, uint32_t path, uin
enum wasi_errno lookup_errno = DirEntry_lookup(fd, 0, path_ptr, path_len, &de);
if (lookup_errno != wasi_errno_success) return lookup_errno;
if (des[de].filetype == wasi_filetype_directory) return wasi_errno_isdir;
if (des[de].filetype != wasi_filetype_regular_file) panic("unimplemented");
if (des[de].filetype != wasi_filetype_regular_file) panic("unimplemented: path_unlink_file special file");
DirEntry_unlink(de);
return wasi_errno_success;
}
@ -961,7 +961,7 @@ uint32_t wasi_snapshot_preview1_fd_pread(uint32_t fd, uint32_t iovs, uint32_t io
case wasi_filetype_character_device: break;
case wasi_filetype_regular_file: break;
case wasi_filetype_directory: return wasi_errno_inval;
default: panic("unimplemented");
default: panic("unimplemented: fd_pread special file");
}
fpos_t pos;
@ -975,7 +975,7 @@ uint32_t wasi_snapshot_preview1_fd_pread(uint32_t fd, uint32_t iovs, uint32_t io
if (fds[fd].stream != NULL)
read_size = fread(&m[load32_align2(&iovs_ptr[i].ptr)], 1, len, fds[fd].stream);
else
panic("unimplemented");
panic("unimplemented: fd_pread stream=NULL");
size += read_size;
if (read_size < len) break;
}
@ -1000,7 +1000,7 @@ uint32_t wasi_snapshot_preview1_fd_seek(uint32_t fd, uint64_t in_offset, uint32_
case wasi_filetype_character_device: break;
case wasi_filetype_regular_file: break;
case wasi_filetype_directory: return wasi_errno_inval;
default: panic("unimplemented");
default: panic("unimplemented: fd_seek special file");
}
if (fds[fd].stream == NULL) return wasi_errno_success;
@ -1035,7 +1035,7 @@ uint32_t wasi_snapshot_preview1_poll_oneoff(uint32_t in, uint32_t out, uint32_t
fprintf(stderr, "wasi_snapshot_preview1_poll_oneoff(%u)\n", nsubscriptions);
#endif
panic("unimplemented");
panic("unimplemented: poll_oneoff");
return wasi_errno_success;
}

View File

@ -211,7 +211,7 @@ fn testDuplicateDefinitions(b: *Build, opts: Options) *Step {
expectLinkErrors(exe, test_step, .{ .exact = &.{
"error: duplicate symbol definition: _strong",
"note: defined by /?/a.o",
"note: defined by /?/main.o",
"note: defined by /?/main_zcu.o",
} });
return test_step;
@ -2648,7 +2648,7 @@ fn testUnresolvedError(b: *Build, opts: Options) *Step {
expectLinkErrors(exe, test_step, .{ .exact = &.{
"error: undefined symbol: _foo",
"note: referenced by /?/a.o:_bar",
"note: referenced by /?/main.o:_main.main",
"note: referenced by /?/main_zcu.o:_main.main",
} });
} else {
expectLinkErrors(exe, test_step, .{ .exact = &.{

View File

@ -65,7 +65,7 @@ pub fn main() !void {
// This actually violates the DWARF specification (DWARF5 § 3.1.1, lines 24-27).
// The self-hosted backend uses the root Zig source file of the module (in compilance with the spec).
if (std.mem.eql(u8, file_name, "test") or
std.mem.eql(u8, file_name, "test.exe.obj") or
std.mem.eql(u8, file_name, "test_zcu.obj") or
std.mem.endsWith(u8, file_name, ".zig"))
{
try buf.appendSlice("[main_file]");

View File

@ -314,7 +314,7 @@ const Eval = struct {
const digest = body[@sizeOf(EbpHdr)..][0..Cache.bin_digest_len];
const result_dir = ".local-cache" ++ std.fs.path.sep_str ++ "o" ++ std.fs.path.sep_str ++ Cache.binToHex(digest.*);
const bin_name = try std.zig.binNameAlloc(arena, .{
const bin_name = try std.zig.EmitArtifact.bin.cacheName(arena, .{
.root_name = "root", // corresponds to the module name "root"
.target = eval.target.resolved,
.output_mode = .Exe,