Mirror of https://github.com/ziglang/zig.git, synced 2025-12-06 06:13:07 +00:00

Zcu: introduce PerThread and pass to all the functions

parent 8f20e81b88, commit 525f341f33
@@ -525,6 +525,7 @@ set(ZIG_STAGE2_SOURCES
     src/Type.zig
     src/Value.zig
     src/Zcu.zig
+    src/Zcu/PerThread.zig
     src/arch/aarch64/CodeGen.zig
     src/arch/aarch64/Emit.zig
     src/arch/aarch64/Mir.zig
@@ -9,17 +9,19 @@ run_queue: RunQueue = .{},
 is_running: bool = true,
 allocator: std.mem.Allocator,
 threads: []std.Thread,
+ids: std.AutoArrayHashMapUnmanaged(std.Thread.Id, void),

 const RunQueue = std.SinglyLinkedList(Runnable);
 const Runnable = struct {
     runFn: RunProto,
 };

-const RunProto = *const fn (*Runnable) void;
+const RunProto = *const fn (*Runnable, id: ?usize) void;

 pub const Options = struct {
     allocator: std.mem.Allocator,
     n_jobs: ?u32 = null,
+    track_ids: bool = false,
 };

 pub fn init(pool: *Pool, options: Options) !void {
@@ -28,6 +30,7 @@ pub fn init(pool: *Pool, options: Options) !void {
     pool.* = .{
         .allocator = allocator,
         .threads = &[_]std.Thread{},
+        .ids = .{},
     };

     if (builtin.single_threaded) {
@@ -35,6 +38,10 @@ pub fn init(pool: *Pool, options: Options) !void {
     }

     const thread_count = options.n_jobs orelse @max(1, std.Thread.getCpuCount() catch 1);
+    if (options.track_ids) {
+        try pool.ids.ensureTotalCapacity(allocator, 1 + thread_count);
+        pool.ids.putAssumeCapacityNoClobber(std.Thread.getCurrentId(), {});
+    }

     // kill and join any threads we spawned and free memory on error.
     pool.threads = try allocator.alloc(std.Thread, thread_count);
@@ -49,6 +56,7 @@ pub fn init(pool: *Pool, options: Options) !void {

 pub fn deinit(pool: *Pool) void {
     pool.join(pool.threads.len); // kill and join all threads.
+    pool.ids.deinit(pool.allocator);
     pool.* = undefined;
 }

@@ -96,7 +104,7 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args
         run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } },
         wait_group: *WaitGroup,

-        fn runFn(runnable: *Runnable) void {
+        fn runFn(runnable: *Runnable, _: ?usize) void {
             const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable);
             const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node));
             @call(.auto, func, closure.arguments);
@@ -134,6 +142,70 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args
     pool.cond.signal();
 }

+/// Runs `func` in the thread pool, calling `WaitGroup.start` beforehand, and
+/// `WaitGroup.finish` after it returns.
+///
+/// The first argument passed to `func` is a dense `usize` thread id, the rest
+/// of the arguments are passed from `args`. Requires the pool to have been
+/// initialized with `.track_ids = true`.
+///
+/// In the case that queuing the function call fails to allocate memory, or the
+/// target is single-threaded, the function is called directly.
+pub fn spawnWgId(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args: anytype) void {
+    wait_group.start();
+
+    if (builtin.single_threaded) {
+        @call(.auto, func, .{0} ++ args);
+        wait_group.finish();
+        return;
+    }
+
+    const Args = @TypeOf(args);
+    const Closure = struct {
+        arguments: Args,
+        pool: *Pool,
+        run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } },
+        wait_group: *WaitGroup,
+
+        fn runFn(runnable: *Runnable, id: ?usize) void {
+            const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable);
+            const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node));
+            @call(.auto, func, .{id.?} ++ closure.arguments);
+            closure.wait_group.finish();
+
+            // The thread pool's allocator is protected by the mutex.
+            const mutex = &closure.pool.mutex;
+            mutex.lock();
+            defer mutex.unlock();
+
+            closure.pool.allocator.destroy(closure);
+        }
+    };
+
+    {
+        pool.mutex.lock();
+
+        const closure = pool.allocator.create(Closure) catch {
+            const id = pool.ids.getIndex(std.Thread.getCurrentId());
+            pool.mutex.unlock();
+            @call(.auto, func, .{id.?} ++ args);
+            wait_group.finish();
+            return;
+        };
+        closure.* = .{
+            .arguments = args,
+            .pool = pool,
+            .wait_group = wait_group,
+        };
+
+        pool.run_queue.prepend(&closure.run_node);
+        pool.mutex.unlock();
+    }
+
+    // Notify waiting threads outside the lock to try and keep the critical section small.
+    pool.cond.signal();
+}
+
 pub fn spawn(pool: *Pool, comptime func: anytype, args: anytype) !void {
     if (builtin.single_threaded) {
         @call(.auto, func, args);
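Assuming this diff is against `std.Thread.Pool` (the fields and functions shown match it), the thread-pool pieces compose as below: opt into id tracking at `init`, then receive the dense id as the first argument of each spawned function. The `work` function, item count, and printing are illustrative, not part of the commit:

```zig
const std = @import("std");

fn work(id: usize, results: []u64, item: u64) void {
    // `id` is dense: 0 is the registering (main) thread, 1..n are workers,
    // so it can index a per-thread slot with no synchronization.
    results[id] += item;
}

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var pool: std.Thread.Pool = undefined;
    try pool.init(.{ .allocator = gpa, .track_ids = true });
    defer pool.deinit();

    // One accumulator slot per dense id: the workers plus the main thread.
    const results = try gpa.alloc(u64, pool.threads.len + 1);
    defer gpa.free(results);
    @memset(results, 0);

    var wait_group: std.Thread.WaitGroup = .{};
    for (0..100) |i| pool.spawnWgId(&wait_group, work, .{ results, @as(u64, i) });
    pool.waitAndWork(&wait_group);

    var total: u64 = 0;
    for (results) |n| total += n;
    std.debug.print("sum = {d}\n", .{total});
}
```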
@@ -181,14 +253,16 @@ fn worker(pool: *Pool) void {
     pool.mutex.lock();
     defer pool.mutex.unlock();

+    const id = if (pool.ids.count() > 0) pool.ids.count() else null;
+    if (id) |_| pool.ids.putAssumeCapacityNoClobber(std.Thread.getCurrentId(), {});
+
     while (true) {
         while (pool.run_queue.popFirst()) |run_node| {
             // Temporarily unlock the mutex in order to execute the run_node
             pool.mutex.unlock();
             defer pool.mutex.lock();

-            const runFn = run_node.data.runFn;
-            runFn(&run_node.data);
+            run_node.data.runFn(&run_node.data, id);
         }

         // Stop executing instead of waiting if the thread pool is no longer running.
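Each worker claims its id under the pool mutex: `ids.count()` doubles as the "tracking enabled" flag and as the next dense index, since `init` pre-registered the main thread at index 0 and reserved `1 + thread_count` capacity up front, letting workers register with `putAssumeCapacityNoClobber` without allocating inside the lock. The scheme leans on `AutoArrayHashMap` preserving insertion order; a small self-contained check of that property (the thread-id numbers here are made up):

```zig
const std = @import("std");

test "dense ids via array hash map insertion order" {
    const gpa = std.testing.allocator;
    var ids: std.AutoArrayHashMapUnmanaged(std.Thread.Id, void) = .{};
    defer ids.deinit(gpa);

    // The pool relies on AutoArrayHashMap preserving insertion order:
    // the k-th registered thread gets dense index k.
    try ids.put(gpa, 11, {}); // e.g. the main thread
    try ids.put(gpa, 22, {}); // first worker
    try ids.put(gpa, 33, {}); // second worker
    try std.testing.expectEqual(@as(?usize, 1), ids.getIndex(22));
}
```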
@@ -201,16 +275,18 @@ fn worker(pool: *Pool) void {
 }

 pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void {
+    var id: ?usize = null;
+
     while (!wait_group.isDone()) {
-        if (blk: {
-            pool.mutex.lock();
-            defer pool.mutex.unlock();
-            break :blk pool.run_queue.popFirst();
-        }) |run_node| {
-            run_node.data.runFn(&run_node.data);
+        pool.mutex.lock();
+        if (pool.run_queue.popFirst()) |run_node| {
+            id = id orelse pool.ids.getIndex(std.Thread.getCurrentId());
+            pool.mutex.unlock();
+            run_node.data.runFn(&run_node.data, id);
             continue;
         }

+        pool.mutex.unlock();
         wait_group.wait();
         return;
     }

@@ -1563,12 +1563,12 @@ pub fn internedToRef(ip_index: InternPool.Index) Inst.Ref {
 }

 /// Returns `null` if runtime-known.
-pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value {
+pub fn value(air: Air, inst: Inst.Ref, pt: Zcu.PerThread) !?Value {
     if (inst.toInterned()) |ip_index| {
         return Value.fromInterned(ip_index);
     }
     const index = inst.toIndex().?;
-    return air.typeOfIndex(index, &mod.intern_pool).onePossibleValue(mod);
+    return air.typeOfIndex(index, &pt.zcu.intern_pool).onePossibleValue(pt);
 }

 pub fn nullTerminatedString(air: Air, index: usize) [:0]const u8 {

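The new `pt: Zcu.PerThread` parameter bundles the compilation (`*Zcu`) with the dense id of the thread doing the work. Below is a minimal sketch of the type, inferred only from the call sites in this commit (`.{ .zcu = zcu, .tid = .main }` and `.tid = @enumFromInt(tid)`); the real definition lives in the newly added `src/Zcu/PerThread.zig` and also hosts the methods being moved off `Zcu` (`populateTestFunctions`, `ensureDeclAnalyzed`, and so on):

```zig
//! Sketch of src/Zcu/PerThread.zig, inferred from usage; not the commit's code.
const Zcu = @import("../Zcu.zig");

zcu: *Zcu,
tid: Id,

/// Dense thread id as handed out by the thread pool above: the compiler's
/// main thread is 0, worker threads follow. The tag type is an assumption.
pub const Id = enum(usize) {
    main = 0,
    _,
};
```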
@@ -2146,6 +2146,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
     try comp.performAllTheWork(main_progress_node);

     if (comp.module) |zcu| {
+        const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = .main };
+
         if (build_options.enable_debug_extensions and comp.verbose_intern_pool) {
             std.debug.print("intern pool stats for '{s}':\n", .{
                 comp.root_name,
@@ -2165,10 +2167,10 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
             // The `test_functions` decl has been intentionally postponed until now,
             // at which point we must populate it with the list of test functions that
             // have been discovered and not filtered out.
-            try zcu.populateTestFunctions(main_progress_node);
+            try pt.populateTestFunctions(main_progress_node);
         }

-        try zcu.processExports();
+        try pt.processExports();
     }

     if (comp.totalErrorCount() != 0) {
@@ -2247,7 +2249,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
         }
     }

-    try flush(comp, arena, main_progress_node);
+    try flush(comp, arena, .main, main_progress_node);
     if (comp.totalErrorCount() != 0) return;

     // Failure here only means an unnecessary cache miss.
@@ -2264,16 +2266,16 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
             whole.lock = man.toOwnedLock();
         },
         .incremental => {
-            try flush(comp, arena, main_progress_node);
+            try flush(comp, arena, .main, main_progress_node);
             if (comp.totalErrorCount() != 0) return;
         },
     }
 }

-fn flush(comp: *Compilation, arena: Allocator, prog_node: std.Progress.Node) !void {
+fn flush(comp: *Compilation, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
     if (comp.bin_file) |lf| {
         // This is needed before reading the error flags.
-        lf.flush(arena, prog_node) catch |err| switch (err) {
+        lf.flush(arena, tid, prog_node) catch |err| switch (err) {
             error.FlushFailure => {}, // error reported through link_error_flags
             error.LLDReportedFailure => {}, // error reported via lockAndParseLldStderr
             else => |e| return e,
@@ -3419,7 +3421,7 @@ pub fn performAllTheWork(

     while (true) {
         if (comp.work_queue.readItem()) |work_item| {
-            try processOneJob(comp, work_item, main_progress_node);
+            try processOneJob(0, comp, work_item, main_progress_node);
             continue;
         }
         if (comp.module) |zcu| {
@@ -3447,11 +3449,11 @@ pub fn performAllTheWork(
     }
 }

-fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void {
+fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void {
     switch (job) {
         .codegen_decl => |decl_index| {
-            const zcu = comp.module.?;
-            const decl = zcu.declPtr(decl_index);
+            const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+            const decl = pt.zcu.declPtr(decl_index);

             switch (decl.analysis) {
                 .unreferenced => unreachable,
@@ -3469,7 +3471,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void {

                     assert(decl.has_tv);

-                    try zcu.linkerUpdateDecl(decl_index);
+                    try pt.linkerUpdateDecl(decl_index);
                     return;
                 },
             }
@@ -3478,16 +3480,16 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void {
             const named_frame = tracy.namedFrame("codegen_func");
             defer named_frame.end();

-            const zcu = comp.module.?;
+            const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
             // This call takes ownership of `func.air`.
-            try zcu.linkerUpdateFunc(func.func, func.air);
+            try pt.linkerUpdateFunc(func.func, func.air);
         },
         .analyze_func => |func| {
             const named_frame = tracy.namedFrame("analyze_func");
             defer named_frame.end();

-            const zcu = comp.module.?;
-            zcu.ensureFuncBodyAnalyzed(func) catch |err| switch (err) {
+            const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+            pt.ensureFuncBodyAnalyzed(func) catch |err| switch (err) {
                 error.OutOfMemory => return error.OutOfMemory,
                 error.AnalysisFail => return,
             };
@@ -3496,8 +3498,8 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void {
             if (true) @panic("regressed compiler feature: emit-h should hook into updateExports, " ++
                 "not decl analysis, which is too early to know about @export calls");

-            const zcu = comp.module.?;
-            const decl = zcu.declPtr(decl_index);
+            const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+            const decl = pt.zcu.declPtr(decl_index);

             switch (decl.analysis) {
                 .unreferenced => unreachable,
@@ -3515,7 +3517,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void {
             defer named_frame.end();

             const gpa = comp.gpa;
-            const emit_h = zcu.emit_h.?;
+            const emit_h = pt.zcu.emit_h.?;
             _ = try emit_h.decl_table.getOrPut(gpa, decl_index);
             const decl_emit_h = emit_h.declPtr(decl_index);
             const fwd_decl = &decl_emit_h.fwd_decl;
@@ -3523,11 +3525,11 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void {
             var ctypes_arena = std.heap.ArenaAllocator.init(gpa);
             defer ctypes_arena.deinit();

-            const file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu);
+            const file_scope = pt.zcu.namespacePtr(decl.src_namespace).fileScope(pt.zcu);

             var dg: c_codegen.DeclGen = .{
                 .gpa = gpa,
-                .zcu = zcu,
+                .pt = pt,
                 .mod = file_scope.mod,
                 .error_msg = null,
                 .pass = .{ .decl = decl_index },
@@ -3557,25 +3559,25 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void {
             }
         },
         .analyze_decl => |decl_index| {
-            const zcu = comp.module.?;
-            zcu.ensureDeclAnalyzed(decl_index) catch |err| switch (err) {
+            const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+            pt.ensureDeclAnalyzed(decl_index) catch |err| switch (err) {
                 error.OutOfMemory => return error.OutOfMemory,
                 error.AnalysisFail => return,
             };
-            const decl = zcu.declPtr(decl_index);
+            const decl = pt.zcu.declPtr(decl_index);
             if (decl.kind == .@"test" and comp.config.is_test) {
                 // Tests are always emitted in test binaries. The decl_refs are created by
                 // Zcu.populateTestFunctions, but this will not queue body analysis, so do
                 // that now.
-                try zcu.ensureFuncBodyAnalysisQueued(decl.val.toIntern());
+                try pt.zcu.ensureFuncBodyAnalysisQueued(decl.val.toIntern());
             }
         },
         .resolve_type_fully => |ty| {
             const named_frame = tracy.namedFrame("resolve_type_fully");
             defer named_frame.end();

-            const zcu = comp.module.?;
-            Type.fromInterned(ty).resolveFully(zcu) catch |err| switch (err) {
+            const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+            Type.fromInterned(ty).resolveFully(pt) catch |err| switch (err) {
                 error.OutOfMemory => return error.OutOfMemory,
                 error.AnalysisFail => return,
             };
@@ -3603,12 +3605,12 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void {
                 try zcu.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }));
             };
         },
-        .analyze_mod => |pkg| {
+        .analyze_mod => |mod| {
             const named_frame = tracy.namedFrame("analyze_mod");
             defer named_frame.end();

-            const zcu = comp.module.?;
-            zcu.semaPkg(pkg) catch |err| switch (err) {
+            const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+            pt.semaPkg(mod) catch |err| switch (err) {
                 error.OutOfMemory => return error.OutOfMemory,
                 error.AnalysisFail => return,
             };

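Every job branch above opens with the same two-step: fetch the `*Zcu` out of `comp.module` and pair it with the numeric `tid` the worker was given. Factored out, the repeated pattern amounts to this hypothetical helper (the commit itself inlines it at every use site):

```zig
// Hypothetical helper, not in the commit: the per-job PerThread construction.
fn jobPerThread(comp: *Compilation, tid: usize) Zcu.PerThread {
    return .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
}
```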
@@ -4548,17 +4548,14 @@ pub fn init(ip: *InternPool, gpa: Allocator) !void {

     // This inserts all the statically-known values into the intern pool in the
     // order expected.
-    for (static_keys[0..@intFromEnum(Index.empty_struct_type)]) |key| {
-        _ = ip.get(gpa, key) catch unreachable;
-    }
-    _ = ip.getAnonStructType(gpa, .{
-        .types = &.{},
-        .names = &.{},
-        .values = &.{},
-    }) catch unreachable;
-    for (static_keys[@intFromEnum(Index.empty_struct_type) + 1 ..]) |key| {
-        _ = ip.get(gpa, key) catch unreachable;
-    }
+    for (&static_keys, 0..) |key, key_index| switch (@as(Index, @enumFromInt(key_index))) {
+        .empty_struct_type => assert(try ip.getAnonStructType(gpa, .main, .{
+            .types = &.{},
+            .names = &.{},
+            .values = &.{},
+        }) == .empty_struct_type),
+        else => |expected_index| assert(try ip.get(gpa, .main, key) == expected_index),
+    };

     if (std.debug.runtime_safety) {
         // Sanity check.
@@ -5242,7 +5239,7 @@ fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key {
     } };
 }

-pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
+pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) Allocator.Error!Index {
     const adapter: KeyAdapter = .{ .intern_pool = ip };
     const gop = try ip.map.getOrPutAdapted(gpa, key, adapter);
     if (gop.found_existing) return @enumFromInt(gop.index);
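For `InternPool`, the migration is mechanical: every `get`-family function gains a `Zcu.PerThread.Id` argument between the allocator and the key, and callers pass `.main` wherever they are known to run single-threaded. Many receivers discard it for now (`_: Zcu.PerThread.Id`), reserving room to shard the pool's mutable state per thread later. Modeled on the updated `basic usage` test further down, a call now looks like this (sketch):

```zig
test "tid-threaded get (sketch)" {
    const gpa = std.testing.allocator;
    var ip: InternPool = .{};
    defer ip.deinit(gpa);

    // The new middle argument names the calling thread.
    const u16_type = try ip.get(gpa, .main, .{ .int_type = .{
        .signedness = .unsigned,
        .bits = 16,
    } });
    _ = u16_type;
}
```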
@@ -5266,8 +5263,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             _ = ip.map.pop();
             var new_key = key;
             new_key.ptr_type.flags.size = .Many;
-            const ptr_type_index = try ip.get(gpa, new_key);
+            const ptr_type_index = try ip.get(gpa, tid, new_key);
             assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing);

             try ip.items.ensureUnusedCapacity(gpa, 1);
             ip.items.appendAssumeCapacity(.{
                 .tag = .type_slice,
@@ -5519,7 +5517,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                 else => unreachable,
             }
             _ = ip.map.pop();
-            const index_index = try ip.get(gpa, .{ .int = .{
+            const index_index = try ip.get(gpa, tid, .{ .int = .{
                 .ty = .usize_type,
                 .storage = .{ .u64 = base_index.index },
             } });
@@ -5932,7 +5930,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             const elem = switch (aggregate.storage) {
                 .bytes => |bytes| elem: {
                     _ = ip.map.pop();
-                    const elem = try ip.get(gpa, .{ .int = .{
+                    const elem = try ip.get(gpa, tid, .{ .int = .{
                         .ty = .u8_type,
                         .storage = .{ .u64 = bytes.at(0, ip) },
                     } });
@@ -6074,7 +6072,12 @@ pub const UnionTypeInit = struct {
     },
 };

-pub fn getUnionType(ip: *InternPool, gpa: Allocator, ini: UnionTypeInit) Allocator.Error!WipNamespaceType.Result {
+pub fn getUnionType(
+    ip: *InternPool,
+    gpa: Allocator,
+    _: Zcu.PerThread.Id,
+    ini: UnionTypeInit,
+) Allocator.Error!WipNamespaceType.Result {
     const adapter: KeyAdapter = .{ .intern_pool = ip };
     const gop = try ip.map.getOrPutAdapted(gpa, Key{ .union_type = switch (ini.key) {
         .declared => |d| .{ .declared = .{
@@ -6221,6 +6224,7 @@ pub const StructTypeInit = struct {
 pub fn getStructType(
     ip: *InternPool,
     gpa: Allocator,
+    _: Zcu.PerThread.Id,
     ini: StructTypeInit,
 ) Allocator.Error!WipNamespaceType.Result {
     const adapter: KeyAdapter = .{ .intern_pool = ip };
@@ -6396,7 +6400,12 @@ pub const AnonStructTypeInit = struct {
     values: []const Index,
 };

-pub fn getAnonStructType(ip: *InternPool, gpa: Allocator, ini: AnonStructTypeInit) Allocator.Error!Index {
+pub fn getAnonStructType(
+    ip: *InternPool,
+    gpa: Allocator,
+    _: Zcu.PerThread.Id,
+    ini: AnonStructTypeInit,
+) Allocator.Error!Index {
     assert(ini.types.len == ini.values.len);
     for (ini.types) |elem| assert(elem != .none);

@@ -6450,7 +6459,12 @@ pub const GetFuncTypeKey = struct {
     addrspace_is_generic: bool = false,
 };

-pub fn getFuncType(ip: *InternPool, gpa: Allocator, key: GetFuncTypeKey) Allocator.Error!Index {
+pub fn getFuncType(
+    ip: *InternPool,
+    gpa: Allocator,
+    _: Zcu.PerThread.Id,
+    key: GetFuncTypeKey,
+) Allocator.Error!Index {
     // Validate input parameters.
     assert(key.return_type != .none);
     for (key.param_types) |param_type| assert(param_type != .none);
@@ -6503,7 +6517,12 @@ pub fn getFuncType(ip: *InternPool, gpa: Allocator, key: GetFuncTypeKey) Allocator.Error!Index {
     return @enumFromInt(ip.items.len - 1);
 }

-pub fn getExternFunc(ip: *InternPool, gpa: Allocator, key: Key.ExternFunc) Allocator.Error!Index {
+pub fn getExternFunc(
+    ip: *InternPool,
+    gpa: Allocator,
+    _: Zcu.PerThread.Id,
+    key: Key.ExternFunc,
+) Allocator.Error!Index {
     const adapter: KeyAdapter = .{ .intern_pool = ip };
     const gop = try ip.map.getOrPutAdapted(gpa, Key{ .extern_func = key }, adapter);
     if (gop.found_existing) return @enumFromInt(gop.index);
@@ -6531,7 +6550,12 @@ pub const GetFuncDeclKey = struct {
     is_noinline: bool,
 };

-pub fn getFuncDecl(ip: *InternPool, gpa: Allocator, key: GetFuncDeclKey) Allocator.Error!Index {
+pub fn getFuncDecl(
+    ip: *InternPool,
+    gpa: Allocator,
+    _: Zcu.PerThread.Id,
+    key: GetFuncDeclKey,
+) Allocator.Error!Index {
     // The strategy here is to add the function type unconditionally, then to
     // ask if it already exists, and if so, revert the lengths of the mutated
     // arrays. This is similar to what `getOrPutTrailingString` does.
@@ -6598,7 +6622,12 @@ pub const GetFuncDeclIesKey = struct {
     rbrace_column: u32,
 };

-pub fn getFuncDeclIes(ip: *InternPool, gpa: Allocator, key: GetFuncDeclIesKey) Allocator.Error!Index {
+pub fn getFuncDeclIes(
+    ip: *InternPool,
+    gpa: Allocator,
+    _: Zcu.PerThread.Id,
+    key: GetFuncDeclIesKey,
+) Allocator.Error!Index {
     // Validate input parameters.
     assert(key.bare_return_type != .none);
     for (key.param_types) |param_type| assert(param_type != .none);
@@ -6707,6 +6736,7 @@ pub fn getFuncDeclIes(ip: *InternPool, gpa: Allocator, key: GetFuncDeclIesKey) Allocator.Error!Index {
 pub fn getErrorSetType(
     ip: *InternPool,
     gpa: Allocator,
+    _: Zcu.PerThread.Id,
     names: []const NullTerminatedString,
 ) Allocator.Error!Index {
     assert(std.sort.isSorted(NullTerminatedString, names, {}, NullTerminatedString.indexLessThan));
@@ -6770,11 +6800,16 @@ pub const GetFuncInstanceKey = struct {
     inferred_error_set: bool,
 };

-pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, arg: GetFuncInstanceKey) Allocator.Error!Index {
+pub fn getFuncInstance(
+    ip: *InternPool,
+    gpa: Allocator,
+    tid: Zcu.PerThread.Id,
+    arg: GetFuncInstanceKey,
+) Allocator.Error!Index {
     if (arg.inferred_error_set)
-        return getFuncInstanceIes(ip, gpa, arg);
+        return getFuncInstanceIes(ip, gpa, tid, arg);

-    const func_ty = try ip.getFuncType(gpa, .{
+    const func_ty = try ip.getFuncType(gpa, tid, .{
         .param_types = arg.param_types,
         .return_type = arg.bare_return_type,
         .noalias_bits = arg.noalias_bits,
@@ -6844,6 +6879,7 @@ pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, arg: GetFuncInstanceKey) Allocator.Error!Index {
 pub fn getFuncInstanceIes(
     ip: *InternPool,
     gpa: Allocator,
+    _: Zcu.PerThread.Id,
     arg: GetFuncInstanceKey,
 ) Allocator.Error!Index {
     // Validate input parameters.
@@ -6955,7 +6991,6 @@ pub fn getFuncInstanceIes(
     assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{
         .func_type = extraFuncType(ip, func_type_extra_index),
     }, adapter).found_existing);

     return finishFuncInstance(
         ip,
         gpa,
@@ -7096,6 +7131,7 @@ pub const WipEnumType = struct {
 pub fn getEnumType(
     ip: *InternPool,
     gpa: Allocator,
+    _: Zcu.PerThread.Id,
     ini: EnumTypeInit,
 ) Allocator.Error!WipEnumType.Result {
     const adapter: KeyAdapter = .{ .intern_pool = ip };
@@ -7172,7 +7208,7 @@ pub fn getEnumType(
         break :m values_map.toOptional();
     };
     errdefer if (ini.has_values) {
-        _ = ip.map.pop();
+        _ = ip.maps.pop();
     };

     try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len +
@@ -7245,7 +7281,12 @@ const GeneratedTagEnumTypeInit = struct {
 /// Creates an enum type which was automatically-generated as the tag type of a
 /// `union` with no explicit tag type. Since this is only called once per union
 /// type, it asserts that no matching type yet exists.
-pub fn getGeneratedTagEnumType(ip: *InternPool, gpa: Allocator, ini: GeneratedTagEnumTypeInit) Allocator.Error!Index {
+pub fn getGeneratedTagEnumType(
+    ip: *InternPool,
+    gpa: Allocator,
+    _: Zcu.PerThread.Id,
+    ini: GeneratedTagEnumTypeInit,
+) Allocator.Error!Index {
     assert(ip.isUnion(ini.owner_union_ty));
     assert(ip.isIntegerType(ini.tag_ty));
     for (ini.values) |val| assert(ip.typeOf(val) == ini.tag_ty);
@@ -7342,7 +7383,12 @@ pub const OpaqueTypeInit = struct {
     },
 };

-pub fn getOpaqueType(ip: *InternPool, gpa: Allocator, ini: OpaqueTypeInit) Allocator.Error!WipNamespaceType.Result {
+pub fn getOpaqueType(
+    ip: *InternPool,
+    gpa: Allocator,
+    _: Zcu.PerThread.Id,
+    ini: OpaqueTypeInit,
+) Allocator.Error!WipNamespaceType.Result {
     const adapter: KeyAdapter = .{ .intern_pool = ip };
     const gop = try ip.map.getOrPutAdapted(gpa, Key{ .opaque_type = switch (ini.key) {
         .declared => |d| .{ .declared = .{
@@ -7680,23 +7726,23 @@ test "basic usage" {
     var ip: InternPool = .{};
     defer ip.deinit(gpa);

-    const i32_type = try ip.get(gpa, .{ .int_type = .{
+    const i32_type = try ip.get(gpa, .main, .{ .int_type = .{
         .signedness = .signed,
         .bits = 32,
     } });
-    const array_i32 = try ip.get(gpa, .{ .array_type = .{
+    const array_i32 = try ip.get(gpa, .main, .{ .array_type = .{
         .len = 10,
         .child = i32_type,
         .sentinel = .none,
     } });

-    const another_i32_type = try ip.get(gpa, .{ .int_type = .{
+    const another_i32_type = try ip.get(gpa, .main, .{ .int_type = .{
         .signedness = .signed,
         .bits = 32,
     } });
     try std.testing.expect(another_i32_type == i32_type);

-    const another_array_i32 = try ip.get(gpa, .{ .array_type = .{
+    const another_array_i32 = try ip.get(gpa, .main, .{ .array_type = .{
         .len = 10,
         .child = i32_type,
         .sentinel = .none,
@@ -7766,48 +7812,54 @@ pub fn sliceLen(ip: *const InternPool, i: Index) Index {
 /// * payload => error union
 /// * fn <=> fn
 /// * aggregate <=> aggregate (where children can also be coerced)
-pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
+pub fn getCoerced(
+    ip: *InternPool,
+    gpa: Allocator,
+    tid: Zcu.PerThread.Id,
+    val: Index,
+    new_ty: Index,
+) Allocator.Error!Index {
     const old_ty = ip.typeOf(val);
     if (old_ty == new_ty) return val;

     const tags = ip.items.items(.tag);

     switch (val) {
-        .undef => return ip.get(gpa, .{ .undef = new_ty }),
+        .undef => return ip.get(gpa, tid, .{ .undef = new_ty }),
         .null_value => {
-            if (ip.isOptionalType(new_ty)) return ip.get(gpa, .{ .opt = .{
+            if (ip.isOptionalType(new_ty)) return ip.get(gpa, tid, .{ .opt = .{
                 .ty = new_ty,
                 .val = .none,
             } });

             if (ip.isPointerType(new_ty)) switch (ip.indexToKey(new_ty).ptr_type.flags.size) {
-                .One, .Many, .C => return ip.get(gpa, .{ .ptr = .{
+                .One, .Many, .C => return ip.get(gpa, tid, .{ .ptr = .{
                     .ty = new_ty,
                     .base_addr = .int,
                     .byte_offset = 0,
                 } }),
-                .Slice => return ip.get(gpa, .{ .slice = .{
+                .Slice => return ip.get(gpa, tid, .{ .slice = .{
                     .ty = new_ty,
-                    .ptr = try ip.get(gpa, .{ .ptr = .{
+                    .ptr = try ip.get(gpa, tid, .{ .ptr = .{
                         .ty = ip.slicePtrType(new_ty),
                         .base_addr = .int,
                         .byte_offset = 0,
                     } }),
-                    .len = try ip.get(gpa, .{ .undef = .usize_type }),
+                    .len = try ip.get(gpa, tid, .{ .undef = .usize_type }),
                 } }),
             };
         },
         else => switch (tags[@intFromEnum(val)]) {
-            .func_decl => return getCoercedFuncDecl(ip, gpa, val, new_ty),
-            .func_instance => return getCoercedFuncInstance(ip, gpa, val, new_ty),
+            .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty),
+            .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty),
             .func_coerced => {
                 const extra_index = ip.items.items(.data)[@intFromEnum(val)];
                 const func: Index = @enumFromInt(
                     ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncCoerced, "func").?],
                 );
                 switch (tags[@intFromEnum(func)]) {
-                    .func_decl => return getCoercedFuncDecl(ip, gpa, val, new_ty),
-                    .func_instance => return getCoercedFuncInstance(ip, gpa, val, new_ty),
+                    .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty),
+                    .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty),
                     else => unreachable,
                 }
             },
@@ -7816,9 +7868,9 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
     }

     switch (ip.indexToKey(val)) {
-        .undef => return ip.get(gpa, .{ .undef = new_ty }),
+        .undef => return ip.get(gpa, tid, .{ .undef = new_ty }),
         .extern_func => |extern_func| if (ip.isFunctionType(new_ty))
-            return ip.get(gpa, .{ .extern_func = .{
+            return ip.get(gpa, tid, .{ .extern_func = .{
                 .ty = new_ty,
                 .decl = extern_func.decl,
                 .lib_name = extern_func.lib_name,
@@ -7827,12 +7879,12 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
         .func => unreachable,

         .int => |int| switch (ip.indexToKey(new_ty)) {
-            .enum_type => return ip.get(gpa, .{ .enum_tag = .{
+            .enum_type => return ip.get(gpa, tid, .{ .enum_tag = .{
                 .ty = new_ty,
-                .int = try ip.getCoerced(gpa, val, ip.loadEnumType(new_ty).tag_ty),
+                .int = try ip.getCoerced(gpa, tid, val, ip.loadEnumType(new_ty).tag_ty),
             } }),
             .ptr_type => switch (int.storage) {
-                inline .u64, .i64 => |int_val| return ip.get(gpa, .{ .ptr = .{
+                inline .u64, .i64 => |int_val| return ip.get(gpa, tid, .{ .ptr = .{
                     .ty = new_ty,
                     .base_addr = .int,
                     .byte_offset = @intCast(int_val),
@@ -7841,7 +7893,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
                 .lazy_align, .lazy_size => {},
             },
             else => if (ip.isIntegerType(new_ty))
-                return getCoercedInts(ip, gpa, int, new_ty),
+                return ip.getCoercedInts(gpa, tid, int, new_ty),
         },
         .float => |float| switch (ip.indexToKey(new_ty)) {
             .simple_type => |simple| switch (simple) {
@@ -7852,7 +7904,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
                 .f128,
                 .c_longdouble,
                 .comptime_float,
-                => return ip.get(gpa, .{ .float = .{
+                => return ip.get(gpa, tid, .{ .float = .{
                     .ty = new_ty,
                     .storage = float.storage,
                 } }),
@@ -7861,17 +7913,17 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
             else => {},
         },
         .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty))
-            return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty),
+            return ip.getCoercedInts(gpa, tid, ip.indexToKey(enum_tag.int).int, new_ty),
         .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) {
             .enum_type => {
                 const enum_type = ip.loadEnumType(new_ty);
                 const index = enum_type.nameIndex(ip, enum_literal).?;
-                return ip.get(gpa, .{ .enum_tag = .{
+                return ip.get(gpa, tid, .{ .enum_tag = .{
                     .ty = new_ty,
                     .int = if (enum_type.values.len != 0)
                         enum_type.values.get(ip)[index]
                     else
-                        try ip.get(gpa, .{ .int = .{
+                        try ip.get(gpa, tid, .{ .int = .{
                             .ty = enum_type.tag_ty,
                             .storage = .{ .u64 = index },
                         } }),
@@ -7880,22 +7932,22 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
             else => {},
         },
         .slice => |slice| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size == .Slice)
-            return ip.get(gpa, .{ .slice = .{
+            return ip.get(gpa, tid, .{ .slice = .{
                 .ty = new_ty,
-                .ptr = try ip.getCoerced(gpa, slice.ptr, ip.slicePtrType(new_ty)),
+                .ptr = try ip.getCoerced(gpa, tid, slice.ptr, ip.slicePtrType(new_ty)),
                 .len = slice.len,
             } })
         else if (ip.isIntegerType(new_ty))
-            return ip.getCoerced(gpa, slice.ptr, new_ty),
+            return ip.getCoerced(gpa, tid, slice.ptr, new_ty),
         .ptr => |ptr| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size != .Slice)
-            return ip.get(gpa, .{ .ptr = .{
+            return ip.get(gpa, tid, .{ .ptr = .{
                 .ty = new_ty,
                 .base_addr = ptr.base_addr,
                 .byte_offset = ptr.byte_offset,
             } })
         else if (ip.isIntegerType(new_ty))
             switch (ptr.base_addr) {
-                .int => return ip.get(gpa, .{ .int = .{
+                .int => return ip.get(gpa, tid, .{ .int = .{
                     .ty = .usize_type,
                     .storage = .{ .u64 = @intCast(ptr.byte_offset) },
                 } }),
@@ -7904,44 +7956,44 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
         .opt => |opt| switch (ip.indexToKey(new_ty)) {
             .ptr_type => |ptr_type| return switch (opt.val) {
                 .none => switch (ptr_type.flags.size) {
-                    .One, .Many, .C => try ip.get(gpa, .{ .ptr = .{
+                    .One, .Many, .C => try ip.get(gpa, tid, .{ .ptr = .{
                         .ty = new_ty,
                         .base_addr = .int,
                         .byte_offset = 0,
                     } }),
-                    .Slice => try ip.get(gpa, .{ .slice = .{
+                    .Slice => try ip.get(gpa, tid, .{ .slice = .{
                         .ty = new_ty,
-                        .ptr = try ip.get(gpa, .{ .ptr = .{
+                        .ptr = try ip.get(gpa, tid, .{ .ptr = .{
                             .ty = ip.slicePtrType(new_ty),
                             .base_addr = .int,
                             .byte_offset = 0,
                         } }),
-                        .len = try ip.get(gpa, .{ .undef = .usize_type }),
+                        .len = try ip.get(gpa, tid, .{ .undef = .usize_type }),
                     } }),
                 },
-                else => |payload| try ip.getCoerced(gpa, payload, new_ty),
+                else => |payload| try ip.getCoerced(gpa, tid, payload, new_ty),
             },
-            .opt_type => |child_type| return try ip.get(gpa, .{ .opt = .{
+            .opt_type => |child_type| return try ip.get(gpa, tid, .{ .opt = .{
                 .ty = new_ty,
                 .val = switch (opt.val) {
                     .none => .none,
-                    else => try ip.getCoerced(gpa, opt.val, child_type),
+                    else => try ip.getCoerced(gpa, tid, opt.val, child_type),
                 },
             } }),
             else => {},
         },
         .err => |err| if (ip.isErrorSetType(new_ty))
-            return ip.get(gpa, .{ .err = .{
+            return ip.get(gpa, tid, .{ .err = .{
                 .ty = new_ty,
                 .name = err.name,
             } })
         else if (ip.isErrorUnionType(new_ty))
-            return ip.get(gpa, .{ .error_union = .{
+            return ip.get(gpa, tid, .{ .error_union = .{
                 .ty = new_ty,
                 .val = .{ .err_name = err.name },
             } }),
         .error_union => |error_union| if (ip.isErrorUnionType(new_ty))
-            return ip.get(gpa, .{ .error_union = .{
+            return ip.get(gpa, tid, .{ .error_union = .{
                 .ty = new_ty,
                 .val = error_union.val,
             } }),
@@ -7960,20 +8012,20 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
             };
             if (old_ty_child != new_ty_child) break :direct;
             switch (aggregate.storage) {
-                .bytes => |bytes| return ip.get(gpa, .{ .aggregate = .{
+                .bytes => |bytes| return ip.get(gpa, tid, .{ .aggregate = .{
                     .ty = new_ty,
                     .storage = .{ .bytes = bytes },
                 } }),
                 .elems => |elems| {
                     const elems_copy = try gpa.dupe(Index, elems[0..new_len]);
                     defer gpa.free(elems_copy);
-                    return ip.get(gpa, .{ .aggregate = .{
+                    return ip.get(gpa, tid, .{ .aggregate = .{
                         .ty = new_ty,
                         .storage = .{ .elems = elems_copy },
                     } });
                 },
                 .repeated_elem => |elem| {
-                    return ip.get(gpa, .{ .aggregate = .{
+                    return ip.get(gpa, tid, .{ .aggregate = .{
                         .ty = new_ty,
                         .storage = .{ .repeated_elem = elem },
                     } });
@@ -7991,7 +8043,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
             // We have to intern each value here, so unfortunately we can't easily avoid
             // the repeated indexToKey calls.
             for (agg_elems, 0..) |*elem, index| {
-                elem.* = try ip.get(gpa, .{ .int = .{
+                elem.* = try ip.get(gpa, tid, .{ .int = .{
                     .ty = .u8_type,
                     .storage = .{ .u64 = bytes.at(index, ip) },
                 } });
@@ -8008,27 +8060,27 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
                 .struct_type => ip.loadStructType(new_ty).field_types.get(ip)[i],
                 else => unreachable,
             };
-            elem.* = try ip.getCoerced(gpa, elem.*, new_elem_ty);
+            elem.* = try ip.getCoerced(gpa, tid, elem.*, new_elem_ty);
         }
-        return ip.get(gpa, .{ .aggregate = .{ .ty = new_ty, .storage = .{ .elems = agg_elems } } });
+        return ip.get(gpa, tid, .{ .aggregate = .{ .ty = new_ty, .storage = .{ .elems = agg_elems } } });
     },
     else => {},
     }

     switch (ip.indexToKey(new_ty)) {
         .opt_type => |child_type| switch (val) {
-            .null_value => return ip.get(gpa, .{ .opt = .{
+            .null_value => return ip.get(gpa, tid, .{ .opt = .{
                 .ty = new_ty,
                 .val = .none,
             } }),
-            else => return ip.get(gpa, .{ .opt = .{
+            else => return ip.get(gpa, tid, .{ .opt = .{
                 .ty = new_ty,
-                .val = try ip.getCoerced(gpa, val, child_type),
+                .val = try ip.getCoerced(gpa, tid, val, child_type),
             } }),
         },
-        .error_union_type => |error_union_type| return ip.get(gpa, .{ .error_union = .{
+        .error_union_type => |error_union_type| return ip.get(gpa, tid, .{ .error_union = .{
             .ty = new_ty,
-            .val = .{ .payload = try ip.getCoerced(gpa, val, error_union_type.payload_type) },
+            .val = .{ .payload = try ip.getCoerced(gpa, tid, val, error_union_type.payload_type) },
         } }),
         else => {},
     }
@@ -8042,27 +8094,45 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
     unreachable;
 }

-fn getCoercedFuncDecl(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
+fn getCoercedFuncDecl(
+    ip: *InternPool,
+    gpa: Allocator,
+    tid: Zcu.PerThread.Id,
+    val: Index,
+    new_ty: Index,
+) Allocator.Error!Index {
     const datas = ip.items.items(.data);
     const extra_index = datas[@intFromEnum(val)];
     const prev_ty: Index = @enumFromInt(
         ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncDecl, "ty").?],
     );
     if (new_ty == prev_ty) return val;
-    return getCoercedFunc(ip, gpa, val, new_ty);
+    return getCoercedFunc(ip, gpa, tid, val, new_ty);
 }

-fn getCoercedFuncInstance(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
+fn getCoercedFuncInstance(
+    ip: *InternPool,
+    gpa: Allocator,
+    tid: Zcu.PerThread.Id,
+    val: Index,
+    new_ty: Index,
+) Allocator.Error!Index {
     const datas = ip.items.items(.data);
     const extra_index = datas[@intFromEnum(val)];
     const prev_ty: Index = @enumFromInt(
         ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "ty").?],
     );
     if (new_ty == prev_ty) return val;
-    return getCoercedFunc(ip, gpa, val, new_ty);
+    return getCoercedFunc(ip, gpa, tid, val, new_ty);
 }

-fn getCoercedFunc(ip: *InternPool, gpa: Allocator, func: Index, ty: Index) Allocator.Error!Index {
+fn getCoercedFunc(
+    ip: *InternPool,
+    gpa: Allocator,
+    _: Zcu.PerThread.Id,
+    func: Index,
+    ty: Index,
+) Allocator.Error!Index {
     const prev_extra_len = ip.extra.items.len;
     try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncCoerced).Struct.fields.len);
     try ip.items.ensureUnusedCapacity(gpa, 1);
@@ -8092,7 +8162,7 @@ fn getCoercedFunc(ip: *InternPool, gpa: Allocator, func: Index, ty: Index) Allocator.Error!Index {

 /// Asserts `val` has an integer type.
 /// Assumes `new_ty` is an integer type.
-pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Index) Allocator.Error!Index {
+pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, int: Key.Int, new_ty: Index) Allocator.Error!Index {
     // The key cannot be passed directly to `get`, otherwise in the case of
     // big_int storage, the limbs would be invalidated before they are read.
     // Here we pre-reserve the limbs to ensure that the logic in `addInt` will
@@ -8111,7 +8181,7 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Index) Allocator.Error!Index {
         } };
     },
     };
-    return ip.get(gpa, .{ .int = .{
+    return ip.get(gpa, tid, .{ .int = .{
         .ty = new_ty,
         .storage = new_storage,
     } });

@@ -6,13 +6,11 @@ const InternPool = @import("InternPool.zig");
 const Type = @import("Type.zig");
 const Value = @import("Value.zig");
 const Zcu = @import("Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
 const RangeSet = @This();
 const LazySrcLoc = Zcu.LazySrcLoc;

+pt: Zcu.PerThread,
 ranges: std.ArrayList(Range),
-module: *Module,

 pub const Range = struct {
     first: InternPool.Index,
@@ -20,10 +18,10 @@ pub const Range = struct {
     src: LazySrcLoc,
 };

-pub fn init(allocator: std.mem.Allocator, module: *Module) RangeSet {
+pub fn init(allocator: std.mem.Allocator, pt: Zcu.PerThread) RangeSet {
     return .{
+        .pt = pt,
         .ranges = std.ArrayList(Range).init(allocator),
-        .module = module,
     };
 }

@@ -37,8 +35,8 @@ pub fn add(
     last: InternPool.Index,
     src: LazySrcLoc,
 ) !?LazySrcLoc {
-    const mod = self.module;
-    const ip = &mod.intern_pool;
+    const pt = self.pt;
+    const ip = &pt.zcu.intern_pool;

     const ty = ip.typeOf(first);
     assert(ty == ip.typeOf(last));
@@ -47,8 +45,8 @@ pub fn add(
         assert(ty == ip.typeOf(range.first));
         assert(ty == ip.typeOf(range.last));

-        if (Value.fromInterned(last).compareScalar(.gte, Value.fromInterned(range.first), Type.fromInterned(ty), mod) and
-            Value.fromInterned(first).compareScalar(.lte, Value.fromInterned(range.last), Type.fromInterned(ty), mod))
+        if (Value.fromInterned(last).compareScalar(.gte, Value.fromInterned(range.first), Type.fromInterned(ty), pt) and
+            Value.fromInterned(first).compareScalar(.lte, Value.fromInterned(range.last), Type.fromInterned(ty), pt))
         {
             return range.src; // They overlap.
         }
@@ -63,20 +61,20 @@ pub fn add(
 }

 /// Assumes a and b do not overlap
-fn lessThan(mod: *Module, a: Range, b: Range) bool {
-    const ty = Type.fromInterned(mod.intern_pool.typeOf(a.first));
-    return Value.fromInterned(a.first).compareScalar(.lt, Value.fromInterned(b.first), ty, mod);
+fn lessThan(pt: Zcu.PerThread, a: Range, b: Range) bool {
+    const ty = Type.fromInterned(pt.zcu.intern_pool.typeOf(a.first));
+    return Value.fromInterned(a.first).compareScalar(.lt, Value.fromInterned(b.first), ty, pt);
 }

 pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !bool {
-    const mod = self.module;
-    const ip = &mod.intern_pool;
+    const pt = self.pt;
+    const ip = &pt.zcu.intern_pool;
     assert(ip.typeOf(first) == ip.typeOf(last));

     if (self.ranges.items.len == 0)
         return false;

-    std.mem.sort(Range, self.ranges.items, mod, lessThan);
+    std.mem.sort(Range, self.ranges.items, pt, lessThan);

     if (self.ranges.items[0].first != first or
         self.ranges.items[self.ranges.items.len - 1].last != last)
@@ -95,10 +93,10 @@ pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !bool {
         const prev = self.ranges.items[i];

         // prev.last + 1 == cur.first
-        try counter.copy(Value.fromInterned(prev.last).toBigInt(&space, mod));
+        try counter.copy(Value.fromInterned(prev.last).toBigInt(&space, pt));
         try counter.addScalar(&counter, 1);

-        const cur_start_int = Value.fromInterned(cur.first).toBigInt(&space, mod);
+        const cur_start_int = Value.fromInterned(cur.first).toBigInt(&space, pt);
         if (!cur_start_int.eql(counter.toConst())) {
             return false;
         }

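This file names itself `RangeSet` (`const RangeSet = @This();`): it swaps its stored `module: *Module` for a `pt: Zcu.PerThread`, so value comparisons and big-int reads go through `pt`. A hedged usage sketch, assuming a `pt`, an allocator, and interned `first`/`last` values are already in hand in the surrounding semantic-analysis code (the `deinit` call is assumed from the pre-existing API, not shown in this diff):

```zig
// Sketch only: `gpa`, `pt`, `first`, `last`, and `src` come from context.
var set = RangeSet.init(gpa, pt);
defer set.deinit();

if (try set.add(first, last, src)) |prev_src| {
    // Overlap: `prev_src` locates the earlier range for error reporting.
    _ = prev_src;
}
const covers_whole_type = try set.spans(first, last);
_ = covers_whole_type;
```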
src/Sema.zig (4968 lines changed): file diff suppressed because it is too large.
@@ -69,7 +69,8 @@ fn bitCastInner(
     host_bits: u64,
     bit_offset: u64,
 ) BitCastError!Value {
-    const zcu = sema.mod;
+    const pt = sema.pt;
+    const zcu = pt.zcu;
     const endian = zcu.getTarget().cpu.arch.endian();

     if (dest_ty.toIntern() == val.typeOf(zcu).toIntern() and bit_offset == 0) {
@@ -78,29 +79,29 @@ fn bitCastInner(

     const val_ty = val.typeOf(zcu);

-    try val_ty.resolveLayout(zcu);
-    try dest_ty.resolveLayout(zcu);
+    try val_ty.resolveLayout(pt);
+    try dest_ty.resolveLayout(pt);

     assert(val_ty.hasWellDefinedLayout(zcu));

     const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
-        .{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
+        .{ val_ty.abiSize(pt) * 8 - host_bits, host_bits - val_ty.bitSize(pt) }
     else
-        .{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
+        .{ val_ty.abiSize(pt) * 8 - val_ty.bitSize(pt), 0 };

     const skip_bits = switch (endian) {
         .little => bit_offset + byte_offset * 8,
         .big => if (host_bits > 0)
-            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
+            val_ty.abiSize(pt) * 8 - byte_offset * 8 - host_bits + bit_offset
         else
-            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - dest_ty.bitSize(zcu),
+            val_ty.abiSize(pt) * 8 - byte_offset * 8 - dest_ty.bitSize(pt),
     };

     var unpack: UnpackValueBits = .{
-        .zcu = zcu,
+        .pt = sema.pt,
         .arena = sema.arena,
         .skip_bits = skip_bits,
-        .remaining_bits = dest_ty.bitSize(zcu),
+        .remaining_bits = dest_ty.bitSize(pt),
         .unpacked = std.ArrayList(InternPool.Index).init(sema.arena),
     };
     switch (endian) {
@@ -116,7 +117,7 @@ fn bitCastInner(
         try unpack.padding(host_pad_bits);

     var pack: PackValueBits = .{
-        .zcu = zcu,
+        .pt = sema.pt,
         .arena = sema.arena,
         .unpacked = unpack.unpacked.items,
     };
@@ -131,33 +132,34 @@ fn bitCastSpliceInner(
     host_bits: u64,
     bit_offset: u64,
 ) BitCastError!Value {
-    const zcu = sema.mod;
+    const pt = sema.pt;
+    const zcu = pt.zcu;
     const endian = zcu.getTarget().cpu.arch.endian();
     const val_ty = val.typeOf(zcu);
     const splice_val_ty = splice_val.typeOf(zcu);

-    try val_ty.resolveLayout(zcu);
-    try splice_val_ty.resolveLayout(zcu);
+    try val_ty.resolveLayout(pt);
+    try splice_val_ty.resolveLayout(pt);

-    const splice_bits = splice_val_ty.bitSize(zcu);
+    const splice_bits = splice_val_ty.bitSize(pt);

     const splice_offset = switch (endian) {
         .little => bit_offset + byte_offset * 8,
         .big => if (host_bits > 0)
-            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
+            val_ty.abiSize(pt) * 8 - byte_offset * 8 - host_bits + bit_offset
         else
-            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - splice_bits,
+            val_ty.abiSize(pt) * 8 - byte_offset * 8 - splice_bits,
     };

-    assert(splice_offset + splice_bits <= val_ty.abiSize(zcu) * 8);
+    assert(splice_offset + splice_bits <= val_ty.abiSize(pt) * 8);

     const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
-        .{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
+        .{ val_ty.abiSize(pt) * 8 - host_bits, host_bits - val_ty.bitSize(pt) }
     else
-        .{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
+        .{ val_ty.abiSize(pt) * 8 - val_ty.bitSize(pt), 0 };

     var unpack: UnpackValueBits = .{
-        .zcu = zcu,
+        .pt = pt,
         .arena = sema.arena,
         .skip_bits = 0,
         .remaining_bits = splice_offset,
@@ -179,7 +181,7 @@ fn bitCastSpliceInner(
     try unpack.add(splice_val);

     unpack.skip_bits = splice_offset + splice_bits;
-    unpack.remaining_bits = val_ty.abiSize(zcu) * 8 - splice_offset - splice_bits;
+    unpack.remaining_bits = val_ty.abiSize(pt) * 8 - splice_offset - splice_bits;
     switch (endian) {
         .little => {
             try unpack.add(val);
@@ -193,7 +195,7 @@ fn bitCastSpliceInner(
     try unpack.padding(host_pad_bits);

     var pack: PackValueBits = .{
-        .zcu = zcu,
+        .pt = pt,
         .arena = sema.arena,
         .unpacked = unpack.unpacked.items,
     };
@@ -209,7 +211,7 @@ fn bitCastSpliceInner(
 /// of values in *packed* memory - therefore, on big-endian targets, the first element of this
 /// list contains bits from the *final* byte of the value.
 const UnpackValueBits = struct {
-    zcu: *Zcu,
+    pt: Zcu.PerThread,
     arena: Allocator,
     skip_bits: u64,
     remaining_bits: u64,
@@ -217,7 +219,8 @@ const UnpackValueBits = struct {
     unpacked: std.ArrayList(InternPool.Index),

     fn add(unpack: *UnpackValueBits, val: Value) BitCastError!void {
-        const zcu = unpack.zcu;
+        const pt = unpack.pt;
+        const zcu = pt.zcu;
         const endian = zcu.getTarget().cpu.arch.endian();
         const ip = &zcu.intern_pool;

@@ -226,7 +229,7 @@ const UnpackValueBits = struct {
         }

         const ty = val.typeOf(zcu);
-        const bit_size = ty.bitSize(zcu);
+        const bit_size = ty.bitSize(pt);

         if (unpack.skip_bits >= bit_size) {
             unpack.skip_bits -= bit_size;
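A pattern worth noting throughout this file: reads that only consult already-interned data (`typeOf`, `getTarget`, `arrayLen`, `containerLayout`) keep taking `zcu`, while queries that may intern new values or resolve layout (`bitSize`, `abiSize`, `elemValue`, `fieldValue`, `structFieldOffset`, `intern`) now take `pt`. A compressed illustration of the split, as a hypothetical helper under the same assumptions as the surrounding code:

```zig
// Hypothetical helper: shows which handle each kind of query takes.
fn queryKinds(unpack: *UnpackValueBits, val: Value) void {
    const pt = unpack.pt;
    const zcu = pt.zcu;
    const ty = val.typeOf(zcu); // pure read of interned data: *Zcu suffices
    _ = ty.bitSize(pt); // may intern/resolve layout: needs the PerThread handle
}
```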
@@ -279,7 +282,7 @@ const UnpackValueBits = struct {
                     .little => i,
                     .big => len - i - 1,
                 };
-                const elem_val = try val.elemValue(zcu, real_idx);
+                const elem_val = try val.elemValue(pt, real_idx);
                 try unpack.add(elem_val);
             }
         },
@@ -288,7 +291,7 @@ const UnpackValueBits = struct {
             // The final element does not have trailing padding.
             // Elements are reversed in packed memory on BE targets.
             const elem_ty = ty.childType(zcu);
-            const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
+            const pad_bits = elem_ty.abiSize(pt) * 8 - elem_ty.bitSize(pt);
             const len = ty.arrayLen(zcu);
             const maybe_sent = ty.sentinel(zcu);

@@ -303,7 +306,7 @@ const UnpackValueBits = struct {
                     .little => i,
                     .big => len - i - 1,
                 };
-                const elem_val = try val.elemValue(zcu, @intCast(real_idx));
+                const elem_val = try val.elemValue(pt, @intCast(real_idx));
                 try unpack.add(elem_val);
                 if (i != len - 1) try unpack.padding(pad_bits);
             }
@@ -320,12 +323,12 @@ const UnpackValueBits = struct {
             var cur_bit_off: u64 = 0;
             var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
             while (it.next()) |field_idx| {
-                const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
+                const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8;
                 const pad_bits = want_bit_off - cur_bit_off;
-                const field_val = try val.fieldValue(zcu, field_idx);
+                const field_val = try val.fieldValue(pt, field_idx);
                 try unpack.padding(pad_bits);
                 try unpack.add(field_val);
-                cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(zcu);
+                cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(pt);
             }
             // Add trailing padding bits.
             try unpack.padding(bit_size - cur_bit_off);
@@ -334,13 +337,13 @@ const UnpackValueBits = struct {
             var cur_bit_off: u64 = bit_size;
             var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
             while (it.next()) |field_idx| {
-                const field_val = try val.fieldValue(zcu, field_idx);
+                const field_val = try val.fieldValue(pt, field_idx);
                 const field_ty = field_val.typeOf(zcu);
-                const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
+                const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8 + field_ty.bitSize(pt);
                 const pad_bits = cur_bit_off - want_bit_off;
                 try unpack.padding(pad_bits);
                 try unpack.add(field_val);
-                cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
+                cur_bit_off = want_bit_off - field_ty.bitSize(pt);
             }
             assert(cur_bit_off == 0);
         },
@@ -349,7 +352,7 @@ const UnpackValueBits = struct {
             // Just add all fields in order. There are no padding bits.
             // This is identical between LE and BE targets.
             for (0..ty.structFieldCount(zcu)) |i| {
-                const field_val = try val.fieldValue(zcu, i);
+                const field_val = try val.fieldValue(pt, i);
                 try unpack.add(field_val);
             }
         },
@@ -363,7 +366,7 @@ const UnpackValueBits = struct {
             // This correctly handles the case where `tag == .none`, since the payload is then
             // either an integer or a byte array, both of which we can unpack.
             const payload_val = Value.fromInterned(un.val);
-            const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(zcu);
+            const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(pt);
             if (endian == .little or ty.containerLayout(zcu) == .@"packed") {
                 try unpack.add(payload_val);
                 try unpack.padding(pad_bits);
||||
|
||||
fn padding(unpack: *UnpackValueBits, pad_bits: u64) BitCastError!void {
|
||||
if (pad_bits == 0) return;
|
||||
const zcu = unpack.zcu;
|
||||
const pt = unpack.pt;
|
||||
// Figure out how many full bytes and leftover bits there are.
|
||||
const bytes = pad_bits / 8;
|
||||
const bits = pad_bits % 8;
|
||||
// Add undef u8 values for the bytes...
|
||||
const undef_u8 = try zcu.undefValue(Type.u8);
|
||||
const undef_u8 = try pt.undefValue(Type.u8);
|
||||
for (0..@intCast(bytes)) |_| {
|
||||
try unpack.primitive(undef_u8);
|
||||
}
|
||||
// ...and an undef int for the leftover bits.
|
||||
if (bits == 0) return;
|
||||
const bits_ty = try zcu.intType(.unsigned, @intCast(bits));
|
||||
const bits_val = try zcu.undefValue(bits_ty);
|
||||
const bits_ty = try pt.intType(.unsigned, @intCast(bits));
|
||||
const bits_val = try pt.undefValue(bits_ty);
|
||||
try unpack.primitive(bits_val);
|
||||
}
|
||||
|
||||
fn primitive(unpack: *UnpackValueBits, val: Value) BitCastError!void {
|
||||
const zcu = unpack.zcu;
|
||||
const pt = unpack.pt;
|
||||
|
||||
if (unpack.remaining_bits == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const ty = val.typeOf(zcu);
|
||||
const bit_size = ty.bitSize(zcu);
|
||||
const ty = val.typeOf(pt.zcu);
|
||||
const bit_size = ty.bitSize(pt);
|
||||
|
||||
// Note that this skips all zero-bit types.
|
||||
if (unpack.skip_bits >= bit_size) {
|
||||
@ -425,21 +428,21 @@ const UnpackValueBits = struct {
|
||||
}
|
||||
|
||||
fn splitPrimitive(unpack: *UnpackValueBits, val: Value, bit_offset: u64, bit_count: u64) BitCastError!void {
|
||||
const zcu = unpack.zcu;
|
||||
const ty = val.typeOf(zcu);
|
||||
const pt = unpack.pt;
|
||||
const ty = val.typeOf(pt.zcu);
|
||||
|
||||
const val_bits = ty.bitSize(zcu);
|
||||
const val_bits = ty.bitSize(pt);
|
||||
assert(bit_offset + bit_count <= val_bits);
|
||||
|
||||
switch (zcu.intern_pool.indexToKey(val.toIntern())) {
|
||||
switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
|
||||
// In the `ptr` case, this will return `error.ReinterpretDeclRef`
|
||||
// if we're trying to split a non-integer pointer value.
|
||||
.int, .float, .enum_tag, .ptr, .opt => {
|
||||
// This @intCast is okay because no primitive can exceed the size of a u16.
|
||||
const int_ty = try zcu.intType(.unsigned, @intCast(bit_count));
|
||||
const int_ty = try unpack.pt.intType(.unsigned, @intCast(bit_count));
|
||||
const buf = try unpack.arena.alloc(u8, @intCast((val_bits + 7) / 8));
|
||||
try val.writeToPackedMemory(ty, zcu, buf, 0);
|
||||
const sub_val = try Value.readFromPackedMemory(int_ty, zcu, buf, @intCast(bit_offset), unpack.arena);
|
||||
try val.writeToPackedMemory(ty, unpack.pt, buf, 0);
|
||||
const sub_val = try Value.readFromPackedMemory(int_ty, unpack.pt, buf, @intCast(bit_offset), unpack.arena);
|
||||
try unpack.primitive(sub_val);
|
||||
},
|
||||
.undef => try unpack.padding(bit_count),
|
||||
@ -456,13 +459,14 @@ const UnpackValueBits = struct {
|
||||
 /// reconstructs a value of an arbitrary type, with correct handling of `undefined`
 /// values and of pointers which align in virtual memory.
 const PackValueBits = struct {
-zcu: *Zcu,
+pt: Zcu.PerThread,
 arena: Allocator,
 bit_offset: u64 = 0,
 unpacked: []const InternPool.Index,

 fn get(pack: *PackValueBits, ty: Type) BitCastError!Value {
-const zcu = pack.zcu;
+const pt = pack.pt;
+const zcu = pt.zcu;
 const endian = zcu.getTarget().cpu.arch.endian();
 const ip = &zcu.intern_pool;
 const arena = pack.arena;
@@ -485,7 +489,7 @@ const PackValueBits = struct {
 }
 },
 }
-return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
+return Value.fromInterned(try pt.intern(.{ .aggregate = .{
 .ty = ty.toIntern(),
 .storage = .{ .elems = elems },
 } }));
@@ -495,12 +499,12 @@ const PackValueBits = struct {
 const len = ty.arrayLen(zcu);
 const elem_ty = ty.childType(zcu);
 const maybe_sent = ty.sentinel(zcu);
-const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
+const pad_bits = elem_ty.abiSize(pt) * 8 - elem_ty.bitSize(pt);
 const elems = try arena.alloc(InternPool.Index, @intCast(len));

 if (endian == .big and maybe_sent != null) {
 // TODO: validate sentinel was preserved!
-try pack.padding(elem_ty.bitSize(zcu));
+try pack.padding(elem_ty.bitSize(pt));
 if (len != 0) try pack.padding(pad_bits);
 }

@@ -516,10 +520,10 @@ const PackValueBits = struct {
 if (endian == .little and maybe_sent != null) {
 // TODO: validate sentinel was preserved!
 if (len != 0) try pack.padding(pad_bits);
-try pack.padding(elem_ty.bitSize(zcu));
+try pack.padding(elem_ty.bitSize(pt));
 }

-return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
+return Value.fromInterned(try pt.intern(.{ .aggregate = .{
 .ty = ty.toIntern(),
 .storage = .{ .elems = elems },
 } }));
@@ -534,23 +538,23 @@ const PackValueBits = struct {
 var cur_bit_off: u64 = 0;
 var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
 while (it.next()) |field_idx| {
-const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
+const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8;
 try pack.padding(want_bit_off - cur_bit_off);
 const field_ty = ty.structFieldType(field_idx, zcu);
 elems[field_idx] = (try pack.get(field_ty)).toIntern();
-cur_bit_off = want_bit_off + field_ty.bitSize(zcu);
+cur_bit_off = want_bit_off + field_ty.bitSize(pt);
 }
-try pack.padding(ty.bitSize(zcu) - cur_bit_off);
+try pack.padding(ty.bitSize(pt) - cur_bit_off);
 },
 .big => {
-var cur_bit_off: u64 = ty.bitSize(zcu);
+var cur_bit_off: u64 = ty.bitSize(pt);
 var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
 while (it.next()) |field_idx| {
 const field_ty = ty.structFieldType(field_idx, zcu);
-const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
+const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8 + field_ty.bitSize(pt);
 try pack.padding(cur_bit_off - want_bit_off);
 elems[field_idx] = (try pack.get(field_ty)).toIntern();
-cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
+cur_bit_off = want_bit_off - field_ty.bitSize(pt);
 }
 assert(cur_bit_off == 0);
 },
@@ -559,10 +563,10 @@ const PackValueBits = struct {
 // Fill those values now.
 for (elems, 0..) |*elem, field_idx| {
 if (elem.* != .none) continue;
-const val = (try ty.structFieldValueComptime(zcu, field_idx)).?;
+const val = (try ty.structFieldValueComptime(pt, field_idx)).?;
 elem.* = val.toIntern();
 }
-return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
+return Value.fromInterned(try pt.intern(.{ .aggregate = .{
 .ty = ty.toIntern(),
 .storage = .{ .elems = elems },
 } }));
@@ -575,7 +579,7 @@ const PackValueBits = struct {
 const field_ty = ty.structFieldType(i, zcu);
 elem.* = (try pack.get(field_ty)).toIntern();
 }
-return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
+return Value.fromInterned(try pt.intern(.{ .aggregate = .{
 .ty = ty.toIntern(),
 .storage = .{ .elems = elems },
 } }));
@@ -591,7 +595,7 @@ const PackValueBits = struct {
 const prev_unpacked = pack.unpacked;
 const prev_bit_offset = pack.bit_offset;

-const backing_ty = try ty.unionBackingType(zcu);
+const backing_ty = try ty.unionBackingType(pt);

 backing: {
 const backing_val = pack.get(backing_ty) catch |err| switch (err) {
@@ -607,7 +611,7 @@ const PackValueBits = struct {
 pack.bit_offset = prev_bit_offset;
 break :backing;
 }
-return Value.fromInterned(try zcu.intern(.{ .un = .{
+return Value.fromInterned(try pt.intern(.{ .un = .{
 .ty = ty.toIntern(),
 .tag = .none,
 .val = backing_val.toIntern(),
@@ -618,16 +622,16 @@ const PackValueBits = struct {
 for (field_order, 0..) |*f, i| f.* = @intCast(i);
 // Sort `field_order` to put the fields with the largest bit sizes first.
 const SizeSortCtx = struct {
-zcu: *Zcu,
+pt: Zcu.PerThread,
 field_types: []const InternPool.Index,
 fn lessThan(ctx: @This(), a_idx: u32, b_idx: u32) bool {
 const a_ty = Type.fromInterned(ctx.field_types[a_idx]);
 const b_ty = Type.fromInterned(ctx.field_types[b_idx]);
-return a_ty.bitSize(ctx.zcu) > b_ty.bitSize(ctx.zcu);
+return a_ty.bitSize(ctx.pt) > b_ty.bitSize(ctx.pt);
 }
 };
 std.mem.sortUnstable(u32, field_order, SizeSortCtx{
-.zcu = zcu,
+.pt = pt,
 .field_types = zcu.typeToUnion(ty).?.field_types.get(ip),
 }, SizeSortCtx.lessThan);

@@ -635,7 +639,7 @@ const PackValueBits = struct {

 for (field_order) |field_idx| {
 const field_ty = Type.fromInterned(zcu.typeToUnion(ty).?.field_types.get(ip)[field_idx]);
-const pad_bits = ty.bitSize(zcu) - field_ty.bitSize(zcu);
+const pad_bits = ty.bitSize(pt) - field_ty.bitSize(pt);
 if (!padding_after) try pack.padding(pad_bits);
 const field_val = pack.get(field_ty) catch |err| switch (err) {
 error.ReinterpretDeclRef => {
@@ -651,8 +655,8 @@ const PackValueBits = struct {
 pack.bit_offset = prev_bit_offset;
 continue;
 }
-const tag_val = try zcu.enumValueFieldIndex(ty.unionTagTypeHypothetical(zcu), field_idx);
-return Value.fromInterned(try zcu.intern(.{ .un = .{
+const tag_val = try pt.enumValueFieldIndex(ty.unionTagTypeHypothetical(zcu), field_idx);
+return Value.fromInterned(try pt.intern(.{ .un = .{
 .ty = ty.toIntern(),
 .tag = tag_val.toIntern(),
 .val = field_val.toIntern(),
@@ -662,7 +666,7 @@ const PackValueBits = struct {
 // No field could represent the value. Just do whatever happens when we try to read
 // the backing type - either `undefined` or `error.ReinterpretDeclRef`.
 const backing_val = try pack.get(backing_ty);
-return Value.fromInterned(try zcu.intern(.{ .un = .{
+return Value.fromInterned(try pt.intern(.{ .un = .{
 .ty = ty.toIntern(),
 .tag = .none,
 .val = backing_val.toIntern(),
@@ -677,14 +681,14 @@ const PackValueBits = struct {
 }

 fn primitive(pack: *PackValueBits, want_ty: Type) BitCastError!Value {
-const zcu = pack.zcu;
-const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(zcu));
+const pt = pack.pt;
+const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(pt));

 for (vals) |val| {
-if (!Value.fromInterned(val).isUndef(zcu)) break;
+if (!Value.fromInterned(val).isUndef(pt.zcu)) break;
 } else {
 // All bits of the value are `undefined`.
-return zcu.undefValue(want_ty);
+return pt.undefValue(want_ty);
 }

 // TODO: we need to decide how to handle partially-undef values here.
@@ -702,9 +706,9 @@ const PackValueBits = struct {
 ptr_cast: {
 if (vals.len != 1) break :ptr_cast;
 const val = Value.fromInterned(vals[0]);
-if (!val.typeOf(zcu).isPtrAtRuntime(zcu)) break :ptr_cast;
-if (!want_ty.isPtrAtRuntime(zcu)) break :ptr_cast;
-return zcu.getCoerced(val, want_ty);
+if (!val.typeOf(pt.zcu).isPtrAtRuntime(pt.zcu)) break :ptr_cast;
+if (!want_ty.isPtrAtRuntime(pt.zcu)) break :ptr_cast;
+return pt.getCoerced(val, want_ty);
 }

 // Reinterpret via an in-memory buffer.
@@ -712,8 +716,8 @@ const PackValueBits = struct {
 var buf_bits: u64 = 0;
 for (vals) |ip_val| {
 const val = Value.fromInterned(ip_val);
-const ty = val.typeOf(zcu);
-buf_bits += ty.bitSize(zcu);
+const ty = val.typeOf(pt.zcu);
+buf_bits += ty.bitSize(pt);
 }

 const buf = try pack.arena.alloc(u8, @intCast((buf_bits + 7) / 8));
@@ -722,25 +726,25 @@ const PackValueBits = struct {
 var cur_bit_off: usize = 0;
 for (vals) |ip_val| {
 const val = Value.fromInterned(ip_val);
-const ty = val.typeOf(zcu);
-if (!val.isUndef(zcu)) {
-try val.writeToPackedMemory(ty, zcu, buf, cur_bit_off);
+const ty = val.typeOf(pt.zcu);
+if (!val.isUndef(pt.zcu)) {
+try val.writeToPackedMemory(ty, pt, buf, cur_bit_off);
 }
-cur_bit_off += @intCast(ty.bitSize(zcu));
+cur_bit_off += @intCast(ty.bitSize(pt));
 }

-return Value.readFromPackedMemory(want_ty, zcu, buf, @intCast(bit_offset), pack.arena);
+return Value.readFromPackedMemory(want_ty, pt, buf, @intCast(bit_offset), pack.arena);
 }

 fn prepareBits(pack: *PackValueBits, need_bits: u64) struct { []const InternPool.Index, u64 } {
 if (need_bits == 0) return .{ &.{}, 0 };

-const zcu = pack.zcu;
+const pt = pack.pt;

 var bits: u64 = 0;
 var len: usize = 0;
 while (bits < pack.bit_offset + need_bits) {
-bits += Value.fromInterned(pack.unpacked[len]).typeOf(zcu).bitSize(zcu);
+bits += Value.fromInterned(pack.unpacked[len]).typeOf(pt.zcu).bitSize(pt);
 len += 1;
 }

@@ -753,7 +757,7 @@ const PackValueBits = struct {
 pack.bit_offset = 0;
 } else {
 pack.unpacked = pack.unpacked[len - 1 ..];
-pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(zcu).bitSize(zcu) - extra_bits;
+pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(pt.zcu).bitSize(pt) - extra_bits;
 }

 return .{ result_vals, result_offset };
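The `PackValueBits` changes above show the template the whole commit follows: a context struct stops holding `zcu: *Zcu` and instead holds `pt: Zcu.PerThread`, reaching the shared compilation state as `pt.zcu`. A minimal sketch of the pattern, with illustrative names (`Example`, `bitSizeOf`) that are not part of the commit:

    const Example = struct {
        pt: Zcu.PerThread,

        fn bitSizeOf(ex: Example, ty: Type) u64 {
            const pt = ex.pt;
            const zcu = pt.zcu; // shared, thread-agnostic state
            // Pure type queries still take the `*Zcu`:
            std.debug.assert(ty.zigTypeTag(zcu) != .Fn);
            // Layout queries, which may intern new values, take the per-thread handle:
            return ty.bitSize(pt);
        }
    };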
@@ -12,19 +12,19 @@ pub const ComptimeLoadResult = union(enum) {
 };

 pub fn loadComptimePtr(sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Value) !ComptimeLoadResult {
-const zcu = sema.mod;
-const ptr_info = ptr.typeOf(zcu).ptrInfo(zcu);
+const pt = sema.pt;
+const ptr_info = ptr.typeOf(pt.zcu).ptrInfo(pt.zcu);
 // TODO: host size for vectors is terrible
 const host_bits = switch (ptr_info.flags.vector_index) {
 .none => ptr_info.packed_offset.host_size * 8,
-else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
+else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(pt),
 };
 const bit_offset = if (host_bits != 0) bit_offset: {
-const child_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
+const child_bits = Type.fromInterned(ptr_info.child).bitSize(pt);
 const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
 .none => 0,
 .runtime => return .runtime_load,
-else => |idx| switch (zcu.getTarget().cpu.arch.endian()) {
+else => |idx| switch (pt.zcu.getTarget().cpu.arch.endian()) {
 .little => child_bits * @intFromEnum(idx),
 .big => host_bits - child_bits * (@intFromEnum(idx) + 1), // element order reversed on big endian
 },
@@ -60,28 +60,29 @@ pub fn storeComptimePtr(
 ptr: Value,
 store_val: Value,
 ) !ComptimeStoreResult {
-const zcu = sema.mod;
+const pt = sema.pt;
+const zcu = pt.zcu;
 const ptr_info = ptr.typeOf(zcu).ptrInfo(zcu);
 assert(store_val.typeOf(zcu).toIntern() == ptr_info.child);
 // TODO: host size for vectors is terrible
 const host_bits = switch (ptr_info.flags.vector_index) {
 .none => ptr_info.packed_offset.host_size * 8,
-else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
+else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(pt),
 };
 const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
 .none => 0,
 .runtime => return .runtime_store,
 else => |idx| switch (zcu.getTarget().cpu.arch.endian()) {
-.little => Type.fromInterned(ptr_info.child).bitSize(zcu) * @intFromEnum(idx),
-.big => host_bits - Type.fromInterned(ptr_info.child).bitSize(zcu) * (@intFromEnum(idx) + 1), // element order reversed on big endian
+.little => Type.fromInterned(ptr_info.child).bitSize(pt) * @intFromEnum(idx),
+.big => host_bits - Type.fromInterned(ptr_info.child).bitSize(pt) * (@intFromEnum(idx) + 1), // element order reversed on big endian
 },
 };
 const pseudo_store_ty = if (host_bits > 0) t: {
-const need_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
+const need_bits = Type.fromInterned(ptr_info.child).bitSize(pt);
 if (need_bits + bit_offset > host_bits) {
 return .exceeds_host_size;
 }
-break :t try zcu.intType(.unsigned, @intCast(host_bits));
+break :t try sema.pt.intType(.unsigned, @intCast(host_bits));
 } else Type.fromInterned(ptr_info.child);

 const strat = try prepareComptimePtrStore(sema, block, src, ptr, pseudo_store_ty, 0);
@@ -103,7 +104,7 @@ pub fn storeComptimePtr(
 .needed_well_defined => |ty| return .{ .needed_well_defined = ty },
 .out_of_bounds => |ty| return .{ .out_of_bounds = ty },
 };
-const expected = try expected_mv.intern(zcu, sema.arena);
+const expected = try expected_mv.intern(pt, sema.arena);
 if (store_val.toIntern() != expected.toIntern()) {
 return .{ .comptime_field_mismatch = expected };
 }
@@ -126,14 +127,14 @@ pub fn storeComptimePtr(
 switch (strat) {
 .direct => |direct| {
 const want_ty = direct.val.typeOf(zcu);
-const coerced_store_val = try zcu.getCoerced(store_val, want_ty);
+const coerced_store_val = try pt.getCoerced(store_val, want_ty);
 direct.val.* = .{ .interned = coerced_store_val.toIntern() };
 return .success;
 },
 .index => |index| {
 const want_ty = index.val.typeOf(zcu).childType(zcu);
-const coerced_store_val = try zcu.getCoerced(store_val, want_ty);
-try index.val.setElem(zcu, sema.arena, @intCast(index.elem_index), .{ .interned = coerced_store_val.toIntern() });
+const coerced_store_val = try pt.getCoerced(store_val, want_ty);
+try index.val.setElem(pt, sema.arena, @intCast(index.elem_index), .{ .interned = coerced_store_val.toIntern() });
 return .success;
 },
 .flat_index => |flat| {
@@ -149,7 +150,7 @@ pub fn storeComptimePtr(
 // Better would be to gather all the store targets into an array.
 var index: u64 = flat.flat_elem_index + idx;
 const val_ptr, const final_idx = (try recursiveIndex(sema, flat.val, &index)).?;
-try val_ptr.setElem(zcu, sema.arena, @intCast(final_idx), .{ .interned = elem });
+try val_ptr.setElem(pt, sema.arena, @intCast(final_idx), .{ .interned = elem });
 }
 return .success;
 },
@@ -165,9 +166,9 @@ pub fn storeComptimePtr(
 .direct => |direct| .{ direct.val, 0 },
 .index => |index| .{
 index.val,
-index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(zcu),
+index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(pt),
 },
-.flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(zcu) },
+.flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(pt) },
 .reinterpret => |reinterpret| .{ reinterpret.val, reinterpret.byte_offset },
 else => unreachable,
 };
@@ -181,7 +182,7 @@ pub fn storeComptimePtr(
 }

 const new_val = try sema.bitCastSpliceVal(
-try val_ptr.intern(zcu, sema.arena),
+try val_ptr.intern(pt, sema.arena),
 store_val,
 byte_offset,
 host_bits,
@@ -205,7 +206,8 @@ fn loadComptimePtrInner(
 /// before `load_ty`. Otherwise, it is ignored and may be `undefined`.
 array_offset: u64,
 ) !ComptimeLoadResult {
-const zcu = sema.mod;
+const pt = sema.pt;
+const zcu = pt.zcu;
 const ip = &zcu.intern_pool;

 const ptr = switch (ip.indexToKey(ptr_val.toIntern())) {
@@ -263,7 +265,7 @@ fn loadComptimePtrInner(
 const load_one_ty, const load_count = load_ty.arrayBase(zcu);
 const count = if (load_one_ty.toIntern() == base_ty.toIntern()) load_count else 1;

-const want_ty = try zcu.arrayType(.{
+const want_ty = try sema.pt.arrayType(.{
 .len = count,
 .child = base_ty.toIntern(),
 });
@@ -285,7 +287,7 @@ fn loadComptimePtrInner(

 const agg_ty = agg_val.typeOf(zcu);
 switch (agg_ty.zigTypeTag(zcu)) {
-.Struct, .Pointer => break :val try agg_val.getElem(zcu, @intCast(base_index.index)),
+.Struct, .Pointer => break :val try agg_val.getElem(sema.pt, @intCast(base_index.index)),
 .Union => {
 const tag_val: Value, const payload_mv: MutableValue = switch (agg_val) {
 .un => |un| .{ Value.fromInterned(un.tag), un.payload.* },
@@ -427,7 +429,7 @@ fn loadComptimePtrInner(
 const next_elem_off = elem_size * (elem_idx + 1);
 if (cur_offset + need_bytes <= next_elem_off) {
 // We can look at a single array element.
-cur_val = try cur_val.getElem(zcu, @intCast(elem_idx));
+cur_val = try cur_val.getElem(sema.pt, @intCast(elem_idx));
 cur_offset -= elem_idx * elem_size;
 } else {
 break;
@@ -437,10 +439,10 @@ fn loadComptimePtrInner(
 .auto => unreachable, // ill-defined layout
 .@"packed" => break, // let the bitcast logic handle this
 .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
-const start_off = cur_ty.structFieldOffset(field_idx, zcu);
+const start_off = cur_ty.structFieldOffset(field_idx, pt);
 const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu));
 if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
-cur_val = try cur_val.getElem(zcu, field_idx);
+cur_val = try cur_val.getElem(sema.pt, field_idx);
 cur_offset -= start_off;
 break;
 }
@@ -482,7 +484,7 @@ fn loadComptimePtrInner(
 }

 const result_val = try sema.bitCastVal(
-try cur_val.intern(zcu, sema.arena),
+try cur_val.intern(sema.pt, sema.arena),
 load_ty,
 cur_offset,
 host_bits,
@@ -564,7 +566,8 @@ fn prepareComptimePtrStore(
 /// before `store_ty`. Otherwise, it is ignored and may be `undefined`.
 array_offset: u64,
 ) !ComptimeStoreStrategy {
-const zcu = sema.mod;
+const pt = sema.pt;
+const zcu = pt.zcu;
 const ip = &zcu.intern_pool;

 const ptr = switch (ip.indexToKey(ptr_val.toIntern())) {
@@ -587,14 +590,14 @@ fn prepareComptimePtrStore(
 const eu_val_ptr, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) {
 .direct => |direct| .{ direct.val, direct.alloc },
 .index => |index| .{
-try index.val.elem(zcu, sema.arena, @intCast(index.elem_index)),
+try index.val.elem(pt, sema.arena, @intCast(index.elem_index)),
 index.alloc,
 },
 .flat_index => unreachable, // base_ty is not an array
 .reinterpret => unreachable, // base_ty has ill-defined layout
 else => |err| return err,
 };
-try eu_val_ptr.unintern(zcu, sema.arena, false, false);
+try eu_val_ptr.unintern(pt, sema.arena, false, false);
 switch (eu_val_ptr.*) {
 .interned => |ip_index| switch (ip.indexToKey(ip_index)) {
 .undef => return .undef,
@@ -614,14 +617,14 @@ fn prepareComptimePtrStore(
 const opt_val_ptr, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) {
 .direct => |direct| .{ direct.val, direct.alloc },
 .index => |index| .{
-try index.val.elem(zcu, sema.arena, @intCast(index.elem_index)),
+try index.val.elem(pt, sema.arena, @intCast(index.elem_index)),
 index.alloc,
 },
 .flat_index => unreachable, // base_ty is not an array
 .reinterpret => unreachable, // base_ty has ill-defined layout
 else => |err| return err,
 };
-try opt_val_ptr.unintern(zcu, sema.arena, false, false);
+try opt_val_ptr.unintern(pt, sema.arena, false, false);
 switch (opt_val_ptr.*) {
 .interned => |ip_index| switch (ip.indexToKey(ip_index)) {
 .undef => return .undef,
@@ -648,7 +651,7 @@ fn prepareComptimePtrStore(
 const store_one_ty, const store_count = store_ty.arrayBase(zcu);
 const count = if (store_one_ty.toIntern() == base_ty.toIntern()) store_count else 1;

-const want_ty = try zcu.arrayType(.{
+const want_ty = try pt.arrayType(.{
 .len = count,
 .child = base_ty.toIntern(),
 });
@@ -668,7 +671,7 @@ fn prepareComptimePtrStore(
 const agg_val, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) {
 .direct => |direct| .{ direct.val, direct.alloc },
 .index => |index| .{
-try index.val.elem(zcu, sema.arena, @intCast(index.elem_index)),
+try index.val.elem(pt, sema.arena, @intCast(index.elem_index)),
 index.alloc,
 },
 .flat_index => unreachable, // base_ty is not an array
@@ -679,14 +682,14 @@ fn prepareComptimePtrStore(
 const agg_ty = agg_val.typeOf(zcu);
 switch (agg_ty.zigTypeTag(zcu)) {
 .Struct, .Pointer => break :strat .{ .direct = .{
-.val = try agg_val.elem(zcu, sema.arena, @intCast(base_index.index)),
+.val = try agg_val.elem(pt, sema.arena, @intCast(base_index.index)),
 .alloc = alloc,
 } },
 .Union => {
 if (agg_val.* == .interned and Value.fromInterned(agg_val.interned).isUndef(zcu)) {
 return .undef;
 }
-try agg_val.unintern(zcu, sema.arena, false, false);
+try agg_val.unintern(pt, sema.arena, false, false);
 const un = agg_val.un;
 const tag_ty = agg_ty.unionTagTypeHypothetical(zcu);
 if (tag_ty.enumTagFieldIndex(Value.fromInterned(un.tag), zcu).? != base_index.index) {
@@ -847,7 +850,7 @@ fn prepareComptimePtrStore(
 const next_elem_off = elem_size * (elem_idx + 1);
 if (cur_offset + need_bytes <= next_elem_off) {
 // We can look at a single array element.
-cur_val = try cur_val.elem(zcu, sema.arena, @intCast(elem_idx));
+cur_val = try cur_val.elem(pt, sema.arena, @intCast(elem_idx));
 cur_offset -= elem_idx * elem_size;
 } else {
 break;
@@ -857,10 +860,10 @@ fn prepareComptimePtrStore(
 .auto => unreachable, // ill-defined layout
 .@"packed" => break, // let the bitcast logic handle this
 .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
-const start_off = cur_ty.structFieldOffset(field_idx, zcu);
+const start_off = cur_ty.structFieldOffset(field_idx, pt);
 const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu));
 if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
-cur_val = try cur_val.elem(zcu, sema.arena, field_idx);
+cur_val = try cur_val.elem(pt, sema.arena, field_idx);
 cur_offset -= start_off;
 break;
 }
@@ -874,7 +877,7 @@ fn prepareComptimePtrStore(
 // Otherwise, we might traverse into a union field which doesn't allow pointers.
 // Figure out a solution!
 if (true) break;
-try cur_val.unintern(zcu, sema.arena, false, false);
+try cur_val.unintern(pt, sema.arena, false, false);
 const payload = switch (cur_val.*) {
 .un => |un| un.payload,
 else => unreachable,
@@ -918,7 +921,7 @@ fn flattenArray(
 ) Allocator.Error!void {
 if (next_idx.* == out.len) return;

-const zcu = sema.mod;
+const zcu = sema.pt.zcu;

 const ty = val.typeOf(zcu);
 const base_elem_count = ty.arrayBase(zcu)[1];
@@ -928,7 +931,7 @@ fn flattenArray(
 }

 if (ty.zigTypeTag(zcu) != .Array) {
-out[@intCast(next_idx.*)] = (try val.intern(zcu, sema.arena)).toIntern();
+out[@intCast(next_idx.*)] = (try val.intern(sema.pt, sema.arena)).toIntern();
 next_idx.* += 1;
 return;
 }
@@ -942,7 +945,7 @@ fn flattenArray(
 skip.* -= arr_base_elem_count;
 continue;
 }
-try flattenArray(sema, try val.getElem(zcu, elem_idx), skip, next_idx, out);
+try flattenArray(sema, try val.getElem(sema.pt, elem_idx), skip, next_idx, out);
 }
 if (ty.sentinel(zcu)) |s| {
 try flattenArray(sema, .{ .interned = s.toIntern() }, skip, next_idx, out);
@@ -957,13 +960,13 @@ fn unflattenArray(
 elems: []const InternPool.Index,
 next_idx: *u64,
 ) Allocator.Error!Value {
-const zcu = sema.mod;
+const zcu = sema.pt.zcu;
 const arena = sema.arena;

 if (ty.zigTypeTag(zcu) != .Array) {
 const val = Value.fromInterned(elems[@intCast(next_idx.*)]);
 next_idx.* += 1;
-return zcu.getCoerced(val, ty);
+return sema.pt.getCoerced(val, ty);
 }

 const elem_ty = ty.childType(zcu);
@@ -975,7 +978,7 @@ fn unflattenArray(
 // TODO: validate sentinel
 _ = try unflattenArray(sema, elem_ty, elems, next_idx);
 }
-return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
+return Value.fromInterned(try sema.pt.intern(.{ .aggregate = .{
 .ty = ty.toIntern(),
 .storage = .{ .elems = buf },
 } }));
@@ -990,25 +993,25 @@ fn recursiveIndex(
 mv: *MutableValue,
 index: *u64,
 ) !?struct { *MutableValue, u64 } {
-const zcu = sema.mod;
+const pt = sema.pt;

-const ty = mv.typeOf(zcu);
-assert(ty.zigTypeTag(zcu) == .Array);
+const ty = mv.typeOf(pt.zcu);
+assert(ty.zigTypeTag(pt.zcu) == .Array);

-const ty_base_elems = ty.arrayBase(zcu)[1];
+const ty_base_elems = ty.arrayBase(pt.zcu)[1];
 if (index.* >= ty_base_elems) {
 index.* -= ty_base_elems;
 return null;
 }

-const elem_ty = ty.childType(zcu);
-if (elem_ty.zigTypeTag(zcu) != .Array) {
-assert(index.* < ty.arrayLenIncludingSentinel(zcu)); // should be handled by initial check
+const elem_ty = ty.childType(pt.zcu);
+if (elem_ty.zigTypeTag(pt.zcu) != .Array) {
+assert(index.* < ty.arrayLenIncludingSentinel(pt.zcu)); // should be handled by initial check
 return .{ mv, index.* };
 }

-for (0..@intCast(ty.arrayLenIncludingSentinel(zcu))) |elem_index| {
-if (try recursiveIndex(sema, try mv.elem(zcu, sema.arena, elem_index), index)) |result| {
+for (0..@intCast(ty.arrayLenIncludingSentinel(pt.zcu))) |elem_index| {
+if (try recursiveIndex(sema, try mv.elem(pt, sema.arena, elem_index), index)) |result| {
 return result;
 }
 }
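In `Sema`-based code the substitution is equally mechanical: `sema.mod` is replaced by `sema.pt`, with the underlying `*Zcu` recovered as `sema.pt.zcu` where only shared state is read. A hedged sketch of a typical call site after the change (`hostIntType` is an illustrative name, not from the commit):

    fn hostIntType(sema: *Sema, ptr: Value) !Type {
        const pt = sema.pt;
        const zcu = pt.zcu;
        const ptr_info = ptr.typeOf(zcu).ptrInfo(zcu);
        const host_bits = ptr_info.packed_offset.host_size * 8;
        // Creating (interning) a new type goes through the per-thread handle:
        return pt.intType(.unsigned, @intCast(host_bits));
    }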
src/Type.zig (735 changed lines; file diff suppressed because it is too large)
src/Value.zig (2277 changed lines; file diff suppressed because it is too large)
src/Zcu.zig (2102 changed lines; file diff suppressed because it is too large)
src/Zcu/PerThread.zig (normal file, 2102 lines; file diff suppressed because it is too large)
(one further file diff suppressed because it is too large)
@@ -8,9 +8,7 @@ const Mir = @import("Mir.zig");
 const bits = @import("bits.zig");
 const link = @import("../../link.zig");
 const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
-const ErrorMsg = Module.ErrorMsg;
+const ErrorMsg = Zcu.ErrorMsg;
 const assert = std.debug.assert;
 const Instruction = bits.Instruction;
 const Register = bits.Register;
@@ -22,7 +20,7 @@ bin_file: *link.File,
 debug_output: DebugInfoOutput,
 target: *const std.Target,
 err_msg: ?*ErrorMsg = null,
-src_loc: Module.LazySrcLoc,
+src_loc: Zcu.LazySrcLoc,
 code: *std.ArrayList(u8),

 prev_di_line: u32,
@@ -5,8 +5,6 @@ const Register = bits.Register;
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 const Type = @import("../../Type.zig");
 const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;

 pub const Class = union(enum) {
 memory,
@@ -17,44 +15,44 @@ pub const Class = union(enum) {
 };

 /// For `float_array` the second element will be the amount of floats.
-pub fn classifyType(ty: Type, mod: *Module) Class {
-std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod));
+pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
+std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(pt));

 var maybe_float_bits: ?u16 = null;
-switch (ty.zigTypeTag(mod)) {
+switch (ty.zigTypeTag(pt.zcu)) {
 .Struct => {
-if (ty.containerLayout(mod) == .@"packed") return .byval;
-const float_count = countFloats(ty, mod, &maybe_float_bits);
+if (ty.containerLayout(pt.zcu) == .@"packed") return .byval;
+const float_count = countFloats(ty, pt.zcu, &maybe_float_bits);
 if (float_count <= sret_float_count) return .{ .float_array = float_count };

-const bit_size = ty.bitSize(mod);
+const bit_size = ty.bitSize(pt);
 if (bit_size > 128) return .memory;
 if (bit_size > 64) return .double_integer;
 return .integer;
 },
 .Union => {
-if (ty.containerLayout(mod) == .@"packed") return .byval;
-const float_count = countFloats(ty, mod, &maybe_float_bits);
+if (ty.containerLayout(pt.zcu) == .@"packed") return .byval;
+const float_count = countFloats(ty, pt.zcu, &maybe_float_bits);
 if (float_count <= sret_float_count) return .{ .float_array = float_count };

-const bit_size = ty.bitSize(mod);
+const bit_size = ty.bitSize(pt);
 if (bit_size > 128) return .memory;
 if (bit_size > 64) return .double_integer;
 return .integer;
 },
 .Int, .Enum, .ErrorSet, .Float, .Bool => return .byval,
 .Vector => {
-const bit_size = ty.bitSize(mod);
+const bit_size = ty.bitSize(pt);
 // TODO is this controlled by a cpu feature?
 if (bit_size > 128) return .memory;
 return .byval;
 },
 .Optional => {
-std.debug.assert(ty.isPtrLikeOptional(mod));
+std.debug.assert(ty.isPtrLikeOptional(pt.zcu));
 return .byval;
 },
 .Pointer => {
-std.debug.assert(!ty.isSlice(mod));
+std.debug.assert(!ty.isSlice(pt.zcu));
 return .byval;
 },
 .ErrorUnion,
@@ -76,16 +74,16 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
 }

 const sret_float_count = 4;
-fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 {
-const ip = &mod.intern_pool;
-const target = mod.getTarget();
+fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u8 {
+const ip = &zcu.intern_pool;
+const target = zcu.getTarget();
 const invalid = std.math.maxInt(u8);
-switch (ty.zigTypeTag(mod)) {
+switch (ty.zigTypeTag(zcu)) {
 .Union => {
-const union_obj = mod.typeToUnion(ty).?;
+const union_obj = zcu.typeToUnion(ty).?;
 var max_count: u8 = 0;
 for (union_obj.field_types.get(ip)) |field_ty| {
-const field_count = countFloats(Type.fromInterned(field_ty), mod, maybe_float_bits);
+const field_count = countFloats(Type.fromInterned(field_ty), zcu, maybe_float_bits);
 if (field_count == invalid) return invalid;
 if (field_count > max_count) max_count = field_count;
 if (max_count > sret_float_count) return invalid;
@@ -93,12 +91,12 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 {
 return max_count;
 },
 .Struct => {
-const fields_len = ty.structFieldCount(mod);
+const fields_len = ty.structFieldCount(zcu);
 var count: u8 = 0;
 var i: u32 = 0;
 while (i < fields_len) : (i += 1) {
-const field_ty = ty.structFieldType(i, mod);
-const field_count = countFloats(field_ty, mod, maybe_float_bits);
+const field_ty = ty.structFieldType(i, zcu);
+const field_count = countFloats(field_ty, zcu, maybe_float_bits);
 if (field_count == invalid) return invalid;
 count += field_count;
 if (count > sret_float_count) return invalid;
@@ -118,22 +116,22 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 {
 }
 }

-pub fn getFloatArrayType(ty: Type, mod: *Module) ?Type {
-const ip = &mod.intern_pool;
-switch (ty.zigTypeTag(mod)) {
+pub fn getFloatArrayType(ty: Type, zcu: *Zcu) ?Type {
+const ip = &zcu.intern_pool;
+switch (ty.zigTypeTag(zcu)) {
 .Union => {
-const union_obj = mod.typeToUnion(ty).?;
+const union_obj = zcu.typeToUnion(ty).?;
 for (union_obj.field_types.get(ip)) |field_ty| {
-if (getFloatArrayType(Type.fromInterned(field_ty), mod)) |some| return some;
+if (getFloatArrayType(Type.fromInterned(field_ty), zcu)) |some| return some;
 }
 return null;
 },
 .Struct => {
-const fields_len = ty.structFieldCount(mod);
+const fields_len = ty.structFieldCount(zcu);
 var i: u32 = 0;
 while (i < fields_len) : (i += 1) {
-const field_ty = ty.structFieldType(i, mod);
-if (getFloatArrayType(field_ty, mod)) |some| return some;
+const field_ty = ty.structFieldType(i, zcu);
+if (getFloatArrayType(field_ty, zcu)) |some| return some;
 }
 return null;
 },
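In the backend ABI helpers, only the entry points that can trigger layout resolution move to `Zcu.PerThread`; internal helpers such as `countFloats` and `getFloatArrayType` now take a plain `*Zcu`. A small usage sketch under that assumption (`passesByValue` is an illustrative caller, not from the commit):

    fn passesByValue(ty: Type, pt: Zcu.PerThread) bool {
        // `classifyType` takes the per-thread handle because it calls
        // `ty.bitSize(pt)`, which may resolve and intern type layouts.
        return switch (classifyType(ty, pt)) {
            .byval => true,
            else => false,
        };
    }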
(file diff suppressed because it is too large)
@@ -9,10 +9,8 @@ const Mir = @import("Mir.zig");
 const bits = @import("bits.zig");
 const link = @import("../../link.zig");
 const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
 const Type = @import("../../Type.zig");
-const ErrorMsg = Module.ErrorMsg;
+const ErrorMsg = Zcu.ErrorMsg;
 const Target = std.Target;
 const assert = std.debug.assert;
 const Instruction = bits.Instruction;
@@ -26,7 +24,7 @@ bin_file: *link.File,
 debug_output: DebugInfoOutput,
 target: *const std.Target,
 err_msg: ?*ErrorMsg = null,
-src_loc: Module.LazySrcLoc,
+src_loc: Zcu.LazySrcLoc,
 code: *std.ArrayList(u8),

 prev_di_line: u32,
@@ -5,8 +5,6 @@ const Register = bits.Register;
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 const Type = @import("../../Type.zig");
 const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;

 pub const Class = union(enum) {
 memory,
@@ -26,29 +24,29 @@ pub const Class = union(enum) {

 pub const Context = enum { ret, arg };

-pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
-assert(ty.hasRuntimeBitsIgnoreComptime(mod));
+pub fn classifyType(ty: Type, pt: Zcu.PerThread, ctx: Context) Class {
+assert(ty.hasRuntimeBitsIgnoreComptime(pt));

 var maybe_float_bits: ?u16 = null;
 const max_byval_size = 512;
-const ip = &mod.intern_pool;
-switch (ty.zigTypeTag(mod)) {
+const ip = &pt.zcu.intern_pool;
+switch (ty.zigTypeTag(pt.zcu)) {
 .Struct => {
-const bit_size = ty.bitSize(mod);
-if (ty.containerLayout(mod) == .@"packed") {
+const bit_size = ty.bitSize(pt);
+if (ty.containerLayout(pt.zcu) == .@"packed") {
 if (bit_size > 64) return .memory;
 return .byval;
 }
 if (bit_size > max_byval_size) return .memory;
-const float_count = countFloats(ty, mod, &maybe_float_bits);
+const float_count = countFloats(ty, pt.zcu, &maybe_float_bits);
 if (float_count <= byval_float_count) return .byval;

-const fields = ty.structFieldCount(mod);
+const fields = ty.structFieldCount(pt.zcu);
 var i: u32 = 0;
 while (i < fields) : (i += 1) {
-const field_ty = ty.structFieldType(i, mod);
-const field_alignment = ty.structFieldAlign(i, mod);
-const field_size = field_ty.bitSize(mod);
+const field_ty = ty.structFieldType(i, pt.zcu);
+const field_alignment = ty.structFieldAlign(i, pt);
+const field_size = field_ty.bitSize(pt);
 if (field_size > 32 or field_alignment.compare(.gt, .@"32")) {
 return Class.arrSize(bit_size, 64);
 }
@@ -56,19 +54,19 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
 return Class.arrSize(bit_size, 32);
 },
 .Union => {
-const bit_size = ty.bitSize(mod);
-const union_obj = mod.typeToUnion(ty).?;
+const bit_size = ty.bitSize(pt);
+const union_obj = pt.zcu.typeToUnion(ty).?;
 if (union_obj.getLayout(ip) == .@"packed") {
 if (bit_size > 64) return .memory;
 return .byval;
 }
 if (bit_size > max_byval_size) return .memory;
-const float_count = countFloats(ty, mod, &maybe_float_bits);
+const float_count = countFloats(ty, pt.zcu, &maybe_float_bits);
 if (float_count <= byval_float_count) return .byval;

 for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
-if (Type.fromInterned(field_ty).bitSize(mod) > 32 or
-mod.unionFieldNormalAlignment(union_obj, @intCast(field_index)).compare(.gt, .@"32"))
+if (Type.fromInterned(field_ty).bitSize(pt) > 32 or
+pt.unionFieldNormalAlignment(union_obj, @intCast(field_index)).compare(.gt, .@"32"))
 {
 return Class.arrSize(bit_size, 64);
 }
@@ -79,28 +77,28 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
 .Int => {
 // TODO this is incorrect for _BitInt(128) but implementing
 // this correctly makes implementing compiler-rt impossible.
-// const bit_size = ty.bitSize(mod);
+// const bit_size = ty.bitSize(pt);
 // if (bit_size > 64) return .memory;
 return .byval;
 },
 .Enum, .ErrorSet => {
-const bit_size = ty.bitSize(mod);
+const bit_size = ty.bitSize(pt);
 if (bit_size > 64) return .memory;
 return .byval;
 },
 .Vector => {
-const bit_size = ty.bitSize(mod);
+const bit_size = ty.bitSize(pt);
 // TODO is this controlled by a cpu feature?
 if (ctx == .ret and bit_size > 128) return .memory;
 if (bit_size > 512) return .memory;
 return .byval;
 },
 .Optional => {
-assert(ty.isPtrLikeOptional(mod));
+assert(ty.isPtrLikeOptional(pt.zcu));
 return .byval;
 },
 .Pointer => {
-assert(!ty.isSlice(mod));
+assert(!ty.isSlice(pt.zcu));
 return .byval;
 },
 .ErrorUnion,
@@ -122,16 +120,16 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
 }

 const byval_float_count = 4;
-fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 {
-const ip = &mod.intern_pool;
-const target = mod.getTarget();
+fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u32 {
+const ip = &zcu.intern_pool;
+const target = zcu.getTarget();
 const invalid = std.math.maxInt(u32);
-switch (ty.zigTypeTag(mod)) {
+switch (ty.zigTypeTag(zcu)) {
 .Union => {
-const union_obj = mod.typeToUnion(ty).?;
+const union_obj = zcu.typeToUnion(ty).?;
 var max_count: u32 = 0;
 for (union_obj.field_types.get(ip)) |field_ty| {
-const field_count = countFloats(Type.fromInterned(field_ty), mod, maybe_float_bits);
+const field_count = countFloats(Type.fromInterned(field_ty), zcu, maybe_float_bits);
 if (field_count == invalid) return invalid;
 if (field_count > max_count) max_count = field_count;
 if (max_count > byval_float_count) return invalid;
@@ -139,12 +137,12 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 {
 return max_count;
 },
 .Struct => {
-const fields_len = ty.structFieldCount(mod);
+const fields_len = ty.structFieldCount(zcu);
 var count: u32 = 0;
 var i: u32 = 0;
 while (i < fields_len) : (i += 1) {
-const field_ty = ty.structFieldType(i, mod);
-const field_count = countFloats(field_ty, mod, maybe_float_bits);
+const field_ty = ty.structFieldType(i, zcu);
+const field_count = countFloats(field_ty, zcu, maybe_float_bits);
 if (field_count == invalid) return invalid;
 count += field_count;
 if (count > byval_float_count) return invalid;
(file diff suppressed because it is too large)
@@ -1,5 +1,6 @@
 //! This file contains the functionality for emitting RISC-V MIR as machine code

+bin_file: *link.File,
 lower: Lower,
 debug_output: DebugInfoOutput,
 code: *std.ArrayList(u8),
@@ -48,7 +49,7 @@ pub fn emitMir(emit: *Emit) Error!void {
 .Lib => emit.lower.link_mode == .static,
 };

-if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| {
+if (emit.bin_file.cast(link.File.Elf)) |elf_file| {
 const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?;
 const sym_index = elf_file.zigObjectPtr().?.symbol(symbol.sym_index);
 const sym = elf_file.symbol(sym_index);
@@ -77,7 +78,7 @@ pub fn emitMir(emit: *Emit) Error!void {
 } else return emit.fail("TODO: load_symbol_reloc non-ELF", .{});
 },
 .call_extern_fn_reloc => |symbol| {
-if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| {
+if (emit.bin_file.cast(link.File.Elf)) |elf_file| {
 const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?;

 const r_type: u32 = @intFromEnum(std.elf.R_RISCV.CALL_PLT);
@@ -1,6 +1,6 @@
 //! This file contains the functionality for lowering RISC-V MIR to Instructions

-bin_file: *link.File,
+pt: Zcu.PerThread,
 output_mode: std.builtin.OutputMode,
 link_mode: std.builtin.LinkMode,
 pic: bool,
@@ -44,7 +44,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
 insts: []const Instruction,
 relocs: []const Reloc,
 } {
-const zcu = lower.bin_file.comp.module.?;
+const pt = lower.pt;

 lower.result_insts = undefined;
 lower.result_relocs = undefined;
@@ -243,11 +243,11 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {

 const class = rs1.class();
 const ty = compare.ty;
-const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(zcu)) catch {
-return lower.fail("pseudo_compare size {}", .{ty.bitSize(zcu)});
+const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(pt)) catch {
+return lower.fail("pseudo_compare size {}", .{ty.bitSize(pt)});
 };

-const is_unsigned = ty.isUnsignedInt(zcu);
+const is_unsigned = ty.isUnsignedInt(pt.zcu);

 const less_than: Encoding.Mnemonic = if (is_unsigned) .sltu else .slt;

@@ -502,7 +502,7 @@ pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error {
 }

 fn hasFeature(lower: *Lower, feature: std.Target.riscv.Feature) bool {
-const target = lower.bin_file.comp.module.?.getTarget();
+const target = lower.pt.zcu.getTarget();
 const features = target.cpu.features;
 return std.Target.riscv.featureSetHas(features, feature);
 }
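With the `bin_file` field removed from `Lower`, target information is reached through the per-thread handle instead of the link file. A minimal sketch of the new access path (`wordSize` is an illustrative name, not from the commit):

    fn wordSize(lower: *const Lower) u16 {
        // Previously: lower.bin_file.comp.module.?.getTarget()
        const target = lower.pt.zcu.getTarget();
        return target.ptrBitWidth();
    }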
@@ -9,15 +9,15 @@ const assert = std.debug.assert;

 pub const Class = enum { memory, byval, integer, double_integer, fields };

-pub fn classifyType(ty: Type, mod: *Zcu) Class {
-const target = mod.getTarget();
-std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod));
+pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
+const target = pt.zcu.getTarget();
+std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(pt));

 const max_byval_size = target.ptrBitWidth() * 2;
-switch (ty.zigTypeTag(mod)) {
+switch (ty.zigTypeTag(pt.zcu)) {
 .Struct => {
-const bit_size = ty.bitSize(mod);
-if (ty.containerLayout(mod) == .@"packed") {
+const bit_size = ty.bitSize(pt);
+if (ty.containerLayout(pt.zcu) == .@"packed") {
 if (bit_size > max_byval_size) return .memory;
 return .byval;
 }
@@ -25,12 +25,12 @@ pub fn classifyType(ty: Type, mod: *Zcu) Class {
 if (std.Target.riscv.featureSetHas(target.cpu.features, .d)) fields: {
 var any_fp = false;
 var field_count: usize = 0;
-for (0..ty.structFieldCount(mod)) |field_index| {
-const field_ty = ty.structFieldType(field_index, mod);
-if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+for (0..ty.structFieldCount(pt.zcu)) |field_index| {
+const field_ty = ty.structFieldType(field_index, pt.zcu);
+if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
 if (field_ty.isRuntimeFloat())
 any_fp = true
-else if (!field_ty.isAbiInt(mod))
+else if (!field_ty.isAbiInt(pt.zcu))
 break :fields;
 field_count += 1;
 if (field_count > 2) break :fields;
@@ -45,8 +45,8 @@ pub fn classifyType(ty: Type, mod: *Zcu) Class {
 return .integer;
 },
 .Union => {
-const bit_size = ty.bitSize(mod);
-if (ty.containerLayout(mod) == .@"packed") {
+const bit_size = ty.bitSize(pt);
+if (ty.containerLayout(pt.zcu) == .@"packed") {
 if (bit_size > max_byval_size) return .memory;
 return .byval;
 }
@@ -58,21 +58,21 @@ pub fn classifyType(ty: Type, mod: *Zcu) Class {
 .Bool => return .integer,
 .Float => return .byval,
 .Int, .Enum, .ErrorSet => {
-const bit_size = ty.bitSize(mod);
+const bit_size = ty.bitSize(pt);
 if (bit_size > max_byval_size) return .memory;
 return .byval;
 },
 .Vector => {
-const bit_size = ty.bitSize(mod);
+const bit_size = ty.bitSize(pt);
 if (bit_size > max_byval_size) return .memory;
 return .integer;
 },
 .Optional => {
-std.debug.assert(ty.isPtrLikeOptional(mod));
+std.debug.assert(ty.isPtrLikeOptional(pt.zcu));
 return .byval;
 },
 .Pointer => {
-std.debug.assert(!ty.isSlice(mod));
+std.debug.assert(!ty.isSlice(pt.zcu));
 return .byval;
 },
 .ErrorUnion,
@@ -97,18 +97,19 @@ pub const SystemClass = enum { integer, float, memory, none };

 /// There are a maximum of 8 possible return slots. Returned values are in
 /// the beginning of the array; unused slots are filled with .none.
-pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
+pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
+const zcu = pt.zcu;
 var result = [1]SystemClass{.none} ** 8;
 const memory_class = [_]SystemClass{
 .memory, .none, .none, .none,
 .none, .none, .none, .none,
 };
-switch (ty.zigTypeTag(zcu)) {
+switch (ty.zigTypeTag(pt.zcu)) {
 .Bool, .Void, .NoReturn => {
 result[0] = .integer;
 return result;
 },
-.Pointer => switch (ty.ptrSize(zcu)) {
+.Pointer => switch (ty.ptrSize(pt.zcu)) {
 .Slice => {
 result[0] = .integer;
 result[1] = .integer;
@@ -120,17 +121,17 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
 },
 },
 .Optional => {
-if (ty.isPtrLikeOptional(zcu)) {
+if (ty.isPtrLikeOptional(pt.zcu)) {
 result[0] = .integer;
 return result;
 }
 result[0] = .integer;
-if (ty.optionalChild(zcu).abiSize(zcu) == 0) return result;
+if (ty.optionalChild(zcu).abiSize(pt) == 0) return result;
 result[1] = .integer;
 return result;
 },
 .Int, .Enum, .ErrorSet => {
-const int_bits = ty.intInfo(zcu).bits;
+const int_bits = ty.intInfo(pt.zcu).bits;
 if (int_bits <= 64) {
 result[0] = .integer;
 return result;
@@ -155,8 +156,8 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
 unreachable; // support split float args
 },
 .ErrorUnion => {
-const payload_ty = ty.errorUnionPayload(zcu);
-const payload_bits = payload_ty.bitSize(zcu);
+const payload_ty = ty.errorUnionPayload(pt.zcu);
+const payload_bits = payload_ty.bitSize(pt);

 // the error union itself
 result[0] = .integer;
@@ -167,8 +168,8 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
 return memory_class;
 },
 .Struct => {
-const layout = ty.containerLayout(zcu);
-const ty_size = ty.abiSize(zcu);
+const layout = ty.containerLayout(pt.zcu);
+const ty_size = ty.abiSize(pt);

 if (layout == .@"packed") {
 assert(ty_size <= 16);
@@ -180,7 +181,7 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
 return memory_class;
 },
 .Array => {
-const ty_size = ty.abiSize(zcu);
+const ty_size = ty.abiSize(pt);
 if (ty_size <= 8) {
 result[0] = .integer;
 return result;
@@ -11,11 +11,9 @@ const Allocator = mem.Allocator;
 const builtin = @import("builtin");
 const link = @import("../../link.zig");
 const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
 const InternPool = @import("../../InternPool.zig");
 const Value = @import("../../Value.zig");
-const ErrorMsg = Module.ErrorMsg;
+const ErrorMsg = Zcu.ErrorMsg;
 const codegen = @import("../../codegen.zig");
 const Air = @import("../../Air.zig");
 const Mir = @import("Mir.zig");
@@ -52,6 +50,7 @@ const RegisterView = enum(u1) {
 };

 gpa: Allocator,
+pt: Zcu.PerThread,
 air: Air,
 liveness: Liveness,
 bin_file: *link.File,
@@ -64,7 +63,7 @@ args: []MCValue,
 ret_mcv: MCValue,
 fn_type: Type,
 arg_index: usize,
-src_loc: Module.LazySrcLoc,
+src_loc: Zcu.LazySrcLoc,
 stack_align: Alignment,

 /// MIR Instructions
@@ -263,15 +262,16 @@ const BigTomb = struct {

 pub fn generate(
 lf: *link.File,
-src_loc: Module.LazySrcLoc,
+pt: Zcu.PerThread,
+src_loc: Zcu.LazySrcLoc,
 func_index: InternPool.Index,
 air: Air,
 liveness: Liveness,
 code: *std.ArrayList(u8),
 debug_output: DebugInfoOutput,
 ) CodeGenError!Result {
-const gpa = lf.comp.gpa;
-const zcu = lf.comp.module.?;
+const zcu = pt.zcu;
+const gpa = zcu.gpa;
 const func = zcu.funcInfo(func_index);
 const fn_owner_decl = zcu.declPtr(func.owner_decl);
 assert(fn_owner_decl.has_tv);
@@ -289,11 +289,12 @@ pub fn generate(

 var function = Self{
 .gpa = gpa,
+.pt = pt,
 .air = air,
 .liveness = liveness,
 .target = target,
-.func_index = func_index,
 .bin_file = lf,
+.func_index = func_index,
 .code = code,
 .debug_output = debug_output,
 .err_msg = null,
@@ -365,7 +366,8 @@ pub fn generate(
 }

 fn gen(self: *Self) !void {
-const mod = self.bin_file.comp.module.?;
+const pt = self.pt;
+const mod = pt.zcu;
 const cc = self.fn_type.fnCallingConvention(mod);
 if (cc != .Naked) {
 // TODO Finish function prologue and epilogue for sparc64.
@@ -493,7 +495,8 @@ fn gen(self: *Self) !void {
 }

 fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
-const mod = self.bin_file.comp.module.?;
+const pt = self.pt;
+const mod = pt.zcu;
 const ip = &mod.intern_pool;
 const air_tags = self.air.instructions.items(.tag);

@@ -757,7 +760,8 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
 const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
 const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
-const mod = self.bin_file.comp.module.?;
+const pt = self.pt;
+const mod = pt.zcu;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const lhs = try self.resolveInst(extra.lhs);
 const rhs = try self.resolveInst(extra.rhs);
@@ -835,7 +839,8 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
-const mod = self.bin_file.comp.module.?;
+const pt = self.pt;
+const mod = pt.zcu;
 const vector_ty = self.typeOfIndex(inst);
 const len = vector_ty.vectorLen(mod);
 const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -869,7 +874,8 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
-const mod = self.bin_file.comp.module.?;
+const pt = self.pt;
+const mod = pt.zcu;
 const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const ptr_ty = self.typeOf(ty_op.operand);
@@ -1006,7 +1012,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airArg(self: *Self, inst: Air.Inst.Index) !void {
-const mod = self.bin_file.comp.module.?;
+const pt = self.pt;
 const arg_index = self.arg_index;
 self.arg_index += 1;

@@ -1016,8 +1022,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
 const mcv = blk: {
 switch (arg) {
 .stack_offset => |off| {
-const abi_size = math.cast(u32, ty.abiSize(mod)) orelse {
-return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)});
+const abi_size = math.cast(u32, ty.abiSize(pt)) orelse {
+return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(pt)});
 };
 const offset = off + abi_size;
 break :blk MCValue{ .stack_offset = offset };
@@ -1205,7 +1211,8 @@ fn airBreakpoint(self: *Self) !void {
 }

 fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
-const mod = self.bin_file.comp.module.?;
+const pt = self.pt;
+const mod = pt.zcu;
 const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;

 // We have hardware byteswapper in SPARCv9, don't let mainstream compilers mislead you.
@@ -1228,7 +1235,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
 if (int_info.bits == 8) break :result operand;

 const abi_size = int_info.bits >> 3;
-const abi_align = operand_ty.abiAlignment(mod);
+const abi_align = operand_ty.abiAlignment(pt);
 const opposite_endian_asi = switch (self.target.cpu.arch.endian()) {
 Endian.big => ASI.asi_primary_little,
 Endian.little => ASI.asi_primary,
@@ -1297,7 +1304,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 const extra = self.air.extraData(Air.Call, pl_op.payload);
 const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end .. extra.end + extra.data.args_len]));
 const ty = self.typeOf(callee);
-const mod = self.bin_file.comp.module.?;
+const pt = self.pt;
+const mod = pt.zcu;
 const fn_ty = switch (ty.zigTypeTag(mod)) {
 .Fn => ty,
 .Pointer => ty.childType(mod),
@@ -1341,7 +1349,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier

 // Due to incremental compilation, how function calls are generated depends
 // on linking.
-if (try self.air.value(callee, mod)) |func_value| {
+if (try self.air.value(callee, pt)) |func_value| {
 if (self.bin_file.tag == link.File.Elf.base_tag) {
 switch (mod.intern_pool.indexToKey(func_value.ip_index)) {
 .func => |func| {
@@ -1429,7 +1437,8 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {

 fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
 const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-const mod = self.bin_file.comp.module.?;
+const pt = self.pt;
+const mod = pt.zcu;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const lhs = try self.resolveInst(bin_op.lhs);
 const rhs = try self.resolveInst(bin_op.rhs);
@@ -1444,7 +1453,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
|
||||
.ErrorSet => Type.u16,
|
||||
.Optional => blk: {
|
||||
const payload_ty = lhs_ty.optionalChild(mod);
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
|
||||
break :blk Type.u1;
|
||||
} else if (lhs_ty.isPtrLikeOptional(mod)) {
|
||||
break :blk Type.usize;
|
||||
@ -1655,7 +1664,8 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
|
||||
}
|
||||
|
||||
fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
|
||||
const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
|
||||
const func = mod.funcInfo(extra.data.func);
|
||||
@ -1753,7 +1763,8 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
|
||||
if (self.liveness.isUnused(inst))
|
||||
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
|
||||
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const operand_ty = self.typeOf(ty_op.operand);
|
||||
const operand = try self.resolveInst(ty_op.operand);
|
||||
const info_a = operand_ty.intInfo(mod);
|
||||
@ -1814,12 +1825,13 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
|
||||
}
|
||||
|
||||
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
|
||||
const elem_ty = self.typeOfIndex(inst);
|
||||
const elem_size = elem_ty.abiSize(mod);
|
||||
const elem_size = elem_ty.abiSize(pt);
|
||||
const result: MCValue = result: {
|
||||
if (!elem_ty.hasRuntimeBits(mod))
|
||||
if (!elem_ty.hasRuntimeBits(pt))
|
||||
break :result MCValue.none;
|
||||
|
||||
const ptr = try self.resolveInst(ty_op.operand);
|
||||
@ -1898,7 +1910,7 @@ fn airMod(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const rhs = try self.resolveInst(bin_op.rhs);
|
||||
const lhs_ty = self.typeOf(bin_op.lhs);
|
||||
const rhs_ty = self.typeOf(bin_op.rhs);
|
||||
assert(lhs_ty.eql(rhs_ty, self.bin_file.comp.module.?));
|
||||
assert(lhs_ty.eql(rhs_ty, self.pt.zcu));
|
||||
|
||||
if (self.liveness.isUnused(inst))
|
||||
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
|
||||
@ -2040,7 +2052,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
|
||||
//const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
|
||||
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
|
||||
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const lhs = try self.resolveInst(extra.lhs);
|
||||
const rhs = try self.resolveInst(extra.rhs);
|
||||
@ -2104,7 +2117,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
|
||||
|
||||
fn airNot(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const operand = try self.resolveInst(ty_op.operand);
|
||||
const operand_ty = self.typeOf(ty_op.operand);
|
||||
@ -2336,7 +2350,8 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
|
||||
fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
|
||||
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const lhs = try self.resolveInst(extra.lhs);
|
||||
const rhs = try self.resolveInst(extra.rhs);
|
||||
@ -2441,7 +2456,8 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
|
||||
}
|
||||
|
||||
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const is_volatile = false; // TODO
|
||||
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
|
||||
|
||||
@ -2452,7 +2468,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
|
||||
|
||||
const slice_ty = self.typeOf(bin_op.lhs);
|
||||
const elem_ty = slice_ty.childType(mod);
|
||||
const elem_size = elem_ty.abiSize(mod);
|
||||
const elem_size = elem_ty.abiSize(pt);
|
||||
|
||||
const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod);
|
||||
|
||||
@ -2566,10 +2582,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const operand = extra.struct_operand;
|
||||
const index = extra.field_index;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mcv = try self.resolveInst(operand);
|
||||
const struct_ty = self.typeOf(operand);
|
||||
const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
|
||||
const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt)));
|
||||
|
||||
switch (mcv) {
|
||||
.dead, .unreach => unreachable,
|
||||
@ -2699,13 +2715,14 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
|
||||
}
|
||||
|
||||
fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const error_union_ty = self.typeOf(ty_op.operand);
|
||||
const payload_ty = error_union_ty.errorUnionPayload(mod);
|
||||
const mcv = try self.resolveInst(ty_op.operand);
|
||||
if (!payload_ty.hasRuntimeBits(mod)) break :result mcv;
|
||||
if (!payload_ty.hasRuntimeBits(pt)) break :result mcv;
|
||||
|
||||
return self.fail("TODO implement unwrap error union error for non-empty payloads", .{});
|
||||
};
|
||||
@ -2713,12 +2730,13 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
|
||||
}
|
||||
|
||||
fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const error_union_ty = self.typeOf(ty_op.operand);
|
||||
const payload_ty = error_union_ty.errorUnionPayload(mod);
|
||||
if (!payload_ty.hasRuntimeBits(mod)) break :result MCValue.none;
|
||||
if (!payload_ty.hasRuntimeBits(pt)) break :result MCValue.none;
|
||||
|
||||
return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{});
|
||||
};
|
||||
@ -2727,13 +2745,14 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
|
||||
|
||||
/// E to E!T
|
||||
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const error_union_ty = ty_op.ty.toType();
|
||||
const payload_ty = error_union_ty.errorUnionPayload(mod);
|
||||
const mcv = try self.resolveInst(ty_op.operand);
|
||||
if (!payload_ty.hasRuntimeBits(mod)) break :result mcv;
|
||||
if (!payload_ty.hasRuntimeBits(pt)) break :result mcv;
|
||||
|
||||
return self.fail("TODO implement wrap errunion error for non-empty payloads", .{});
|
||||
};
|
||||
@ -2748,13 +2767,13 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
|
||||
}
|
||||
|
||||
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const optional_ty = self.typeOfIndex(inst);
|
||||
|
||||
// Optional with a zero-bit payload type is just a boolean true
|
||||
if (optional_ty.abiSize(mod) == 1)
|
||||
if (optional_ty.abiSize(pt) == 1)
|
||||
break :result MCValue{ .immediate = 1 };
|
||||
|
||||
return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch});
|
||||
@ -2788,10 +2807,11 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: Alignme
|
||||
|
||||
/// Use a pointer instruction as the basis for allocating stack memory.
|
||||
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const elem_ty = self.typeOfIndex(inst).childType(mod);
|
||||
|
||||
if (!elem_ty.hasRuntimeBits(mod)) {
|
||||
if (!elem_ty.hasRuntimeBits(pt)) {
|
||||
// As this stack item will never be dereferenced at runtime,
|
||||
// return the stack offset 0. Stack offset 0 will be where all
|
||||
// zero-sized stack allocations live as non-zero-sized
|
||||
@ -2799,21 +2819,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
|
||||
return @as(u32, 0);
|
||||
}
|
||||
|
||||
const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
|
||||
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
|
||||
const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
|
||||
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
|
||||
};
|
||||
// TODO swap this for inst.ty.ptrAlign
|
||||
const abi_align = elem_ty.abiAlignment(mod);
|
||||
const abi_align = elem_ty.abiAlignment(pt);
|
||||
return self.allocMem(inst, abi_size, abi_align);
|
||||
}
|
||||
|
||||
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const elem_ty = self.typeOfIndex(inst);
|
||||
const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
|
||||
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
|
||||
const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
|
||||
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
|
||||
};
|
||||
const abi_align = elem_ty.abiAlignment(mod);
|
||||
const abi_align = elem_ty.abiAlignment(pt);
|
||||
self.stack_align = self.stack_align.max(abi_align);
|
||||
|
||||
if (reg_ok) {
|
||||
@ -2855,7 +2875,8 @@ fn binOp(
|
||||
rhs_ty: Type,
|
||||
metadata: ?BinOpMetadata,
|
||||
) InnerError!MCValue {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
switch (tag) {
|
||||
.add,
|
||||
.sub,
|
||||
@ -2996,7 +3017,7 @@ fn binOp(
|
||||
.One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
|
||||
else => ptr_ty.childType(mod),
|
||||
};
|
||||
const elem_size = elem_ty.abiSize(mod);
|
||||
const elem_size = elem_ty.abiSize(pt);
|
||||
|
||||
if (elem_size == 1) {
|
||||
const base_tag: Mir.Inst.Tag = switch (tag) {
|
||||
@ -3396,8 +3417,8 @@ fn binOpRegister(
|
||||
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
|
||||
const block_data = self.blocks.getPtr(block).?;
|
||||
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
if (self.typeOf(operand).hasRuntimeBits(mod)) {
|
||||
const pt = self.pt;
|
||||
if (self.typeOf(operand).hasRuntimeBits(pt)) {
|
||||
const operand_mcv = try self.resolveInst(operand);
|
||||
const block_mcv = block_data.mcv;
|
||||
if (block_mcv == .none) {
|
||||
@ -3516,17 +3537,18 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
|
||||
|
||||
/// Given an error union, returns the payload
|
||||
fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const err_ty = error_union_ty.errorUnionSet(mod);
|
||||
const payload_ty = error_union_ty.errorUnionPayload(mod);
|
||||
if (err_ty.errorSetIsEmpty(mod)) {
|
||||
return error_union_mcv;
|
||||
}
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
|
||||
return MCValue.none;
|
||||
}
|
||||
|
||||
const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod)));
|
||||
const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt)));
|
||||
switch (error_union_mcv) {
|
||||
.register => return self.fail("TODO errUnionPayload for registers", .{}),
|
||||
.stack_offset => |off| {
|
||||
@ -3587,7 +3609,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
|
||||
}
|
||||
|
||||
fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const arg = self.air.instructions.items(.data)[@intFromEnum(inst)].arg;
|
||||
const ty = arg.ty.toType();
|
||||
const owner_decl = mod.funcOwnerDeclIndex(self.func_index);
|
||||
@ -3736,7 +3759,7 @@ fn genLoadASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Reg
|
||||
}
|
||||
|
||||
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
switch (mcv) {
|
||||
.dead => unreachable,
|
||||
.unreach, .none => return, // Nothing to do.
|
||||
@ -3935,20 +3958,21 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
|
||||
// The value is in memory at a hard-coded address.
|
||||
// If the type is a pointer, it means the pointer address is at this memory location.
|
||||
try self.genSetReg(ty, reg, .{ .immediate = addr });
|
||||
try self.genLoad(reg, reg, i13, 0, ty.abiSize(mod));
|
||||
try self.genLoad(reg, reg, i13, 0, ty.abiSize(pt));
|
||||
},
|
||||
.stack_offset => |off| {
|
||||
const real_offset = realStackOffset(off);
|
||||
const simm13 = math.cast(i13, real_offset) orelse
|
||||
return self.fail("TODO larger stack offsets: {}", .{real_offset});
|
||||
try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(mod));
|
||||
try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(pt));
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const abi_size = ty.abiSize(mod);
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const abi_size = ty.abiSize(pt);
|
||||
switch (mcv) {
|
||||
.dead => unreachable,
|
||||
.unreach, .none => return, // Nothing to do.
|
||||
@ -3956,7 +3980,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
|
||||
if (!self.wantSafety())
|
||||
return; // The already existing value will do just fine.
|
||||
// TODO Upgrade this to a memset call when we have that available.
|
||||
switch (ty.abiSize(mod)) {
|
||||
switch (ty.abiSize(pt)) {
|
||||
1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
|
||||
2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
|
||||
4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
|
||||
@ -3986,7 +4010,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
|
||||
try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
|
||||
|
||||
const overflow_bit_ty = ty.structFieldType(1, mod);
|
||||
const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod)));
|
||||
const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, pt)));
|
||||
const cond_reg = try self.register_manager.allocReg(null, gp);
|
||||
|
||||
// TODO handle floating point CCRs
|
||||
@ -4032,7 +4056,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
|
||||
const reg = try self.copyToTmpRegister(ty, mcv);
|
||||
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
|
||||
} else {
|
||||
const ptr_ty = try mod.singleMutPtrType(ty);
|
||||
const ptr_ty = try pt.singleMutPtrType(ty);
|
||||
|
||||
const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, gp);
|
||||
const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs);
|
||||
@ -4121,12 +4145,13 @@ fn genStoreASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Re
|
||||
}
|
||||
|
||||
fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mcv: MCValue = switch (try codegen.genTypedValue(
|
||||
self.bin_file,
|
||||
pt,
|
||||
self.src_loc,
|
||||
val,
|
||||
mod.funcOwnerDeclIndex(self.func_index),
|
||||
pt.zcu.funcOwnerDeclIndex(self.func_index),
|
||||
)) {
|
||||
.mcv => |mcv| switch (mcv) {
|
||||
.none => .none,
|
||||
@ -4157,14 +4182,15 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
|
||||
}
|
||||
|
||||
fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const error_type = ty.errorUnionSet(mod);
|
||||
const payload_type = ty.errorUnionPayload(mod);
|
||||
|
||||
if (!error_type.hasRuntimeBits(mod)) {
|
||||
if (!error_type.hasRuntimeBits(pt)) {
|
||||
return MCValue{ .immediate = 0 }; // always false
|
||||
} else if (!payload_type.hasRuntimeBits(mod)) {
|
||||
if (error_type.abiSize(mod) <= 8) {
|
||||
} else if (!payload_type.hasRuntimeBits(pt)) {
|
||||
if (error_type.abiSize(pt) <= 8) {
|
||||
const reg_mcv: MCValue = switch (operand) {
|
||||
.register => operand,
|
||||
else => .{ .register = try self.copyToTmpRegister(error_type, operand) },
|
||||
@ -4255,9 +4281,10 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void {
|
||||
}
|
||||
|
||||
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const elem_ty = ptr_ty.childType(mod);
|
||||
const elem_size = elem_ty.abiSize(mod);
|
||||
const elem_size = elem_ty.abiSize(pt);
|
||||
|
||||
switch (ptr) {
|
||||
.none => unreachable,
|
||||
@ -4326,7 +4353,8 @@ fn minMax(
|
||||
lhs_ty: Type,
|
||||
rhs_ty: Type,
|
||||
) InnerError!MCValue {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
assert(lhs_ty.eql(rhs_ty, mod));
|
||||
switch (lhs_ty.zigTypeTag(mod)) {
|
||||
.Float => return self.fail("TODO min/max on floats", .{}),
|
||||
@ -4446,7 +4474,8 @@ fn realStackOffset(off: u32) u32 {
|
||||
|
||||
/// Caller must call `CallMCValues.deinit`.
|
||||
fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const ip = &mod.intern_pool;
|
||||
const fn_info = mod.typeToFunc(fn_ty).?;
|
||||
const cc = fn_info.cc;
|
||||
@ -4487,7 +4516,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
|
||||
};
|
||||
|
||||
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
|
||||
const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(mod)));
|
||||
const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(pt)));
|
||||
if (param_size <= 8) {
|
||||
if (next_register < argument_registers.len) {
|
||||
result_arg.* = .{ .register = argument_registers[next_register] };
|
||||
@ -4516,10 +4545,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
|
||||
|
||||
if (ret_ty.zigTypeTag(mod) == .NoReturn) {
|
||||
result.return_value = .{ .unreach = {} };
|
||||
} else if (!ret_ty.hasRuntimeBits(mod)) {
|
||||
} else if (!ret_ty.hasRuntimeBits(pt)) {
|
||||
result.return_value = .{ .none = {} };
|
||||
} else {
|
||||
const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
|
||||
const ret_ty_size: u32 = @intCast(ret_ty.abiSize(pt));
|
||||
// The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller.
|
||||
if (ret_ty_size <= 8) {
|
||||
result.return_value = switch (role) {
|
||||
@ -4538,21 +4567,22 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
|
||||
}
|
||||
|
||||
fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const ty = self.typeOf(ref);
|
||||
|
||||
// If the type has no codegen bits, no need to store it.
|
||||
if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
|
||||
if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
|
||||
|
||||
if (ref.toIndex()) |inst| {
|
||||
return self.getResolvedInstValue(inst);
|
||||
}
|
||||
|
||||
return self.genTypedValue((try self.air.value(ref, mod)).?);
|
||||
return self.genTypedValue((try self.air.value(ref, pt)).?);
|
||||
}
|
||||
|
||||
fn ret(self: *Self, mcv: MCValue) !void {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const ret_ty = self.fn_type.fnReturnType(mod);
|
||||
try self.setRegOrMem(ret_ty, self.ret_mcv, mcv);
|
||||
|
||||
@ -4654,8 +4684,8 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
|
||||
}
|
||||
|
||||
fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const abi_size = value_ty.abiSize(mod);
|
||||
const pt = self.pt;
|
||||
const abi_size = value_ty.abiSize(pt);
|
||||
|
||||
switch (ptr) {
|
||||
.none => unreachable,
|
||||
@ -4696,11 +4726,12 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
|
||||
|
||||
fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
|
||||
return if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const mcv = try self.resolveInst(operand);
|
||||
const ptr_ty = self.typeOf(operand);
|
||||
const struct_ty = ptr_ty.childType(mod);
|
||||
const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
|
||||
const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt)));
|
||||
switch (mcv) {
|
||||
.ptr_stack_offset => |off| {
|
||||
break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
|
||||
@ -4738,7 +4769,8 @@ fn trunc(
|
||||
operand_ty: Type,
|
||||
dest_ty: Type,
|
||||
) !MCValue {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const info_a = operand_ty.intInfo(mod);
|
||||
const info_b = dest_ty.intInfo(mod);
|
||||
|
||||
@ -4848,7 +4880,7 @@ fn truncRegister(
|
||||
}
|
||||
}
|
||||
|
||||
/// TODO support scope overrides. Also note this logic is duplicated with `Module.wantSafety`.
|
||||
/// TODO support scope overrides. Also note this logic is duplicated with `Zcu.wantSafety`.
|
||||
fn wantSafety(self: *Self) bool {
|
||||
return switch (self.bin_file.comp.root_mod.optimize_mode) {
|
||||
.Debug => true,
|
||||
@ -4859,11 +4891,9 @@ fn wantSafety(self: *Self) bool {
|
||||
}
|
||||
|
||||
fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
return self.air.typeOf(inst, &mod.intern_pool);
|
||||
return self.air.typeOf(inst, &self.pt.zcu.intern_pool);
|
||||
}
|
||||
|
||||
fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
|
||||
const mod = self.bin_file.comp.module.?;
|
||||
return self.air.typeOfIndex(inst, &mod.intern_pool);
|
||||
return self.air.typeOfIndex(inst, &self.pt.zcu.intern_pool);
|
||||
}
|
||||
|
||||
@ -6,9 +6,7 @@ const Endian = std.builtin.Endian;
const assert = std.debug.assert;
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
/// Deprecated.
const Module = Zcu;
const ErrorMsg = Module.ErrorMsg;
const ErrorMsg = Zcu.ErrorMsg;
const Liveness = @import("../../Liveness.zig");
const log = std.log.scoped(.sparcv9_emit);
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
@ -24,7 +22,7 @@ bin_file: *link.File,
debug_output: DebugInfoOutput,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
src_loc: Module.LazySrcLoc,
src_loc: Zcu.LazySrcLoc,
code: *std.ArrayList(u8),

prev_di_line: u32,

File diff suppressed because it is too large
@ -22,15 +22,16 @@ const direct: [2]Class = .{ .direct, .none };
/// Classifies a given Zig type to determine how they must be passed
/// or returned as value within a wasm function.
/// When all elements result in `.none`, no value must be passed in or returned.
pub fn classifyType(ty: Type, mod: *Zcu) [2]Class {
pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
const mod = pt.zcu;
const ip = &mod.intern_pool;
const target = mod.getTarget();
if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none;
if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return none;
switch (ty.zigTypeTag(mod)) {
.Struct => {
const struct_type = mod.typeToStruct(ty).?;
const struct_type = pt.zcu.typeToStruct(ty).?;
if (struct_type.layout == .@"packed") {
if (ty.bitSize(mod) <= 64) return direct;
if (ty.bitSize(pt) <= 64) return direct;
return .{ .direct, .direct };
}
if (struct_type.field_types.len > 1) {
@ -40,13 +41,13 @@ pub fn classifyType(ty: Type, mod: *Zcu) [2]Class {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[0]);
const explicit_align = struct_type.fieldAlign(ip, 0);
if (explicit_align != .none) {
if (explicit_align.compareStrict(.gt, field_ty.abiAlignment(mod)))
if (explicit_align.compareStrict(.gt, field_ty.abiAlignment(pt)))
return memory;
}
return classifyType(field_ty, mod);
return classifyType(field_ty, pt);
},
.Int, .Enum, .ErrorSet => {
const int_bits = ty.intInfo(mod).bits;
const int_bits = ty.intInfo(pt.zcu).bits;
if (int_bits <= 64) return direct;
if (int_bits <= 128) return .{ .direct, .direct };
return memory;
@ -61,24 +62,24 @@ pub fn classifyType(ty: Type, mod: *Zcu) [2]Class {
.Vector => return direct,
.Array => return memory,
.Optional => {
assert(ty.isPtrLikeOptional(mod));
assert(ty.isPtrLikeOptional(pt.zcu));
return direct;
},
.Pointer => {
assert(!ty.isSlice(mod));
assert(!ty.isSlice(pt.zcu));
return direct;
},
.Union => {
const union_obj = mod.typeToUnion(ty).?;
const union_obj = pt.zcu.typeToUnion(ty).?;
if (union_obj.getLayout(ip) == .@"packed") {
if (ty.bitSize(mod) <= 64) return direct;
if (ty.bitSize(pt) <= 64) return direct;
return .{ .direct, .direct };
}
const layout = ty.unionGetLayout(mod);
const layout = ty.unionGetLayout(pt);
assert(layout.tag_size == 0);
if (union_obj.field_types.len > 1) return memory;
const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
return classifyType(first_field_ty, mod);
return classifyType(first_field_ty, pt);
},
.ErrorUnion,
.Frame,
@ -100,28 +101,29 @@ pub fn classifyType(ty: Type, mod: *Zcu) [2]Class {
/// Returns the scalar type a given type can represent.
/// Asserts given type can be represented as scalar, such as
/// a struct with a single scalar field.
pub fn scalarType(ty: Type, mod: *Zcu) Type {
pub fn scalarType(ty: Type, pt: Zcu.PerThread) Type {
const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (ty.zigTypeTag(mod)) {
.Struct => {
if (mod.typeToPackedStruct(ty)) |packed_struct| {
return scalarType(Type.fromInterned(packed_struct.backingIntType(ip).*), mod);
return scalarType(Type.fromInterned(packed_struct.backingIntType(ip).*), pt);
} else {
assert(ty.structFieldCount(mod) == 1);
return scalarType(ty.structFieldType(0, mod), mod);
return scalarType(ty.structFieldType(0, mod), pt);
}
},
.Union => {
const union_obj = mod.typeToUnion(ty).?;
if (union_obj.getLayout(ip) != .@"packed") {
const layout = mod.getUnionLayout(union_obj);
const layout = pt.getUnionLayout(union_obj);
if (layout.payload_size == 0 and layout.tag_size != 0) {
return scalarType(ty.unionTagTypeSafety(mod).?, mod);
return scalarType(ty.unionTagTypeSafety(mod).?, pt);
}
assert(union_obj.field_types.len == 1);
}
const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
return scalarType(first_field_ty, mod);
return scalarType(first_field_ty, pt);
},
else => return ty,
}

File diff suppressed because it is too large
@ -8,7 +8,7 @@ allocator: Allocator,
mir: Mir,
cc: std.builtin.CallingConvention,
err_msg: ?*ErrorMsg = null,
src_loc: Module.LazySrcLoc,
src_loc: Zcu.LazySrcLoc,
result_insts_len: u8 = undefined,
result_relocs_len: u8 = undefined,
result_insts: [
@ -657,7 +657,7 @@ const std = @import("std");

const Air = @import("../../Air.zig");
const Allocator = std.mem.Allocator;
const ErrorMsg = Module.ErrorMsg;
const ErrorMsg = Zcu.ErrorMsg;
const Immediate = bits.Immediate;
const Instruction = encoder.Instruction;
const Lower = @This();
@ -665,8 +665,6 @@ const Memory = Instruction.Memory;
const Mir = @import("Mir.zig");
const Mnemonic = Instruction.Mnemonic;
const Zcu = @import("../../Zcu.zig");
/// Deprecated.
const Module = Zcu;
const Operand = Instruction.Operand;
const Prefix = Instruction.Prefix;
const Register = bits.Register;

@ -44,7 +44,7 @@ pub const Class = enum {
}
};

pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
pub fn classifyWindows(ty: Type, pt: Zcu.PerThread) Class {
// https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
// "There's a strict one-to-one correspondence between a function call's arguments
// and the registers used for those arguments. Any argument that doesn't fit in 8
@ -53,7 +53,7 @@ pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
// "All floating point operations are done using the 16 XMM registers."
// "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed
// as if they were integers of the same size."
switch (ty.zigTypeTag(zcu)) {
switch (ty.zigTypeTag(pt.zcu)) {
.Pointer,
.Int,
.Bool,
@ -68,12 +68,12 @@ pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
.ErrorUnion,
.AnyFrame,
.Frame,
=> switch (ty.abiSize(zcu)) {
=> switch (ty.abiSize(pt)) {
0 => unreachable,
1, 2, 4, 8 => return .integer,
else => switch (ty.zigTypeTag(zcu)) {
else => switch (ty.zigTypeTag(pt.zcu)) {
.Int => return .win_i128,
.Struct, .Union => if (ty.containerLayout(zcu) == .@"packed") {
.Struct, .Union => if (ty.containerLayout(pt.zcu) == .@"packed") {
return .win_i128;
} else {
return .memory;
@ -100,14 +100,14 @@ pub const Context = enum { ret, arg, field, other };

/// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8]Class {
pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Context) [8]Class {
const memory_class = [_]Class{
.memory, .none, .none, .none,
.none, .none, .none, .none,
};
var result = [1]Class{.none} ** 8;
switch (ty.zigTypeTag(zcu)) {
.Pointer => switch (ty.ptrSize(zcu)) {
switch (ty.zigTypeTag(pt.zcu)) {
.Pointer => switch (ty.ptrSize(pt.zcu)) {
.Slice => {
result[0] = .integer;
result[1] = .integer;
@ -119,7 +119,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
},
},
.Int, .Enum, .ErrorSet => {
const bits = ty.intInfo(zcu).bits;
const bits = ty.intInfo(pt.zcu).bits;
if (bits <= 64) {
result[0] = .integer;
return result;
@ -185,8 +185,8 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
else => unreachable,
},
.Vector => {
const elem_ty = ty.childType(zcu);
const bits = elem_ty.bitSize(zcu) * ty.arrayLen(zcu);
const elem_ty = ty.childType(pt.zcu);
const bits = elem_ty.bitSize(pt) * ty.arrayLen(pt.zcu);
if (elem_ty.toIntern() == .bool_type) {
if (bits <= 32) return .{
.integer, .none, .none, .none,
@ -250,7 +250,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
return memory_class;
},
.Optional => {
if (ty.isPtrLikeOptional(zcu)) {
if (ty.isPtrLikeOptional(pt.zcu)) {
result[0] = .integer;
return result;
}
@ -261,8 +261,8 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
// it contains unaligned fields, it has class MEMORY"
// "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.".
const ty_size = ty.abiSize(zcu);
switch (ty.containerLayout(zcu)) {
const ty_size = ty.abiSize(pt);
switch (ty.containerLayout(pt.zcu)) {
.auto, .@"extern" => {},
.@"packed" => {
assert(ty_size <= 16);
@ -274,10 +274,10 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
if (ty_size > 64)
return memory_class;

_ = if (zcu.typeToStruct(ty)) |loaded_struct|
classifySystemVStruct(&result, 0, loaded_struct, zcu, target)
else if (zcu.typeToUnion(ty)) |loaded_union|
classifySystemVUnion(&result, 0, loaded_union, zcu, target)
_ = if (pt.zcu.typeToStruct(ty)) |loaded_struct|
classifySystemVStruct(&result, 0, loaded_struct, pt, target)
else if (pt.zcu.typeToUnion(ty)) |loaded_union|
classifySystemVUnion(&result, 0, loaded_union, pt, target)
else
unreachable;

@ -306,7 +306,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
return result;
},
.Array => {
const ty_size = ty.abiSize(zcu);
const ty_size = ty.abiSize(pt);
if (ty_size <= 8) {
result[0] = .integer;
return result;
@ -326,10 +326,10 @@ fn classifySystemVStruct(
result: *[8]Class,
starting_byte_offset: u64,
loaded_struct: InternPool.LoadedStructType,
zcu: *Zcu,
pt: Zcu.PerThread,
target: std.Target,
) u64 {
const ip = &zcu.intern_pool;
const ip = &pt.zcu.intern_pool;
var byte_offset = starting_byte_offset;
var field_it = loaded_struct.iterateRuntimeOrder(ip);
while (field_it.next()) |field_index| {
@ -338,29 +338,29 @@ fn classifySystemVStruct(
byte_offset = std.mem.alignForward(
u64,
byte_offset,
field_align.toByteUnits() orelse field_ty.abiAlignment(zcu).toByteUnits().?,
field_align.toByteUnits() orelse field_ty.abiAlignment(pt).toByteUnits().?,
);
if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
if (pt.zcu.typeToStruct(field_ty)) |field_loaded_struct| {
switch (field_loaded_struct.layout) {
.auto, .@"extern" => {
byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, zcu, target);
byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, pt, target);
continue;
},
.@"packed" => {},
}
} else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
} else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| {
switch (field_loaded_union.getLayout(ip)) {
.auto, .@"extern" => {
byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, zcu, target);
byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, pt, target);
continue;
},
.@"packed" => {},
}
}
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, pt, target, .field), .none);
for (result[@intCast(byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
result_class.* = result_class.combineSystemV(field_class);
byte_offset += field_ty.abiSize(zcu);
byte_offset += field_ty.abiSize(pt);
}
const final_byte_offset = starting_byte_offset + loaded_struct.size(ip).*;
std.debug.assert(final_byte_offset == std.mem.alignForward(
@ -375,30 +375,30 @@ fn classifySystemVUnion(
result: *[8]Class,
starting_byte_offset: u64,
loaded_union: InternPool.LoadedUnionType,
zcu: *Zcu,
pt: Zcu.PerThread,
target: std.Target,
) u64 {
const ip = &zcu.intern_pool;
const ip = &pt.zcu.intern_pool;
for (0..loaded_union.field_types.len) |field_index| {
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
if (pt.zcu.typeToStruct(field_ty)) |field_loaded_struct| {
switch (field_loaded_struct.layout) {
.auto, .@"extern" => {
_ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, zcu, target);
_ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, pt, target);
continue;
},
.@"packed" => {},
}
} else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
} else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| {
switch (field_loaded_union.getLayout(ip)) {
.auto, .@"extern" => {
_ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, zcu, target);
_ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, pt, target);
continue;
},
.@"packed" => {},
}
}
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, pt, target, .field), .none);
for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
result_class.* = result_class.combineSystemV(field_class);
}

269 src/codegen.zig
@ -13,12 +13,10 @@ const trace = @import("tracy.zig").trace;
const Air = @import("Air.zig");
const Allocator = mem.Allocator;
const Compilation = @import("Compilation.zig");
const ErrorMsg = Module.ErrorMsg;
const ErrorMsg = Zcu.ErrorMsg;
const InternPool = @import("InternPool.zig");
const Liveness = @import("Liveness.zig");
const Zcu = @import("Zcu.zig");
/// Deprecated.
const Module = Zcu;
const Target = std.Target;
const Type = @import("Type.zig");
const Value = @import("Value.zig");
@ -47,14 +45,15 @@ pub const DebugInfoOutput = union(enum) {

pub fn generateFunction(
lf: *link.File,
src_loc: Module.LazySrcLoc,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
) CodeGenError!Result {
const zcu = lf.comp.module.?;
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
const decl = zcu.declPtr(func.owner_decl);
const namespace = zcu.namespacePtr(decl.src_namespace);
@ -62,35 +61,36 @@ pub fn generateFunction(
switch (target.cpu.arch) {
.arm,
.armeb,
=> return @import("arch/arm/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output),
=> return @import("arch/arm/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output),
.aarch64,
.aarch64_be,
.aarch64_32,
=> return @import("arch/aarch64/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output),
.riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output),
.sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output),
.x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output),
=> return @import("arch/aarch64/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output),
.riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output),
.sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output),
.x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output),
.wasm32,
.wasm64,
=> return @import("arch/wasm/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output),
=> return @import("arch/wasm/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output),
else => unreachable,
}
}

pub fn generateLazyFunction(
lf: *link.File,
src_loc: Module.LazySrcLoc,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
lazy_sym: link.File.LazySymbol,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
) CodeGenError!Result {
const zcu = lf.comp.module.?;
const zcu = pt.zcu;
const decl_index = lazy_sym.ty.getOwnerDecl(zcu);
const decl = zcu.declPtr(decl_index);
const namespace = zcu.namespacePtr(decl.src_namespace);
const target = namespace.fileScope(zcu).mod.resolved_target.result;
switch (target.cpu.arch) {
.x86_64 => return @import("arch/x86_64/CodeGen.zig").generateLazy(lf, src_loc, lazy_sym, code, debug_output),
.x86_64 => return @import("arch/x86_64/CodeGen.zig").generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output),
else => unreachable,
}
}
@ -105,7 +105,8 @@ fn writeFloat(comptime F: type, f: F, target: Target, endian: std.builtin.Endian

pub fn generateLazySymbol(
bin_file: *link.File,
src_loc: Module.LazySrcLoc,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
lazy_sym: link.File.LazySymbol,
// TODO don't use an "out" parameter like this; put it in the result instead
alignment: *Alignment,
@ -119,25 +120,24 @@ pub fn generateLazySymbol(
defer tracy.end();

const comp = bin_file.comp;
const zcu = comp.module.?;
const ip = &zcu.intern_pool;
const ip = &pt.zcu.intern_pool;
const target = comp.root_mod.resolved_target.result;
const endian = target.cpu.arch.endian();
const gpa = comp.gpa;

log.debug("generateLazySymbol: kind = {s}, ty = {}", .{
@tagName(lazy_sym.kind),
lazy_sym.ty.fmt(zcu),
lazy_sym.ty.fmt(pt),
});

if (lazy_sym.kind == .code) {
alignment.* = target_util.defaultFunctionAlignment(target);
return generateLazyFunction(bin_file, src_loc, lazy_sym, code, debug_output);
return generateLazyFunction(bin_file, pt, src_loc, lazy_sym, code, debug_output);
}

if (lazy_sym.ty.isAnyError(zcu)) {
if (lazy_sym.ty.isAnyError(pt.zcu)) {
alignment.* = .@"4";
const err_names = zcu.global_error_set.keys();
const err_names = pt.zcu.global_error_set.keys();
mem.writeInt(u32, try code.addManyAsArray(4), @intCast(err_names.len), endian);
var offset = code.items.len;
try code.resize((1 + err_names.len + 1) * 4);
@ -151,9 +151,9 @@ pub fn generateLazySymbol(
}
mem.writeInt(u32, code.items[offset..][0..4], @intCast(code.items.len), endian);
return Result.ok;
} else if (lazy_sym.ty.zigTypeTag(zcu) == .Enum) {
} else if (lazy_sym.ty.zigTypeTag(pt.zcu) == .Enum) {
alignment.* = .@"1";
const tag_names = lazy_sym.ty.enumFields(zcu);
const tag_names = lazy_sym.ty.enumFields(pt.zcu);
for (0..tag_names.len) |tag_index| {
const tag_name = tag_names.get(ip)[tag_index].toSlice(ip);
try code.ensureUnusedCapacity(tag_name.len + 1);
@ -165,13 +165,14 @@ pub fn generateLazySymbol(
gpa,
src_loc,
"TODO implement generateLazySymbol for {s} {}",
.{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(zcu) },
.{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(pt) },
) };
}

pub fn generateSymbol(
bin_file: *link.File,
src_loc: Module.LazySrcLoc,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
val: Value,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
@ -180,17 +181,17 @@ pub fn generateSymbol(
const tracy = trace(@src());
defer tracy.end();

const mod = bin_file.comp.module.?;
const mod = pt.zcu;
const ip = &mod.intern_pool;
const ty = val.typeOf(mod);

const target = mod.getTarget();
const endian = target.cpu.arch.endian();

log.debug("generateSymbol: val = {}", .{val.fmtValue(mod, null)});
log.debug("generateSymbol: val = {}", .{val.fmtValue(pt, null)});

if (val.isUndefDeep(mod)) {
const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow;
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
try code.appendNTimes(0xaa, abi_size);
return .ok;
}
@ -236,9 +237,9 @@ pub fn generateSymbol(
.empty_enum_value,
=> unreachable, // non-runtime values
.int => {
const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow;
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
var space: Value.BigIntSpace = undefined;
const int_val = val.toBigInt(&space, mod);
const int_val = val.toBigInt(&space, pt);
int_val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian);
},
.err => |err| {
@ -252,14 +253,14 @@ pub fn generateSymbol(
.payload => 0,
};

if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
try code.writer().writeInt(u16, err_val, endian);
return .ok;
}

const payload_align = payload_ty.abiAlignment(mod);
const error_align = Type.anyerror.abiAlignment(mod);
const abi_align = ty.abiAlignment(mod);
const payload_align = payload_ty.abiAlignment(pt);
const error_align = Type.anyerror.abiAlignment(pt);
const abi_align = ty.abiAlignment(pt);

// error value first when its type is larger than the error union's payload
if (error_align.order(payload_align) == .gt) {
@ -269,8 +270,8 @@ pub fn generateSymbol(
// emit payload part of the error union
{
const begin = code.items.len;
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(switch (error_union.val) {
.err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }),
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (error_union.val) {
.err_name => try pt.intern(.{ .undef = payload_ty.toIntern() }),
.payload => |payload| payload,
}), code, debug_output, reloc_info)) {
.ok => {},
@ -300,7 +301,7 @@ pub fn generateSymbol(
},
.enum_tag => |enum_tag| {
const int_tag_ty = ty.intTagType(mod);
switch (try generateSymbol(bin_file, src_loc, try mod.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
@ -311,21 +312,21 @@ pub fn generateSymbol(
.f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(8)),
.f80 => |f80_val| {
writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(10));
const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow;
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
try code.appendNTimes(0, abi_size - 10);
},
.f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)),
},
.ptr => switch (try lowerPtr(bin_file, src_loc, val.toIntern(), code, debug_output, reloc_info, 0)) {
.ptr => switch (try lowerPtr(bin_file, pt, src_loc, val.toIntern(), code, debug_output, reloc_info, 0)) {
.ok => {},
.fail => |em| return .{ .fail = em },
},
.slice => |slice| {
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(slice.ptr), code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.ptr), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(slice.len), code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.len), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
@ -333,11 +334,11 @@ pub fn generateSymbol(
.opt => {
const payload_type = ty.optionalChild(mod);
const payload_val = val.optionalValue(mod);
const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow;
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;

if (ty.optionalReprIsPayload(mod)) {
if (payload_val) |value| {
switch (try generateSymbol(bin_file, src_loc, value, code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, pt, src_loc, value, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
@ -345,10 +346,12 @@ pub fn generateSymbol(
try code.appendNTimes(0, abi_size);
}
} else {
const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1;
if (payload_type.hasRuntimeBits(mod)) {
const value = payload_val orelse Value.fromInterned((try mod.intern(.{ .undef = payload_type.toIntern() })));
switch (try generateSymbol(bin_file, src_loc, value, code, debug_output, reloc_info)) {
const padding = abi_size - (math.cast(usize, payload_type.abiSize(pt)) orelse return error.Overflow) - 1;
if (payload_type.hasRuntimeBits(pt)) {
const value = payload_val orelse Value.fromInterned(try pt.intern(.{
.undef = payload_type.toIntern(),
}));
switch (try generateSymbol(bin_file, pt, src_loc, value, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
@ -363,7 +366,7 @@ pub fn generateSymbol(
.elems, .repeated_elem => {
var index: u64 = 0;
while (index < array_type.lenIncludingSentinel()) : (index += 1) {
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(switch (aggregate.storage) {
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[@intCast(index)],
.repeated_elem => |elem| if (index < array_type.len)
@ -378,8 +381,7 @@ pub fn generateSymbol(
},
},
.vector_type => |vector_type| {
const abi_size = math.cast(usize, ty.abiSize(mod)) orelse
return error.Overflow;
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
if (vector_type.child == .bool_type) {
const bytes = try code.addManyAsSlice(abi_size);
@memset(bytes, 0xaa);
@ -424,7 +426,7 @@ pub fn generateSymbol(
.elems, .repeated_elem => {
var index: u64 = 0;
while (index < vector_type.len) : (index += 1) {
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(switch (aggregate.storage) {
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[
math.cast(usize, index) orelse return error.Overflow
@ -439,7 +441,7 @@ pub fn generateSymbol(
}

const padding = abi_size -
(math.cast(usize, Type.fromInterned(vector_type.child).abiSize(mod) * vector_type.len) orelse
(math.cast(usize, Type.fromInterned(vector_type.child).abiSize(pt) * vector_type.len) orelse
return error.Overflow);
if (padding > 0) try code.appendNTimes(0, padding);
}
@ -452,10 +454,10 @@ pub fn generateSymbol(
0..,
) |field_ty, comptime_val, index| {
if (comptime_val != .none) continue;
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;

const field_val = switch (aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.bytes => |bytes| try pt.intern(.{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes.at(index, ip) },
} }),
@ -463,14 +465,14 @@ pub fn generateSymbol(
.repeated_elem => |elem| elem,
};

switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(field_val), code, debug_output, reloc_info)) {
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
const unpadded_field_end = code.items.len - struct_begin;

// Pad struct members if required
const padded_field_end = ty.structFieldOffset(index + 1, mod);
const padded_field_end = ty.structFieldOffset(index + 1, pt);
const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse
return error.Overflow;

@ -483,15 +485,14 @@ pub fn generateSymbol(
|
||||
const struct_type = ip.loadStructType(ty.toIntern());
|
||||
switch (struct_type.layout) {
|
||||
.@"packed" => {
|
||||
const abi_size = math.cast(usize, ty.abiSize(mod)) orelse
|
||||
return error.Overflow;
|
||||
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
|
||||
const current_pos = code.items.len;
|
||||
try code.appendNTimes(0, abi_size);
|
||||
var bits: u16 = 0;
|
||||
|
||||
for (struct_type.field_types.get(ip), 0..) |field_ty, index| {
|
||||
const field_val = switch (aggregate.storage) {
|
||||
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
|
||||
.bytes => |bytes| try pt.intern(.{ .int = .{
|
||||
.ty = field_ty,
|
||||
.storage = .{ .u64 = bytes.at(index, ip) },
|
||||
} }),
|
||||
@ -502,18 +503,18 @@ pub fn generateSymbol(
|
||||
// pointer may point to a decl which must be marked used
|
||||
// but can also result in a relocation. Therefore we handle those separately.
|
||||
if (Type.fromInterned(field_ty).zigTypeTag(mod) == .Pointer) {
|
||||
const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(mod)) orelse
|
||||
const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(pt)) orelse
|
||||
return error.Overflow;
|
||||
var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
|
||||
defer tmp_list.deinit();
|
||||
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(field_val), &tmp_list, debug_output, reloc_info)) {
|
||||
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), &tmp_list, debug_output, reloc_info)) {
|
||||
.ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
|
||||
.fail => |em| return Result{ .fail = em },
|
||||
}
|
||||
} else {
|
||||
Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), mod, code.items[current_pos..], bits) catch unreachable;
|
||||
Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), pt, code.items[current_pos..], bits) catch unreachable;
|
||||
}
|
||||
bits += @intCast(Type.fromInterned(field_ty).bitSize(mod));
|
||||
bits += @intCast(Type.fromInterned(field_ty).bitSize(pt));
|
||||
}
|
||||
},
|
||||
.auto, .@"extern" => {
|
||||
@ -524,10 +525,10 @@ pub fn generateSymbol(
|
||||
var it = struct_type.iterateRuntimeOrder(ip);
|
||||
while (it.next()) |field_index| {
|
||||
const field_ty = field_types[field_index];
|
||||
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
|
||||
if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
|
||||
|
||||
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
|
||||
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
|
||||
.bytes => |bytes| try pt.intern(.{ .int = .{
|
||||
.ty = field_ty,
|
||||
.storage = .{ .u64 = bytes.at(field_index, ip) },
|
||||
} }),
|
||||
@ -541,7 +542,7 @@ pub fn generateSymbol(
|
||||
) orelse return error.Overflow;
|
||||
if (padding > 0) try code.appendNTimes(0, padding);
|
||||
|
||||
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(field_val), code, debug_output, reloc_info)) {
|
||||
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, debug_output, reloc_info)) {
|
||||
.ok => {},
|
||||
.fail => |em| return Result{ .fail = em },
|
||||
}
|
||||
@ -562,15 +563,15 @@ pub fn generateSymbol(
|
||||
else => unreachable,
|
||||
},
|
||||
.un => |un| {
|
||||
const layout = ty.unionGetLayout(mod);
|
||||
const layout = ty.unionGetLayout(pt);
|
||||
|
||||
if (layout.payload_size == 0) {
|
||||
return generateSymbol(bin_file, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info);
|
||||
return generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info);
|
||||
}
|
||||
|
||||
// Check if we should store the tag first.
|
||||
if (layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align)) {
|
||||
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info)) {
|
||||
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info)) {
|
||||
.ok => {},
|
||||
.fail => |em| return Result{ .fail = em },
|
||||
}
|
||||
@ -580,28 +581,28 @@ pub fn generateSymbol(
|
||||
if (un.tag != .none) {
|
||||
const field_index = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?;
|
||||
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBits(mod)) {
|
||||
if (!field_ty.hasRuntimeBits(pt)) {
|
||||
try code.appendNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
|
||||
} else {
|
||||
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) {
|
||||
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) {
|
||||
.ok => {},
|
||||
.fail => |em| return Result{ .fail = em },
|
||||
}
|
||||
|
||||
const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow;
|
||||
const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(pt)) orelse return error.Overflow;
|
||||
if (padding > 0) {
|
||||
try code.appendNTimes(0, padding);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) {
|
||||
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) {
|
||||
.ok => {},
|
||||
.fail => |em| return Result{ .fail = em },
|
||||
}
|
||||
}
|
||||
|
||||
if (layout.tag_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) {
|
||||
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info)) {
|
||||
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info)) {
|
||||
.ok => {},
|
||||
.fail => |em| return Result{ .fail = em },
|
||||
}
|
||||
@ -618,22 +619,24 @@ pub fn generateSymbol(
|
||||
|
||||
fn lowerPtr(
|
||||
bin_file: *link.File,
|
||||
src_loc: Module.LazySrcLoc,
|
||||
pt: Zcu.PerThread,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
ptr_val: InternPool.Index,
|
||||
code: *std.ArrayList(u8),
|
||||
debug_output: DebugInfoOutput,
|
||||
reloc_info: RelocInfo,
|
||||
prev_offset: u64,
|
||||
) CodeGenError!Result {
|
||||
const zcu = bin_file.comp.module.?;
|
||||
const zcu = pt.zcu;
|
||||
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
|
||||
const offset: u64 = prev_offset + ptr.byte_offset;
|
||||
return switch (ptr.base_addr) {
|
||||
.decl => |decl| try lowerDeclRef(bin_file, src_loc, decl, code, debug_output, reloc_info, offset),
|
||||
.anon_decl => |ad| try lowerAnonDeclRef(bin_file, src_loc, ad, code, debug_output, reloc_info, offset),
|
||||
.int => try generateSymbol(bin_file, src_loc, try zcu.intValue(Type.usize, offset), code, debug_output, reloc_info),
|
||||
.decl => |decl| try lowerDeclRef(bin_file, pt, src_loc, decl, code, debug_output, reloc_info, offset),
|
||||
.anon_decl => |ad| try lowerAnonDeclRef(bin_file, pt, src_loc, ad, code, debug_output, reloc_info, offset),
|
||||
.int => try generateSymbol(bin_file, pt, src_loc, try pt.intValue(Type.usize, offset), code, debug_output, reloc_info),
|
||||
.eu_payload => |eu_ptr| try lowerPtr(
|
||||
bin_file,
|
||||
pt,
|
||||
src_loc,
|
||||
eu_ptr,
|
||||
code,
|
||||
@ -641,11 +644,12 @@ fn lowerPtr(
|
||||
reloc_info,
|
||||
offset + errUnionPayloadOffset(
|
||||
Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu).errorUnionPayload(zcu),
|
||||
zcu,
|
||||
pt,
|
||||
),
|
||||
),
|
||||
.opt_payload => |opt_ptr| try lowerPtr(
|
||||
bin_file,
|
||||
pt,
|
||||
src_loc,
|
||||
opt_ptr,
|
||||
code,
|
||||
@ -666,12 +670,12 @@ fn lowerPtr(
|
||||
};
|
||||
},
|
||||
.Struct, .Union => switch (base_ty.containerLayout(zcu)) {
|
||||
.auto => base_ty.structFieldOffset(@intCast(field.index), zcu),
|
||||
.auto => base_ty.structFieldOffset(@intCast(field.index), pt),
|
||||
.@"extern", .@"packed" => unreachable,
|
||||
},
|
||||
else => unreachable,
|
||||
};
|
||||
return lowerPtr(bin_file, src_loc, field.base, code, debug_output, reloc_info, offset + field_off);
|
||||
return lowerPtr(bin_file, pt, src_loc, field.base, code, debug_output, reloc_info, offset + field_off);
|
||||
},
|
||||
.arr_elem, .comptime_field, .comptime_alloc => unreachable,
|
||||
};
|
||||
@ -683,7 +687,8 @@ const RelocInfo = struct {
|
||||
|
||||
fn lowerAnonDeclRef(
|
||||
lf: *link.File,
|
||||
src_loc: Module.LazySrcLoc,
|
||||
pt: Zcu.PerThread,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
|
||||
code: *std.ArrayList(u8),
|
||||
debug_output: DebugInfoOutput,
|
||||
@ -691,22 +696,21 @@ fn lowerAnonDeclRef(
|
||||
offset: u64,
|
||||
) CodeGenError!Result {
|
||||
_ = debug_output;
|
||||
const zcu = lf.comp.module.?;
|
||||
const ip = &zcu.intern_pool;
|
||||
const ip = &pt.zcu.intern_pool;
|
||||
const target = lf.comp.root_mod.resolved_target.result;
|
||||
|
||||
const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
|
||||
const decl_val = anon_decl.val;
|
||||
const decl_ty = Type.fromInterned(ip.typeOf(decl_val));
|
||||
log.debug("lowerAnonDecl: ty = {}", .{decl_ty.fmt(zcu)});
|
||||
const is_fn_body = decl_ty.zigTypeTag(zcu) == .Fn;
|
||||
if (!is_fn_body and !decl_ty.hasRuntimeBits(zcu)) {
|
||||
log.debug("lowerAnonDecl: ty = {}", .{decl_ty.fmt(pt)});
|
||||
const is_fn_body = decl_ty.zigTypeTag(pt.zcu) == .Fn;
|
||||
if (!is_fn_body and !decl_ty.hasRuntimeBits(pt)) {
|
||||
try code.appendNTimes(0xaa, ptr_width_bytes);
|
||||
return Result.ok;
|
||||
}
|
||||
|
||||
const decl_align = ip.indexToKey(anon_decl.orig_ty).ptr_type.flags.alignment;
|
||||
const res = try lf.lowerAnonDecl(decl_val, decl_align, src_loc);
|
||||
const res = try lf.lowerAnonDecl(pt, decl_val, decl_align, src_loc);
|
||||
switch (res) {
|
||||
.ok => {},
|
||||
.fail => |em| return .{ .fail = em },
|
||||
@ -730,7 +734,8 @@ fn lowerAnonDeclRef(
|
||||
|
||||
fn lowerDeclRef(
|
||||
lf: *link.File,
|
||||
src_loc: Module.LazySrcLoc,
|
||||
pt: Zcu.PerThread,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
decl_index: InternPool.DeclIndex,
|
||||
code: *std.ArrayList(u8),
|
||||
debug_output: DebugInfoOutput,
|
||||
@ -739,14 +744,14 @@ fn lowerDeclRef(
|
||||
) CodeGenError!Result {
|
||||
_ = src_loc;
|
||||
_ = debug_output;
|
||||
const zcu = lf.comp.module.?;
|
||||
const zcu = pt.zcu;
|
||||
const decl = zcu.declPtr(decl_index);
|
||||
const namespace = zcu.namespacePtr(decl.src_namespace);
|
||||
const target = namespace.fileScope(zcu).mod.resolved_target.result;
|
||||
|
||||
const ptr_width = target.ptrBitWidth();
|
||||
const is_fn_body = decl.typeOf(zcu).zigTypeTag(zcu) == .Fn;
|
||||
if (!is_fn_body and !decl.typeOf(zcu).hasRuntimeBits(zcu)) {
|
||||
if (!is_fn_body and !decl.typeOf(zcu).hasRuntimeBits(pt)) {
|
||||
try code.appendNTimes(0xaa, @divExact(ptr_width, 8));
|
||||
return Result.ok;
|
||||
}
|
||||
@ -814,7 +819,7 @@ pub const GenResult = union(enum) {
|
||||
|
||||
fn fail(
|
||||
gpa: Allocator,
|
||||
src_loc: Module.LazySrcLoc,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
comptime format: []const u8,
|
||||
args: anytype,
|
||||
) Allocator.Error!GenResult {
|
||||
@ -825,14 +830,15 @@ pub const GenResult = union(enum) {
|
||||
|
||||
fn genDeclRef(
|
||||
lf: *link.File,
|
||||
src_loc: Module.LazySrcLoc,
|
||||
pt: Zcu.PerThread,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
val: Value,
|
||||
ptr_decl_index: InternPool.DeclIndex,
|
||||
) CodeGenError!GenResult {
|
||||
const zcu = lf.comp.module.?;
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const ty = val.typeOf(zcu);
|
||||
log.debug("genDeclRef: val = {}", .{val.fmtValue(zcu, null)});
|
||||
log.debug("genDeclRef: val = {}", .{val.fmtValue(pt, null)});
|
||||
|
||||
const ptr_decl = zcu.declPtr(ptr_decl_index);
|
||||
const namespace = zcu.namespacePtr(ptr_decl.src_namespace);
|
||||
@ -848,7 +854,7 @@ fn genDeclRef(
|
||||
};
|
||||
const decl = zcu.declPtr(decl_index);
|
||||
|
||||
if (!decl.typeOf(zcu).isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
if (!decl.typeOf(zcu).isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
|
||||
const imm: u64 = switch (ptr_bytes) {
|
||||
1 => 0xaa,
|
||||
2 => 0xaaaa,
|
||||
@ -865,12 +871,12 @@ fn genDeclRef(
|
||||
// TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
|
||||
if (ty.castPtrToFn(zcu)) |fn_ty| {
|
||||
if (zcu.typeToFunc(fn_ty).?.is_generic) {
|
||||
return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(zcu).toByteUnits().? });
|
||||
return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(pt).toByteUnits().? });
|
||||
}
|
||||
} else if (ty.zigTypeTag(zcu) == .Pointer) {
|
||||
const elem_ty = ty.elemType2(zcu);
|
||||
if (!elem_ty.hasRuntimeBits(zcu)) {
|
||||
return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(zcu).toByteUnits().? });
|
||||
if (!elem_ty.hasRuntimeBits(pt)) {
|
||||
return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(pt).toByteUnits().? });
|
||||
}
|
||||
}
|
||||
|
||||
@ -931,15 +937,15 @@ fn genDeclRef(
|
||||
|
||||
fn genUnnamedConst(
|
||||
lf: *link.File,
|
||||
src_loc: Module.LazySrcLoc,
|
||||
pt: Zcu.PerThread,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
val: Value,
|
||||
owner_decl_index: InternPool.DeclIndex,
|
||||
) CodeGenError!GenResult {
|
||||
const zcu = lf.comp.module.?;
|
||||
const gpa = lf.comp.gpa;
|
||||
log.debug("genUnnamedConst: val = {}", .{val.fmtValue(zcu, null)});
|
||||
log.debug("genUnnamedConst: val = {}", .{val.fmtValue(pt, null)});
|
||||
|
||||
const local_sym_index = lf.lowerUnnamedConst(val, owner_decl_index) catch |err| {
|
||||
const local_sym_index = lf.lowerUnnamedConst(pt, val, owner_decl_index) catch |err| {
|
||||
return GenResult.fail(gpa, src_loc, "lowering unnamed constant failed: {s}", .{@errorName(err)});
|
||||
};
|
||||
switch (lf.tag) {
|
||||
@ -970,15 +976,16 @@ fn genUnnamedConst(
|
||||
|
||||
pub fn genTypedValue(
|
||||
lf: *link.File,
|
||||
src_loc: Module.LazySrcLoc,
|
||||
pt: Zcu.PerThread,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
val: Value,
|
||||
owner_decl_index: InternPool.DeclIndex,
|
||||
) CodeGenError!GenResult {
|
||||
const zcu = lf.comp.module.?;
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const ty = val.typeOf(zcu);
|
||||
|
||||
log.debug("genTypedValue: val = {}", .{val.fmtValue(zcu, null)});
|
||||
log.debug("genTypedValue: val = {}", .{val.fmtValue(pt, null)});
|
||||
|
||||
if (val.isUndef(zcu))
|
||||
return GenResult.mcv(.undef);
|
||||
@ -990,7 +997,7 @@ pub fn genTypedValue(
|
||||
|
||||
if (!ty.isSlice(zcu)) switch (ip.indexToKey(val.toIntern())) {
|
||||
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
|
||||
.decl => |decl| return genDeclRef(lf, src_loc, val, decl),
|
||||
.decl => |decl| return genDeclRef(lf, pt, src_loc, val, decl),
|
||||
else => {},
|
||||
},
|
||||
else => {},
|
||||
@ -1007,7 +1014,7 @@ pub fn genTypedValue(
|
||||
.none => {},
|
||||
else => switch (ip.indexToKey(val.toIntern())) {
|
||||
.int => {
|
||||
return GenResult.mcv(.{ .immediate = val.toUnsignedInt(zcu) });
|
||||
return GenResult.mcv(.{ .immediate = val.toUnsignedInt(pt) });
|
||||
},
|
||||
else => {},
|
||||
},
|
||||
@ -1017,8 +1024,8 @@ pub fn genTypedValue(
|
||||
const info = ty.intInfo(zcu);
|
||||
if (info.bits <= ptr_bits) {
|
||||
const unsigned: u64 = switch (info.signedness) {
|
||||
.signed => @bitCast(val.toSignedInt(zcu)),
|
||||
.unsigned => val.toUnsignedInt(zcu),
|
||||
.signed => @bitCast(val.toSignedInt(pt)),
|
||||
.unsigned => val.toUnsignedInt(pt),
|
||||
};
|
||||
return GenResult.mcv(.{ .immediate = unsigned });
|
||||
}
|
||||
@ -1030,11 +1037,12 @@ pub fn genTypedValue(
|
||||
if (ty.isPtrLikeOptional(zcu)) {
|
||||
return genTypedValue(
|
||||
lf,
|
||||
pt,
|
||||
src_loc,
|
||||
val.optionalValue(zcu) orelse return GenResult.mcv(.{ .immediate = 0 }),
|
||||
owner_decl_index,
|
||||
);
|
||||
} else if (ty.abiSize(zcu) == 1) {
|
||||
} else if (ty.abiSize(pt) == 1) {
|
||||
return GenResult.mcv(.{ .immediate = @intFromBool(!val.isNull(zcu)) });
|
||||
}
|
||||
},
|
||||
@ -1042,6 +1050,7 @@ pub fn genTypedValue(
|
||||
const enum_tag = ip.indexToKey(val.toIntern()).enum_tag;
|
||||
return genTypedValue(
|
||||
lf,
|
||||
pt,
|
||||
src_loc,
|
||||
Value.fromInterned(enum_tag.int),
|
||||
owner_decl_index,
|
||||
@ -1055,14 +1064,15 @@ pub fn genTypedValue(
|
||||
.ErrorUnion => {
|
||||
const err_type = ty.errorUnionSet(zcu);
|
||||
const payload_type = ty.errorUnionPayload(zcu);
|
||||
if (!payload_type.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
if (!payload_type.hasRuntimeBitsIgnoreComptime(pt)) {
|
||||
// We use the error type directly as the type.
|
||||
const err_int_ty = try zcu.errorIntType();
|
||||
const err_int_ty = try pt.errorIntType();
|
||||
switch (ip.indexToKey(val.toIntern()).error_union.val) {
|
||||
.err_name => |err_name| return genTypedValue(
|
||||
lf,
|
||||
pt,
|
||||
src_loc,
|
||||
Value.fromInterned(try zcu.intern(.{ .err = .{
|
||||
Value.fromInterned(try pt.intern(.{ .err = .{
|
||||
.ty = err_type.toIntern(),
|
||||
.name = err_name,
|
||||
} })),
|
||||
@ -1070,8 +1080,9 @@ pub fn genTypedValue(
|
||||
),
|
||||
.payload => return genTypedValue(
|
||||
lf,
|
||||
pt,
|
||||
src_loc,
|
||||
try zcu.intValue(err_int_ty, 0),
|
||||
try pt.intValue(err_int_ty, 0),
|
||||
owner_decl_index,
|
||||
),
|
||||
}
|
||||
@ -1090,26 +1101,26 @@ pub fn genTypedValue(
|
||||
else => {},
|
||||
}
|
||||
|
||||
return genUnnamedConst(lf, src_loc, val, owner_decl_index);
|
||||
return genUnnamedConst(lf, pt, src_loc, val, owner_decl_index);
|
||||
}
|
||||
|
||||
pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 {
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
|
||||
const payload_align = payload_ty.abiAlignment(mod);
|
||||
const error_align = Type.anyerror.abiAlignment(mod);
|
||||
if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
|
||||
pub fn errUnionPayloadOffset(payload_ty: Type, pt: Zcu.PerThread) u64 {
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0;
|
||||
const payload_align = payload_ty.abiAlignment(pt);
|
||||
const error_align = Type.anyerror.abiAlignment(pt);
|
||||
if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
|
||||
return 0;
|
||||
} else {
|
||||
return payload_align.forward(Type.anyerror.abiSize(mod));
|
||||
return payload_align.forward(Type.anyerror.abiSize(pt));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 {
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
|
||||
const payload_align = payload_ty.abiAlignment(mod);
|
||||
const error_align = Type.anyerror.abiAlignment(mod);
|
||||
if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
|
||||
return error_align.forward(payload_ty.abiSize(mod));
|
||||
pub fn errUnionErrorOffset(payload_ty: Type, pt: Zcu.PerThread) u64 {
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0;
|
||||
const payload_align = payload_ty.abiAlignment(pt);
|
||||
const error_align = Type.anyerror.abiAlignment(pt);
|
||||
if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
|
||||
return error_align.forward(payload_ty.abiSize(pt));
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
|
||||
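Taken together, these codegen.zig hunks apply one mechanical rule: any query that can intern new values (sizes, alignments, coercions) now takes pt: Zcu.PerThread instead of mod: *Module, while read-only accesses reach the compilation unit through pt.zcu. A minimal sketch of the resulting calling convention, assuming only the signatures visible in this diff (the wrapper function itself is hypothetical, not part of the commit):

    // Hypothetical wrapper, for illustration only.
    fn lowerValueExample(
        bin_file: *link.File,
        pt: Zcu.PerThread,
        src_loc: Zcu.LazySrcLoc,
        val: Value,
        code: *std.ArrayList(u8),
        reloc_info: RelocInfo,
    ) CodeGenError!Result {
        const zcu = pt.zcu; // a bare *Zcu remains available for read-only queries
        log.debug("lowering {}", .{val.fmtValue(pt, null)});
        _ = val.typeOf(zcu).abiSize(pt); // layout queries go through the per-thread handle
        return generateSymbol(bin_file, pt, src_loc, val, code, .none, reloc_info);
    }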
File diff suppressed because it is too large
@@ -1339,11 +1339,11 @@ pub const Pool = struct {
 allocator: std.mem.Allocator,
 scratch: *std.ArrayListUnmanaged(u32),
 ty: Type,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
 mod: *Module,
 kind: Kind,
) !CType {
- const ip = &zcu.intern_pool;
+ const ip = &pt.zcu.intern_pool;
 switch (ty.toIntern()) {
 .u0_type,
 .i0_type,
@@ -1400,7 +1400,7 @@ pub const Pool = struct {
 allocator,
 scratch,
 Type.fromInterned(ip.loadEnumType(ip_index).tag_ty),
- zcu,
+ pt,
 mod,
 kind,
 ),
@@ -1409,7 +1409,7 @@ pub const Pool = struct {
 .adhoc_inferred_error_set_type,
 => return pool.fromIntInfo(allocator, .{
 .signedness = .unsigned,
- .bits = zcu.errorSetBits(),
+ .bits = pt.zcu.errorSetBits(),
 }, mod, kind),
 .manyptr_u8_type,
 => return pool.getPointer(allocator, .{
@@ -1492,13 +1492,13 @@ pub const Pool = struct {
 allocator,
 scratch,
 Type.fromInterned(ptr_info.child),
- zcu,
+ pt,
 mod,
 .forward,
 ),
 .alignas = AlignAs.fromAlignment(.{
 .@"align" = ptr_info.flags.alignment,
- .abi = Type.fromInterned(ptr_info.child).abiAlignment(zcu),
+ .abi = Type.fromInterned(ptr_info.child).abiAlignment(pt),
 }),
 };
 break :elem_ctype if (elem.alignas.abiOrder().compare(.gte))
@@ -1535,7 +1535,7 @@ pub const Pool = struct {
 allocator,
 scratch,
 Type.fromInterned(ip.slicePtrType(ip_index)),
- zcu,
+ pt,
 mod,
 kind,
 ),
@@ -1560,7 +1560,7 @@ pub const Pool = struct {
 allocator,
 scratch,
 elem_type,
- zcu,
+ pt,
 mod,
 kind.noParameter(),
 );
@@ -1574,7 +1574,7 @@ pub const Pool = struct {
 .{
 .name = .{ .index = .array },
 .ctype = array_ctype,
- .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)),
+ .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(pt)),
 },
 };
 return pool.fromFields(allocator, .@"struct", &fields, kind);
@@ -1586,7 +1586,7 @@ pub const Pool = struct {
 allocator,
 scratch,
 elem_type,
- zcu,
+ pt,
 mod,
 kind.noParameter(),
 );
@@ -1600,7 +1600,7 @@ pub const Pool = struct {
 .{
 .name = .{ .index = .array },
 .ctype = vector_ctype,
- .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)),
+ .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(pt)),
 },
 };
 return pool.fromFields(allocator, .@"struct", &fields, kind);
@@ -1611,7 +1611,7 @@ pub const Pool = struct {
 allocator,
 scratch,
 Type.fromInterned(payload_type),
- zcu,
+ pt,
 mod,
 kind.noParameter(),
 );
@@ -1635,7 +1635,7 @@ pub const Pool = struct {
 .name = .{ .index = .payload },
 .ctype = payload_ctype,
 .alignas = AlignAs.fromAbiAlignment(
- Type.fromInterned(payload_type).abiAlignment(zcu),
+ Type.fromInterned(payload_type).abiAlignment(pt),
 ),
 },
 };
@@ -1643,7 +1643,7 @@ pub const Pool = struct {
 },
 .anyframe_type => unreachable,
 .error_union_type => |error_union_info| {
- const error_set_bits = zcu.errorSetBits();
+ const error_set_bits = pt.zcu.errorSetBits();
 const error_set_ctype = try pool.fromIntInfo(allocator, .{
 .signedness = .unsigned,
 .bits = error_set_bits,
@@ -1654,7 +1654,7 @@ pub const Pool = struct {
 allocator,
 scratch,
 payload_type,
- zcu,
+ pt,
 mod,
 kind.noParameter(),
 );
@@ -1671,7 +1671,7 @@ pub const Pool = struct {
 .{
 .name = .{ .index = .payload },
 .ctype = payload_ctype,
- .alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(zcu)),
+ .alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(pt)),
 },
 };
 return pool.fromFields(allocator, .@"struct", &fields, kind);
@@ -1685,7 +1685,7 @@ pub const Pool = struct {
 .tag = .@"struct",
 .name = .{ .owner_decl = loaded_struct.decl.unwrap().? },
 });
- if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu))
+ if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt))
 fwd_decl
 else
 CType.void;
@@ -1706,7 +1706,7 @@ pub const Pool = struct {
 allocator,
 scratch,
 field_type,
- zcu,
+ pt,
 mod,
 kind.noParameter(),
 );
@@ -1718,7 +1718,7 @@ pub const Pool = struct {
 String.fromUnnamed(@intCast(field_index));
 const field_alignas = AlignAs.fromAlignment(.{
 .@"align" = loaded_struct.fieldAlign(ip, field_index),
- .abi = field_type.abiAlignment(zcu),
+ .abi = field_type.abiAlignment(pt),
 });
 pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
 .name = field_name.index,
@@ -1745,7 +1745,7 @@ pub const Pool = struct {
 allocator,
 scratch,
 Type.fromInterned(loaded_struct.backingIntType(ip).*),
- zcu,
+ pt,
 mod,
 kind,
 ),
@@ -1766,7 +1766,7 @@ pub const Pool = struct {
 allocator,
 scratch,
 field_type,
- zcu,
+ pt,
 mod,
 kind.noParameter(),
 );
@@ -1780,7 +1780,7 @@ pub const Pool = struct {
 .name = field_name.index,
 .ctype = field_ctype.index,
 .flags = .{ .alignas = AlignAs.fromAbiAlignment(
- field_type.abiAlignment(zcu),
+ field_type.abiAlignment(pt),
 ) },
 });
 }
@@ -1806,7 +1806,7 @@ pub const Pool = struct {
 extra_index,
 );
 }
- const fwd_decl = try pool.fromType(allocator, scratch, ty, zcu, mod, .forward);
+ const fwd_decl = try pool.fromType(allocator, scratch, ty, pt, mod, .forward);
 try pool.ensureUnusedCapacity(allocator, 1);
 const extra_index = try pool.addHashedExtra(allocator, &hasher, Aggregate, .{
 .fwd_decl = fwd_decl.index,
@@ -1824,7 +1824,7 @@ pub const Pool = struct {
 .tag = if (has_tag) .@"struct" else .@"union",
 .name = .{ .owner_decl = loaded_union.decl },
 });
- if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu))
+ if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt))
 fwd_decl
 else
 CType.void;
@@ -1847,7 +1847,7 @@ pub const Pool = struct {
 allocator,
 scratch,
 field_type,
- zcu,
+ pt,
 mod,
 kind.noParameter(),
 );
@@ -1858,7 +1858,7 @@ pub const Pool = struct {
 );
 const field_alignas = AlignAs.fromAlignment(.{
 .@"align" = loaded_union.fieldAlign(ip, field_index),
- .abi = field_type.abiAlignment(zcu),
+ .abi = field_type.abiAlignment(pt),
 });
 pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
 .name = field_name.index,
@@ -1895,7 +1895,7 @@ pub const Pool = struct {
 allocator,
 scratch,
 tag_type,
- zcu,
+ pt,
 mod,
 kind.noParameter(),
 );
@@ -1903,7 +1903,7 @@ pub const Pool = struct {
 struct_fields[struct_fields_len] = .{
 .name = .{ .index = .tag },
 .ctype = tag_ctype,
- .alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(zcu)),
+ .alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(pt)),
 };
 struct_fields_len += 1;
 }
@@ -1951,7 +1951,7 @@ pub const Pool = struct {
 },
 .@"packed" => return pool.fromIntInfo(allocator, .{
 .signedness = .unsigned,
- .bits = @intCast(ty.bitSize(zcu)),
+ .bits = @intCast(ty.bitSize(pt)),
 }, mod, kind),
 }
 },
@@ -1960,7 +1960,7 @@ pub const Pool = struct {
 allocator,
 scratch,
 Type.fromInterned(ip.loadEnumType(ip_index).tag_ty),
- zcu,
+ pt,
 mod,
 kind,
 ),
@@ -1975,7 +1975,7 @@ pub const Pool = struct {
 allocator,
 scratch,
 return_type,
- zcu,
+ pt,
 mod,
 kind.asParameter(),
 ) else CType.void;
@@ -1987,7 +1987,7 @@ pub const Pool = struct {
 allocator,
 scratch,
 param_type,
- zcu,
+ pt,
 mod,
 kind.asParameter(),
 );
@@ -2011,7 +2011,7 @@ pub const Pool = struct {
 .inferred_error_set_type,
 => return pool.fromIntInfo(allocator, .{
 .signedness = .unsigned,
- .bits = zcu.errorSetBits(),
+ .bits = pt.zcu.errorSetBits(),
 }, mod, kind),

 .undef,
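The C backend's Pool.fromType shows the same substitution applied recursively: pt replaces the old zcu: *Zcu parameter, while the per-file mod: *Module continues to be threaded alongside it. The recursion shape, abridged from the hunks above:

    // Abridged from the diff: recursing on a child type with the new parameter list.
    const elem_ctype = try pool.fromType(
        allocator,
        scratch,
        Type.fromInterned(ptr_info.child),
        pt, // previously: zcu
        mod, // per-file settings, unchanged
        .forward,
    );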
src/codegen/llvm.zig (1431 lines changed): file diff suppressed because it is too large
File diff suppressed because it is too large
@@ -76,9 +76,9 @@ fn dumpStatusReport() !void {

 const stderr = io.getStdErr().writer();
 const block: *Sema.Block = anal.block;
- const mod = anal.sema.mod;
+ const zcu = anal.sema.pt.zcu;

- const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, mod);
+ const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu);

 try stderr.writeAll("Analyzing ");
 try writeFilePath(file, stderr);
@@ -104,7 +104,7 @@ fn dumpStatusReport() !void {
 while (parent) |curr| {
 fba.reset();
 try stderr.writeAll(" in ");
- const cur_block_file, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, mod);
+ const cur_block_file, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, zcu);
 try writeFilePath(cur_block_file, stderr);
 try stderr.writeAll("\n > ");
 print_zir.renderSingleInstruction(
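The crash-report change is the consumer side of the refactor: Sema now carries a pt: Zcu.PerThread, so diagnostics reach the compilation unit through it. A minimal sketch, assuming only the fields shown above:

    // Sketch: recovering the Zcu from a Sema while rendering a status report.
    const zcu = anal.sema.pt.zcu;
    const file, const src_base_node =
        Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu);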
src/link.zig (49 lines changed)
@@ -15,8 +15,6 @@ const Compilation = @import("Compilation.zig");
 const LibCInstallation = std.zig.LibCInstallation;
 const Liveness = @import("Liveness.zig");
 const Zcu = @import("Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
 const InternPool = @import("InternPool.zig");
 const Type = @import("Type.zig");
 const Value = @import("Value.zig");
@@ -367,14 +365,14 @@ pub const File = struct {
 /// Called from within the CodeGen to lower a local variable instantion as an unnamed
 /// constant. Returns the symbol index of the lowered constant in the read-only section
 /// of the final binary.
- pub fn lowerUnnamedConst(base: *File, val: Value, decl_index: InternPool.DeclIndex) UpdateDeclError!u32 {
+ pub fn lowerUnnamedConst(base: *File, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) UpdateDeclError!u32 {
 if (build_options.only_c) @compileError("unreachable");
 switch (base.tag) {
 .spirv => unreachable,
 .c => unreachable,
 .nvptx => unreachable,
 inline else => |t| {
- return @as(*t.Type(), @fieldParentPtr("base", base)).lowerUnnamedConst(val, decl_index);
+ return @as(*t.Type(), @fieldParentPtr("base", base)).lowerUnnamedConst(pt, val, decl_index);
 },
 }
 }
@@ -399,13 +397,13 @@ pub const File = struct {
 }

 /// May be called before or after updateExports for any given Decl.
- pub fn updateDecl(base: *File, module: *Module, decl_index: InternPool.DeclIndex) UpdateDeclError!void {
- const decl = module.declPtr(decl_index);
+ pub fn updateDecl(base: *File, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) UpdateDeclError!void {
+ const decl = pt.zcu.declPtr(decl_index);
 assert(decl.has_tv);
 switch (base.tag) {
 inline else => |tag| {
 if (tag != .c and build_options.only_c) unreachable;
- return @as(*tag.Type(), @fieldParentPtr("base", base)).updateDecl(module, decl_index);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).updateDecl(pt, decl_index);
 },
 }
 }
@@ -413,7 +411,7 @@ pub const File = struct {
 /// May be called before or after updateExports for any given Decl.
 pub fn updateFunc(
 base: *File,
- module: *Module,
+ pt: Zcu.PerThread,
 func_index: InternPool.Index,
 air: Air,
 liveness: Liveness,
@@ -421,12 +419,12 @@ pub const File = struct {
 switch (base.tag) {
 inline else => |tag| {
 if (tag != .c and build_options.only_c) unreachable;
- return @as(*tag.Type(), @fieldParentPtr("base", base)).updateFunc(module, func_index, air, liveness);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).updateFunc(pt, func_index, air, liveness);
 },
 }
 }

- pub fn updateDeclLineNumber(base: *File, module: *Module, decl_index: InternPool.DeclIndex) UpdateDeclError!void {
+ pub fn updateDeclLineNumber(base: *File, module: *Zcu, decl_index: InternPool.DeclIndex) UpdateDeclError!void {
 const decl = module.declPtr(decl_index);
 assert(decl.has_tv);
 switch (base.tag) {
@@ -537,7 +535,7 @@ pub const File = struct {
 /// Commit pending changes and write headers. Takes into account final output mode
 /// and `use_lld`, not only `effectiveOutputMode`.
 /// `arena` has the lifetime of the call to `Compilation.update`.
- pub fn flush(base: *File, arena: Allocator, prog_node: std.Progress.Node) FlushError!void {
+ pub fn flush(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void {
 if (build_options.only_c) {
 assert(base.tag == .c);
 return @as(*C, @fieldParentPtr("base", base)).flush(arena, prog_node);
@@ -563,27 +561,27 @@ pub const File = struct {
 const output_mode = comp.config.output_mode;
 const link_mode = comp.config.link_mode;
 if (use_lld and output_mode == .Lib and link_mode == .static) {
- return base.linkAsArchive(arena, prog_node);
+ return base.linkAsArchive(arena, tid, prog_node);
 }
 switch (base.tag) {
 inline else => |tag| {
- return @as(*tag.Type(), @fieldParentPtr("base", base)).flush(arena, prog_node);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).flush(arena, tid, prog_node);
 },
 }
 }

 /// Commit pending changes and write headers. Works based on `effectiveOutputMode`
 /// rather than final output mode.
- pub fn flushModule(base: *File, arena: Allocator, prog_node: std.Progress.Node) FlushError!void {
+ pub fn flushModule(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void {
 switch (base.tag) {
 inline else => |tag| {
 if (tag != .c and build_options.only_c) unreachable;
- return @as(*tag.Type(), @fieldParentPtr("base", base)).flushModule(arena, prog_node);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).flushModule(arena, tid, prog_node);
 },
 }
 }

- /// Called when a Decl is deleted from the Module.
+ /// Called when a Decl is deleted from the Zcu.
 pub fn freeDecl(base: *File, decl_index: InternPool.DeclIndex) void {
 switch (base.tag) {
 inline else => |tag| {
@@ -604,14 +602,14 @@ pub const File = struct {
 /// May be called before or after updateDecl for any given Decl.
 pub fn updateExports(
 base: *File,
- module: *Module,
- exported: Module.Exported,
+ pt: Zcu.PerThread,
+ exported: Zcu.Exported,
 export_indices: []const u32,
 ) UpdateExportsError!void {
 switch (base.tag) {
 inline else => |tag| {
 if (tag != .c and build_options.only_c) unreachable;
- return @as(*tag.Type(), @fieldParentPtr("base", base)).updateExports(module, exported, export_indices);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).updateExports(pt, exported, export_indices);
 },
 }
 }
@@ -644,9 +642,10 @@ pub const File = struct {

 pub fn lowerAnonDecl(
 base: *File,
+ pt: Zcu.PerThread,
 decl_val: InternPool.Index,
 decl_align: InternPool.Alignment,
- src_loc: Module.LazySrcLoc,
+ src_loc: Zcu.LazySrcLoc,
 ) !LowerResult {
 if (build_options.only_c) @compileError("unreachable");
 switch (base.tag) {
@@ -654,7 +653,7 @@ pub const File = struct {
 .spirv => unreachable,
 .nvptx => unreachable,
 inline else => |tag| {
- return @as(*tag.Type(), @fieldParentPtr("base", base)).lowerAnonDecl(decl_val, decl_align, src_loc);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).lowerAnonDecl(pt, decl_val, decl_align, src_loc);
 },
 }
 }
@@ -689,7 +688,7 @@ pub const File = struct {
 }
 }

- pub fn linkAsArchive(base: *File, arena: Allocator, prog_node: std.Progress.Node) FlushError!void {
+ pub fn linkAsArchive(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void {
 const tracy = trace(@src());
 defer tracy.end();

@@ -704,7 +703,7 @@ pub const File = struct {
 // If there is no Zig code to compile, then we should skip flushing the output file
 // because it will not be part of the linker line anyway.
 const zcu_obj_path: ?[]const u8 = if (opt_zcu != null) blk: {
- try base.flushModule(arena, prog_node);
+ try base.flushModule(arena, tid, prog_node);

 const dirname = fs.path.dirname(full_out_path_z) orelse ".";
 break :blk try fs.path.join(arena, &.{ dirname, base.zcu_object_sub_path.? });
@@ -896,14 +895,14 @@ pub const File = struct {
 kind: Kind,
 ty: Type,

- pub fn initDecl(kind: Kind, decl: ?InternPool.DeclIndex, mod: *Module) LazySymbol {
+ pub fn initDecl(kind: Kind, decl: ?InternPool.DeclIndex, mod: *Zcu) LazySymbol {
 return .{ .kind = kind, .ty = if (decl) |decl_index|
 mod.declPtr(decl_index).val.toType()
 else
 Type.anyerror };
 }

- pub fn getDecl(self: LazySymbol, mod: *Module) InternPool.OptionalDeclIndex {
+ pub fn getDecl(self: LazySymbol, mod: *Zcu) InternPool.OptionalDeclIndex {
 return InternPool.OptionalDeclIndex.init(self.ty.getOwnerDeclOrNull(mod));
 }
 };
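Note the split in the link.File interface: per-declaration hooks receive the full pt: Zcu.PerThread, while the flush entry points only receive a tid: Zcu.PerThread.Id, leaving each backend to pair the id with its Zcu. A hedged sketch of that pairing, following the pattern the C backend uses below (the function name is hypothetical):

    // Sketch: a backend flush rebuilding a PerThread from the thread id.
    fn exampleFlush(base: *link.File, tid: Zcu.PerThread.Id) void {
        const zcu = base.comp.module.?;
        const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = tid };
        _ = pt; // then passed on to update/lower helpers
    }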
@@ -186,13 +186,13 @@ pub fn freeDecl(self: *C, decl_index: InternPool.DeclIndex) void {

pub fn updateFunc(
 self: *C,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
 func_index: InternPool.Index,
 air: Air,
 liveness: Liveness,
) !void {
- const gpa = self.base.comp.gpa;
-
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
 const func = zcu.funcInfo(func_index);
 const decl_index = func.owner_decl;
 const decl = zcu.declPtr(decl_index);
@@ -218,7 +218,7 @@ pub fn updateFunc(
 .object = .{
 .dg = .{
 .gpa = gpa,
- .zcu = zcu,
+ .pt = pt,
 .mod = file_scope.mod,
 .error_msg = null,
 .pass = .{ .decl = decl_index },
@@ -263,7 +263,7 @@ pub fn updateFunc(
 gop.value_ptr.code = try self.addString(function.object.code.items);
}

-fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void {
+fn updateAnonDecl(self: *C, pt: Zcu.PerThread, i: usize) !void {
 const gpa = self.base.comp.gpa;
 const anon_decl = self.anon_decls.keys()[i];

@@ -275,8 +275,8 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void {
 var object: codegen.Object = .{
 .dg = .{
 .gpa = gpa,
- .zcu = zcu,
- .mod = zcu.root_mod,
+ .pt = pt,
+ .mod = pt.zcu.root_mod,
 .error_msg = null,
 .pass = .{ .anon = anon_decl },
 .is_naked_fn = false,
@@ -319,12 +319,13 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void {
 };
}

-pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void {
+pub fn updateDecl(self: *C, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
 const tracy = trace(@src());
 defer tracy.end();

 const gpa = self.base.comp.gpa;

+ const zcu = pt.zcu;
 const decl = zcu.declPtr(decl_index);
 const gop = try self.decl_table.getOrPut(gpa, decl_index);
 errdefer _ = self.decl_table.pop();
@@ -342,7 +343,7 @@ pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void {
 var object: codegen.Object = .{
 .dg = .{
 .gpa = gpa,
- .zcu = zcu,
+ .pt = pt,
 .mod = file_scope.mod,
 .error_msg = null,
 .pass = .{ .decl = decl_index },
@@ -390,8 +391,8 @@ pub fn updateDeclLineNumber(self: *C, zcu: *Zcu, decl_index: InternPool.DeclInde
 _ = decl_index;
}

-pub fn flush(self: *C, arena: Allocator, prog_node: std.Progress.Node) !void {
- return self.flushModule(arena, prog_node);
+pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
+ return self.flushModule(arena, tid, prog_node);
}

fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) {
@@ -409,7 +410,7 @@ fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) {
 return defines;
}

-pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !void {
+pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
 _ = arena; // Has the same lifetime as the call to Compilation.update.

 const tracy = trace(@src());
@@ -421,11 +422,12 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo
 const comp = self.base.comp;
 const gpa = comp.gpa;
 const zcu = self.base.comp.module.?;
+ const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = tid };

 {
 var i: usize = 0;
 while (i < self.anon_decls.count()) : (i += 1) {
- try updateAnonDecl(self, zcu, i);
+ try updateAnonDecl(self, pt, i);
 }
 }

@@ -463,7 +465,7 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo
 self.lazy_fwd_decl_buf.clearRetainingCapacity();
 self.lazy_code_buf.clearRetainingCapacity();
 try f.lazy_ctype_pool.init(gpa);
- try self.flushErrDecls(zcu, &f.lazy_ctype_pool);
+ try self.flushErrDecls(pt, &f.lazy_ctype_pool);

 // Unlike other backends, the .c code we are emitting has order-dependent decls.
 // `CType`s, forward decls, and non-functions first.
@@ -483,7 +485,7 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo
 }

 for (self.anon_decls.keys(), self.anon_decls.values()) |value, *decl_block| try self.flushDeclBlock(
- zcu,
+ pt,
 zcu.root_mod,
 &f,
 decl_block,
@@ -497,7 +499,7 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo
 const extern_name = if (decl.isExtern(zcu)) decl.name.toOptional() else .none;
 const mod = zcu.namespacePtr(decl.src_namespace).fileScope(zcu).mod;
 try self.flushDeclBlock(
- zcu,
+ pt,
 mod,
 &f,
 decl_block,
@@ -670,7 +672,7 @@ fn flushCTypes(
 }
}

-fn flushErrDecls(self: *C, zcu: *Zcu, ctype_pool: *codegen.CType.Pool) FlushDeclError!void {
+fn flushErrDecls(self: *C, pt: Zcu.PerThread, ctype_pool: *codegen.CType.Pool) FlushDeclError!void {
 const gpa = self.base.comp.gpa;

 const fwd_decl = &self.lazy_fwd_decl_buf;
@@ -679,8 +681,8 @@ fn flushErrDecls(self: *C, zcu: *Zcu, ctype_pool: *codegen.CType.Pool) FlushDecl
 var object = codegen.Object{
 .dg = .{
 .gpa = gpa,
- .zcu = zcu,
- .mod = zcu.root_mod,
+ .pt = pt,
+ .mod = pt.zcu.root_mod,
 .error_msg = null,
 .pass = .flush,
 .is_naked_fn = false,
@@ -712,7 +714,7 @@ fn flushErrDecls(self: *C, zcu: *Zcu, ctype_pool: *codegen.CType.Pool) FlushDecl

fn flushLazyFn(
 self: *C,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
 mod: *Module,
 ctype_pool: *codegen.CType.Pool,
 lazy_ctype_pool: *const codegen.CType.Pool,
@@ -726,7 +728,7 @@ fn flushLazyFn(
 var object = codegen.Object{
 .dg = .{
 .gpa = gpa,
- .zcu = zcu,
+ .pt = pt,
 .mod = mod,
 .error_msg = null,
 .pass = .flush,
@@ -761,7 +763,7 @@ fn flushLazyFn(

fn flushLazyFns(
 self: *C,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
 mod: *Module,
 f: *Flush,
 lazy_ctype_pool: *const codegen.CType.Pool,
@@ -775,13 +777,13 @@ fn flushLazyFns(
 const gop = f.lazy_fns.getOrPutAssumeCapacity(entry.key_ptr.*);
 if (gop.found_existing) continue;
 gop.value_ptr.* = {};
- try self.flushLazyFn(zcu, mod, &f.lazy_ctype_pool, lazy_ctype_pool, entry);
+ try self.flushLazyFn(pt, mod, &f.lazy_ctype_pool, lazy_ctype_pool, entry);
 }
}

fn flushDeclBlock(
 self: *C,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
 mod: *Module,
 f: *Flush,
 decl_block: *const DeclBlock,
@@ -790,7 +792,7 @@ fn flushDeclBlock(
 extern_name: InternPool.OptionalNullTerminatedString,
) FlushDeclError!void {
 const gpa = self.base.comp.gpa;
- try self.flushLazyFns(zcu, mod, f, &decl_block.ctype_pool, decl_block.lazy_fns);
+ try self.flushLazyFns(pt, mod, f, &decl_block.ctype_pool, decl_block.lazy_fns);
 try f.all_buffers.ensureUnusedCapacity(gpa, 1);
 // avoid emitting extern decls that are already exported
 if (extern_name.unwrap()) |name| if (export_names.contains(name)) return;
@@ -845,11 +847,12 @@ pub fn flushEmitH(zcu: *Zcu) !void {

pub fn updateExports(
 self: *C,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
 exported: Zcu.Exported,
 export_indices: []const u32,
) !void {
- const gpa = self.base.comp.gpa;
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
 const mod, const pass: codegen.DeclGen.Pass, const decl_block, const exported_block = switch (exported) {
 .decl_index => |decl_index| .{
 zcu.namespacePtr(zcu.declPtr(decl_index).src_namespace).fileScope(zcu).mod,
@@ -869,7 +872,7 @@ pub fn updateExports(
 fwd_decl.clearRetainingCapacity();
 var dg: codegen.DeclGen = .{
 .gpa = gpa,
- .zcu = zcu,
+ .pt = pt,
 .mod = mod,
 .error_msg = null,
 .pass = pass,
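With this, the C backend's DeclGen stores pt instead of zcu; wherever the old code read dg.zcu, the new code reads dg.pt.zcu. A sketch of the updated generator initialization, abridged from the hunks above (fields outside the diff omitted):

    var object: codegen.Object = .{
        .dg = .{
            .gpa = gpa,
            .pt = pt, // previously: .zcu = zcu
            .mod = pt.zcu.root_mod,
            .error_msg = null,
            .pass = .flush,
            .is_naked_fn = false,
        },
    };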
@ -1120,16 +1120,17 @@ fn freeAtom(self: *Coff, atom_index: Atom.Index) void {
|
||||
self.getAtomPtr(atom_index).sym_index = 0;
|
||||
}
|
||||
|
||||
pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
|
||||
pub fn updateFunc(self: *Coff, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
|
||||
if (build_options.skip_non_native and builtin.object_format != .coff) {
|
||||
@panic("Attempted to compile for object format that was disabled by build configuration");
|
||||
}
|
||||
if (self.llvm_object) |llvm_object| {
|
||||
return llvm_object.updateFunc(mod, func_index, air, liveness);
|
||||
return llvm_object.updateFunc(pt, func_index, air, liveness);
|
||||
}
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
const mod = pt.zcu;
|
||||
const func = mod.funcInfo(func_index);
|
||||
const decl_index = func.owner_decl;
|
||||
const decl = mod.declPtr(decl_index);
|
||||
@ -1144,6 +1145,7 @@ pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air:
|
||||
|
||||
const res = try codegen.generateFunction(
|
||||
&self.base,
|
||||
pt,
|
||||
decl.navSrcLoc(mod),
|
||||
func_index,
|
||||
air,
|
||||
@ -1160,14 +1162,14 @@ pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air:
|
||||
},
|
||||
};
|
||||
|
||||
try self.updateDeclCode(decl_index, code, .FUNCTION);
|
||||
try self.updateDeclCode(pt, decl_index, code, .FUNCTION);
|
||||
|
||||
// Exports will be updated by `Zcu.processExports` after the update.
|
||||
}
|
||||
|
||||
pub fn lowerUnnamedConst(self: *Coff, val: Value, decl_index: InternPool.DeclIndex) !u32 {
|
||||
const gpa = self.base.comp.gpa;
|
||||
const mod = self.base.comp.module.?;
|
||||
pub fn lowerUnnamedConst(self: *Coff, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 {
|
||||
const mod = pt.zcu;
|
||||
const gpa = mod.gpa;
|
||||
const decl = mod.declPtr(decl_index);
|
||||
const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index);
|
||||
if (!gop.found_existing) {
|
||||
@ -1179,7 +1181,7 @@ pub fn lowerUnnamedConst(self: *Coff, val: Value, decl_index: InternPool.DeclInd
|
||||
const sym_name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index });
|
||||
defer gpa.free(sym_name);
|
||||
const ty = val.typeOf(mod);
|
||||
const atom_index = switch (try self.lowerConst(sym_name, val, ty.abiAlignment(mod), self.rdata_section_index.?, decl.navSrcLoc(mod))) {
|
||||
const atom_index = switch (try self.lowerConst(pt, sym_name, val, ty.abiAlignment(pt), self.rdata_section_index.?, decl.navSrcLoc(mod))) {
|
||||
.ok => |atom_index| atom_index,
|
||||
.fail => |em| {
|
||||
decl.analysis = .codegen_failure;
|
||||
@ -1197,7 +1199,15 @@ const LowerConstResult = union(enum) {
|
||||
fail: *Module.ErrorMsg,
|
||||
};
|
||||
|
||||
fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: InternPool.Alignment, sect_id: u16, src_loc: Module.LazySrcLoc) !LowerConstResult {
|
||||
fn lowerConst(
|
||||
self: *Coff,
|
||||
pt: Zcu.PerThread,
|
||||
name: []const u8,
|
||||
val: Value,
|
||||
required_alignment: InternPool.Alignment,
|
||||
sect_id: u16,
|
||||
src_loc: Module.LazySrcLoc,
|
||||
) !LowerConstResult {
|
||||
const gpa = self.base.comp.gpa;
|
||||
|
||||
var code_buffer = std.ArrayList(u8).init(gpa);
|
||||
@ -1208,7 +1218,7 @@ fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: Int
|
||||
try self.setSymbolName(sym, name);
|
||||
sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_id + 1));
|
||||
|
||||
const res = try codegen.generateSymbol(&self.base, src_loc, val, &code_buffer, .none, .{
|
||||
const res = try codegen.generateSymbol(&self.base, pt, src_loc, val, &code_buffer, .none, .{
|
||||
.parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
|
||||
});
|
||||
const code = switch (res) {
|
||||
@ -1235,13 +1245,14 @@ fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: Int
|
||||
|
||||
pub fn updateDecl(
|
||||
self: *Coff,
|
||||
mod: *Module,
|
||||
pt: Zcu.PerThread,
|
||||
decl_index: InternPool.DeclIndex,
|
||||
) link.File.UpdateDeclError!void {
|
||||
const mod = pt.zcu;
|
||||
if (build_options.skip_non_native and builtin.object_format != .coff) {
|
||||
@panic("Attempted to compile for object format that was disabled by build configuration");
|
||||
}
|
||||
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index);
|
||||
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(pt, decl_index);
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
@ -1270,7 +1281,7 @@ pub fn updateDecl(
|
||||
defer code_buffer.deinit();
|
||||
|
||||
const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
|
||||
const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{
|
||||
const res = try codegen.generateSymbol(&self.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{
|
||||
.parent_atom_index = atom.getSymbolIndex().?,
|
||||
});
|
||||
const code = switch (res) {
|
||||
@ -1282,19 +1293,20 @@ pub fn updateDecl(
|
||||
},
|
||||
};
|
||||
|
||||
try self.updateDeclCode(decl_index, code, .NULL);
|
||||
try self.updateDeclCode(pt, decl_index, code, .NULL);
|
||||
|
||||
// Exports will be updated by `Zcu.processExports` after the update.
|
||||
}
|
||||
|
||||
fn updateLazySymbolAtom(
self: *Coff,
pt: Zcu.PerThread,
sym: link.File.LazySymbol,
atom_index: Atom.Index,
section_index: u16,
) !void {
const gpa = self.base.comp.gpa;
const mod = self.base.comp.module.?;
const mod = pt.zcu;
const gpa = mod.gpa;

var required_alignment: InternPool.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
@ -1302,7 +1314,7 @@ fn updateLazySymbolAtom(

const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{
@tagName(sym.kind),
sym.ty.fmt(mod),
sym.ty.fmt(pt),
});
defer gpa.free(name);

@ -1312,6 +1324,7 @@ fn updateLazySymbolAtom(
const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded;
const res = try codegen.generateLazySymbol(
&self.base,
pt,
src,
sym,
&required_alignment,
@ -1346,7 +1359,7 @@ fn updateLazySymbolAtom(
try self.writeAtom(atom_index, code);
}

pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Atom.Index {
pub fn getOrCreateAtomForLazySymbol(self: *Coff, pt: Zcu.PerThread, sym: link.File.LazySymbol) !Atom.Index {
const gpa = self.base.comp.gpa;
const mod = self.base.comp.module.?;
const gop = try self.lazy_syms.getOrPut(gpa, sym.getDecl(mod));
@ -1364,7 +1377,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Ato
metadata.state.* = .pending_flush;
const atom = metadata.atom.*;
// anyerror needs to be deferred until flushModule
if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) {
if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(pt, sym, atom, switch (sym.kind) {
.code => self.text_section_index.?,
.const_data => self.rdata_section_index.?,
});
@ -1410,14 +1423,14 @@ fn getDeclOutputSection(self: *Coff, decl_index: InternPool.DeclIndex) u16 {
return index;
}

fn updateDeclCode(self: *Coff, decl_index: InternPool.DeclIndex, code: []u8, complex_type: coff.ComplexType) !void {
const mod = self.base.comp.module.?;
fn updateDeclCode(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, code: []u8, complex_type: coff.ComplexType) !void {
const mod = pt.zcu;
const decl = mod.declPtr(decl_index);

const decl_name = try decl.fullyQualifiedName(mod);

log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl });
const required_alignment: u32 = @intCast(decl.getAlignment(mod).toByteUnits() orelse 0);
const required_alignment: u32 = @intCast(decl.getAlignment(pt).toByteUnits() orelse 0);

const decl_metadata = self.decls.get(decl_index).?;
const atom_index = decl_metadata.atom;
@ -1496,7 +1509,7 @@ pub fn freeDecl(self: *Coff, decl_index: InternPool.DeclIndex) void {

pub fn updateExports(
self: *Coff,
mod: *Module,
pt: Zcu.PerThread,
exported: Module.Exported,
export_indices: []const u32,
) link.File.UpdateExportsError!void {
@ -1504,6 +1517,7 @@ pub fn updateExports(
@panic("Attempted to compile for object format that was disabled by build configuration");
}

const mod = pt.zcu;
const ip = &mod.intern_pool;
const comp = self.base.comp;
const target = comp.root_mod.resolved_target.result;
@ -1542,7 +1556,7 @@ pub fn updateExports(
}
}

if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices);
if (self.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, export_indices);

const gpa = comp.gpa;

@ -1553,7 +1567,7 @@ pub fn updateExports(
},
.value => |value| self.anon_decls.getPtr(value) orelse blk: {
const first_exp = mod.all_exports.items[export_indices[0]];
const res = try self.lowerAnonDecl(value, .none, first_exp.src);
const res = try self.lowerAnonDecl(pt, value, .none, first_exp.src);
switch (res) {
.ok => {},
.fail => |em| {
@ -1696,19 +1710,19 @@ fn resolveGlobalSymbol(self: *Coff, current: SymbolWithLoc) !void {
gop.value_ptr.* = current;
}

pub fn flush(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flush(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const comp = self.base.comp;
const use_lld = build_options.have_llvm and comp.config.use_lld;
if (use_lld) {
return lld.linkWithLLD(self, arena, prog_node);
return lld.linkWithLLD(self, arena, tid, prog_node);
}
switch (comp.config.output_mode) {
.Exe, .Obj => return self.flushModule(arena, prog_node),
.Exe, .Obj => return self.flushModule(arena, tid, prog_node),
.Lib => return error.TODOImplementWritingLibFiles,
}
}

pub fn flushModule(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flushModule(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();

@ -1723,13 +1737,17 @@ pub fn flushModule(self: *Coff, arena: Allocator, prog_node: std.Progress.Node)
const sub_prog_node = prog_node.start("COFF Flush", 0);
defer sub_prog_node.end();

const module = comp.module orelse return error.LinkingWithoutZigSourceUnimplemented;
const pt: Zcu.PerThread = .{
.zcu = comp.module orelse return error.LinkingWithoutZigSourceUnimplemented,
.tid = tid,
};

if (self.lazy_syms.getPtr(.none)) |metadata| {
// Most lazy symbols can be updated on first use, but
// anyerror needs to wait for everything to be flushed.
if (metadata.text_state != .unused) self.updateLazySymbolAtom(
link.File.LazySymbol.initDecl(.code, null, module),
pt,
link.File.LazySymbol.initDecl(.code, null, pt.zcu),
metadata.text_atom,
self.text_section_index.?,
) catch |err| return switch (err) {
@ -1737,7 +1755,8 @@ pub fn flushModule(self: *Coff, arena: Allocator, prog_node: std.Progress.Node)
else => |e| e,
};
if (metadata.rdata_state != .unused) self.updateLazySymbolAtom(
link.File.LazySymbol.initDecl(.const_data, null, module),
pt,
link.File.LazySymbol.initDecl(.const_data, null, pt.zcu),
metadata.rdata_atom,
self.rdata_section_index.?,
) catch |err| return switch (err) {
@ -1858,6 +1877,7 @@ pub fn getDeclVAddr(self: *Coff, decl_index: InternPool.DeclIndex, reloc_info: l

pub fn lowerAnonDecl(
self: *Coff,
pt: Zcu.PerThread,
decl_val: InternPool.Index,
explicit_alignment: InternPool.Alignment,
src_loc: Module.LazySrcLoc,
@ -1866,7 +1886,7 @@ pub fn lowerAnonDecl(
const mod = self.base.comp.module.?;
const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
const decl_alignment = switch (explicit_alignment) {
.none => ty.abiAlignment(mod),
.none => ty.abiAlignment(pt),
else => explicit_alignment,
};
if (self.anon_decls.get(decl_val)) |metadata| {
@ -1881,6 +1901,7 @@ pub fn lowerAnonDecl(
@intFromEnum(decl_val),
}) catch unreachable;
const res = self.lowerConst(
pt,
name,
val,
decl_alignment,

@ -15,8 +15,9 @@ const Allocator = mem.Allocator;

const Coff = @import("../Coff.zig");
const Compilation = @import("../../Compilation.zig");
const Zcu = @import("../../Zcu.zig");

pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) !void {
pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
const tracy = trace(@src());
defer tracy.end();

@ -29,7 +30,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: std.Progress.Node)
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (comp.module != null) blk: {
try self.flushModule(arena, prog_node);
try self.flushModule(arena, tid, prog_node);

if (fs.path.dirname(full_out_path)) |dirname| {
break :blk try fs.path.join(arena, &.{ dirname, self.base.zcu_object_sub_path.? });

@ -31,7 +31,7 @@ strtab: StringTable = .{},
/// They will end up in the DWARF debug_line header as two lists:
/// * []include_directory
/// * []file_names
di_files: std.AutoArrayHashMapUnmanaged(*const Module.File, void) = .{},
di_files: std.AutoArrayHashMapUnmanaged(*const Zcu.File, void) = .{},

global_abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{},

@ -67,7 +67,7 @@ const DbgLineHeader = struct {
/// Decl's inner Atom is assigned an offset within the DWARF section.
pub const DeclState = struct {
dwarf: *Dwarf,
mod: *Module,
pt: Zcu.PerThread,
di_atom_decls: *const AtomTable,
dbg_line_func: InternPool.Index,
dbg_line: std.ArrayList(u8),
@ -113,7 +113,7 @@ pub const DeclState = struct {
.type = ty,
.offset = undefined,
});
log.debug("%{d}: {}", .{ sym_index, ty.fmt(self.mod) });
log.debug("%{d}: {}", .{ sym_index, ty.fmt(self.pt) });
try self.abbrev_resolver.putNoClobber(gpa, ty.toIntern(), sym_index);
break :blk sym_index;
};
@ -128,16 +128,17 @@ pub const DeclState = struct {

fn addDbgInfoType(
self: *DeclState,
mod: *Module,
pt: Zcu.PerThread,
atom_index: Atom.Index,
ty: Type,
) error{OutOfMemory}!void {
const zcu = pt.zcu;
const dbg_info_buffer = &self.dbg_info;
const target = mod.getTarget();
const target = zcu.getTarget();
const target_endian = target.cpu.arch.endian();
const ip = &mod.intern_pool;
const ip = &zcu.intern_pool;

switch (ty.zigTypeTag(mod)) {
switch (ty.zigTypeTag(zcu)) {
.NoReturn => unreachable,
.Void => {
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.zero_bit_type));
@ -148,12 +149,12 @@ pub const DeclState = struct {
// DW.AT.encoding, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(DW.ATE.boolean);
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod));
try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
},
.Int => {
const info = ty.intInfo(mod);
const info = ty.intInfo(zcu);
try dbg_info_buffer.ensureUnusedCapacity(12);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.base_type));
// DW.AT.encoding, DW.FORM.data1
@ -162,30 +163,30 @@ pub const DeclState = struct {
.unsigned => DW.ATE.unsigned,
});
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod));
try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
},
.Optional => {
if (ty.isPtrLikeOptional(mod)) {
if (ty.isPtrLikeOptional(zcu)) {
try dbg_info_buffer.ensureUnusedCapacity(12);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.base_type));
// DW.AT.encoding, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(DW.ATE.address);
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod));
try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
} else {
// Non-pointer optionals are structs: struct { .maybe = *, .val = * }
const payload_ty = ty.optionalChild(mod);
const payload_ty = ty.optionalChild(zcu);
// DW.AT.structure_type
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_type));
// DW.AT.byte_size, DW.FORM.udata
const abi_size = ty.abiSize(mod);
const abi_size = ty.abiSize(pt);
try leb128.writeUleb128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(21);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member));
@ -208,14 +209,14 @@ pub const DeclState = struct {
dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(index));
// DW.AT.data_member_location, DW.FORM.udata
const offset = abi_size - payload_ty.abiSize(mod);
const offset = abi_size - payload_ty.abiSize(pt);
try leb128.writeUleb128(dbg_info_buffer.writer(), offset);
// DW.AT.structure_type delimit children
try dbg_info_buffer.append(0);
}
},
.Pointer => {
if (ty.isSlice(mod)) {
if (ty.isSlice(zcu)) {
// Slices are structs: struct { .ptr = *, .len = N }
const ptr_bits = target.ptrBitWidth();
const ptr_bytes: u8 = @intCast(@divExact(ptr_bits, 8));
@ -223,9 +224,9 @@ pub const DeclState = struct {
try dbg_info_buffer.ensureUnusedCapacity(2);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_type));
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod));
try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(21);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member));
@ -235,7 +236,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
const ptr_ty = ty.slicePtrFieldType(mod);
const ptr_ty = ty.slicePtrFieldType(zcu);
try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(index));
// DW.AT.data_member_location, DW.FORM.udata
dbg_info_buffer.appendAssumeCapacity(0);
@ -258,19 +259,19 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(index));
try self.addTypeRelocGlobal(atom_index, ty.childType(zcu), @intCast(index));
}
},
.Array => {
// DW.AT.array_type
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.array_type));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.ensureUnusedCapacity(9);
dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(index));
try self.addTypeRelocGlobal(atom_index, ty.childType(zcu), @intCast(index));
// DW.AT.subrange_type
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.array_dim));
// DW.AT.type, DW.FORM.ref4
@ -278,7 +279,7 @@ pub const DeclState = struct {
dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(index));
// DW.AT.count, DW.FORM.udata
const len = ty.arrayLenIncludingSentinel(mod);
const len = ty.arrayLenIncludingSentinel(pt.zcu);
try leb128.writeUleb128(dbg_info_buffer.writer(), len);
// DW.AT.array_type delimit children
try dbg_info_buffer.append(0);
@ -287,13 +288,13 @@ pub const DeclState = struct {
// DW.AT.structure_type
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_type));
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod));
try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));

blk: {
switch (ip.indexToKey(ty.ip_index)) {
.anon_struct_type => |fields| {
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});

for (fields.types.get(ip), 0..) |field_ty, field_index| {
// DW.AT.member
@ -305,14 +306,14 @@ pub const DeclState = struct {
try dbg_info_buffer.appendNTimes(0, 4);
try self.addTypeRelocGlobal(atom_index, Type.fromInterned(field_ty), @intCast(index));
// DW.AT.data_member_location, DW.FORM.udata
const field_off = ty.structFieldOffset(field_index, mod);
const field_off = ty.structFieldOffset(field_index, pt);
try leb128.writeUleb128(dbg_info_buffer.writer(), field_off);
}
},
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
// DW.AT.name, DW.FORM.string
try ty.print(dbg_info_buffer.writer(), mod);
try ty.print(dbg_info_buffer.writer(), pt);
try dbg_info_buffer.append(0);

if (struct_type.layout == .@"packed") {
@ -322,7 +323,7 @@ pub const DeclState = struct {

if (struct_type.isTuple(ip)) {
for (struct_type.field_types.get(ip), struct_type.offsets.get(ip), 0..) |field_ty, field_off, field_index| {
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
// DW.AT.member
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_member));
// DW.AT.name, DW.FORM.string
@ -340,7 +341,7 @@ pub const DeclState = struct {
struct_type.field_types.get(ip),
struct_type.offsets.get(ip),
) |field_name, field_ty, field_off| {
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
const field_name_slice = field_name.toSlice(ip);
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(field_name_slice.len + 2);
@ -367,9 +368,9 @@ pub const DeclState = struct {
// DW.AT.enumeration_type
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.enum_type));
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod));
try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
// DW.AT.name, DW.FORM.string
try ty.print(dbg_info_buffer.writer(), mod);
try ty.print(dbg_info_buffer.writer(), pt);
try dbg_info_buffer.append(0);

const enum_type = ip.loadEnumType(ty.ip_index);
@ -386,8 +387,8 @@ pub const DeclState = struct {
const value = enum_type.values.get(ip)[field_i];
// TODO do not assume a 64bit enum value - could be bigger.
// See https://github.com/ziglang/zig/issues/645
const field_int_val = try Value.fromInterned(value).intFromEnum(ty, mod);
break :value @bitCast(field_int_val.toSignedInt(mod));
const field_int_val = try Value.fromInterned(value).intFromEnum(ty, pt);
break :value @bitCast(field_int_val.toSignedInt(pt));
};
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian);
}
@ -396,8 +397,8 @@ pub const DeclState = struct {
try dbg_info_buffer.append(0);
},
.Union => {
const union_obj = mod.typeToUnion(ty).?;
const layout = mod.getUnionLayout(union_obj);
const union_obj = zcu.typeToUnion(ty).?;
const layout = pt.getUnionLayout(union_obj);
const payload_offset = if (layout.tag_align.compare(.gte, layout.payload_align)) layout.tag_size else 0;
const tag_offset = if (layout.tag_align.compare(.gte, layout.payload_align)) 0 else layout.payload_size;
// TODO this is temporary to match current state of unions in Zig - we don't yet have
@ -410,7 +411,7 @@ pub const DeclState = struct {
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeUleb128(dbg_info_buffer.writer(), layout.abi_size);
// DW.AT.name, DW.FORM.string
try ty.print(dbg_info_buffer.writer(), mod);
try ty.print(dbg_info_buffer.writer(), pt);
try dbg_info_buffer.append(0);

// DW.AT.member
@ -435,12 +436,12 @@ pub const DeclState = struct {
if (is_tagged) {
try dbg_info_buffer.writer().print("AnonUnion\x00", .{});
} else {
try ty.print(dbg_info_buffer.writer(), mod);
try ty.print(dbg_info_buffer.writer(), pt);
try dbg_info_buffer.append(0);
}

for (union_obj.field_types.get(ip), union_obj.loadTagType(ip).names.get(ip)) |field_ty, field_name| {
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
const field_name_slice = field_name.toSlice(ip);
// DW.AT.member
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_member));
@ -474,25 +475,25 @@ pub const DeclState = struct {
try dbg_info_buffer.append(0);
}
},
.ErrorSet => try addDbgInfoErrorSet(mod, ty, target, &self.dbg_info),
.ErrorSet => try addDbgInfoErrorSet(pt, ty, target, &self.dbg_info),
.ErrorUnion => {
const error_ty = ty.errorUnionSet(mod);
const payload_ty = ty.errorUnionPayload(mod);
const payload_align = if (payload_ty.isNoReturn(mod)) .none else payload_ty.abiAlignment(mod);
const error_align = Type.anyerror.abiAlignment(mod);
const abi_size = ty.abiSize(mod);
const payload_off = if (error_align.compare(.gte, payload_align)) Type.anyerror.abiSize(mod) else 0;
const error_off = if (error_align.compare(.gte, payload_align)) 0 else payload_ty.abiSize(mod);
const error_ty = ty.errorUnionSet(zcu);
const payload_ty = ty.errorUnionPayload(zcu);
const payload_align = if (payload_ty.isNoReturn(zcu)) .none else payload_ty.abiAlignment(pt);
const error_align = Type.anyerror.abiAlignment(pt);
const abi_size = ty.abiSize(pt);
const payload_off = if (error_align.compare(.gte, payload_align)) Type.anyerror.abiSize(pt) else 0;
const error_off = if (error_align.compare(.gte, payload_align)) 0 else payload_ty.abiSize(pt);

// DW.AT.structure_type
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_type));
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeUleb128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
try ty.print(dbg_info_buffer.writer(), mod);
try ty.print(dbg_info_buffer.writer(), pt);
try dbg_info_buffer.append(0);

if (!payload_ty.isNoReturn(mod)) {
if (!payload_ty.isNoReturn(zcu)) {
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(11);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member));
@ -526,7 +527,7 @@ pub const DeclState = struct {
try dbg_info_buffer.append(0);
},
else => {
log.debug("TODO implement .debug_info for type '{}'", .{ty.fmt(self.mod)});
log.debug("TODO implement .debug_info for type '{}'", .{ty.fmt(pt)});
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.zero_bit_type));
},
}
@ -555,6 +556,7 @@ pub const DeclState = struct {
owner_decl: InternPool.DeclIndex,
loc: DbgInfoLoc,
) error{OutOfMemory}!void {
const pt = self.pt;
const dbg_info = &self.dbg_info;
const atom_index = self.di_atom_decls.get(owner_decl).?;
const name_with_null = name.ptr[0 .. name.len + 1];
@ -580,9 +582,9 @@ pub const DeclState = struct {
}
},
.register_pair => |regs| {
const reg_bits = self.mod.getTarget().ptrBitWidth();
const reg_bits = pt.zcu.getTarget().ptrBitWidth();
const reg_bytes: u8 = @intCast(@divExact(reg_bits, 8));
const abi_size = ty.abiSize(self.mod);
const abi_size = ty.abiSize(pt);
try dbg_info.ensureUnusedCapacity(10);
dbg_info.appendAssumeCapacity(@intFromEnum(AbbrevCode.parameter));
// DW.AT.location, DW.FORM.exprloc
@ -675,10 +677,10 @@ pub const DeclState = struct {
const name_with_null = name.ptr[0 .. name.len + 1];
try dbg_info.append(@intFromEnum(AbbrevCode.variable));
const gpa = self.dwarf.allocator;
const mod = self.mod;
const target = mod.getTarget();
const pt = self.pt;
const target = pt.zcu.getTarget();
const endian = target.cpu.arch.endian();
const child_ty = if (is_ptr) ty.childType(mod) else ty;
const child_ty = if (is_ptr) ty.childType(pt.zcu) else ty;

switch (loc) {
.register => |reg| {
@ -701,9 +703,9 @@ pub const DeclState = struct {
},

.register_pair => |regs| {
const reg_bits = self.mod.getTarget().ptrBitWidth();
const reg_bits = pt.zcu.getTarget().ptrBitWidth();
const reg_bytes: u8 = @intCast(@divExact(reg_bits, 8));
const abi_size = child_ty.abiSize(self.mod);
const abi_size = child_ty.abiSize(pt);
try dbg_info.ensureUnusedCapacity(9);
// DW.AT.location, DW.FORM.exprloc
var expr_len = std.io.countingWriter(std.io.null_writer);
@ -829,9 +831,9 @@ pub const DeclState = struct {
const fixup = dbg_info.items.len;
dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
1,
if (child_ty.isSignedInt(mod)) DW.OP.consts else DW.OP.constu,
if (child_ty.isSignedInt(pt.zcu)) DW.OP.consts else DW.OP.constu,
});
if (child_ty.isSignedInt(mod)) {
if (child_ty.isSignedInt(pt.zcu)) {
try leb128.writeIleb128(dbg_info.writer(), @as(i64, @bitCast(x)));
} else {
try leb128.writeUleb128(dbg_info.writer(), x);
@ -844,7 +846,7 @@ pub const DeclState = struct {
// DW.AT.location, DW.FORM.exprloc
// uleb128(exprloc_len)
// DW.OP.implicit_value uleb128(len_of_bytes) bytes
const abi_size: u32 = @intCast(child_ty.abiSize(mod));
const abi_size: u32 = @intCast(child_ty.abiSize(self.pt));
var implicit_value_len = std.ArrayList(u8).init(gpa);
defer implicit_value_len.deinit();
try leb128.writeUleb128(implicit_value_len.writer(), abi_size);
@ -934,22 +936,23 @@ pub const DeclState = struct {
}

pub fn setInlineFunc(self: *DeclState, func: InternPool.Index) error{OutOfMemory}!void {
const zcu = self.pt.zcu;
if (self.dbg_line_func == func) return;

try self.dbg_line.ensureUnusedCapacity((1 + 4) + (1 + 5));

const old_func_info = self.mod.funcInfo(self.dbg_line_func);
const new_func_info = self.mod.funcInfo(func);
const old_func_info = zcu.funcInfo(self.dbg_line_func);
const new_func_info = zcu.funcInfo(func);

const old_file = try self.dwarf.addDIFile(self.mod, old_func_info.owner_decl);
const new_file = try self.dwarf.addDIFile(self.mod, new_func_info.owner_decl);
const old_file = try self.dwarf.addDIFile(zcu, old_func_info.owner_decl);
const new_file = try self.dwarf.addDIFile(zcu, new_func_info.owner_decl);
if (old_file != new_file) {
self.dbg_line.appendAssumeCapacity(DW.LNS.set_file);
leb128.writeUnsignedFixed(4, self.dbg_line.addManyAsArrayAssumeCapacity(4), new_file);
}

const old_src_line: i33 = self.mod.declPtr(old_func_info.owner_decl).navSrcLine(self.mod);
const new_src_line: i33 = self.mod.declPtr(new_func_info.owner_decl).navSrcLine(self.mod);
const old_src_line: i33 = zcu.declPtr(old_func_info.owner_decl).navSrcLine(zcu);
const new_src_line: i33 = zcu.declPtr(new_func_info.owner_decl).navSrcLine(zcu);
if (new_src_line != old_src_line) {
self.dbg_line.appendAssumeCapacity(DW.LNS.advance_line);
leb128.writeSignedFixed(5, self.dbg_line.addManyAsArrayAssumeCapacity(5), new_src_line - old_src_line);
@ -1074,19 +1077,19 @@ pub fn deinit(self: *Dwarf) void {

/// Initializes Decl's state and its matching output buffers.
/// Call this before `commitDeclState`.
pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclIndex) !DeclState {
pub fn initDeclState(self: *Dwarf, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !DeclState {
const tracy = trace(@src());
defer tracy.end();

const decl = mod.declPtr(decl_index);
const decl_linkage_name = try decl.fullyQualifiedName(mod);
const decl = pt.zcu.declPtr(decl_index);
const decl_linkage_name = try decl.fullyQualifiedName(pt.zcu);

log.debug("initDeclState {}{*}", .{ decl_linkage_name.fmt(&mod.intern_pool), decl });
log.debug("initDeclState {}{*}", .{ decl_linkage_name.fmt(&pt.zcu.intern_pool), decl });

const gpa = self.allocator;
var decl_state: DeclState = .{
.dwarf = self,
.mod = mod,
.pt = pt,
.di_atom_decls = &self.di_atom_decls,
.dbg_line_func = undefined,
.dbg_line = std.ArrayList(u8).init(gpa),
@ -1105,7 +1108,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde

assert(decl.has_tv);

switch (decl.typeOf(mod).zigTypeTag(mod)) {
switch (decl.typeOf(pt.zcu).zigTypeTag(pt.zcu)) {
.Fn => {
_ = try self.getOrCreateAtomForDecl(.src_fn, decl_index);

@ -1114,13 +1117,13 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde
try dbg_line_buffer.ensureTotalCapacity((3 + ptr_width_bytes) + (1 + 4) + (1 + 4) + (1 + 5) + 1);

decl_state.dbg_line_func = decl.val.toIntern();
const func = decl.val.getFunction(mod).?;
const func = decl.val.getFunction(pt.zcu).?;
log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{
decl.navSrcLine(mod),
decl.navSrcLine(pt.zcu),
func.lbrace_line,
func.rbrace_line,
});
const line: u28 = @intCast(decl.navSrcLine(mod) + func.lbrace_line);
const line: u28 = @intCast(decl.navSrcLine(pt.zcu) + func.lbrace_line);

dbg_line_buffer.appendSliceAssumeCapacity(&.{
DW.LNS.extended_op,
@ -1142,7 +1145,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde
assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len);
// Once we support more than one source file, this will have the ability to be more
// than one possible value.
const file_index = try self.addDIFile(mod, decl_index);
const file_index = try self.addDIFile(pt.zcu, decl_index);
leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index);

dbg_line_buffer.appendAssumeCapacity(DW.LNS.set_column);
@ -1153,13 +1156,13 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde
dbg_line_buffer.appendAssumeCapacity(DW.LNS.copy);

// .debug_info subprogram
const decl_name_slice = decl.name.toSlice(&mod.intern_pool);
const decl_linkage_name_slice = decl_linkage_name.toSlice(&mod.intern_pool);
const decl_name_slice = decl.name.toSlice(&pt.zcu.intern_pool);
const decl_linkage_name_slice = decl_linkage_name.toSlice(&pt.zcu.intern_pool);
try dbg_info_buffer.ensureUnusedCapacity(1 + ptr_width_bytes + 4 + 4 +
(decl_name_slice.len + 1) + (decl_linkage_name_slice.len + 1));

const fn_ret_type = decl.typeOf(mod).fnReturnType(mod);
const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(mod);
const fn_ret_type = decl.typeOf(pt.zcu).fnReturnType(pt.zcu);
const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(pt);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(
@as(AbbrevCode, if (fn_ret_has_bits) .subprogram else .subprogram_retvoid),
));
@ -1191,7 +1194,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde

pub fn commitDeclState(
self: *Dwarf,
zcu: *Module,
pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
sym_addr: u64,
sym_size: u64,
@ -1201,6 +1204,7 @@ pub fn commitDeclState(
defer tracy.end();

const gpa = self.allocator;
const zcu = pt.zcu;
const decl = zcu.declPtr(decl_index);
const ip = &zcu.intern_pool;
const namespace = zcu.namespacePtr(decl.src_namespace);
@ -1432,7 +1436,7 @@ pub fn commitDeclState(
if (ip.isErrorSetType(ty.toIntern())) continue;

symbol.offset = @intCast(dbg_info_buffer.items.len);
try decl_state.addDbgInfoType(zcu, di_atom_index, ty);
try decl_state.addDbgInfoType(pt, di_atom_index, ty);
}
}

@ -1457,7 +1461,7 @@ pub fn commitDeclState(
reloc.offset,
value,
reloc_target,
ty.fmt(zcu),
ty.fmt(pt),
});
mem.writeInt(
u32,
@ -1691,7 +1695,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []cons
}
}

pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclIndex) !void {
pub fn updateDeclLineNumber(self: *Dwarf, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void {
const tracy = trace(@src());
defer tracy.end();

@ -1699,14 +1703,14 @@ pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: InternPool.D
const atom = self.getAtom(.src_fn, atom_index);
if (atom.len == 0) return;

const decl = mod.declPtr(decl_index);
const func = decl.val.getFunction(mod).?;
const decl = zcu.declPtr(decl_index);
const func = decl.val.getFunction(zcu).?;
log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{
decl.navSrcLine(mod),
decl.navSrcLine(zcu),
func.lbrace_line,
func.rbrace_line,
});
const line: u28 = @intCast(decl.navSrcLine(mod) + func.lbrace_line);
const line: u28 = @intCast(decl.navSrcLine(zcu) + func.lbrace_line);
var data: [4]u8 = undefined;
leb128.writeUnsignedFixed(4, &data, line);

@ -1969,7 +1973,7 @@ fn dbgInfoHeaderBytes(self: *Dwarf) usize {
return 120;
}

pub fn writeDbgInfoHeader(self: *Dwarf, zcu: *Module, low_pc: u64, high_pc: u64) !void {
pub fn writeDbgInfoHeader(self: *Dwarf, zcu: *Zcu, low_pc: u64, high_pc: u64) !void {
// If this value is null it means there is an error in the module;
// leave debug_info_header_dirty=true.
const first_dbg_info_off = self.getDebugInfoOff() orelse return;
@ -2058,14 +2062,14 @@ pub fn writeDbgInfoHeader(self: *Dwarf, zcu: *Module, low_pc: u64, high_pc: u64)
}
}

fn resolveCompilationDir(module: *Module, buffer: *[std.fs.max_path_bytes]u8) []const u8 {
fn resolveCompilationDir(zcu: *Zcu, buffer: *[std.fs.max_path_bytes]u8) []const u8 {
// We fully resolve all paths at this point to avoid lack of source line info in stack
// traces or lack of debugging information which, if relative paths were used, would
// be very location dependent.
// TODO: the only concern I have with this is WASI as either host or target, should
// we leave the paths as relative then?
const root_dir_path = module.root_mod.root.root_dir.path orelse ".";
const sub_path = module.root_mod.root.sub_path;
const root_dir_path = zcu.root_mod.root.root_dir.path orelse ".";
const sub_path = zcu.root_mod.root.sub_path;
const realpath = if (std.fs.path.isAbsolute(root_dir_path)) r: {
@memcpy(buffer[0..root_dir_path.len], root_dir_path);
break :r root_dir_path;
@ -2682,7 +2686,7 @@ fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
return actual_size +| (actual_size / ideal_factor);
}

pub fn flushModule(self: *Dwarf, module: *Module) !void {
pub fn flushModule(self: *Dwarf, pt: Zcu.PerThread) !void {
const comp = self.bin_file.comp;
const target = comp.root_mod.resolved_target.result;

@ -2694,9 +2698,9 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {

var dbg_info_buffer = std.ArrayList(u8).init(arena);
try addDbgInfoErrorSetNames(
module,
pt,
Type.anyerror,
module.global_error_set.keys(),
pt.zcu.global_error_set.keys(),
target,
&dbg_info_buffer,
);
@ -2759,9 +2763,9 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
}
}

fn addDIFile(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclIndex) !u28 {
const decl = mod.declPtr(decl_index);
const file_scope = decl.getFileScope(mod);
fn addDIFile(self: *Dwarf, zcu: *Zcu, decl_index: InternPool.DeclIndex) !u28 {
const decl = zcu.declPtr(decl_index);
const file_scope = decl.getFileScope(zcu);
const gop = try self.di_files.getOrPut(self.allocator, file_scope);
if (!gop.found_existing) {
switch (self.bin_file.tag) {
@ -2827,16 +2831,16 @@ fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct {
}

fn addDbgInfoErrorSet(
mod: *Module,
pt: Zcu.PerThread,
ty: Type,
target: std.Target,
dbg_info_buffer: *std.ArrayList(u8),
) !void {
return addDbgInfoErrorSetNames(mod, ty, ty.errorSetNames(mod).get(&mod.intern_pool), target, dbg_info_buffer);
return addDbgInfoErrorSetNames(pt, ty, ty.errorSetNames(pt.zcu).get(&pt.zcu.intern_pool), target, dbg_info_buffer);
}

fn addDbgInfoErrorSetNames(
mod: *Module,
pt: Zcu.PerThread,
/// Used for printing the type name only.
ty: Type,
error_names: []const InternPool.NullTerminatedString,
@ -2848,10 +2852,10 @@ fn addDbgInfoErrorSetNames(
// DW.AT.enumeration_type
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.enum_type));
// DW.AT.byte_size, DW.FORM.udata
const abi_size = Type.anyerror.abiSize(mod);
const abi_size = Type.anyerror.abiSize(pt);
try leb128.writeUleb128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
try ty.print(dbg_info_buffer.writer(), mod);
try ty.print(dbg_info_buffer.writer(), pt);
try dbg_info_buffer.append(0);

// DW.AT.enumerator
@ -2865,8 +2869,8 @@ fn addDbgInfoErrorSetNames(
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian);

for (error_names) |error_name| {
const int = try mod.getErrorValue(error_name);
const error_name_slice = error_name.toSlice(&mod.intern_pool);
const int = try pt.zcu.getErrorValue(error_name);
const error_name_slice = error_name.toSlice(&pt.zcu.intern_pool);
// DW.AT.enumerator
try dbg_info_buffer.ensureUnusedCapacity(error_name_slice.len + 2 + @sizeOf(u64));
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.enum_variant));
@ -2965,8 +2969,6 @@ const LinkBlock = File.LinkBlock;
const LinkFn = File.LinkFn;
const LinkerLoad = @import("../codegen.zig").LinkerLoad;
const Zcu = @import("../Zcu.zig");
/// Deprecated.
const Module = Zcu;
const InternPool = @import("../InternPool.zig");
const StringTable = @import("StringTable.zig");
const Type = @import("../Type.zig");

@ -550,11 +550,12 @@ pub fn getDeclVAddr(self: *Elf, decl_index: InternPool.DeclIndex, reloc_info: li

pub fn lowerAnonDecl(
self: *Elf,
pt: Zcu.PerThread,
decl_val: InternPool.Index,
explicit_alignment: InternPool.Alignment,
src_loc: Module.LazySrcLoc,
) !codegen.Result {
return self.zigObjectPtr().?.lowerAnonDecl(self, decl_val, explicit_alignment, src_loc);
return self.zigObjectPtr().?.lowerAnonDecl(self, pt, decl_val, explicit_alignment, src_loc);
}

pub fn getAnonDeclVAddr(self: *Elf, decl_val: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 {
@ -1064,15 +1065,15 @@ pub fn markDirty(self: *Elf, shdr_index: u32) void {
}
}

pub fn flush(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const use_lld = build_options.have_llvm and self.base.comp.config.use_lld;
if (use_lld) {
return self.linkWithLLD(arena, prog_node);
return self.linkWithLLD(arena, tid, prog_node);
}
try self.flushModule(arena, prog_node);
try self.flushModule(arena, tid, prog_node);
}

pub fn flushModule(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();

@ -1103,7 +1104,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) l
// --verbose-link
if (comp.verbose_link) try self.dumpArgv(comp);

if (self.zigObjectPtr()) |zig_object| try zig_object.flushModule(self);
if (self.zigObjectPtr()) |zig_object| try zig_object.flushModule(self, tid);
if (self.base.isStaticLib()) return relocatable.flushStaticLib(self, comp, module_obj_path);
if (self.base.isObject()) return relocatable.flushObject(self, comp, module_obj_path);

@ -2146,7 +2147,7 @@ fn scanRelocs(self: *Elf) !void {
}
}

fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) !void {
fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
const tracy = trace(@src());
defer tracy.end();

@ -2159,7 +2160,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) !void
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (comp.module != null) blk: {
try self.flushModule(arena, prog_node);
try self.flushModule(arena, tid, prog_node);

if (fs.path.dirname(full_out_path)) |dirname| {
break :blk try fs.path.join(arena, &.{ dirname, self.base.zcu_object_sub_path.? });
@ -2983,41 +2984,41 @@ pub fn freeDecl(self: *Elf, decl_index: InternPool.DeclIndex) void {
return self.zigObjectPtr().?.freeDecl(self, decl_index);
}

pub fn updateFunc(self: *Elf, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(self: *Elf, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness);
return self.zigObjectPtr().?.updateFunc(self, mod, func_index, air, liveness);
if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(pt, func_index, air, liveness);
return self.zigObjectPtr().?.updateFunc(self, pt, func_index, air, liveness);
}

pub fn updateDecl(
self: *Elf,
mod: *Module,
pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
) link.File.UpdateDeclError!void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index);
return self.zigObjectPtr().?.updateDecl(self, mod, decl_index);
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(pt, decl_index);
return self.zigObjectPtr().?.updateDecl(self, pt, decl_index);
}

pub fn lowerUnnamedConst(self: *Elf, val: Value, decl_index: InternPool.DeclIndex) !u32 {
return self.zigObjectPtr().?.lowerUnnamedConst(self, val, decl_index);
pub fn lowerUnnamedConst(self: *Elf, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 {
return self.zigObjectPtr().?.lowerUnnamedConst(self, pt, val, decl_index);
}

pub fn updateExports(
self: *Elf,
mod: *Module,
pt: Zcu.PerThread,
exported: Module.Exported,
export_indices: []const u32,
) link.File.UpdateExportsError!void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices);
return self.zigObjectPtr().?.updateExports(self, mod, exported, export_indices);
if (self.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, export_indices);
return self.zigObjectPtr().?.updateExports(self, pt, exported, export_indices);
}

pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: InternPool.DeclIndex) !void {

@ -158,16 +158,17 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void {
}
}

pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void {
pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void {
// Handle any lazy symbols that were emitted by incremental compilation.
if (self.lazy_syms.getPtr(.none)) |metadata| {
const zcu = elf_file.base.comp.module.?;
const pt: Zcu.PerThread = .{ .zcu = elf_file.base.comp.module.?, .tid = tid };

// Most lazy symbols can be updated on first use, but
// anyerror needs to wait for everything to be flushed.
if (metadata.text_state != .unused) self.updateLazySymbol(
elf_file,
link.File.LazySymbol.initDecl(.code, null, zcu),
pt,
link.File.LazySymbol.initDecl(.code, null, pt.zcu),
metadata.text_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
@ -175,7 +176,8 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void {
};
if (metadata.rodata_state != .unused) self.updateLazySymbol(
elf_file,
link.File.LazySymbol.initDecl(.const_data, null, zcu),
pt,
link.File.LazySymbol.initDecl(.const_data, null, pt.zcu),
metadata.rodata_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
@ -188,8 +190,8 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void {
}

if (self.dwarf) |*dw| {
const zcu = elf_file.base.comp.module.?;
try dw.flushModule(zcu);
const pt: Zcu.PerThread = .{ .zcu = elf_file.base.comp.module.?, .tid = tid };
try dw.flushModule(pt);

// TODO I need to re-think how to handle ZigObject's debug sections AND debug sections
// extracted from input object files correctly.
@ -202,7 +204,7 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void {
const text_shdr = elf_file.shdrs.items[elf_file.zig_text_section_index.?];
const low_pc = text_shdr.sh_addr;
const high_pc = text_shdr.sh_addr + text_shdr.sh_size;
try dw.writeDbgInfoHeader(zcu, low_pc, high_pc);
try dw.writeDbgInfoHeader(pt.zcu, low_pc, high_pc);
self.debug_info_header_dirty = false;
}

@ -684,6 +686,7 @@ pub fn getAnonDeclVAddr(
pub fn lowerAnonDecl(
self: *ZigObject,
elf_file: *Elf,
pt: Zcu.PerThread,
decl_val: InternPool.Index,
explicit_alignment: InternPool.Alignment,
src_loc: Module.LazySrcLoc,
@ -692,7 +695,7 @@ pub fn lowerAnonDecl(
const mod = elf_file.base.comp.module.?;
const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
const decl_alignment = switch (explicit_alignment) {
.none => ty.abiAlignment(mod),
.none => ty.abiAlignment(pt),
else => explicit_alignment,
};
if (self.anon_decls.get(decl_val)) |metadata| {
@ -708,6 +711,7 @@ pub fn lowerAnonDecl(
}) catch unreachable;
const res = self.lowerConst(
elf_file,
pt,
name,
val,
decl_alignment,
@ -733,10 +737,11 @@ pub fn lowerAnonDecl(
pub fn getOrCreateMetadataForLazySymbol(
self: *ZigObject,
elf_file: *Elf,
pt: Zcu.PerThread,
lazy_sym: link.File.LazySymbol,
) !Symbol.Index {
const gpa = elf_file.base.comp.gpa;
const mod = elf_file.base.comp.module.?;
const mod = pt.zcu;
const gpa = mod.gpa;
const gop = try self.lazy_syms.getOrPut(gpa, lazy_sym.getDecl(mod));
errdefer _ = if (!gop.found_existing) self.lazy_syms.pop();
if (!gop.found_existing) gop.value_ptr.* = .{};
@ -766,7 +771,7 @@ pub fn getOrCreateMetadataForLazySymbol(
metadata.state.* = .pending_flush;
const symbol_index = metadata.symbol_index.*;
// anyerror needs to be deferred until flushModule
if (lazy_sym.getDecl(mod) != .none) try self.updateLazySymbol(elf_file, lazy_sym, symbol_index);
if (lazy_sym.getDecl(mod) != .none) try self.updateLazySymbol(elf_file, pt, lazy_sym, symbol_index);
return symbol_index;
}

@ -893,6 +898,7 @@ fn getDeclShdrIndex(
fn updateDeclCode(
self: *ZigObject,
elf_file: *Elf,
pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
sym_index: Symbol.Index,
shdr_index: u32,
@ -900,13 +906,13 @@ fn updateDeclCode(
stt_bits: u8,
) !void {
const gpa = elf_file.base.comp.gpa;
const mod = elf_file.base.comp.module.?;
const mod = pt.zcu;
const decl = mod.declPtr(decl_index);
const decl_name = try decl.fullyQualifiedName(mod);

log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl });

const required_alignment = decl.getAlignment(mod).max(
const required_alignment = decl.getAlignment(pt).max(
target_util.minFunctionAlignment(mod.getTarget()),
);

@ -994,19 +1000,20 @@ fn updateDeclCode(
fn updateTlv(
self: *ZigObject,
elf_file: *Elf,
pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
sym_index: Symbol.Index,
shndx: u32,
code: []const u8,
) !void {
const gpa = elf_file.base.comp.gpa;
const mod = elf_file.base.comp.module.?;
const mod = pt.zcu;
const gpa = mod.gpa;
const decl = mod.declPtr(decl_index);
const decl_name = try decl.fullyQualifiedName(mod);

log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&mod.intern_pool), decl });

const required_alignment = decl.getAlignment(mod);
const required_alignment = decl.getAlignment(pt);

const sym = elf_file.symbol(sym_index);
const esym = &self.local_esyms.items(.elf_sym)[sym.esym_index];
@ -1048,7 +1055,7 @@ fn updateTlv(
pub fn updateFunc(
self: *ZigObject,
elf_file: *Elf,
mod: *Module,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
@ -1056,6 +1063,7 @@ pub fn updateFunc(
const tracy = trace(@src());
defer tracy.end();

const mod = pt.zcu;
const gpa = elf_file.base.comp.gpa;
const func = mod.funcInfo(func_index);
const decl_index = func.owner_decl;
@ -1068,29 +1076,19 @@ pub fn updateFunc(
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();

var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null;
var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(pt, decl_index) else null;
defer if (decl_state) |*ds| ds.deinit();

const res = if (decl_state) |*ds|
try codegen.generateFunction(
&elf_file.base,
decl.navSrcLoc(mod),
func_index,
air,
liveness,
&code_buffer,
.{ .dwarf = ds },
)
else
try codegen.generateFunction(
&elf_file.base,
decl.navSrcLoc(mod),
func_index,
air,
liveness,
&code_buffer,
.none,
);
const res = try codegen.generateFunction(
&elf_file.base,
pt,
decl.navSrcLoc(mod),
func_index,
air,
liveness,
&code_buffer,
if (decl_state) |*ds| .{ .dwarf = ds } else .none,
);

const code = switch (res) {
.ok => code_buffer.items,
@ -1102,12 +1100,12 @@ pub fn updateFunc(
};

const shndx = try self.getDeclShdrIndex(elf_file, decl, code);
try self.updateDeclCode(elf_file, decl_index, sym_index, shndx, code, elf.STT_FUNC);
try self.updateDeclCode(elf_file, pt, decl_index, sym_index, shndx, code, elf.STT_FUNC);

if (decl_state) |*ds| {
const sym = elf_file.symbol(sym_index);
try self.dwarf.?.commitDeclState(
mod,
pt,
decl_index,
@intCast(sym.address(.{}, elf_file)),
sym.atom(elf_file).?.size,
@ -1121,12 +1119,13 @@ pub fn updateFunc(
pub fn updateDecl(
self: *ZigObject,
elf_file: *Elf,
mod: *Module,
pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
) link.File.UpdateDeclError!void {
const tracy = trace(@src());
defer tracy.end();

const mod = pt.zcu;
const decl = mod.declPtr(decl_index);

if (decl.val.getExternFunc(mod)) |_| {
@ -1150,19 +1149,19 @@ pub fn updateDecl(
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();

var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null;
var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(pt, decl_index) else null;
defer if (decl_state) |*ds| ds.deinit();

// TODO implement .debug_info for global variables
const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
const res = if (decl_state) |*ds|
try codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .{
try codegen.generateSymbol(&elf_file.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, .{
.dwarf = ds,
}, .{
.parent_atom_index = sym_index,
})
else
try codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{
try codegen.generateSymbol(&elf_file.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{
.parent_atom_index = sym_index,
});

@ -1177,14 +1176,14 @@ pub fn updateDecl(
|
||||
|
||||
const shndx = try self.getDeclShdrIndex(elf_file, decl, code);
|
||||
if (elf_file.shdrs.items[shndx].sh_flags & elf.SHF_TLS != 0)
|
||||
try self.updateTlv(elf_file, decl_index, sym_index, shndx, code)
|
||||
try self.updateTlv(elf_file, pt, decl_index, sym_index, shndx, code)
|
||||
else
|
||||
try self.updateDeclCode(elf_file, decl_index, sym_index, shndx, code, elf.STT_OBJECT);
|
||||
try self.updateDeclCode(elf_file, pt, decl_index, sym_index, shndx, code, elf.STT_OBJECT);
|
||||
|
||||
if (decl_state) |*ds| {
|
||||
const sym = elf_file.symbol(sym_index);
|
||||
try self.dwarf.?.commitDeclState(
|
||||
mod,
|
||||
pt,
|
||||
decl_index,
|
||||
@intCast(sym.address(.{}, elf_file)),
|
||||
sym.atom(elf_file).?.size,
|
||||
@ -1198,11 +1197,12 @@ pub fn updateDecl(
|
||||
fn updateLazySymbol(
|
||||
self: *ZigObject,
|
||||
elf_file: *Elf,
|
||||
pt: Zcu.PerThread,
|
||||
sym: link.File.LazySymbol,
|
||||
symbol_index: Symbol.Index,
|
||||
) !void {
|
||||
const gpa = elf_file.base.comp.gpa;
|
||||
const mod = elf_file.base.comp.module.?;
|
||||
const mod = pt.zcu;
|
||||
const gpa = mod.gpa;
|
||||
|
||||
var required_alignment: InternPool.Alignment = .none;
|
||||
var code_buffer = std.ArrayList(u8).init(gpa);
|
||||
@ -1211,7 +1211,7 @@ fn updateLazySymbol(
|
||||
const name_str_index = blk: {
|
||||
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{
|
||||
@tagName(sym.kind),
|
||||
sym.ty.fmt(mod),
|
||||
sym.ty.fmt(pt),
|
||||
});
|
||||
defer gpa.free(name);
|
||||
break :blk try self.strtab.insert(gpa, name);
|
||||
@ -1220,6 +1220,7 @@ fn updateLazySymbol(
|
||||
const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded;
|
||||
const res = try codegen.generateLazySymbol(
|
||||
&elf_file.base,
|
||||
pt,
|
||||
src,
|
||||
sym,
|
||||
&required_alignment,
|
||||
@ -1273,6 +1274,7 @@ fn updateLazySymbol(
|
||||
pub fn lowerUnnamedConst(
|
||||
self: *ZigObject,
|
||||
elf_file: *Elf,
|
||||
pt: Zcu.PerThread,
|
||||
val: Value,
|
||||
decl_index: InternPool.DeclIndex,
|
||||
) !u32 {
|
||||
@ -1291,9 +1293,10 @@ pub fn lowerUnnamedConst(
|
||||
const ty = val.typeOf(mod);
|
||||
const sym_index = switch (try self.lowerConst(
|
||||
elf_file,
|
||||
pt,
|
||||
name,
|
||||
val,
|
||||
ty.abiAlignment(mod),
|
||||
ty.abiAlignment(pt),
|
||||
elf_file.zig_data_rel_ro_section_index.?,
|
||||
decl.navSrcLoc(mod),
|
||||
)) {
|
||||
@ -1318,20 +1321,21 @@ const LowerConstResult = union(enum) {
|
||||
fn lowerConst(
|
||||
self: *ZigObject,
|
||||
elf_file: *Elf,
|
||||
pt: Zcu.PerThread,
|
||||
name: []const u8,
|
||||
val: Value,
|
||||
required_alignment: InternPool.Alignment,
|
||||
output_section_index: u32,
|
||||
src_loc: Module.LazySrcLoc,
|
||||
) !LowerConstResult {
|
||||
const gpa = elf_file.base.comp.gpa;
|
||||
const gpa = pt.zcu.gpa;
|
||||
|
||||
var code_buffer = std.ArrayList(u8).init(gpa);
|
||||
defer code_buffer.deinit();
|
||||
|
||||
const sym_index = try self.addAtom(elf_file);
|
||||
|
||||
const res = try codegen.generateSymbol(&elf_file.base, src_loc, val, &code_buffer, .{
|
||||
const res = try codegen.generateSymbol(&elf_file.base, pt, src_loc, val, &code_buffer, .{
|
||||
.none = {},
|
||||
}, .{
|
||||
.parent_atom_index = sym_index,
|
||||
@ -1373,13 +1377,14 @@ fn lowerConst(
|
||||
pub fn updateExports(
|
||||
self: *ZigObject,
|
||||
elf_file: *Elf,
|
||||
mod: *Module,
|
||||
pt: Zcu.PerThread,
|
||||
exported: Module.Exported,
|
||||
export_indices: []const u32,
|
||||
) link.File.UpdateExportsError!void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
const mod = pt.zcu;
|
||||
const gpa = elf_file.base.comp.gpa;
|
||||
const metadata = switch (exported) {
|
||||
.decl_index => |decl_index| blk: {
|
||||
@ -1388,7 +1393,7 @@ pub fn updateExports(
|
||||
},
|
||||
.value => |value| self.anon_decls.getPtr(value) orelse blk: {
|
||||
const first_exp = mod.all_exports.items[export_indices[0]];
|
||||
const res = try self.lowerAnonDecl(elf_file, value, .none, first_exp.src);
|
||||
const res = try self.lowerAnonDecl(elf_file, pt, value, .none, first_exp.src);
|
||||
switch (res) {
|
||||
.ok => {},
|
||||
.fail => |em| {
|
||||
|
||||
@ -360,11 +360,11 @@ pub fn deinit(self: *MachO) void {
self.unwind_records.deinit(gpa);
}

pub fn flush(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
try self.flushModule(arena, prog_node);
pub fn flush(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
try self.flushModule(arena, tid, prog_node);
}

pub fn flushModule(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();

@ -391,7 +391,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, prog_node: std.Progress.Node)
// --verbose-link
if (comp.verbose_link) try self.dumpArgv(comp);

if (self.getZigObject()) |zo| try zo.flushModule(self);
if (self.getZigObject()) |zo| try zo.flushModule(self, tid);
if (self.base.isStaticLib()) return relocatable.flushStaticLib(self, comp, module_obj_path);
if (self.base.isObject()) return relocatable.flushObject(self, comp, module_obj_path);

@ -3178,24 +3178,24 @@ pub fn writeCodeSignature(self: *MachO, code_sig: *CodeSignature) !void {
try self.base.file.?.pwriteAll(buffer.items, offset);
}

pub fn updateFunc(self: *MachO, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(self: *MachO, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness);
return self.getZigObject().?.updateFunc(self, mod, func_index, air, liveness);
if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(pt, func_index, air, liveness);
return self.getZigObject().?.updateFunc(self, pt, func_index, air, liveness);
}

pub fn lowerUnnamedConst(self: *MachO, val: Value, decl_index: InternPool.DeclIndex) !u32 {
return self.getZigObject().?.lowerUnnamedConst(self, val, decl_index);
pub fn lowerUnnamedConst(self: *MachO, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 {
return self.getZigObject().?.lowerUnnamedConst(self, pt, val, decl_index);
}

pub fn updateDecl(self: *MachO, mod: *Module, decl_index: InternPool.DeclIndex) !void {
pub fn updateDecl(self: *MachO, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index);
return self.getZigObject().?.updateDecl(self, mod, decl_index);
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(pt, decl_index);
return self.getZigObject().?.updateDecl(self, pt, decl_index);
}

pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: InternPool.DeclIndex) !void {
@ -3205,15 +3205,15 @@ pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: InternPoo

pub fn updateExports(
self: *MachO,
mod: *Module,
pt: Zcu.PerThread,
exported: Module.Exported,
export_indices: []const u32,
) link.File.UpdateExportsError!void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices);
return self.getZigObject().?.updateExports(self, mod, exported, export_indices);
if (self.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, export_indices);
return self.getZigObject().?.updateExports(self, pt, exported, export_indices);
}

pub fn deleteExport(
@ -3237,11 +3237,12 @@ pub fn getDeclVAddr(self: *MachO, decl_index: InternPool.DeclIndex, reloc_info:

pub fn lowerAnonDecl(
self: *MachO,
pt: Zcu.PerThread,
decl_val: InternPool.Index,
explicit_alignment: InternPool.Alignment,
src_loc: Module.LazySrcLoc,
) !codegen.Result {
return self.getZigObject().?.lowerAnonDecl(self, decl_val, explicit_alignment, src_loc);
return self.getZigObject().?.lowerAnonDecl(self, pt, decl_val, explicit_alignment, src_loc);
}

pub fn getAnonDeclVAddr(self: *MachO, decl_val: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 {

@ -425,16 +425,17 @@ pub fn getInputSection(self: ZigObject, atom: Atom, macho_file: *MachO) macho.se
return sect;
}

pub fn flushModule(self: *ZigObject, macho_file: *MachO) !void {
pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) !void {
// Handle any lazy symbols that were emitted by incremental compilation.
if (self.lazy_syms.getPtr(.none)) |metadata| {
const zcu = macho_file.base.comp.module.?;
const pt: Zcu.PerThread = .{ .zcu = macho_file.base.comp.module.?, .tid = tid };

// Most lazy symbols can be updated on first use, but
// anyerror needs to wait for everything to be flushed.
if (metadata.text_state != .unused) self.updateLazySymbol(
macho_file,
link.File.LazySymbol.initDecl(.code, null, zcu),
pt,
link.File.LazySymbol.initDecl(.code, null, pt.zcu),
metadata.text_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
@ -442,7 +443,8 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO) !void {
};
if (metadata.const_state != .unused) self.updateLazySymbol(
macho_file,
link.File.LazySymbol.initDecl(.const_data, null, zcu),
pt,
link.File.LazySymbol.initDecl(.const_data, null, pt.zcu),
metadata.const_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
@ -455,8 +457,8 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO) !void {
}

if (self.dwarf) |*dw| {
const zcu = macho_file.base.comp.module.?;
try dw.flushModule(zcu);
const pt: Zcu.PerThread = .{ .zcu = macho_file.base.comp.module.?, .tid = tid };
try dw.flushModule(pt);

if (self.debug_abbrev_dirty) {
try dw.writeDbgAbbrev();
@ -469,7 +471,7 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO) !void {
const text_section = macho_file.sections.items(.header)[macho_file.zig_text_sect_index.?];
const low_pc = text_section.addr;
const high_pc = text_section.addr + text_section.size;
try dw.writeDbgInfoHeader(zcu, low_pc, high_pc);
try dw.writeDbgInfoHeader(pt.zcu, low_pc, high_pc);
self.debug_info_header_dirty = false;
}

@ -570,6 +572,7 @@ pub fn getAnonDeclVAddr(
pub fn lowerAnonDecl(
self: *ZigObject,
macho_file: *MachO,
pt: Zcu.PerThread,
decl_val: InternPool.Index,
explicit_alignment: Atom.Alignment,
src_loc: Module.LazySrcLoc,
@ -578,7 +581,7 @@ pub fn lowerAnonDecl(
const mod = macho_file.base.comp.module.?;
const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
const decl_alignment = switch (explicit_alignment) {
.none => ty.abiAlignment(mod),
.none => ty.abiAlignment(pt),
else => explicit_alignment,
};
if (self.anon_decls.get(decl_val)) |metadata| {
@ -593,6 +596,7 @@ pub fn lowerAnonDecl(
}) catch unreachable;
const res = self.lowerConst(
macho_file,
pt,
name,
Value.fromInterned(decl_val),
decl_alignment,
@ -656,7 +660,7 @@ pub fn freeDecl(self: *ZigObject, macho_file: *MachO, decl_index: InternPool.Dec
pub fn updateFunc(
self: *ZigObject,
macho_file: *MachO,
mod: *Module,
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
@ -664,7 +668,8 @@ pub fn updateFunc(
const tracy = trace(@src());
defer tracy.end();

const gpa = macho_file.base.comp.gpa;
const mod = pt.zcu;
const gpa = mod.gpa;
const func = mod.funcInfo(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
@ -676,12 +681,13 @@ pub fn updateFunc(
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();

var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null;
var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(pt, decl_index) else null;
defer if (decl_state) |*ds| ds.deinit();

const dio: codegen.DebugInfoOutput = if (decl_state) |*ds| .{ .dwarf = ds } else .none;
const res = try codegen.generateFunction(
&macho_file.base,
pt,
decl.navSrcLoc(mod),
func_index,
air,
@ -700,12 +706,12 @@ pub fn updateFunc(
};

const sect_index = try self.getDeclOutputSection(macho_file, decl, code);
try self.updateDeclCode(macho_file, decl_index, sym_index, sect_index, code);
try self.updateDeclCode(macho_file, pt, decl_index, sym_index, sect_index, code);

if (decl_state) |*ds| {
const sym = macho_file.getSymbol(sym_index);
try self.dwarf.?.commitDeclState(
mod,
pt,
decl_index,
sym.getAddress(.{}, macho_file),
sym.getAtom(macho_file).?.size,
@ -719,12 +725,13 @@ pub fn updateFunc(
pub fn updateDecl(
self: *ZigObject,
macho_file: *MachO,
mod: *Module,
pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
) link.File.UpdateDeclError!void {
const tracy = trace(@src());
defer tracy.end();

const mod = pt.zcu;
const decl = mod.declPtr(decl_index);

if (decl.val.getExternFunc(mod)) |_| {
@ -749,12 +756,12 @@ pub fn updateDecl(
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();

var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null;
var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(pt, decl_index) else null;
defer if (decl_state) |*ds| ds.deinit();

const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
const dio: codegen.DebugInfoOutput = if (decl_state) |*ds| .{ .dwarf = ds } else .none;
const res = try codegen.generateSymbol(&macho_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, dio, .{
const res = try codegen.generateSymbol(&macho_file.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, dio, .{
.parent_atom_index = sym_index,
});

@ -772,15 +779,15 @@ pub fn updateDecl(
else => false,
};
if (is_threadlocal) {
try self.updateTlv(macho_file, decl_index, sym_index, sect_index, code);
try self.updateTlv(macho_file, pt, decl_index, sym_index, sect_index, code);
} else {
try self.updateDeclCode(macho_file, decl_index, sym_index, sect_index, code);
try self.updateDeclCode(macho_file, pt, decl_index, sym_index, sect_index, code);
}

if (decl_state) |*ds| {
const sym = macho_file.getSymbol(sym_index);
try self.dwarf.?.commitDeclState(
mod,
pt,
decl_index,
sym.getAddress(.{}, macho_file),
sym.getAtom(macho_file).?.size,
@ -794,19 +801,20 @@ pub fn updateDecl(
fn updateDeclCode(
self: *ZigObject,
macho_file: *MachO,
pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
sym_index: Symbol.Index,
sect_index: u8,
code: []const u8,
) !void {
const gpa = macho_file.base.comp.gpa;
const mod = macho_file.base.comp.module.?;
const mod = pt.zcu;
const decl = mod.declPtr(decl_index);
const decl_name = try decl.fullyQualifiedName(mod);

log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl });

const required_alignment = decl.getAlignment(mod);
const required_alignment = decl.getAlignment(pt);

const sect = &macho_file.sections.items(.header)[sect_index];
const sym = macho_file.getSymbol(sym_index);
@ -879,19 +887,20 @@ fn updateDeclCode(
fn updateTlv(
self: *ZigObject,
macho_file: *MachO,
pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
sym_index: Symbol.Index,
sect_index: u8,
code: []const u8,
) !void {
const mod = macho_file.base.comp.module.?;
const mod = pt.zcu;
const decl = mod.declPtr(decl_index);
const decl_name = try decl.fullyQualifiedName(mod);

log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&mod.intern_pool), decl });

const decl_name_slice = decl_name.toSlice(&mod.intern_pool);
const required_alignment = decl.getAlignment(mod);
const required_alignment = decl.getAlignment(pt);

// 1. Lower TLV initializer
const init_sym_index = try self.createTlvInitializer(
@ -1079,11 +1088,12 @@ fn getDeclOutputSection(
pub fn lowerUnnamedConst(
self: *ZigObject,
macho_file: *MachO,
pt: Zcu.PerThread,
val: Value,
decl_index: InternPool.DeclIndex,
) !u32 {
const gpa = macho_file.base.comp.gpa;
const mod = macho_file.base.comp.module.?;
const mod = pt.zcu;
const gpa = mod.gpa;
const gop = try self.unnamed_consts.getOrPut(gpa, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
@ -1096,9 +1106,10 @@ pub fn lowerUnnamedConst(
defer gpa.free(name);
const sym_index = switch (try self.lowerConst(
macho_file,
pt,
name,
val,
val.typeOf(mod).abiAlignment(mod),
val.typeOf(mod).abiAlignment(pt),
macho_file.zig_const_sect_index.?,
decl.navSrcLoc(mod),
)) {
@ -1123,6 +1134,7 @@ const LowerConstResult = union(enum) {
fn lowerConst(
self: *ZigObject,
macho_file: *MachO,
pt: Zcu.PerThread,
name: []const u8,
val: Value,
required_alignment: Atom.Alignment,
@ -1136,7 +1148,7 @@ fn lowerConst(

const sym_index = try self.addAtom(macho_file);

const res = try codegen.generateSymbol(&macho_file.base, src_loc, val, &code_buffer, .{
const res = try codegen.generateSymbol(&macho_file.base, pt, src_loc, val, &code_buffer, .{
.none = {},
}, .{
.parent_atom_index = sym_index,
@ -1181,13 +1193,14 @@ fn lowerConst(
pub fn updateExports(
self: *ZigObject,
macho_file: *MachO,
mod: *Module,
pt: Zcu.PerThread,
exported: Module.Exported,
export_indices: []const u32,
) link.File.UpdateExportsError!void {
const tracy = trace(@src());
defer tracy.end();

const mod = pt.zcu;
const gpa = macho_file.base.comp.gpa;
const metadata = switch (exported) {
.decl_index => |decl_index| blk: {
@ -1196,7 +1209,7 @@ pub fn updateExports(
},
.value => |value| self.anon_decls.getPtr(value) orelse blk: {
const first_exp = mod.all_exports.items[export_indices[0]];
const res = try self.lowerAnonDecl(macho_file, value, .none, first_exp.src);
const res = try self.lowerAnonDecl(macho_file, pt, value, .none, first_exp.src);
switch (res) {
.ok => {},
.fail => |em| {
@ -1272,6 +1285,7 @@ pub fn updateExports(
fn updateLazySymbol(
self: *ZigObject,
macho_file: *MachO,
pt: Zcu.PerThread,
lazy_sym: link.File.LazySymbol,
symbol_index: Symbol.Index,
) !void {
@ -1285,7 +1299,7 @@ fn updateLazySymbol(
const name_str_index = blk: {
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{
@tagName(lazy_sym.kind),
lazy_sym.ty.fmt(mod),
lazy_sym.ty.fmt(pt),
});
defer gpa.free(name);
break :blk try self.strtab.insert(gpa, name);
@ -1294,6 +1308,7 @@ fn updateLazySymbol(
const src = lazy_sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded;
const res = try codegen.generateLazySymbol(
&macho_file.base,
pt,
src,
lazy_sym,
&required_alignment,
@ -1431,10 +1446,11 @@ pub fn getOrCreateMetadataForDecl(
pub fn getOrCreateMetadataForLazySymbol(
self: *ZigObject,
macho_file: *MachO,
pt: Zcu.PerThread,
lazy_sym: link.File.LazySymbol,
) !Symbol.Index {
const gpa = macho_file.base.comp.gpa;
const mod = macho_file.base.comp.module.?;
const mod = pt.zcu;
const gpa = mod.gpa;
const gop = try self.lazy_syms.getOrPut(gpa, lazy_sym.getDecl(mod));
errdefer _ = if (!gop.found_existing) self.lazy_syms.pop();
if (!gop.found_existing) gop.value_ptr.* = .{};
@ -1464,7 +1480,7 @@ pub fn getOrCreateMetadataForLazySymbol(
metadata.state.* = .pending_flush;
const symbol_index = metadata.symbol_index.*;
// anyerror needs to be deferred until flushModule
if (lazy_sym.getDecl(mod) != .none) try self.updateLazySymbol(macho_file, lazy_sym, symbol_index);
if (lazy_sym.getDecl(mod) != .none) try self.updateLazySymbol(macho_file, pt, lazy_sym, symbol_index);
return symbol_index;
}

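The flushModule rewrites above share one shape: flush and flushModule now accept only a Zcu.PerThread.Id, and the full per-thread handle is rebuilt at the point where the Zcu is reachable. A minimal Zig sketch of that pattern, lifted from the construction visible in this diff (the function name flushExample is illustrative, not part of the commit):

fn flushExample(macho_file: *MachO, tid: Zcu.PerThread.Id) !void {
    // Only the thread id crosses the flush() boundary; the Zcu itself is
    // still reached through the Compilation, exactly as in the diff above.
    const pt: Zcu.PerThread = .{
        .zcu = macho_file.base.comp.module.?,
        .tid = tid,
    };
    // `pt` then replaces the old bare `*Zcu` argument when calling
    // updateLazySymbol, Dwarf.flushModule, and similar helpers.
    _ = pt;
}
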
@ -13,8 +13,6 @@ const assert = std.debug.assert;
const log = std.log.scoped(.link);

const Zcu = @import("../Zcu.zig");
/// Deprecated.
const Module = Zcu;
const InternPool = @import("../InternPool.zig");
const Compilation = @import("../Compilation.zig");
const link = @import("../link.zig");
@ -84,35 +82,35 @@ pub fn deinit(self: *NvPtx) void {
self.llvm_object.deinit();
}

pub fn updateFunc(self: *NvPtx, module: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
try self.llvm_object.updateFunc(module, func_index, air, liveness);
pub fn updateFunc(self: *NvPtx, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
try self.llvm_object.updateFunc(pt, func_index, air, liveness);
}

pub fn updateDecl(self: *NvPtx, module: *Module, decl_index: InternPool.DeclIndex) !void {
return self.llvm_object.updateDecl(module, decl_index);
pub fn updateDecl(self: *NvPtx, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
return self.llvm_object.updateDecl(pt, decl_index);
}

pub fn updateExports(
self: *NvPtx,
module: *Module,
exported: Module.Exported,
pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
) !void {
if (build_options.skip_non_native and builtin.object_format != .nvptx)
@panic("Attempted to compile for object format that was disabled by build configuration");

return self.llvm_object.updateExports(module, exported, export_indices);
return self.llvm_object.updateExports(pt, exported, export_indices);
}

pub fn freeDecl(self: *NvPtx, decl_index: InternPool.DeclIndex) void {
return self.llvm_object.freeDecl(decl_index);
}

pub fn flush(self: *NvPtx, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
return self.flushModule(arena, prog_node);
pub fn flush(self: *NvPtx, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
return self.flushModule(arena, tid, prog_node);
}

pub fn flushModule(self: *NvPtx, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flushModule(self: *NvPtx, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
if (build_options.skip_non_native)
@panic("Attempted to compile for architecture that was disabled by build configuration");

@ -121,5 +119,6 @@ pub fn flushModule(self: *NvPtx, arena: Allocator, prog_node: std.Progress.Node)
_ = arena;
_ = self;
_ = prog_node;
_ = tid;
@panic("TODO: rewrite the NvPtx.flushModule function");
}

@ -4,8 +4,6 @@
const Plan9 = @This();
const link = @import("../link.zig");
const Zcu = @import("../Zcu.zig");
/// Deprecated.
const Module = Zcu;
const InternPool = @import("../InternPool.zig");
const Compilation = @import("../Compilation.zig");
const aout = @import("Plan9/aout.zig");
@ -56,7 +54,7 @@ path_arena: std.heap.ArenaAllocator,
/// of the function to know what file it came from.
/// If we group the decls by file, it makes it really easy to do this (put the symbol in the correct place)
fn_decl_table: std.AutoArrayHashMapUnmanaged(
*Module.File,
*Zcu.File,
struct { sym_index: u32, functions: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, FnDeclOutput) = .{} },
) = .{},
/// the code is modified when relocated, so that is why it is mutable
@ -411,12 +409,13 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi
}
}

pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .plan9) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}

const gpa = self.base.comp.gpa;
const mod = pt.zcu;
const gpa = mod.gpa;
const target = self.base.comp.root_mod.resolved_target.result;
const func = mod.funcInfo(func_index);
const decl_index = func.owner_decl;
@ -439,6 +438,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air:

const res = try codegen.generateFunction(
&self.base,
pt,
decl.navSrcLoc(mod),
func_index,
air,
@ -468,13 +468,13 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air:
return self.updateFinish(decl_index);
}

pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIndex) !u32 {
const gpa = self.base.comp.gpa;
pub fn lowerUnnamedConst(self: *Plan9, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 {
const mod = pt.zcu;
const gpa = mod.gpa;
_ = try self.seeDecl(decl_index);
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();

const mod = self.base.comp.module.?;
const decl = mod.declPtr(decl_index);

const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index);
@ -505,7 +505,7 @@ pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIn
};
self.syms.items[info.sym_index.?] = sym;

const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), val, &code_buffer, .{
const res = try codegen.generateSymbol(&self.base, pt, decl.navSrcLoc(mod), val, &code_buffer, .{
.none = {},
}, .{
.parent_atom_index = new_atom_idx,
@ -530,8 +530,9 @@ pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIn
return new_atom_idx;
}

pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex) !void {
pub fn updateDecl(self: *Plan9, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
const gpa = self.base.comp.gpa;
const mod = pt.zcu;
const decl = mod.declPtr(decl_index);

if (decl.isExtern(mod)) {
@ -544,7 +545,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex)
defer code_buffer.deinit();
const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
// TODO we need the symbol index for symbol in the table of locals for the containing atom
const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .{ .none = {} }, .{
const res = try codegen.generateSymbol(&self.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, .{ .none = {} }, .{
.parent_atom_index = @as(Atom.Index, @intCast(atom_idx)),
});
const code = switch (res) {
@ -610,7 +611,7 @@ fn allocateGotIndex(self: *Plan9) usize {
}
}

pub fn flush(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flush(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const comp = self.base.comp;
const use_lld = build_options.have_llvm and comp.config.use_lld;
assert(!use_lld);
@ -621,7 +622,7 @@ pub fn flush(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) link.
.Obj => return error.TODOImplementPlan9Objs,
.Lib => return error.TODOImplementWritingLibFiles,
}
return self.flushModule(arena, prog_node);
return self.flushModule(arena, tid, prog_node);
}

pub fn changeLine(l: *std.ArrayList(u8), delta_line: i32) !void {
@ -669,20 +670,20 @@ fn atomCount(self: *Plan9) usize {
return data_decl_count + fn_decl_count + unnamed_const_count + lazy_atom_count + extern_atom_count + anon_atom_count;
}

pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
if (build_options.skip_non_native and builtin.object_format != .plan9) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}

const tracy = trace(@src());
defer tracy.end();

_ = arena; // Has the same lifetime as the call to Compilation.update.

const comp = self.base.comp;
const gpa = comp.gpa;
const target = comp.root_mod.resolved_target.result;

const tracy = trace(@src());
defer tracy.end();

const sub_prog_node = prog_node.start("Flush Module", 0);
defer sub_prog_node.end();

@ -690,21 +691,26 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node)

defer assert(self.hdr.entry != 0x0);

const mod = self.base.comp.module orelse return error.LinkingWithoutZigSourceUnimplemented;
const pt: Zcu.PerThread = .{
.zcu = self.base.comp.module orelse return error.LinkingWithoutZigSourceUnimplemented,
.tid = tid,
};

// finish up the lazy syms
if (self.lazy_syms.getPtr(.none)) |metadata| {
// Most lazy symbols can be updated on first use, but
// anyerror needs to wait for everything to be flushed.
if (metadata.text_state != .unused) self.updateLazySymbolAtom(
File.LazySymbol.initDecl(.code, null, mod),
pt,
File.LazySymbol.initDecl(.code, null, pt.zcu),
metadata.text_atom,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
else => |e| e,
};
if (metadata.rodata_state != .unused) self.updateLazySymbolAtom(
File.LazySymbol.initDecl(.const_data, null, mod),
pt,
File.LazySymbol.initDecl(.const_data, null, pt.zcu),
metadata.rodata_atom,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
@ -747,7 +753,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node)
var it = fentry.value_ptr.functions.iterator();
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
const decl = pt.zcu.declPtr(decl_index);
const atom = self.getAtomPtr(self.decls.get(decl_index).?.index);
const out = entry.value_ptr.*;
{
@ -767,7 +773,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node)
const off = self.getAddr(text_i, .t);
text_i += out.code.len;
atom.offset = off;
log.debug("write text decl {*} ({}), lines {d} to {d}.;__GOT+0x{x} vaddr: 0x{x}", .{ decl, decl.name.fmt(&mod.intern_pool), out.start_line + 1, out.end_line, atom.got_index.? * 8, off });
log.debug("write text decl {*} ({}), lines {d} to {d}.;__GOT+0x{x} vaddr: 0x{x}", .{ decl, decl.name.fmt(&pt.zcu.intern_pool), out.start_line + 1, out.end_line, atom.got_index.? * 8, off });
if (!self.sixtyfour_bit) {
mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), target.cpu.arch.endian());
} else {
@ -775,7 +781,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node)
}
self.syms.items[atom.sym_index.?].value = off;
if (self.decl_exports.get(decl_index)) |export_indices| {
try self.addDeclExports(mod, decl_index, export_indices);
try self.addDeclExports(pt.zcu, decl_index, export_indices);
}
}
}
@ -841,7 +847,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node)
}
self.syms.items[atom.sym_index.?].value = off;
if (self.decl_exports.get(decl_index)) |export_indices| {
try self.addDeclExports(mod, decl_index, export_indices);
try self.addDeclExports(pt.zcu, decl_index, export_indices);
}
}
// write the unnamed constants after the other data decls
@ -1009,7 +1015,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node)
}
fn addDeclExports(
self: *Plan9,
mod: *Module,
mod: *Zcu,
decl_index: InternPool.DeclIndex,
export_indices: []const u32,
) !void {
@ -1025,7 +1031,7 @@ fn addDeclExports(
if (!section_name.eqlSlice(".text", &mod.intern_pool) and
!section_name.eqlSlice(".data", &mod.intern_pool))
{
try mod.failed_exports.put(mod.gpa, export_idx, try Module.ErrorMsg.create(
try mod.failed_exports.put(mod.gpa, export_idx, try Zcu.ErrorMsg.create(
gpa,
mod.declPtr(decl_index).navSrcLoc(mod),
"plan9 does not support extra sections",
@ -1155,8 +1161,8 @@ pub fn seeDecl(self: *Plan9, decl_index: InternPool.DeclIndex) !Atom.Index {

pub fn updateExports(
self: *Plan9,
module: *Module,
exported: Module.Exported,
pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
) !void {
const gpa = self.base.comp.gpa;
@ -1173,11 +1179,11 @@ pub fn updateExports(
},
}
// all proper work is done in flush
_ = module;
_ = pt;
}

pub fn getOrCreateAtomForLazySymbol(self: *Plan9, sym: File.LazySymbol) !Atom.Index {
const gpa = self.base.comp.gpa;
pub fn getOrCreateAtomForLazySymbol(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol) !Atom.Index {
const gpa = pt.zcu.gpa;
const gop = try self.lazy_syms.getOrPut(gpa, sym.getDecl(self.base.comp.module.?));
errdefer _ = if (!gop.found_existing) self.lazy_syms.pop();

@ -1198,14 +1204,13 @@ pub fn getOrCreateAtomForLazySymbol(self: *Plan9, sym: File.LazySymbol) !Atom.In
_ = self.getAtomPtr(atom).getOrCreateOffsetTableEntry(self);
// anyerror needs to be deferred until flushModule
if (sym.getDecl(self.base.comp.module.?) != .none) {
try self.updateLazySymbolAtom(sym, atom);
try self.updateLazySymbolAtom(pt, sym, atom);
}
return atom;
}

fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Index) !void {
const gpa = self.base.comp.gpa;
const mod = self.base.comp.module.?;
fn updateLazySymbolAtom(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol, atom_index: Atom.Index) !void {
const gpa = pt.zcu.gpa;

var required_alignment: InternPool.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
@ -1214,7 +1219,7 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind
// create the symbol for the name
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{
@tagName(sym.kind),
sym.ty.fmt(mod),
sym.ty.fmt(pt),
});

const symbol: aout.Sym = .{
@ -1225,9 +1230,10 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind
self.syms.items[self.getAtomPtr(atom_index).sym_index.?] = symbol;

// generate the code
const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded;
const src = sym.ty.srcLocOrNull(pt.zcu) orelse Zcu.LazySrcLoc.unneeded;
const res = try codegen.generateLazySymbol(
&self.base,
pt,
src,
sym,
&required_alignment,
@ -1490,7 +1496,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
}

/// Must be called only after a successful call to `updateDecl`.
pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex) !void {
pub fn updateDeclLineNumber(self: *Plan9, mod: *Zcu, decl_index: InternPool.DeclIndex) !void {
_ = self;
_ = mod;
_ = decl_index;
@ -1544,9 +1550,10 @@ pub fn getDeclVAddr(

pub fn lowerAnonDecl(
self: *Plan9,
pt: Zcu.PerThread,
decl_val: InternPool.Index,
explicit_alignment: InternPool.Alignment,
src_loc: Module.LazySrcLoc,
src_loc: Zcu.LazySrcLoc,
) !codegen.Result {
_ = explicit_alignment;
// This is basically the same as lowerUnnamedConst.
@ -1569,7 +1576,7 @@ pub fn lowerAnonDecl(
gop.value_ptr.* = index;
// we need to free name later
var code_buffer = std.ArrayList(u8).init(gpa);
const res = try codegen.generateSymbol(&self.base, src_loc, val, &code_buffer, .{ .none = {} }, .{ .parent_atom_index = index });
const res = try codegen.generateSymbol(&self.base, pt, src_loc, val, &code_buffer, .{ .none = {} }, .{ .parent_atom_index = index });
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| return .{ .fail = em },

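Every backend entry point in this commit migrates the same way: the mod: *Module parameter becomes pt: Zcu.PerThread, and the body recovers its old bindings from the handle (the diff also swaps self.base.comp.gpa for pt.zcu.gpa, treating them as the same allocator). A hedged sketch of the recurring shape, with updateThing as a hypothetical stand-in for the updateDecl/updateFunc/lowerUnnamedConst rewrites above:

fn updateThing(self: *Plan9, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
    _ = self;
    // Recover the former `mod` and `gpa` bindings from the handle.
    const mod = pt.zcu;
    const gpa = mod.gpa;
    const decl = mod.declPtr(decl_index);
    _ = gpa;
    _ = decl;
}
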
@ -28,8 +28,6 @@ const assert = std.debug.assert;
const log = std.log.scoped(.link);

const Zcu = @import("../Zcu.zig");
/// Deprecated.
const Module = Zcu;
const InternPool = @import("../InternPool.zig");
const Compilation = @import("../Compilation.zig");
const link = @import("../link.zig");
@ -125,35 +123,36 @@ pub fn deinit(self: *SpirV) void {
self.object.deinit();
}

pub fn updateFunc(self: *SpirV, module: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(self: *SpirV, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}

const func = module.funcInfo(func_index);
const decl = module.declPtr(func.owner_decl);
log.debug("lowering function {}", .{decl.name.fmt(&module.intern_pool)});
const func = pt.zcu.funcInfo(func_index);
const decl = pt.zcu.declPtr(func.owner_decl);
log.debug("lowering function {}", .{decl.name.fmt(&pt.zcu.intern_pool)});

try self.object.updateFunc(module, func_index, air, liveness);
try self.object.updateFunc(pt, func_index, air, liveness);
}

pub fn updateDecl(self: *SpirV, module: *Module, decl_index: InternPool.DeclIndex) !void {
pub fn updateDecl(self: *SpirV, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}

const decl = module.declPtr(decl_index);
log.debug("lowering declaration {}", .{decl.name.fmt(&module.intern_pool)});
const decl = pt.zcu.declPtr(decl_index);
log.debug("lowering declaration {}", .{decl.name.fmt(&pt.zcu.intern_pool)});

try self.object.updateDecl(module, decl_index);
try self.object.updateDecl(pt, decl_index);
}

pub fn updateExports(
self: *SpirV,
mod: *Module,
exported: Module.Exported,
pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
) !void {
const mod = pt.zcu;
const decl_index = switch (exported) {
.decl_index => |i| i,
.value => |val| {
@ -196,11 +195,11 @@ pub fn freeDecl(self: *SpirV, decl_index: InternPool.DeclIndex) void {
_ = decl_index;
}

pub fn flush(self: *SpirV, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
return self.flushModule(arena, prog_node);
pub fn flush(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
return self.flushModule(arena, tid, prog_node);
}

pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@ -216,6 +215,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: std.Progress.Node)
const comp = self.base.comp;
const gpa = comp.gpa;
const target = comp.getTarget();
_ = tid;

try writeCapabilities(spv, target);
try writeMemoryModel(spv, target);

@ -29,8 +29,6 @@ const InternPool = @import("../InternPool.zig");
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const Zcu = @import("../Zcu.zig");
/// Deprecated.
const Module = Zcu;
const Object = @import("Wasm/Object.zig");
const Symbol = @import("Wasm/Symbol.zig");
const Type = @import("../Type.zig");
@ -1441,25 +1439,25 @@ pub fn deinit(wasm: *Wasm) void {
wasm.files.deinit(gpa);
}

pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
pub fn updateFunc(wasm: *Wasm, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .wasm) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness);
try wasm.zigObjectPtr().?.updateFunc(wasm, mod, func_index, air, liveness);
if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(pt, func_index, air, liveness);
try wasm.zigObjectPtr().?.updateFunc(wasm, pt, func_index, air, liveness);
}

// Generate code for the Decl, storing it in memory to be later written to
// the file on flush().
pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: InternPool.DeclIndex) !void {
pub fn updateDecl(wasm: *Wasm, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
if (build_options.skip_non_native and builtin.object_format != .wasm) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (wasm.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index);
try wasm.zigObjectPtr().?.updateDecl(wasm, mod, decl_index);
if (wasm.llvm_object) |llvm_object| return llvm_object.updateDecl(pt, decl_index);
try wasm.zigObjectPtr().?.updateDecl(wasm, pt, decl_index);
}

pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl_index: InternPool.DeclIndex) !void {
pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Zcu, decl_index: InternPool.DeclIndex) !void {
if (wasm.llvm_object) |_| return;
try wasm.zigObjectPtr().?.updateDeclLineNumber(mod, decl_index);
}
@ -1506,8 +1504,8 @@ fn getFunctionSignature(wasm: *const Wasm, loc: SymbolLoc) std.wasm.Type {
/// Lowers a constant typed value to a local symbol and atom.
/// Returns the symbol index of the local
/// The given `decl` is the parent decl who owns the constant.
pub fn lowerUnnamedConst(wasm: *Wasm, val: Value, decl_index: InternPool.DeclIndex) !u32 {
return wasm.zigObjectPtr().?.lowerUnnamedConst(wasm, val, decl_index);
pub fn lowerUnnamedConst(wasm: *Wasm, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 {
return wasm.zigObjectPtr().?.lowerUnnamedConst(wasm, pt, val, decl_index);
}

/// Returns the symbol index from a symbol of which its flag is set global,
@ -1531,11 +1529,12 @@ pub fn getDeclVAddr(

pub fn lowerAnonDecl(
wasm: *Wasm,
pt: Zcu.PerThread,
decl_val: InternPool.Index,
explicit_alignment: Alignment,
src_loc: Module.LazySrcLoc,
src_loc: Zcu.LazySrcLoc,
) !codegen.Result {
return wasm.zigObjectPtr().?.lowerAnonDecl(wasm, decl_val, explicit_alignment, src_loc);
return wasm.zigObjectPtr().?.lowerAnonDecl(wasm, pt, decl_val, explicit_alignment, src_loc);
}

pub fn getAnonDeclVAddr(wasm: *Wasm, decl_val: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 {

@ -1553,15 +1552,15 @@ pub fn deleteExport(

pub fn updateExports(
wasm: *Wasm,
mod: *Module,
exported: Module.Exported,
pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
) !void {
if (build_options.skip_non_native and builtin.object_format != .wasm) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (wasm.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices);
return wasm.zigObjectPtr().?.updateExports(wasm, mod, exported, export_indices);
if (wasm.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, export_indices);
return wasm.zigObjectPtr().?.updateExports(wasm, pt, exported, export_indices);
}

pub fn freeDecl(wasm: *Wasm, decl_index: InternPool.DeclIndex) void {
@ -2466,18 +2465,18 @@ fn appendDummySegment(wasm: *Wasm) !void {
});
}
pub fn flush(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flush(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const comp = wasm.base.comp;
const use_lld = build_options.have_llvm and comp.config.use_lld;

if (use_lld) {
return wasm.linkWithLLD(arena, prog_node);
return wasm.linkWithLLD(arena, tid, prog_node);
}
return wasm.flushModule(arena, prog_node);
return wasm.flushModule(arena, tid, prog_node);
}

/// Uses the in-house linker to link one or multiple object and archive files into a WebAssembly binary.
pub fn flushModule(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
pub fn flushModule(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();

@ -2513,7 +2512,7 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node)
const wasi_exec_model = comp.config.wasi_exec_model;

if (wasm.zigObjectPtr()) |zig_object| {
try zig_object.flushModule(wasm);
try zig_object.flushModule(wasm, tid);
}

// When the target os is WASI, we allow linking with WASI-LIBC
@ -3324,7 +3323,7 @@ fn emitImport(wasm: *Wasm, writer: anytype, import: types.Import) !void {
}
}

fn linkWithLLD(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) !void {
fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
const tracy = trace(@src());
defer tracy.end();

@ -3342,7 +3341,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) !voi
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (comp.module != null) blk: {
try wasm.flushModule(arena, prog_node);
try wasm.flushModule(arena, tid, prog_node);

if (fs.path.dirname(full_out_path)) |dirname| {
break :blk try fs.path.join(arena, &.{ dirname, wasm.base.zcu_object_sub_path.? });
@ -4009,8 +4008,8 @@ pub fn storeDeclType(wasm: *Wasm, decl_index: InternPool.DeclIndex, func_type: s
/// Returns the symbol index of the error name table.
///
/// When the symbol does not yet exist, it will create a new one instead.
pub fn getErrorTableSymbol(wasm_file: *Wasm) !u32 {
const sym_index = try wasm_file.zigObjectPtr().?.getErrorTableSymbol(wasm_file);
pub fn getErrorTableSymbol(wasm_file: *Wasm, pt: Zcu.PerThread) !u32 {
const sym_index = try wasm_file.zigObjectPtr().?.getErrorTableSymbol(wasm_file, pt);
return @intFromEnum(sym_index);
}

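Type and value queries migrate along with the signatures: calls such as ty.abiAlignment(mod) and sym.ty.fmt(mod) become ty.abiAlignment(pt) and sym.ty.fmt(pt) throughout the diff, while val.typeOf(...) keeps taking the Zcu. A small illustrative helper under those assumptions (exampleAlignment is not part of the commit):

fn exampleAlignment(pt: Zcu.PerThread, val: Value) InternPool.Alignment {
    // typeOf still takes the shared Zcu; layout queries now take the
    // per-thread handle, mirroring the call sites in this diff.
    const ty = val.typeOf(pt.zcu);
    return ty.abiAlignment(pt);
}
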
@ -241,9 +241,10 @@ pub fn allocateSymbol(zig_object: *ZigObject, gpa: std.mem.Allocator) !Symbol.In
|
||||
pub fn updateDecl(
|
||||
zig_object: *ZigObject,
|
||||
wasm_file: *Wasm,
|
||||
mod: *Module,
|
||||
pt: Zcu.PerThread,
|
||||
decl_index: InternPool.DeclIndex,
|
||||
) !void {
|
||||
const mod = pt.zcu;
|
||||
const decl = mod.declPtr(decl_index);
|
||||
if (decl.val.getFunction(mod)) |_| {
|
||||
return;
|
||||
@ -269,6 +270,7 @@ pub fn updateDecl(
|
||||
|
||||
const res = try codegen.generateSymbol(
|
||||
&wasm_file.base,
|
||||
pt,
|
||||
decl.navSrcLoc(mod),
|
||||
val,
|
||||
&code_writer,
|
||||
@ -285,21 +287,21 @@ pub fn updateDecl(
|
||||
},
|
||||
};
|
||||
|
||||
return zig_object.finishUpdateDecl(wasm_file, decl_index, code);
|
||||
return zig_object.finishUpdateDecl(wasm_file, pt, decl_index, code);
|
||||
}
|
||||
|
||||
pub fn updateFunc(
|
||||
zig_object: *ZigObject,
|
||||
wasm_file: *Wasm,
|
||||
mod: *Module,
|
||||
pt: Zcu.PerThread,
|
||||
func_index: InternPool.Index,
|
||||
air: Air,
|
||||
liveness: Liveness,
|
||||
) !void {
|
||||
const gpa = wasm_file.base.comp.gpa;
const func = mod.funcInfo(func_index);
const func = pt.zcu.funcInfo(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
const decl = pt.zcu.declPtr(decl_index);
const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index);
const atom = wasm_file.getAtomPtr(atom_index);
atom.clear();
@@ -308,7 +310,8 @@ pub fn updateFunc(
defer code_writer.deinit();
const result = try codegen.generateFunction(
&wasm_file.base,
decl.navSrcLoc(mod),
pt,
decl.navSrcLoc(pt.zcu),
func_index,
air,
liveness,
@@ -320,29 +323,31 @@ pub fn updateFunc(
.ok => code_writer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
try pt.zcu.failed_analysis.put(gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
return;
},
};

return zig_object.finishUpdateDecl(wasm_file, decl_index, code);
return zig_object.finishUpdateDecl(wasm_file, pt, decl_index, code);
}

fn finishUpdateDecl(
zig_object: *ZigObject,
wasm_file: *Wasm,
pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
code: []const u8,
) !void {
const gpa = wasm_file.base.comp.gpa;
const zcu = wasm_file.base.comp.module.?;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
const decl = zcu.declPtr(decl_index);
const decl_info = zig_object.decls_map.get(decl_index).?;
const atom_index = decl_info.atom;
const atom = wasm_file.getAtomPtr(atom_index);
const sym = zig_object.symbol(atom.sym_index);
const full_name = try decl.fullyQualifiedName(zcu);
sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(&zcu.intern_pool));
sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(ip));
try atom.code.appendSlice(gpa, code);
atom.size = @intCast(code.len);

@@ -382,7 +387,7 @@ fn finishUpdateDecl(
// Will be freed upon freeing of decl or after cleanup of Wasm binary.
const full_segment_name = try std.mem.concat(gpa, u8, &.{
segment_name,
full_name.toSlice(&zcu.intern_pool),
full_name.toSlice(ip),
});
errdefer gpa.free(full_segment_name);
sym.tag = .data;
@@ -390,7 +395,7 @@ fn finishUpdateDecl(
},
}
if (code.len == 0) return;
atom.alignment = decl.getAlignment(zcu);
atom.alignment = decl.getAlignment(pt);
}

/// Creates and initializes a new segment in the 'Data' section.
@@ -437,9 +442,10 @@ pub fn getOrCreateAtomForDecl(zig_object: *ZigObject, wasm_file: *Wasm, decl_ind
pub fn lowerAnonDecl(
zig_object: *ZigObject,
wasm_file: *Wasm,
pt: Zcu.PerThread,
decl_val: InternPool.Index,
explicit_alignment: InternPool.Alignment,
src_loc: Module.LazySrcLoc,
src_loc: Zcu.LazySrcLoc,
) !codegen.Result {
const gpa = wasm_file.base.comp.gpa;
const gop = try zig_object.anon_decls.getOrPut(gpa, decl_val);
@@ -449,7 +455,7 @@ pub fn lowerAnonDecl(
@intFromEnum(decl_val),
}) catch unreachable;

switch (try zig_object.lowerConst(wasm_file, name, Value.fromInterned(decl_val), src_loc)) {
switch (try zig_object.lowerConst(wasm_file, pt, name, Value.fromInterned(decl_val), src_loc)) {
.ok => |atom_index| zig_object.anon_decls.values()[gop.index] = atom_index,
.fail => |em| return .{ .fail = em },
}
@@ -469,9 +475,15 @@ pub fn lowerAnonDecl(
/// Lowers a constant typed value to a local symbol and atom.
/// Returns the symbol index of the local symbol.
/// The given `decl` is the parent decl that owns the constant.
pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, val: Value, decl_index: InternPool.DeclIndex) !u32 {
const gpa = wasm_file.base.comp.gpa;
const mod = wasm_file.base.comp.module.?;
pub fn lowerUnnamedConst(
zig_object: *ZigObject,
wasm_file: *Wasm,
pt: Zcu.PerThread,
val: Value,
decl_index: InternPool.DeclIndex,
) !u32 {
const mod = pt.zcu;
const gpa = mod.gpa;
std.debug.assert(val.typeOf(mod).zigTypeTag(mod) != .Fn); // cannot create local symbols for functions
const decl = mod.declPtr(decl_index);

@@ -494,7 +506,7 @@ pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, val: Value, d
else
decl.navSrcLoc(mod);

switch (try zig_object.lowerConst(wasm_file, name, val, decl_src)) {
switch (try zig_object.lowerConst(wasm_file, pt, name, val, decl_src)) {
.ok => |atom_index| {
try wasm_file.getAtomPtr(parent_atom_index).locals.append(gpa, atom_index);
return @intFromEnum(wasm_file.getAtom(atom_index).sym_index);
@@ -509,10 +521,17 @@ pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, val: Value, d

const LowerConstResult = union(enum) {
ok: Atom.Index,
fail: *Module.ErrorMsg,
fail: *Zcu.ErrorMsg,
};

fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: Value, src_loc: Module.LazySrcLoc) !LowerConstResult {
fn lowerConst(
zig_object: *ZigObject,
wasm_file: *Wasm,
pt: Zcu.PerThread,
name: []const u8,
val: Value,
src_loc: Zcu.LazySrcLoc,
) !LowerConstResult {
const gpa = wasm_file.base.comp.gpa;
const mod = wasm_file.base.comp.module.?;

@@ -526,7 +545,7 @@ fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: V

const code = code: {
const atom = wasm_file.getAtomPtr(atom_index);
atom.alignment = ty.abiAlignment(mod);
atom.alignment = ty.abiAlignment(pt);
const segment_name = try std.mem.concat(gpa, u8, &.{ ".rodata.", name });
errdefer gpa.free(segment_name);
zig_object.symbol(sym_index).* = .{
@@ -536,13 +555,14 @@ fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: V
.index = try zig_object.createDataSegment(
gpa,
segment_name,
ty.abiAlignment(mod),
ty.abiAlignment(pt),
),
.virtual_address = undefined,
};

const result = try codegen.generateSymbol(
&wasm_file.base,
pt,
src_loc,
val,
&value_bytes,
@@ -568,7 +588,7 @@ fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: V
/// Returns the symbol index of the error name table.
///
/// When the symbol does not yet exist, it will create a new one instead.
pub fn getErrorTableSymbol(zig_object: *ZigObject, wasm_file: *Wasm) !Symbol.Index {
pub fn getErrorTableSymbol(zig_object: *ZigObject, wasm_file: *Wasm, pt: Zcu.PerThread) !Symbol.Index {
if (zig_object.error_table_symbol != .null) {
return zig_object.error_table_symbol;
}
@@ -581,8 +601,7 @@ pub fn getErrorTableSymbol(zig_object: *ZigObject, wasm_file: *Wasm) !Symbol.Ind
const atom_index = try wasm_file.createAtom(sym_index, zig_object.index);
const atom = wasm_file.getAtomPtr(atom_index);
const slice_ty = Type.slice_const_u8_sentinel_0;
const mod = wasm_file.base.comp.module.?;
atom.alignment = slice_ty.abiAlignment(mod);
atom.alignment = slice_ty.abiAlignment(pt);

const sym_name = try zig_object.string_table.insert(gpa, "__zig_err_name_table");
const segment_name = try gpa.dupe(u8, ".rodata.__zig_err_name_table");
@@ -604,7 +623,7 @@ pub fn getErrorTableSymbol(zig_object: *ZigObject, wasm_file: *Wasm) !Symbol.Ind
///
/// This creates a table that consists of a pointer and length for each error name.
/// The table is what the generated runtime bodies point into.
fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm) !void {
fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm, tid: Zcu.PerThread.Id) !void {
if (zig_object.error_table_symbol == .null) return;
const gpa = wasm_file.base.comp.gpa;
const atom_index = wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = zig_object.error_table_symbol }).?;
@@ -631,11 +650,11 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm) !void {

// Addend for each relocation to the table
var addend: u32 = 0;
const mod = wasm_file.base.comp.module.?;
for (mod.global_error_set.keys()) |error_name| {
const pt: Zcu.PerThread = .{ .zcu = wasm_file.base.comp.module.?, .tid = tid };
for (pt.zcu.global_error_set.keys()) |error_name| {
const atom = wasm_file.getAtomPtr(atom_index);

const error_name_slice = error_name.toSlice(&mod.intern_pool);
const error_name_slice = error_name.toSlice(&pt.zcu.intern_pool);
const len: u32 = @intCast(error_name_slice.len + 1); // names are 0-terminated

const slice_ty = Type.slice_const_u8_sentinel_0;
@@ -650,14 +669,14 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm) !void {
.offset = offset,
.addend = @intCast(addend),
});
atom.size += @intCast(slice_ty.abiSize(mod));
atom.size += @intCast(slice_ty.abiSize(pt));
addend += len;

// as we updated the error name table, we now store the actual name within the names atom
try names_atom.code.ensureUnusedCapacity(gpa, len);
names_atom.code.appendSliceAssumeCapacity(error_name_slice[0..len]);

log.debug("Populated error name: '{}'", .{error_name.fmt(&mod.intern_pool)});
log.debug("Populated error name: '{}'", .{error_name.fmt(&pt.zcu.intern_pool)});
}
names_atom.size = addend;
zig_object.error_names_atom = names_atom_index;
@@ -858,10 +877,11 @@ pub fn deleteExport(
pub fn updateExports(
zig_object: *ZigObject,
wasm_file: *Wasm,
mod: *Module,
exported: Module.Exported,
pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
) !void {
const mod = pt.zcu;
const decl_index = switch (exported) {
.decl_index => |i| i,
.value => |val| {
@@ -880,7 +900,7 @@ pub fn updateExports(
for (export_indices) |export_idx| {
const exp = mod.all_exports.items[export_idx];
if (exp.opts.section.toSlice(&mod.intern_pool)) |section| {
try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create(
try mod.failed_exports.putNoClobber(gpa, export_idx, try Zcu.ErrorMsg.create(
gpa,
decl.navSrcLoc(mod),
"Unimplemented: ExportOptions.section '{s}'",
@@ -913,7 +933,7 @@ pub fn updateExports(
},
.strong => {}, // symbols are strong by default
.link_once => {
try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create(
try mod.failed_exports.putNoClobber(gpa, export_idx, try Zcu.ErrorMsg.create(
gpa,
decl.navSrcLoc(mod),
"Unimplemented: LinkOnce",
@@ -1096,7 +1116,7 @@ pub fn createDebugSectionForIndex(zig_object: *ZigObject, wasm_file: *Wasm, inde
return atom_index;
}

pub fn updateDeclLineNumber(zig_object: *ZigObject, mod: *Module, decl_index: InternPool.DeclIndex) !void {
pub fn updateDeclLineNumber(zig_object: *ZigObject, mod: *Zcu, decl_index: InternPool.DeclIndex) !void {
if (zig_object.dwarf) |*dw| {
const decl = mod.declPtr(decl_index);
const decl_name = try decl.fullyQualifiedName(mod);
@@ -1228,8 +1248,8 @@ fn appendFunction(zig_object: *ZigObject, gpa: std.mem.Allocator, func: std.wasm
return index;
}

pub fn flushModule(zig_object: *ZigObject, wasm_file: *Wasm) !void {
try zig_object.populateErrorNameTable(wasm_file);
pub fn flushModule(zig_object: *ZigObject, wasm_file: *Wasm, tid: Zcu.PerThread.Id) !void {
try zig_object.populateErrorNameTable(wasm_file, tid);
try zig_object.setupErrorsLen(wasm_file);
}

@@ -1248,8 +1268,6 @@ const File = @import("file.zig").File;
const InternPool = @import("../../InternPool.zig");
const Liveness = @import("../../Liveness.zig");
const Zcu = @import("../../Zcu.zig");
/// Deprecated.
const Module = Zcu;
const StringTable = @import("../StringTable.zig");
const Symbol = @import("Symbol.zig");
const Type = @import("../../Type.zig");
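Taken together, the ZigObject hunks above apply one mechanical rewrite: every linker callback that used to recover the module from global compilation state (`wasm_file.base.comp.module.?`) now receives a `Zcu.PerThread` handle, or a `Zcu.PerThread.Id` from which it constructs one, and reads the compilation unit through `pt.zcu`. A minimal, self-contained Zig sketch of that shape, using stand-in types rather than the compiler's real definitions:

const std = @import("std");

const Zcu = struct {
    name: []const u8,

    // Stand-in for the real handle: a module pointer paired with a thread id.
    pub const PerThread = struct {
        zcu: *Zcu,
        tid: usize,
    };
};

fn updateThing(pt: Zcu.PerThread) void {
    // Everything that previously did `comp.module.?` now goes through the handle.
    std.debug.print("thread {d} updating {s}\n", .{ pt.tid, pt.zcu.name });
}

pub fn main() void {
    var zcu: Zcu = .{ .name = "example" };
    updateThing(.{ .zcu = &zcu, .tid = 0 });
}

The real handle pairs the module with the calling thread's id, which is why `populateErrorNameTable` above can build one from `wasm_file.base.comp.module.?` plus the `tid` it was handed.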
@@ -172,7 +172,7 @@ pub fn main() anyerror!void {
}
// We would prefer to use raw libc allocator here, but cannot
// use it if it won't support the alignment we need.
if (@alignOf(std.c.max_align_t) < @alignOf(i128)) {
if (@alignOf(std.c.max_align_t) < @max(@alignOf(i128), std.atomic.cache_line)) {
break :gpa std.heap.c_allocator;
}
break :gpa std.heap.raw_c_allocator;
@@ -3092,7 +3092,7 @@ fn buildOutputType(
defer emit_implib_resolved.deinit();

var thread_pool: ThreadPool = undefined;
try thread_pool.init(.{ .allocator = gpa });
try thread_pool.init(.{ .allocator = gpa, .track_ids = true });
defer thread_pool.deinit();

var cleanup_local_cache_dir: ?fs.Dir = null;
@@ -4895,7 +4895,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
child_argv.items[argv_index_cache_dir] = local_cache_directory.path orelse cwd_path;

var thread_pool: ThreadPool = undefined;
try thread_pool.init(.{ .allocator = gpa });
try thread_pool.init(.{ .allocator = gpa, .track_ids = true });
defer thread_pool.deinit();

// Dummy http client that is not actually used when only_core_functionality is enabled.
@@ -5329,7 +5329,7 @@ fn jitCmd(
defer global_cache_directory.handle.close();

var thread_pool: ThreadPool = undefined;
try thread_pool.init(.{ .allocator = gpa });
try thread_pool.init(.{ .allocator = gpa, .track_ids = true });
defer thread_pool.deinit();

var child_argv: std.ArrayListUnmanaged([]const u8) = .{};
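Each of the three CLI entry points now opts its thread pool into id tracking at init time. Assuming `ThreadPool` in main.zig refers to `std.Thread.Pool` as extended by this commit, the call sites reduce to the following sketch:

const std = @import("std");

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var thread_pool: std.Thread.Pool = undefined;
    // With id tracking enabled, the pool assigns each thread a dense index;
    // that index is what later selects the per-thread compiler state.
    try thread_pool.init(.{ .allocator = gpa, .track_ids = true });
    defer thread_pool.deinit();
}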
@@ -54,46 +54,44 @@ pub const MutableValue = union(enum) {
payload: *MutableValue,
};

pub fn intern(mv: MutableValue, zcu: *Zcu, arena: Allocator) Allocator.Error!Value {
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
pub fn intern(mv: MutableValue, pt: Zcu.PerThread, arena: Allocator) Allocator.Error!Value {
return Value.fromInterned(switch (mv) {
.interned => |ip_index| ip_index,
.eu_payload => |sv| try ip.get(gpa, .{ .error_union = .{
.eu_payload => |sv| try pt.intern(.{ .error_union = .{
.ty = sv.ty,
.val = .{ .payload = (try sv.child.intern(zcu, arena)).toIntern() },
.val = .{ .payload = (try sv.child.intern(pt, arena)).toIntern() },
} }),
.opt_payload => |sv| try ip.get(gpa, .{ .opt = .{
.opt_payload => |sv| try pt.intern(.{ .opt = .{
.ty = sv.ty,
.val = (try sv.child.intern(zcu, arena)).toIntern(),
.val = (try sv.child.intern(pt, arena)).toIntern(),
} }),
.repeated => |sv| try ip.get(gpa, .{ .aggregate = .{
.repeated => |sv| try pt.intern(.{ .aggregate = .{
.ty = sv.ty,
.storage = .{ .repeated_elem = (try sv.child.intern(zcu, arena)).toIntern() },
.storage = .{ .repeated_elem = (try sv.child.intern(pt, arena)).toIntern() },
} }),
.bytes => |b| try ip.get(gpa, .{ .aggregate = .{
.bytes => |b| try pt.intern(.{ .aggregate = .{
.ty = b.ty,
.storage = .{ .bytes = try ip.getOrPutString(gpa, b.data, .maybe_embedded_nulls) },
.storage = .{ .bytes = try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, b.data, .maybe_embedded_nulls) },
} }),
.aggregate => |a| {
const elems = try arena.alloc(InternPool.Index, a.elems.len);
for (a.elems, elems) |mut_elem, *interned_elem| {
interned_elem.* = (try mut_elem.intern(zcu, arena)).toIntern();
interned_elem.* = (try mut_elem.intern(pt, arena)).toIntern();
}
return Value.fromInterned(try ip.get(gpa, .{ .aggregate = .{
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = a.ty,
.storage = .{ .elems = elems },
} }));
},
.slice => |s| try ip.get(gpa, .{ .slice = .{
.slice => |s| try pt.intern(.{ .slice = .{
.ty = s.ty,
.ptr = (try s.ptr.intern(zcu, arena)).toIntern(),
.len = (try s.len.intern(zcu, arena)).toIntern(),
.ptr = (try s.ptr.intern(pt, arena)).toIntern(),
.len = (try s.len.intern(pt, arena)).toIntern(),
} }),
.un => |u| try ip.get(gpa, .{ .un = .{
.un => |u| try pt.intern(.{ .un = .{
.ty = u.ty,
.tag = u.tag,
.val = (try u.payload.intern(zcu, arena)).toIntern(),
.val = (try u.payload.intern(pt, arena)).toIntern(),
} }),
});
}
@@ -108,13 +106,13 @@ pub const MutableValue = union(enum) {
/// If `!allow_repeated`, the `repeated` representation will not be used.
pub fn unintern(
mv: *MutableValue,
zcu: *Zcu,
pt: Zcu.PerThread,
arena: Allocator,
allow_bytes: bool,
allow_repeated: bool,
) Allocator.Error!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
switch (mv.*) {
.interned => |ip_index| switch (ip.indexToKey(ip_index)) {
.opt => |opt| if (opt.val != .none) {
@@ -170,7 +168,7 @@ pub const MutableValue = union(enum) {
} else {
const mut_elems = try arena.alloc(MutableValue, len);
for (bytes.toSlice(len, ip), mut_elems) |b, *mut_elem| {
mut_elem.* = .{ .interned = try ip.get(gpa, .{ .int = .{
mut_elem.* = .{ .interned = try pt.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = b },
} }) };
@@ -221,12 +219,12 @@ pub const MutableValue = union(enum) {
switch (type_tag) {
.Array, .Vector => {
const elem_ty = ip.childType(ty_ip);
const undef_elem = try ip.get(gpa, .{ .undef = elem_ty });
const undef_elem = try pt.intern(.{ .undef = elem_ty });
@memset(elems[0..@intCast(len_no_sent)], .{ .interned = undef_elem });
},
.Struct => for (elems[0..@intCast(len_no_sent)], 0..) |*mut_elem, i| {
const field_ty = ty.structFieldType(i, zcu).toIntern();
mut_elem.* = .{ .interned = try ip.get(gpa, .{ .undef = field_ty }) };
mut_elem.* = .{ .interned = try pt.intern(.{ .undef = field_ty }) };
},
else => unreachable,
}
@@ -238,7 +236,7 @@ pub const MutableValue = union(enum) {
} else {
const repeated_val = try arena.create(MutableValue);
repeated_val.* = .{
.interned = try ip.get(gpa, .{ .undef = ip.childType(ty_ip) }),
.interned = try pt.intern(.{ .undef = ip.childType(ty_ip) }),
};
mv.* = .{ .repeated = .{
.ty = ty_ip,
@@ -248,11 +246,8 @@ pub const MutableValue = union(enum) {
},
.Union => {
const payload = try arena.create(MutableValue);
const backing_ty = try Type.fromInterned(ty_ip).unionBackingType(zcu);
payload.* = .{ .interned = try ip.get(
gpa,
.{ .undef = backing_ty.toIntern() },
) };
const backing_ty = try Type.fromInterned(ty_ip).unionBackingType(pt);
payload.* = .{ .interned = try pt.intern(.{ .undef = backing_ty.toIntern() }) };
mv.* = .{ .un = .{
.ty = ty_ip,
.tag = .none,
@@ -264,8 +259,8 @@ pub const MutableValue = union(enum) {
if (ptr_ty.flags.size != .Slice) return;
const ptr = try arena.create(MutableValue);
const len = try arena.create(MutableValue);
ptr.* = .{ .interned = try ip.get(gpa, .{ .undef = ip.slicePtrType(ty_ip) }) };
len.* = .{ .interned = try ip.get(gpa, .{ .undef = .usize_type }) };
ptr.* = .{ .interned = try pt.intern(.{ .undef = ip.slicePtrType(ty_ip) }) };
len.* = .{ .interned = try pt.intern(.{ .undef = .usize_type }) };
mv.* = .{ .slice = .{
.ty = ty_ip,
.ptr = ptr,
@@ -279,7 +274,7 @@ pub const MutableValue = union(enum) {
.bytes => |bytes| if (!allow_bytes) {
const elems = try arena.alloc(MutableValue, bytes.data.len);
for (bytes.data, elems) |byte, *interned_byte| {
interned_byte.* = .{ .interned = try ip.get(gpa, .{ .int = .{
interned_byte.* = .{ .interned = try pt.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = byte },
} }) };
@@ -298,22 +293,22 @@ pub const MutableValue = union(enum) {
/// The returned pointer is valid until the representation of `mv` changes.
pub fn elem(
mv: *MutableValue,
zcu: *Zcu,
pt: Zcu.PerThread,
arena: Allocator,
field_idx: usize,
) Allocator.Error!*MutableValue {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
// Convert to the `aggregate` representation.
switch (mv.*) {
.eu_payload, .opt_payload, .un => unreachable,
.interned => {
try mv.unintern(zcu, arena, false, false);
try mv.unintern(pt, arena, false, false);
},
.bytes => |bytes| {
const elems = try arena.alloc(MutableValue, bytes.data.len);
for (bytes.data, elems) |byte, *interned_byte| {
interned_byte.* = .{ .interned = try ip.get(gpa, .{ .int = .{
interned_byte.* = .{ .interned = try pt.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = byte },
} }) };
@@ -351,14 +346,15 @@ pub const MutableValue = union(enum) {
/// For slices, uses `Value.slice_ptr_index` and `Value.slice_len_index`.
pub fn setElem(
mv: *MutableValue,
zcu: *Zcu,
pt: Zcu.PerThread,
arena: Allocator,
field_idx: usize,
field_val: MutableValue,
) Allocator.Error!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const is_trivial_int = field_val.isTrivialInt(zcu);
try mv.unintern(zcu, arena, is_trivial_int, true);
try mv.unintern(pt, arena, is_trivial_int, true);
switch (mv.*) {
.interned,
.eu_payload,
@@ -373,7 +369,7 @@ pub const MutableValue = union(enum) {
.bytes => |b| {
assert(is_trivial_int);
assert(field_val.typeOf(zcu).toIntern() == .u8_type);
b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(pt));
},
.repeated => |r| {
if (field_val.eqlTrivial(r.child.*)) return;
@@ -386,9 +382,9 @@ pub const MutableValue = union(enum) {
{
// We can use the `bytes` representation.
const bytes = try arena.alloc(u8, @intCast(len_inc_sent));
const repeated_byte = Value.fromInterned(r.child.interned).toUnsignedInt(zcu);
const repeated_byte = Value.fromInterned(r.child.interned).toUnsignedInt(pt);
@memset(bytes, @intCast(repeated_byte));
bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(pt));
mv.* = .{ .bytes = .{
.ty = r.ty,
.data = bytes,
@@ -435,7 +431,7 @@ pub const MutableValue = union(enum) {
} else {
const bytes = try arena.alloc(u8, a.elems.len);
for (a.elems, bytes) |elem_val, *b| {
b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(zcu));
b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(pt));
}
mv.* = .{ .bytes = .{
.ty = a.ty,
@@ -451,7 +447,7 @@ pub const MutableValue = union(enum) {
/// For slices, uses `Value.slice_ptr_index` and `Value.slice_len_index`.
pub fn getElem(
mv: MutableValue,
zcu: *Zcu,
pt: Zcu.PerThread,
field_idx: usize,
) Allocator.Error!MutableValue {
return switch (mv) {
@@ -459,16 +455,16 @@ pub const MutableValue = union(enum) {
.opt_payload,
=> unreachable,
.interned => |ip_index| {
const ty = Type.fromInterned(zcu.intern_pool.typeOf(ip_index));
switch (ty.zigTypeTag(zcu)) {
.Array, .Vector => return .{ .interned = (try Value.fromInterned(ip_index).elemValue(zcu, field_idx)).toIntern() },
.Struct, .Union => return .{ .interned = (try Value.fromInterned(ip_index).fieldValue(zcu, field_idx)).toIntern() },
const ty = Type.fromInterned(pt.zcu.intern_pool.typeOf(ip_index));
switch (ty.zigTypeTag(pt.zcu)) {
.Array, .Vector => return .{ .interned = (try Value.fromInterned(ip_index).elemValue(pt, field_idx)).toIntern() },
.Struct, .Union => return .{ .interned = (try Value.fromInterned(ip_index).fieldValue(pt, field_idx)).toIntern() },
.Pointer => {
assert(ty.isSlice(zcu));
assert(ty.isSlice(pt.zcu));
return switch (field_idx) {
Value.slice_ptr_index => .{ .interned = Value.fromInterned(ip_index).slicePtr(zcu).toIntern() },
Value.slice_len_index => .{ .interned = switch (zcu.intern_pool.indexToKey(ip_index)) {
.undef => try zcu.intern(.{ .undef = .usize_type }),
Value.slice_ptr_index => .{ .interned = Value.fromInterned(ip_index).slicePtr(pt.zcu).toIntern() },
Value.slice_len_index => .{ .interned = switch (pt.zcu.intern_pool.indexToKey(ip_index)) {
.undef => try pt.intern(.{ .undef = .usize_type }),
.slice => |s| s.len,
else => unreachable,
} },
@@ -487,7 +483,7 @@ pub const MutableValue = union(enum) {
Value.slice_len_index => s.len.*,
else => unreachable,
},
.bytes => |b| .{ .interned = try zcu.intern(.{ .int = .{
.bytes => |b| .{ .interned = try pt.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = b.data[field_idx] },
} }) },
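The `MutableValue` hunks are again a single pattern: every `ip.get(gpa, key)` becomes `pt.intern(key)`, so interning flows through the per-thread handle instead of passing the intern pool and allocator separately at each call site. A toy model of that wrapper relationship, with stand-in types and assumed semantics:

const std = @import("std");

const Index = u32;

const InternPool = struct {
    next: Index = 0,

    // Stand-in for the real pool's `get`: the real one allocates and
    // deduplicates; this one just hands out sequential indices.
    fn get(ip: *InternPool, gpa: std.mem.Allocator, key: u64) !Index {
        _ = gpa;
        _ = key;
        defer ip.next += 1;
        return ip.next;
    }
};

const PerThread = struct {
    ip: *InternPool,
    gpa: std.mem.Allocator,

    // Callers no longer thread the allocator through every call site.
    fn intern(pt: PerThread, key: u64) !Index {
        return pt.ip.get(pt.gpa, key);
    }
};

pub fn main() !void {
    var ip: InternPool = .{};
    const pt: PerThread = .{ .ip = &ip, .gpa = std.heap.page_allocator };
    std.debug.print("interned at {d}\n", .{try pt.intern(42)});
}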
@@ -9,7 +9,7 @@ const Air = @import("Air.zig");
const Liveness = @import("Liveness.zig");
const InternPool = @import("InternPool.zig");

pub fn write(stream: anytype, module: *Zcu, air: Air, liveness: ?Liveness) void {
pub fn write(stream: anytype, pt: Zcu.PerThread, air: Air, liveness: ?Liveness) void {
const instruction_bytes = air.instructions.len *
// Here we don't use @sizeOf(Air.Inst.Data) because it would include
// the debug safety tag but we want to measure release size.
@@ -42,8 +42,8 @@ pub fn write(stream: anytype, module: *Zcu, air: Air, liveness: ?Liveness) void
// zig fmt: on

var writer: Writer = .{
.module = module,
.gpa = module.gpa,
.pt = pt,
.gpa = pt.zcu.gpa,
.air = air,
.liveness = liveness,
.indent = 2,
@@ -55,13 +55,13 @@ pub fn write(stream: anytype, module: *Zcu, air: Air, liveness: ?Liveness) void
pub fn writeInst(
stream: anytype,
inst: Air.Inst.Index,
module: *Zcu,
pt: Zcu.PerThread,
air: Air,
liveness: ?Liveness,
) void {
var writer: Writer = .{
.module = module,
.gpa = module.gpa,
.pt = pt,
.gpa = pt.zcu.gpa,
.air = air,
.liveness = liveness,
.indent = 2,
@@ -70,16 +70,16 @@ pub fn writeInst(
writer.writeInst(stream, inst) catch return;
}

pub fn dump(module: *Zcu, air: Air, liveness: ?Liveness) void {
write(std.io.getStdErr().writer(), module, air, liveness);
pub fn dump(pt: Zcu.PerThread, air: Air, liveness: ?Liveness) void {
write(std.io.getStdErr().writer(), pt, air, liveness);
}

pub fn dumpInst(inst: Air.Inst.Index, module: *Zcu, air: Air, liveness: ?Liveness) void {
writeInst(std.io.getStdErr().writer(), inst, module, air, liveness);
pub fn dumpInst(inst: Air.Inst.Index, pt: Zcu.PerThread, air: Air, liveness: ?Liveness) void {
writeInst(std.io.getStdErr().writer(), inst, pt, air, liveness);
}

const Writer = struct {
module: *Zcu,
pt: Zcu.PerThread,
gpa: Allocator,
air: Air,
liveness: ?Liveness,
@@ -345,7 +345,7 @@ const Writer = struct {
}

fn writeType(w: *Writer, s: anytype, ty: Type) !void {
return ty.print(s, w.module);
return ty.print(s, w.pt);
}

fn writeTy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
@@ -424,7 +424,7 @@ const Writer = struct {
}

fn writeAggregateInit(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const mod = w.module;
const mod = w.pt.zcu;
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const vector_ty = ty_pl.ty.toType();
const len = @as(usize, @intCast(vector_ty.arrayLen(mod)));
@@ -504,7 +504,7 @@ const Writer = struct {
}

fn writeSelect(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const mod = w.module;
const mod = w.pt.zcu;
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = w.air.extraData(Air.Bin, pl_op.payload).data;

@@ -947,11 +947,11 @@ const Writer = struct {
if (@intFromEnum(operand) < InternPool.static_len) {
return s.print("@{}", .{operand});
} else if (operand.toInterned()) |ip_index| {
const mod = w.module;
const ty = Type.fromInterned(mod.intern_pool.indexToKey(ip_index).typeOf());
const pt = w.pt;
const ty = Type.fromInterned(pt.zcu.intern_pool.indexToKey(ip_index).typeOf());
try s.print("<{}, {}>", .{
ty.fmt(mod),
Value.fromInterned(ip_index).fmtValue(mod, null),
ty.fmt(pt),
Value.fromInterned(ip_index).fmtValue(pt, null),
});
} else {
return w.writeInstIndex(s, operand.toIndex().?, dies);
@@ -970,7 +970,7 @@ const Writer = struct {
}

fn typeOfIndex(w: *Writer, inst: Air.Inst.Index) Type {
const mod = w.module;
const mod = w.pt.zcu;
return w.air.typeOfIndex(inst, &mod.intern_pool);
}
};
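In the AIR printer, the `Writer` now stores the per-thread handle instead of a bare `*Zcu` and derives its allocator from it, so every `w.module` read becomes `w.pt.zcu`. A sketch with stand-in types:

const std = @import("std");

const Zcu = struct {
    gpa: std.mem.Allocator,

    pub const PerThread = struct { zcu: *Zcu };
};

const Writer = struct {
    pt: Zcu.PerThread, // was: module: *Zcu
    gpa: std.mem.Allocator,
    indent: usize,
};

pub fn main() void {
    var zcu: Zcu = .{ .gpa = std.heap.page_allocator };
    const pt: Zcu.PerThread = .{ .zcu = &zcu };
    // The allocator is now derived through the handle.
    const w: Writer = .{ .pt = pt, .gpa = pt.zcu.gpa, .indent = 2 };
    std.debug.print("indent={d}\n", .{w.indent});
}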
@@ -5,8 +5,6 @@ const std = @import("std");
const Type = @import("Type.zig");
const Value = @import("Value.zig");
const Zcu = @import("Zcu.zig");
/// Deprecated.
const Module = Zcu;
const Sema = @import("Sema.zig");
const InternPool = @import("InternPool.zig");
const Allocator = std.mem.Allocator;
@@ -17,7 +15,7 @@ const max_string_len = 256;

pub const FormatContext = struct {
val: Value,
mod: *Module,
pt: Zcu.PerThread,
opt_sema: ?*Sema,
depth: u8,
};
@@ -30,7 +28,7 @@ pub fn format(
) !void {
_ = options;
comptime std.debug.assert(fmt.len == 0);
return print(ctx.val, writer, ctx.depth, ctx.mod, ctx.opt_sema) catch |err| switch (err) {
return print(ctx.val, writer, ctx.depth, ctx.pt, ctx.opt_sema) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"), // We're not allowed to return this from a format function
error.ComptimeBreak, error.ComptimeReturn => unreachable,
error.AnalysisFail => unreachable, // TODO: re-evaluate when we use `opt_sema` more fully
@@ -42,10 +40,11 @@ pub fn print(
val: Value,
writer: anytype,
level: u8,
mod: *Module,
pt: Zcu.PerThread,
/// If this `Sema` is provided, we will recurse through pointers where possible to provide friendly output.
opt_sema: ?*Sema,
) (@TypeOf(writer).Error || Module.CompileError)!void {
) (@TypeOf(writer).Error || Zcu.CompileError)!void {
const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (ip.indexToKey(val.toIntern())) {
.int_type,
@@ -64,7 +63,7 @@ pub fn print(
.func_type,
.error_set_type,
.inferred_error_set_type,
=> try Type.print(val.toType(), writer, mod),
=> try Type.print(val.toType(), writer, pt),
.undef => try writer.writeAll("undefined"),
.simple_value => |simple_value| switch (simple_value) {
.void => try writer.writeAll("{}"),
@@ -82,13 +81,13 @@ pub fn print(
.int => |int| switch (int.storage) {
inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}),
.lazy_align => |ty| if (opt_sema != null) {
const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .sema)).scalar;
const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(pt, .sema)).scalar;
try writer.print("{}", .{a.toByteUnits() orelse 0});
} else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(mod)}),
} else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(pt)}),
.lazy_size => |ty| if (opt_sema != null) {
const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .sema)).scalar;
const s = (try Type.fromInterned(ty).abiSizeAdvanced(pt, .sema)).scalar;
try writer.print("{}", .{s});
} else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(mod)}),
} else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(pt)}),
},
.err => |err| try writer.print("error.{}", .{
err.name.fmt(ip),
@@ -97,7 +96,7 @@ pub fn print(
.err_name => |err_name| try writer.print("error.{}", .{
err_name.fmt(ip),
}),
.payload => |payload| try print(Value.fromInterned(payload), writer, level, mod, opt_sema),
.payload => |payload| try print(Value.fromInterned(payload), writer, level, pt, opt_sema),
},
.enum_literal => |enum_literal| try writer.print(".{}", .{
enum_literal.fmt(ip),
@@ -111,7 +110,7 @@ pub fn print(
return writer.writeAll("@enumFromInt(...)");
}
try writer.writeAll("@enumFromInt(");
try print(Value.fromInterned(enum_tag.int), writer, level - 1, mod, opt_sema);
try print(Value.fromInterned(enum_tag.int), writer, level - 1, pt, opt_sema);
try writer.writeAll(")");
},
.empty_enum_value => try writer.writeAll("(empty enum value)"),
@@ -128,12 +127,12 @@ pub fn print(
// TODO: eventually we want to load the slice as an array with `opt_sema`, but that's
// currently not possible without e.g. triggering compile errors.
}
try printPtr(Value.fromInterned(slice.ptr), writer, level, mod, opt_sema);
try printPtr(Value.fromInterned(slice.ptr), writer, level, pt, opt_sema);
try writer.writeAll("[0..");
if (level == 0) {
try writer.writeAll("(...)");
} else {
try print(Value.fromInterned(slice.len), writer, level - 1, mod, opt_sema);
try print(Value.fromInterned(slice.len), writer, level - 1, pt, opt_sema);
}
try writer.writeAll("]");
},
@@ -147,28 +146,28 @@ pub fn print(
// TODO: eventually we want to load the pointer with `opt_sema`, but that's
// currently not possible without e.g. triggering compile errors.
}
try printPtr(val, writer, level, mod, opt_sema);
try printPtr(val, writer, level, pt, opt_sema);
},
.opt => |opt| switch (opt.val) {
.none => try writer.writeAll("null"),
else => |payload| try print(Value.fromInterned(payload), writer, level, mod, opt_sema),
else => |payload| try print(Value.fromInterned(payload), writer, level, pt, opt_sema),
},
.aggregate => |aggregate| try printAggregate(val, aggregate, false, writer, level, mod, opt_sema),
.aggregate => |aggregate| try printAggregate(val, aggregate, false, writer, level, pt, opt_sema),
.un => |un| {
if (level == 0) {
try writer.writeAll(".{ ... }");
return;
}
if (un.tag == .none) {
const backing_ty = try val.typeOf(mod).unionBackingType(mod);
try writer.print("@bitCast(@as({}, ", .{backing_ty.fmt(mod)});
try print(Value.fromInterned(un.val), writer, level - 1, mod, opt_sema);
const backing_ty = try val.typeOf(mod).unionBackingType(pt);
try writer.print("@bitCast(@as({}, ", .{backing_ty.fmt(pt)});
try print(Value.fromInterned(un.val), writer, level - 1, pt, opt_sema);
try writer.writeAll("))");
} else {
try writer.writeAll(".{ ");
try print(Value.fromInterned(un.tag), writer, level - 1, mod, opt_sema);
try print(Value.fromInterned(un.tag), writer, level - 1, pt, opt_sema);
try writer.writeAll(" = ");
try print(Value.fromInterned(un.val), writer, level - 1, mod, opt_sema);
try print(Value.fromInterned(un.val), writer, level - 1, pt, opt_sema);
try writer.writeAll(" }");
}
},
@@ -182,13 +181,14 @@ fn printAggregate(
is_ref: bool,
writer: anytype,
level: u8,
zcu: *Zcu,
pt: Zcu.PerThread,
opt_sema: ?*Sema,
) (@TypeOf(writer).Error || Module.CompileError)!void {
) (@TypeOf(writer).Error || Zcu.CompileError)!void {
if (level == 0) {
if (is_ref) try writer.writeByte('&');
return writer.writeAll(".{ ... }");
}
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ty = Type.fromInterned(aggregate.ty);
switch (ty.zigTypeTag(zcu)) {
@@ -203,7 +203,7 @@ fn printAggregate(
if (i != 0) try writer.writeAll(", ");
const field_name = ty.structFieldName(@intCast(i), zcu).unwrap().?;
try writer.print(".{i} = ", .{field_name.fmt(ip)});
try print(try val.fieldValue(zcu, i), writer, level - 1, zcu, opt_sema);
try print(try val.fieldValue(pt, i), writer, level - 1, pt, opt_sema);
}
try writer.writeAll(" }");
return;
@@ -230,7 +230,7 @@ fn printAggregate(
if (ty.childType(zcu).toIntern() != .u8_type) break :one_byte_str;
const elem_val = Value.fromInterned(aggregate.storage.values()[0]);
if (elem_val.isUndef(zcu)) break :one_byte_str;
const byte = elem_val.toUnsignedInt(zcu);
const byte = elem_val.toUnsignedInt(pt);
try writer.print("\"{}\"", .{std.zig.fmtEscapes(&.{@intCast(byte)})});
if (!is_ref) try writer.writeAll(".*");
return;
@@ -253,7 +253,7 @@ fn printAggregate(
const max_len = @min(len, max_aggregate_items);
for (0..max_len) |i| {
if (i != 0) try writer.writeAll(", ");
try print(try val.fieldValue(zcu, i), writer, level - 1, zcu, opt_sema);
try print(try val.fieldValue(pt, i), writer, level - 1, pt, opt_sema);
}
if (len > max_aggregate_items) {
try writer.writeAll(", ...");
@@ -261,8 +261,8 @@ fn printAggregate(
return writer.writeAll(" }");
}

fn printPtr(ptr_val: Value, writer: anytype, level: u8, zcu: *Zcu, opt_sema: ?*Sema) (@TypeOf(writer).Error || Module.CompileError)!void {
const ptr = switch (zcu.intern_pool.indexToKey(ptr_val.toIntern())) {
fn printPtr(ptr_val: Value, writer: anytype, level: u8, pt: Zcu.PerThread, opt_sema: ?*Sema) (@TypeOf(writer).Error || Zcu.CompileError)!void {
const ptr = switch (pt.zcu.intern_pool.indexToKey(ptr_val.toIntern())) {
.undef => return writer.writeAll("undefined"),
.ptr => |ptr| ptr,
else => unreachable,
@@ -270,32 +270,33 @@ fn printPtr(ptr_val: Value, writer: anytype, level: u8, zcu: *Zcu, opt_sema: ?*S

if (ptr.base_addr == .anon_decl) {
// If the value is an aggregate, we can potentially print it more nicely.
switch (zcu.intern_pool.indexToKey(ptr.base_addr.anon_decl.val)) {
switch (pt.zcu.intern_pool.indexToKey(ptr.base_addr.anon_decl.val)) {
.aggregate => |agg| return printAggregate(
Value.fromInterned(ptr.base_addr.anon_decl.val),
agg,
true,
writer,
level,
zcu,
pt,
opt_sema,
),
else => {},
}
}

var arena = std.heap.ArenaAllocator.init(zcu.gpa);
var arena = std.heap.ArenaAllocator.init(pt.zcu.gpa);
defer arena.deinit();
const derivation = try ptr_val.pointerDerivationAdvanced(arena.allocator(), zcu, opt_sema);
try printPtrDerivation(derivation, writer, level, zcu, opt_sema);
const derivation = try ptr_val.pointerDerivationAdvanced(arena.allocator(), pt, opt_sema);
try printPtrDerivation(derivation, writer, level, pt, opt_sema);
}

/// Print `derivation` as an lvalue, i.e. such that writing `&` before this gives the pointer value.
fn printPtrDerivation(derivation: Value.PointerDeriveStep, writer: anytype, level: u8, zcu: *Zcu, opt_sema: ?*Sema) (@TypeOf(writer).Error || Module.CompileError)!void {
fn printPtrDerivation(derivation: Value.PointerDeriveStep, writer: anytype, level: u8, pt: Zcu.PerThread, opt_sema: ?*Sema) (@TypeOf(writer).Error || Zcu.CompileError)!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (derivation) {
.int => |int| try writer.print("@as({}, @ptrFromInt({x})).*", .{
int.ptr_ty.fmt(zcu),
int.ptr_ty.fmt(pt),
int.addr,
}),
.decl_ptr => |decl| {
@@ -303,33 +304,33 @@ fn printPtrDerivation(derivation: Value.PointerDeriveStep, writer: anytype, leve
},
.anon_decl_ptr => |anon| {
const ty = Value.fromInterned(anon.val).typeOf(zcu);
try writer.print("@as({}, ", .{ty.fmt(zcu)});
try print(Value.fromInterned(anon.val), writer, level - 1, zcu, opt_sema);
try writer.print("@as({}, ", .{ty.fmt(pt)});
try print(Value.fromInterned(anon.val), writer, level - 1, pt, opt_sema);
try writer.writeByte(')');
},
.comptime_alloc_ptr => |info| {
try writer.print("@as({}, ", .{info.val.typeOf(zcu).fmt(zcu)});
try print(info.val, writer, level - 1, zcu, opt_sema);
try writer.print("@as({}, ", .{info.val.typeOf(zcu).fmt(pt)});
try print(info.val, writer, level - 1, pt, opt_sema);
try writer.writeByte(')');
},
.comptime_field_ptr => |val| {
const ty = val.typeOf(zcu);
try writer.print("@as({}, ", .{ty.fmt(zcu)});
try print(val, writer, level - 1, zcu, opt_sema);
try writer.print("@as({}, ", .{ty.fmt(pt)});
try print(val, writer, level - 1, pt, opt_sema);
try writer.writeByte(')');
},
.eu_payload_ptr => |info| {
try writer.writeByte('(');
try printPtrDerivation(info.parent.*, writer, level, zcu, opt_sema);
try printPtrDerivation(info.parent.*, writer, level, pt, opt_sema);
try writer.writeAll(" catch unreachable)");
},
.opt_payload_ptr => |info| {
try printPtrDerivation(info.parent.*, writer, level, zcu, opt_sema);
try printPtrDerivation(info.parent.*, writer, level, pt, opt_sema);
try writer.writeAll(".?");
},
.field_ptr => |field| {
try printPtrDerivation(field.parent.*, writer, level, zcu, opt_sema);
const agg_ty = (try field.parent.ptrType(zcu)).childType(zcu);
try printPtrDerivation(field.parent.*, writer, level, pt, opt_sema);
const agg_ty = (try field.parent.ptrType(pt)).childType(zcu);
switch (agg_ty.zigTypeTag(zcu)) {
.Struct => if (agg_ty.structFieldName(field.field_idx, zcu).unwrap()) |field_name| {
try writer.print(".{i}", .{field_name.fmt(ip)});
@@ -350,16 +351,16 @@ fn printPtrDerivation(derivation: Value.PointerDeriveStep, writer: anytype, leve
}
},
.elem_ptr => |elem| {
try printPtrDerivation(elem.parent.*, writer, level, zcu, opt_sema);
try printPtrDerivation(elem.parent.*, writer, level, pt, opt_sema);
try writer.print("[{d}]", .{elem.elem_idx});
},
.offset_and_cast => |oac| if (oac.byte_offset == 0) {
try writer.print("@as({}, @ptrCast(", .{oac.new_ptr_ty.fmt(zcu)});
try printPtrDerivation(oac.parent.*, writer, level, zcu, opt_sema);
try writer.print("@as({}, @ptrCast(", .{oac.new_ptr_ty.fmt(pt)});
try printPtrDerivation(oac.parent.*, writer, level, pt, opt_sema);
try writer.writeAll("))");
} else {
try writer.print("@as({}, @ptrFromInt(@intFromPtr(", .{oac.new_ptr_ty.fmt(zcu)});
try printPtrDerivation(oac.parent.*, writer, level, zcu, opt_sema);
try writer.print("@as({}, @ptrFromInt(@intFromPtr(", .{oac.new_ptr_ty.fmt(pt)});
try printPtrDerivation(oac.parent.*, writer, level, pt, opt_sema);
try writer.print(") + {d}))", .{oac.byte_offset});
},
}
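Worth noting: the value printer's recursion discipline is untouched by this conversion. Each recursive `print` call still spends one unit of the `level` budget and falls back to `...` placeholders at zero; only the `mod`/`zcu` parameter is swapped for `pt`. A toy illustration of that depth-budget pattern, unrelated to the compiler's types:

const std = @import("std");

const Node = struct {
    label: u8,
    child: ?*const Node = null,
};

// Each recursive step spends one level; at zero we print an ellipsis.
fn print(node: *const Node, writer: anytype, level: u8) !void {
    if (level == 0) return writer.writeAll("(...)");
    try writer.print("{c}", .{node.label});
    if (node.child) |c| {
        try writer.writeAll(" -> ");
        try print(c, writer, level - 1);
    }
}

pub fn main() !void {
    const c = Node{ .label = 'c' };
    const b = Node{ .label = 'b', .child = &c };
    const a = Node{ .label = 'a', .child = &b };
    const stdout = std.io.getStdOut().writer();
    try print(&a, stdout, 2); // prints "a -> b -> (...)"
    try stdout.writeAll("\n");
}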
@@ -7,13 +7,12 @@ const InternPool = @import("InternPool.zig");

const Zir = std.zig.Zir;
const Zcu = @import("Zcu.zig");
const Module = Zcu;
const LazySrcLoc = Zcu.LazySrcLoc;

/// Write human-readable, debug formatted ZIR code to a file.
pub fn renderAsTextToFile(
gpa: Allocator,
scope_file: *Module.File,
scope_file: *Zcu.File,
fs_file: std.fs.File,
) !void {
var arena = std.heap.ArenaAllocator.init(gpa);
@@ -64,7 +63,7 @@ pub fn renderInstructionContext(
gpa: Allocator,
block: []const Zir.Inst.Index,
block_index: usize,
scope_file: *Module.File,
scope_file: *Zcu.File,
parent_decl_node: Ast.Node.Index,
indent: u32,
stream: anytype,
@@ -96,7 +95,7 @@ pub fn renderInstructionContext(
pub fn renderSingleInstruction(
gpa: Allocator,
inst: Zir.Inst.Index,
scope_file: *Module.File,
scope_file: *Zcu.File,
parent_decl_node: Ast.Node.Index,
indent: u32,
stream: anytype,
@@ -122,7 +121,7 @@ pub fn renderSingleInstruction(
const Writer = struct {
gpa: Allocator,
arena: Allocator,
file: *Module.File,
file: *Zcu.File,
code: Zir,
indent: u32,
parent_decl_node: Ast.Node.Index,
@@ -7,8 +7,6 @@ const Air = @import("Air.zig");
const StaticBitSet = std.bit_set.StaticBitSet;
const Type = @import("Type.zig");
const Zcu = @import("Zcu.zig");
/// Deprecated.
const Module = Zcu;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectEqualSlices = std.testing.expectEqualSlices;