Mirror of https://github.com/ziglang/zig.git, synced 2026-02-21 16:54:52 +00:00
Merge pull request #21208 from Rexicon226/pt-begone

Cleanup type resolution and finish `zcu` rename

Commit 492cc2ef8d
@@ -50,8 +50,7 @@ gpa: Allocator,
 /// be used for other things requiring the same lifetime as the `Compilation`.
 arena: Allocator,
 /// Not every Compilation compiles .zig code! For example you could do `zig build-exe foo.o`.
-/// TODO: rename to zcu: ?*Zcu
-module: ?*Zcu,
+zcu: ?*Zcu,
 /// Contains different state depending on whether the Compilation uses
 /// incremental or whole cache mode.
 cache_use: CacheUse,
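Note: the hunk above is the core of the rename: the old `module` field (and its TODO) becomes `zcu`, and it stays optional because a `Compilation` does not necessarily contain Zig code. The following standalone sketch uses hypothetical stand-in types (not the real compiler structs) only to illustrate the `if (comp.zcu) |zcu|` unwrap pattern that the rest of the diff applies at every call site.

```zig
const std = @import("std");

// Hypothetical stand-ins for the real compiler types, only to show the
// optional-field pattern that the rename touches at each call site.
const Zcu = struct { file_count: usize };
const Compilation = struct { zcu: ?*Zcu };

pub fn main() void {
    var zcu: Zcu = .{ .file_count = 0 };
    zcu.file_count = 3; // pretend three Zig files belong to this compilation

    const with_zig: Compilation = .{ .zcu = &zcu };
    const c_only: Compilation = .{ .zcu = null }; // e.g. `zig build-exe foo.o`

    for ([_]Compilation{ with_zig, c_only }) |comp| {
        // Same shape as the `if (comp.zcu) |zcu| { ... }` sites in the diff.
        if (comp.zcu) |z| {
            std.debug.print("have Zig code: {} file(s)\n", .{z.file_count});
        } else {
            std.debug.print("no Zig code in this compilation\n", .{});
        }
    }
}
```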
@@ -1474,7 +1473,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
 comp.* = .{
 .gpa = gpa,
 .arena = arena,
-.module = opt_zcu,
+.zcu = opt_zcu,
 .cache_use = undefined, // populated below
 .bin_file = null, // populated below
 .implib_emit = null, // handled below
@@ -1926,7 +1925,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
 
 pub fn destroy(comp: *Compilation) void {
 if (comp.bin_file) |lf| lf.destroy();
-if (comp.module) |zcu| zcu.deinit();
+if (comp.zcu) |zcu| zcu.deinit();
 comp.cache_use.deinit();
 for (comp.work_queues) |work_queue| work_queue.deinit();
 if (!InternPool.single_threaded) comp.codegen_work.queue.deinit();
@ -2198,7 +2197,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
|
||||
};
|
||||
}
|
||||
|
||||
if (comp.module) |zcu| {
|
||||
if (comp.zcu) |zcu| {
|
||||
const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = .main };
|
||||
|
||||
zcu.compile_log_text.shrinkAndFree(gpa, 0);
|
||||
@ -2268,7 +2267,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
|
||||
|
||||
try comp.performAllTheWork(main_progress_node);
|
||||
|
||||
if (comp.module) |zcu| {
|
||||
if (comp.zcu) |zcu| {
|
||||
const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = .main };
|
||||
|
||||
if (build_options.enable_debug_extensions and comp.verbose_intern_pool) {
|
||||
@ -2447,7 +2446,7 @@ fn flush(
|
||||
};
|
||||
}
|
||||
|
||||
if (comp.module) |zcu| {
|
||||
if (comp.zcu) |zcu| {
|
||||
try link.File.C.flushEmitH(zcu);
|
||||
|
||||
if (zcu.llvm_object) |llvm_object| {
|
||||
@ -2558,7 +2557,7 @@ fn addNonIncrementalStuffToCacheManifest(
|
||||
|
||||
comptime assert(link_hash_implementation_version == 14);
|
||||
|
||||
if (comp.module) |mod| {
|
||||
if (comp.zcu) |mod| {
|
||||
try addModuleTableToCacheHash(gpa, arena, &man.hash, mod.root_mod, mod.main_mod, .{ .files = man });
|
||||
|
||||
// Synchronize with other matching comments: ZigOnlyHashStuff
|
||||
@ -2692,7 +2691,7 @@ fn addNonIncrementalStuffToCacheManifest(
|
||||
}
|
||||
|
||||
fn emitOthers(comp: *Compilation) void {
|
||||
if (comp.config.output_mode != .Obj or comp.module != null or
|
||||
if (comp.config.output_mode != .Obj or comp.zcu != null or
|
||||
comp.c_object_table.count() == 0)
|
||||
{
|
||||
return;
|
||||
@ -2951,7 +2950,7 @@ pub fn saveState(comp: *Compilation) !void {
|
||||
var pt_headers = std.ArrayList(Header.PerThread).init(gpa);
|
||||
defer pt_headers.deinit();
|
||||
|
||||
if (comp.module) |zcu| {
|
||||
if (comp.zcu) |zcu| {
|
||||
const ip = &zcu.intern_pool;
|
||||
const header: Header = .{
|
||||
.intern_pool = .{
|
||||
@ -3092,7 +3091,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
|
||||
var all_references: ?std.AutoHashMapUnmanaged(InternPool.AnalUnit, ?Zcu.ResolvedReference) = null;
|
||||
defer if (all_references) |*a| a.deinit(gpa);
|
||||
|
||||
if (comp.module) |zcu| {
|
||||
if (comp.zcu) |zcu| {
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| {
|
||||
@ -3246,7 +3245,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
|
||||
}
|
||||
}
|
||||
|
||||
if (comp.module) |zcu| {
|
||||
if (comp.zcu) |zcu| {
|
||||
if (bundle.root_list.items.len == 0 and zcu.compile_log_sources.count() != 0) {
|
||||
const values = zcu.compile_log_sources.values();
|
||||
// First one will be the error; subsequent ones will be notes.
|
||||
@ -3269,7 +3268,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
|
||||
}
|
||||
}
|
||||
|
||||
if (comp.module) |zcu| {
|
||||
if (comp.zcu) |zcu| {
|
||||
if (comp.incremental and bundle.root_list.items.len == 0) {
|
||||
const should_have_error = for (zcu.transitive_failed_analysis.keys()) |failed_unit| {
|
||||
if (all_references == null) {
|
||||
@ -3283,7 +3282,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
|
||||
}
|
||||
}
|
||||
|
||||
const compile_log_text = if (comp.module) |m| m.compile_log_text.items else "";
|
||||
const compile_log_text = if (comp.zcu) |m| m.compile_log_text.items else "";
|
||||
return bundle.toOwnedBundle(compile_log_text);
|
||||
}
|
||||
|
||||
@ -3497,7 +3496,7 @@ pub fn performAllTheWork(
|
||||
comp: *Compilation,
|
||||
main_progress_node: std.Progress.Node,
|
||||
) JobError!void {
|
||||
defer if (comp.module) |mod| {
|
||||
defer if (comp.zcu) |mod| {
|
||||
mod.sema_prog_node.end();
|
||||
mod.sema_prog_node = std.Progress.Node.none;
|
||||
mod.codegen_prog_node.end();
|
||||
@@ -3543,8 +3542,7 @@ fn performAllTheWorkInner(
 // in the `astgen_wait_group`.
 if (comp.job_queued_update_builtin_zig) b: {
 comp.job_queued_update_builtin_zig = false;
-const zcu = comp.module orelse break :b;
-_ = zcu;
+if (comp.zcu == null) break :b;
 // TODO put all the modules in a flat array to make them easy to iterate.
 var seen: std.AutoArrayHashMapUnmanaged(*Package.Module, void) = .{};
 defer seen.deinit(comp.gpa);
@ -3563,7 +3561,7 @@ fn performAllTheWorkInner(
|
||||
}
|
||||
}
|
||||
|
||||
if (comp.module) |zcu| {
|
||||
if (comp.zcu) |zcu| {
|
||||
{
|
||||
// Worker threads may append to zcu.files and zcu.import_table
|
||||
// so we must hold the lock while spawning those tasks, since
|
||||
@ -3606,7 +3604,7 @@ fn performAllTheWorkInner(
|
||||
if (comp.job_queued_compiler_rt_obj) work_queue_wait_group.spawnManager(buildRt, .{ comp, "compiler_rt.zig", .compiler_rt, .Obj, &comp.compiler_rt_obj, main_progress_node });
|
||||
if (comp.job_queued_fuzzer_lib) work_queue_wait_group.spawnManager(buildRt, .{ comp, "fuzzer.zig", .libfuzzer, .Lib, &comp.fuzzer_lib, main_progress_node });
|
||||
|
||||
if (comp.module) |zcu| {
|
||||
if (comp.zcu) |zcu| {
|
||||
const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = .main };
|
||||
if (comp.incremental) {
|
||||
const update_zir_refs_node = main_progress_node.start("Update ZIR References", 0);
|
||||
@ -3638,7 +3636,7 @@ fn performAllTheWorkInner(
|
||||
try processOneJob(@intFromEnum(Zcu.PerThread.Id.main), comp, job, main_progress_node);
|
||||
continue :work;
|
||||
};
|
||||
if (comp.module) |zcu| {
|
||||
if (comp.zcu) |zcu| {
|
||||
// If there's no work queued, check if there's anything outdated
|
||||
// which we need to work on, and queue it if so.
|
||||
if (try zcu.findOutdatedToAnalyze()) |outdated| {
|
||||
@ -3666,7 +3664,7 @@ pub fn queueJobs(comp: *Compilation, jobs: []const Job) !void {
|
||||
fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progress.Node) JobError!void {
|
||||
switch (job) {
|
||||
.codegen_nav => |nav_index| {
|
||||
const zcu = comp.module.?;
|
||||
const zcu = comp.zcu.?;
|
||||
const nav = zcu.intern_pool.getNav(nav_index);
|
||||
if (nav.analysis_owner.unwrap()) |cau| {
|
||||
const unit = InternPool.AnalUnit.wrap(.{ .cau = cau });
|
||||
@ -3689,14 +3687,14 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre
|
||||
const named_frame = tracy.namedFrame("analyze_func");
|
||||
defer named_frame.end();
|
||||
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
|
||||
pt.ensureFuncBodyAnalyzed(func) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.AnalysisFail => return,
|
||||
};
|
||||
},
|
||||
.analyze_cau => |cau_index| {
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
|
||||
pt.ensureCauAnalyzed(cau_index) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.AnalysisFail => return,
|
||||
@ -3725,7 +3723,7 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre
|
||||
const named_frame = tracy.namedFrame("resolve_type_fully");
|
||||
defer named_frame.end();
|
||||
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
|
||||
Type.fromInterned(ty).resolveFully(pt) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.AnalysisFail => return,
|
||||
@ -3738,7 +3736,7 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre
|
||||
if (true) @panic("TODO: update_line_number");
|
||||
|
||||
const gpa = comp.gpa;
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
|
||||
const decl = pt.zcu.declPtr(decl_index);
|
||||
const lf = comp.bin_file.?;
|
||||
lf.updateDeclLineNumber(pt, decl_index) catch |err| {
|
||||
@ -3760,7 +3758,7 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre
|
||||
const named_frame = tracy.namedFrame("analyze_mod");
|
||||
defer named_frame.end();
|
||||
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
|
||||
pt.semaPkg(mod) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.AnalysisFail => return,
|
||||
@ -3924,7 +3922,7 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre
|
||||
|
||||
fn queueCodegenJob(comp: *Compilation, tid: usize, codegen_job: CodegenJob) !void {
|
||||
if (InternPool.single_threaded or
|
||||
!comp.module.?.backendSupportsFeature(.separate_thread))
|
||||
!comp.zcu.?.backendSupportsFeature(.separate_thread))
|
||||
return processOneCodegenJob(tid, comp, codegen_job);
|
||||
|
||||
{
|
||||
@ -3963,14 +3961,14 @@ fn processOneCodegenJob(tid: usize, comp: *Compilation, codegen_job: CodegenJob)
|
||||
const named_frame = tracy.namedFrame("codegen_nav");
|
||||
defer named_frame.end();
|
||||
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
|
||||
try pt.linkerUpdateNav(nav_index);
|
||||
},
|
||||
.func => |func| {
|
||||
const named_frame = tracy.namedFrame("codegen_func");
|
||||
defer named_frame.end();
|
||||
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
|
||||
// This call takes ownership of `func.air`.
|
||||
try pt.linkerUpdateFunc(func.func, func.air);
|
||||
},
|
||||
@ -3978,7 +3976,7 @@ fn processOneCodegenJob(tid: usize, comp: *Compilation, codegen_job: CodegenJob)
|
||||
const named_frame = tracy.namedFrame("codegen_type");
|
||||
defer named_frame.end();
|
||||
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
|
||||
try pt.linkerUpdateContainerType(ty);
|
||||
},
|
||||
}
|
||||
@ -3995,7 +3993,7 @@ fn workerDocsCopy(comp: *Compilation) void {
|
||||
}
|
||||
|
||||
fn docsCopyFallible(comp: *Compilation) anyerror!void {
|
||||
const zcu = comp.module orelse
|
||||
const zcu = comp.zcu orelse
|
||||
return comp.lockAndSetMiscFailure(.docs_copy, "no Zig code to document", .{});
|
||||
|
||||
const emit = comp.docs_emit.?;
|
||||
@ -4260,7 +4258,7 @@ fn workerAstGenFile(
|
||||
const child_prog_node = prog_node.start(file.sub_file_path, 0);
|
||||
defer child_prog_node.end();
|
||||
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
|
||||
pt.astGenFile(file, path_digest) catch |err| switch (err) {
|
||||
error.AnalysisFail => return,
|
||||
else => {
|
||||
@ -4352,8 +4350,8 @@ fn workerCheckEmbedFile(comp: *Compilation, embed_file: *Zcu.EmbedFile) void {
|
||||
}
|
||||
|
||||
fn detectEmbedFileUpdate(comp: *Compilation, embed_file: *Zcu.EmbedFile) !void {
|
||||
const mod = comp.module.?;
|
||||
const ip = &mod.intern_pool;
|
||||
const zcu = comp.zcu.?;
|
||||
const ip = &zcu.intern_pool;
|
||||
var file = try embed_file.owner.root.openFile(embed_file.sub_file_path.toSlice(ip), .{});
|
||||
defer file.close();
|
||||
|
||||
@ -4665,10 +4663,10 @@ fn reportRetryableEmbedFileError(
|
||||
embed_file: *Zcu.EmbedFile,
|
||||
err: anyerror,
|
||||
) error{OutOfMemory}!void {
|
||||
const mod = comp.module.?;
|
||||
const gpa = mod.gpa;
|
||||
const zcu = comp.zcu.?;
|
||||
const gpa = zcu.gpa;
|
||||
const src_loc = embed_file.src_loc;
|
||||
const ip = &mod.intern_pool;
|
||||
const ip = &zcu.intern_pool;
|
||||
const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}/{s}': {s}", .{
|
||||
embed_file.owner.root,
|
||||
embed_file.sub_file_path.toSlice(ip),
|
||||
@ -4680,7 +4678,7 @@ fn reportRetryableEmbedFileError(
|
||||
{
|
||||
comp.mutex.lock();
|
||||
defer comp.mutex.unlock();
|
||||
try mod.failed_embed_files.putNoClobber(gpa, embed_file, err_msg);
|
||||
try zcu.failed_embed_files.putNoClobber(gpa, embed_file, err_msg);
|
||||
}
|
||||
}
|
||||
|
||||
@ -4730,7 +4728,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
|
||||
// Special case when doing build-obj for just one C file. When there are more than one object
|
||||
// file and building an object we need to link them together, but with just one it should go
|
||||
// directly to the output file.
|
||||
const direct_o = comp.c_source_files.len == 1 and comp.module == null and
|
||||
const direct_o = comp.c_source_files.len == 1 and comp.zcu == null and
|
||||
comp.config.output_mode == .Obj and comp.objects.len == 0;
|
||||
const o_basename_noext = if (direct_o)
|
||||
comp.root_name
|
||||
|
||||
@@ -3483,7 +3483,7 @@ pub const LoadedStructType = struct {
 return s.field_aligns.get(ip)[i];
 }
 
-pub fn fieldInit(s: LoadedStructType, ip: *InternPool, i: usize) Index {
+pub fn fieldInit(s: LoadedStructType, ip: *const InternPool, i: usize) Index {
 if (s.field_inits.len == 0) return .none;
 assert(s.haveFieldInits(ip));
 return s.field_inits.get(ip)[i];
@@ -11066,7 +11066,7 @@ pub fn destroyNamespace(
 local.mutate.namespaces.free_list = @intFromEnum(namespace_index);
 }
 
-pub fn filePtr(ip: *InternPool, file_index: FileIndex) *Zcu.File {
+pub fn filePtr(ip: *const InternPool, file_index: FileIndex) *Zcu.File {
 const file_index_unwrapped = file_index.unwrap(ip);
 const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire();
 return files.view().items(.file)[file_index_unwrapped.index];
@@ -9,7 +9,7 @@ const Zcu = @import("Zcu.zig");
 const RangeSet = @This();
 const LazySrcLoc = Zcu.LazySrcLoc;
 
-pt: Zcu.PerThread,
+zcu: *Zcu,
 ranges: std.ArrayList(Range),
 
 pub const Range = struct {
@@ -18,9 +18,9 @@ pub const Range = struct {
 src: LazySrcLoc,
 };
 
-pub fn init(allocator: std.mem.Allocator, pt: Zcu.PerThread) RangeSet {
+pub fn init(allocator: std.mem.Allocator, zcu: *Zcu) RangeSet {
 return .{
-.pt = pt,
+.zcu = zcu,
 .ranges = std.ArrayList(Range).init(allocator),
 };
 }
@ -35,8 +35,8 @@ pub fn add(
|
||||
last: InternPool.Index,
|
||||
src: LazySrcLoc,
|
||||
) !?LazySrcLoc {
|
||||
const pt = self.pt;
|
||||
const ip = &pt.zcu.intern_pool;
|
||||
const zcu = self.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
const ty = ip.typeOf(first);
|
||||
assert(ty == ip.typeOf(last));
|
||||
@ -45,8 +45,8 @@ pub fn add(
|
||||
assert(ty == ip.typeOf(range.first));
|
||||
assert(ty == ip.typeOf(range.last));
|
||||
|
||||
if (Value.fromInterned(last).compareScalar(.gte, Value.fromInterned(range.first), Type.fromInterned(ty), pt) and
|
||||
Value.fromInterned(first).compareScalar(.lte, Value.fromInterned(range.last), Type.fromInterned(ty), pt))
|
||||
if (Value.fromInterned(last).compareScalar(.gte, Value.fromInterned(range.first), Type.fromInterned(ty), zcu) and
|
||||
Value.fromInterned(first).compareScalar(.lte, Value.fromInterned(range.last), Type.fromInterned(ty), zcu))
|
||||
{
|
||||
return range.src; // They overlap.
|
||||
}
|
||||
@ -61,20 +61,20 @@ pub fn add(
|
||||
}
|
||||
|
||||
/// Assumes a and b do not overlap
|
||||
fn lessThan(pt: Zcu.PerThread, a: Range, b: Range) bool {
|
||||
const ty = Type.fromInterned(pt.zcu.intern_pool.typeOf(a.first));
|
||||
return Value.fromInterned(a.first).compareScalar(.lt, Value.fromInterned(b.first), ty, pt);
|
||||
fn lessThan(zcu: *Zcu, a: Range, b: Range) bool {
|
||||
const ty = Type.fromInterned(zcu.intern_pool.typeOf(a.first));
|
||||
return Value.fromInterned(a.first).compareScalar(.lt, Value.fromInterned(b.first), ty, zcu);
|
||||
}
|
||||
|
||||
pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !bool {
|
||||
const pt = self.pt;
|
||||
const ip = &pt.zcu.intern_pool;
|
||||
const zcu = self.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
assert(ip.typeOf(first) == ip.typeOf(last));
|
||||
|
||||
if (self.ranges.items.len == 0)
|
||||
return false;
|
||||
|
||||
std.mem.sort(Range, self.ranges.items, pt, lessThan);
|
||||
std.mem.sort(Range, self.ranges.items, zcu, lessThan);
|
||||
|
||||
if (self.ranges.items[0].first != first or
|
||||
self.ranges.items[self.ranges.items.len - 1].last != last)
|
||||
@ -93,10 +93,10 @@ pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !
|
||||
const prev = self.ranges.items[i];
|
||||
|
||||
// prev.last + 1 == cur.first
|
||||
try counter.copy(Value.fromInterned(prev.last).toBigInt(&space, pt));
|
||||
try counter.copy(Value.fromInterned(prev.last).toBigInt(&space, zcu));
|
||||
try counter.addScalar(&counter, 1);
|
||||
|
||||
const cur_start_int = Value.fromInterned(cur.first).toBigInt(&space, pt);
|
||||
const cur_start_int = Value.fromInterned(cur.first).toBigInt(&space, zcu);
|
||||
if (!cur_start_int.eql(counter.toConst())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
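Note: in RangeSet.zig the stored `pt: Zcu.PerThread` becomes `zcu: *Zcu`, since comparing switch ranges only needs the `Zcu`, not a per-thread handle. The sketch below models only the overlap test performed by `add`, using plain integers in place of interned values; the helper name and types are illustrative, not the real API.

```zig
const std = @import("std");

/// Standalone model of the overlap check in `RangeSet.add`: two inclusive
/// ranges overlap iff `last >= other.first and first <= other.last`.
/// The real code compares interned `Value`s with `compareScalar` against `zcu`.
fn overlaps(first: i64, last: i64, other_first: i64, other_last: i64) bool {
    return last >= other_first and first <= other_last;
}

test "switch range overlap" {
    try std.testing.expect(overlaps(0, 10, 5, 20)); // partial overlap
    try std.testing.expect(overlaps(5, 5, 0, 10)); // contained
    try std.testing.expect(!overlaps(0, 4, 5, 9)); // adjacent but disjoint
}
```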
src/Sema.zig: diff suppressed because it is too large (4447 lines changed)
@ -85,23 +85,23 @@ fn bitCastInner(
|
||||
assert(val_ty.hasWellDefinedLayout(zcu));
|
||||
|
||||
const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
|
||||
.{ val_ty.abiSize(pt) * 8 - host_bits, host_bits - val_ty.bitSize(pt) }
|
||||
.{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
|
||||
else
|
||||
.{ val_ty.abiSize(pt) * 8 - val_ty.bitSize(pt), 0 };
|
||||
.{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
|
||||
|
||||
const skip_bits = switch (endian) {
|
||||
.little => bit_offset + byte_offset * 8,
|
||||
.big => if (host_bits > 0)
|
||||
val_ty.abiSize(pt) * 8 - byte_offset * 8 - host_bits + bit_offset
|
||||
val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
|
||||
else
|
||||
val_ty.abiSize(pt) * 8 - byte_offset * 8 - dest_ty.bitSize(pt),
|
||||
val_ty.abiSize(zcu) * 8 - byte_offset * 8 - dest_ty.bitSize(zcu),
|
||||
};
|
||||
|
||||
var unpack: UnpackValueBits = .{
|
||||
.pt = sema.pt,
|
||||
.arena = sema.arena,
|
||||
.skip_bits = skip_bits,
|
||||
.remaining_bits = dest_ty.bitSize(pt),
|
||||
.remaining_bits = dest_ty.bitSize(zcu),
|
||||
.unpacked = std.ArrayList(InternPool.Index).init(sema.arena),
|
||||
};
|
||||
switch (endian) {
|
||||
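Note: the hunk above changes the `skip_bits` arithmetic in `bitCastInner` to query the `*Zcu` instead of a `Zcu.PerThread`; the formula itself is unchanged. On little-endian targets the skipped bits are counted from the start of the value, while on big-endian targets they are counted from the opposite end of its ABI size. A minimal sketch of that arithmetic, assuming the simple case with no packed host integer (`host_bits == 0`, `bit_offset == 0`):

```zig
const std = @import("std");

/// Sketch of the `skip_bits` computation from `bitCastInner` for the case
/// without a packed host integer. All quantities are in bits; `abi_bits`
/// is the source value type's ABI size times 8.
fn skipBits(endian: std.builtin.Endian, abi_bits: u64, dest_bits: u64, byte_offset: u64) u64 {
    return switch (endian) {
        .little => byte_offset * 8,
        // On big-endian targets the first byte holds the most significant
        // bits, so the offset is counted from the other end of the value.
        .big => abi_bits - byte_offset * 8 - dest_bits,
    };
}

test "reading a u16 at byte offset 2 of a u64" {
    try std.testing.expectEqual(@as(u64, 16), skipBits(.little, 64, 16, 2));
    try std.testing.expectEqual(@as(u64, 32), skipBits(.big, 64, 16, 2));
}
```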
@ -141,22 +141,22 @@ fn bitCastSpliceInner(
|
||||
try val_ty.resolveLayout(pt);
|
||||
try splice_val_ty.resolveLayout(pt);
|
||||
|
||||
const splice_bits = splice_val_ty.bitSize(pt);
|
||||
const splice_bits = splice_val_ty.bitSize(zcu);
|
||||
|
||||
const splice_offset = switch (endian) {
|
||||
.little => bit_offset + byte_offset * 8,
|
||||
.big => if (host_bits > 0)
|
||||
val_ty.abiSize(pt) * 8 - byte_offset * 8 - host_bits + bit_offset
|
||||
val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
|
||||
else
|
||||
val_ty.abiSize(pt) * 8 - byte_offset * 8 - splice_bits,
|
||||
val_ty.abiSize(zcu) * 8 - byte_offset * 8 - splice_bits,
|
||||
};
|
||||
|
||||
assert(splice_offset + splice_bits <= val_ty.abiSize(pt) * 8);
|
||||
assert(splice_offset + splice_bits <= val_ty.abiSize(zcu) * 8);
|
||||
|
||||
const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
|
||||
.{ val_ty.abiSize(pt) * 8 - host_bits, host_bits - val_ty.bitSize(pt) }
|
||||
.{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
|
||||
else
|
||||
.{ val_ty.abiSize(pt) * 8 - val_ty.bitSize(pt), 0 };
|
||||
.{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
|
||||
|
||||
var unpack: UnpackValueBits = .{
|
||||
.pt = pt,
|
||||
@ -181,7 +181,7 @@ fn bitCastSpliceInner(
|
||||
try unpack.add(splice_val);
|
||||
|
||||
unpack.skip_bits = splice_offset + splice_bits;
|
||||
unpack.remaining_bits = val_ty.abiSize(pt) * 8 - splice_offset - splice_bits;
|
||||
unpack.remaining_bits = val_ty.abiSize(zcu) * 8 - splice_offset - splice_bits;
|
||||
switch (endian) {
|
||||
.little => {
|
||||
try unpack.add(val);
|
||||
@ -229,7 +229,7 @@ const UnpackValueBits = struct {
|
||||
}
|
||||
|
||||
const ty = val.typeOf(zcu);
|
||||
const bit_size = ty.bitSize(pt);
|
||||
const bit_size = ty.bitSize(zcu);
|
||||
|
||||
if (unpack.skip_bits >= bit_size) {
|
||||
unpack.skip_bits -= bit_size;
|
||||
@ -291,7 +291,7 @@ const UnpackValueBits = struct {
|
||||
// The final element does not have trailing padding.
|
||||
// Elements are reversed in packed memory on BE targets.
|
||||
const elem_ty = ty.childType(zcu);
|
||||
const pad_bits = elem_ty.abiSize(pt) * 8 - elem_ty.bitSize(pt);
|
||||
const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
|
||||
const len = ty.arrayLen(zcu);
|
||||
const maybe_sent = ty.sentinel(zcu);
|
||||
|
||||
@ -323,12 +323,12 @@ const UnpackValueBits = struct {
|
||||
var cur_bit_off: u64 = 0;
|
||||
var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
|
||||
while (it.next()) |field_idx| {
|
||||
const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8;
|
||||
const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
|
||||
const pad_bits = want_bit_off - cur_bit_off;
|
||||
const field_val = try val.fieldValue(pt, field_idx);
|
||||
try unpack.padding(pad_bits);
|
||||
try unpack.add(field_val);
|
||||
cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(pt);
|
||||
cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(zcu);
|
||||
}
|
||||
// Add trailing padding bits.
|
||||
try unpack.padding(bit_size - cur_bit_off);
|
||||
@ -339,11 +339,11 @@ const UnpackValueBits = struct {
|
||||
while (it.next()) |field_idx| {
|
||||
const field_val = try val.fieldValue(pt, field_idx);
|
||||
const field_ty = field_val.typeOf(zcu);
|
||||
const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8 + field_ty.bitSize(pt);
|
||||
const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
|
||||
const pad_bits = cur_bit_off - want_bit_off;
|
||||
try unpack.padding(pad_bits);
|
||||
try unpack.add(field_val);
|
||||
cur_bit_off = want_bit_off - field_ty.bitSize(pt);
|
||||
cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
|
||||
}
|
||||
assert(cur_bit_off == 0);
|
||||
},
|
||||
@ -366,7 +366,7 @@ const UnpackValueBits = struct {
|
||||
// This correctly handles the case where `tag == .none`, since the payload is then
|
||||
// either an integer or a byte array, both of which we can unpack.
|
||||
const payload_val = Value.fromInterned(un.val);
|
||||
const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(pt);
|
||||
const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(zcu);
|
||||
if (endian == .little or ty.containerLayout(zcu) == .@"packed") {
|
||||
try unpack.add(payload_val);
|
||||
try unpack.padding(pad_bits);
|
||||
@ -398,13 +398,14 @@ const UnpackValueBits = struct {
|
||||
|
||||
fn primitive(unpack: *UnpackValueBits, val: Value) BitCastError!void {
|
||||
const pt = unpack.pt;
|
||||
const zcu = pt.zcu;
|
||||
|
||||
if (unpack.remaining_bits == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const ty = val.typeOf(pt.zcu);
|
||||
const bit_size = ty.bitSize(pt);
|
||||
const bit_size = ty.bitSize(zcu);
|
||||
|
||||
// Note that this skips all zero-bit types.
|
||||
if (unpack.skip_bits >= bit_size) {
|
||||
@ -429,9 +430,10 @@ const UnpackValueBits = struct {
|
||||
|
||||
fn splitPrimitive(unpack: *UnpackValueBits, val: Value, bit_offset: u64, bit_count: u64) BitCastError!void {
|
||||
const pt = unpack.pt;
|
||||
const zcu = pt.zcu;
|
||||
const ty = val.typeOf(pt.zcu);
|
||||
|
||||
const val_bits = ty.bitSize(pt);
|
||||
const val_bits = ty.bitSize(zcu);
|
||||
assert(bit_offset + bit_count <= val_bits);
|
||||
|
||||
switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
|
||||
@ -499,12 +501,12 @@ const PackValueBits = struct {
|
||||
const len = ty.arrayLen(zcu);
|
||||
const elem_ty = ty.childType(zcu);
|
||||
const maybe_sent = ty.sentinel(zcu);
|
||||
const pad_bits = elem_ty.abiSize(pt) * 8 - elem_ty.bitSize(pt);
|
||||
const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
|
||||
const elems = try arena.alloc(InternPool.Index, @intCast(len));
|
||||
|
||||
if (endian == .big and maybe_sent != null) {
|
||||
// TODO: validate sentinel was preserved!
|
||||
try pack.padding(elem_ty.bitSize(pt));
|
||||
try pack.padding(elem_ty.bitSize(zcu));
|
||||
if (len != 0) try pack.padding(pad_bits);
|
||||
}
|
||||
|
||||
@ -520,7 +522,7 @@ const PackValueBits = struct {
|
||||
if (endian == .little and maybe_sent != null) {
|
||||
// TODO: validate sentinel was preserved!
|
||||
if (len != 0) try pack.padding(pad_bits);
|
||||
try pack.padding(elem_ty.bitSize(pt));
|
||||
try pack.padding(elem_ty.bitSize(zcu));
|
||||
}
|
||||
|
||||
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
|
||||
@ -538,23 +540,23 @@ const PackValueBits = struct {
|
||||
var cur_bit_off: u64 = 0;
|
||||
var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
|
||||
while (it.next()) |field_idx| {
|
||||
const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8;
|
||||
const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
|
||||
try pack.padding(want_bit_off - cur_bit_off);
|
||||
const field_ty = ty.structFieldType(field_idx, zcu);
|
||||
const field_ty = ty.fieldType(field_idx, zcu);
|
||||
elems[field_idx] = (try pack.get(field_ty)).toIntern();
|
||||
cur_bit_off = want_bit_off + field_ty.bitSize(pt);
|
||||
cur_bit_off = want_bit_off + field_ty.bitSize(zcu);
|
||||
}
|
||||
try pack.padding(ty.bitSize(pt) - cur_bit_off);
|
||||
try pack.padding(ty.bitSize(zcu) - cur_bit_off);
|
||||
},
|
||||
.big => {
|
||||
var cur_bit_off: u64 = ty.bitSize(pt);
|
||||
var cur_bit_off: u64 = ty.bitSize(zcu);
|
||||
var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
|
||||
while (it.next()) |field_idx| {
|
||||
const field_ty = ty.structFieldType(field_idx, zcu);
|
||||
const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8 + field_ty.bitSize(pt);
|
||||
const field_ty = ty.fieldType(field_idx, zcu);
|
||||
const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
|
||||
try pack.padding(cur_bit_off - want_bit_off);
|
||||
elems[field_idx] = (try pack.get(field_ty)).toIntern();
|
||||
cur_bit_off = want_bit_off - field_ty.bitSize(pt);
|
||||
cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
|
||||
}
|
||||
assert(cur_bit_off == 0);
|
||||
},
|
||||
@ -576,7 +578,7 @@ const PackValueBits = struct {
|
||||
// This is identical between LE and BE targets.
|
||||
const elems = try arena.alloc(InternPool.Index, ty.structFieldCount(zcu));
|
||||
for (elems, 0..) |*elem, i| {
|
||||
const field_ty = ty.structFieldType(i, zcu);
|
||||
const field_ty = ty.fieldType(i, zcu);
|
||||
elem.* = (try pack.get(field_ty)).toIntern();
|
||||
}
|
||||
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
|
||||
@ -622,16 +624,16 @@ const PackValueBits = struct {
|
||||
for (field_order, 0..) |*f, i| f.* = @intCast(i);
|
||||
// Sort `field_order` to put the fields with the largest bit sizes first.
|
||||
const SizeSortCtx = struct {
|
||||
pt: Zcu.PerThread,
|
||||
zcu: *Zcu,
|
||||
field_types: []const InternPool.Index,
|
||||
fn lessThan(ctx: @This(), a_idx: u32, b_idx: u32) bool {
|
||||
const a_ty = Type.fromInterned(ctx.field_types[a_idx]);
|
||||
const b_ty = Type.fromInterned(ctx.field_types[b_idx]);
|
||||
return a_ty.bitSize(ctx.pt) > b_ty.bitSize(ctx.pt);
|
||||
return a_ty.bitSize(ctx.zcu) > b_ty.bitSize(ctx.zcu);
|
||||
}
|
||||
};
|
||||
std.mem.sortUnstable(u32, field_order, SizeSortCtx{
|
||||
.pt = pt,
|
||||
.zcu = zcu,
|
||||
.field_types = zcu.typeToUnion(ty).?.field_types.get(ip),
|
||||
}, SizeSortCtx.lessThan);
|
||||
|
||||
@ -639,7 +641,7 @@ const PackValueBits = struct {
|
||||
|
||||
for (field_order) |field_idx| {
|
||||
const field_ty = Type.fromInterned(zcu.typeToUnion(ty).?.field_types.get(ip)[field_idx]);
|
||||
const pad_bits = ty.bitSize(pt) - field_ty.bitSize(pt);
|
||||
const pad_bits = ty.bitSize(zcu) - field_ty.bitSize(zcu);
|
||||
if (!padding_after) try pack.padding(pad_bits);
|
||||
const field_val = pack.get(field_ty) catch |err| switch (err) {
|
||||
error.ReinterpretDeclRef => {
|
||||
@ -682,10 +684,11 @@ const PackValueBits = struct {
|
||||
|
||||
fn primitive(pack: *PackValueBits, want_ty: Type) BitCastError!Value {
|
||||
const pt = pack.pt;
|
||||
const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(pt));
|
||||
const zcu = pt.zcu;
|
||||
const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(zcu));
|
||||
|
||||
for (vals) |val| {
|
||||
if (!Value.fromInterned(val).isUndef(pt.zcu)) break;
|
||||
if (!Value.fromInterned(val).isUndef(zcu)) break;
|
||||
} else {
|
||||
// All bits of the value are `undefined`.
|
||||
return pt.undefValue(want_ty);
|
||||
@ -706,8 +709,8 @@ const PackValueBits = struct {
|
||||
ptr_cast: {
|
||||
if (vals.len != 1) break :ptr_cast;
|
||||
const val = Value.fromInterned(vals[0]);
|
||||
if (!val.typeOf(pt.zcu).isPtrAtRuntime(pt.zcu)) break :ptr_cast;
|
||||
if (!want_ty.isPtrAtRuntime(pt.zcu)) break :ptr_cast;
|
||||
if (!val.typeOf(zcu).isPtrAtRuntime(zcu)) break :ptr_cast;
|
||||
if (!want_ty.isPtrAtRuntime(zcu)) break :ptr_cast;
|
||||
return pt.getCoerced(val, want_ty);
|
||||
}
|
||||
|
||||
@ -717,7 +720,7 @@ const PackValueBits = struct {
|
||||
for (vals) |ip_val| {
|
||||
const val = Value.fromInterned(ip_val);
|
||||
const ty = val.typeOf(pt.zcu);
|
||||
buf_bits += ty.bitSize(pt);
|
||||
buf_bits += ty.bitSize(zcu);
|
||||
}
|
||||
|
||||
const buf = try pack.arena.alloc(u8, @intCast((buf_bits + 7) / 8));
|
||||
@ -726,11 +729,11 @@ const PackValueBits = struct {
|
||||
var cur_bit_off: usize = 0;
|
||||
for (vals) |ip_val| {
|
||||
const val = Value.fromInterned(ip_val);
|
||||
const ty = val.typeOf(pt.zcu);
|
||||
if (!val.isUndef(pt.zcu)) {
|
||||
const ty = val.typeOf(zcu);
|
||||
if (!val.isUndef(zcu)) {
|
||||
try val.writeToPackedMemory(ty, pt, buf, cur_bit_off);
|
||||
}
|
||||
cur_bit_off += @intCast(ty.bitSize(pt));
|
||||
cur_bit_off += @intCast(ty.bitSize(zcu));
|
||||
}
|
||||
|
||||
return Value.readFromPackedMemory(want_ty, pt, buf, @intCast(bit_offset), pack.arena);
|
||||
@ -740,11 +743,12 @@ const PackValueBits = struct {
|
||||
if (need_bits == 0) return .{ &.{}, 0 };
|
||||
|
||||
const pt = pack.pt;
|
||||
const zcu = pt.zcu;
|
||||
|
||||
var bits: u64 = 0;
|
||||
var len: usize = 0;
|
||||
while (bits < pack.bit_offset + need_bits) {
|
||||
bits += Value.fromInterned(pack.unpacked[len]).typeOf(pt.zcu).bitSize(pt);
|
||||
bits += Value.fromInterned(pack.unpacked[len]).typeOf(pt.zcu).bitSize(zcu);
|
||||
len += 1;
|
||||
}
|
||||
|
||||
@ -757,7 +761,7 @@ const PackValueBits = struct {
|
||||
pack.bit_offset = 0;
|
||||
} else {
|
||||
pack.unpacked = pack.unpacked[len - 1 ..];
|
||||
pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(pt.zcu).bitSize(pt) - extra_bits;
|
||||
pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(pt.zcu).bitSize(zcu) - extra_bits;
|
||||
}
|
||||
|
||||
return .{ result_vals, result_offset };
|
||||
|
||||
@ -13,14 +13,15 @@ pub const ComptimeLoadResult = union(enum) {
|
||||
|
||||
pub fn loadComptimePtr(sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Value) !ComptimeLoadResult {
|
||||
const pt = sema.pt;
|
||||
const zcu = pt.zcu;
|
||||
const ptr_info = ptr.typeOf(pt.zcu).ptrInfo(pt.zcu);
|
||||
// TODO: host size for vectors is terrible
|
||||
const host_bits = switch (ptr_info.flags.vector_index) {
|
||||
.none => ptr_info.packed_offset.host_size * 8,
|
||||
else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(pt),
|
||||
else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
|
||||
};
|
||||
const bit_offset = if (host_bits != 0) bit_offset: {
|
||||
const child_bits = Type.fromInterned(ptr_info.child).bitSize(pt);
|
||||
const child_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
|
||||
const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
|
||||
.none => 0,
|
||||
.runtime => return .runtime_load,
|
||||
@ -67,18 +68,18 @@ pub fn storeComptimePtr(
|
||||
// TODO: host size for vectors is terrible
|
||||
const host_bits = switch (ptr_info.flags.vector_index) {
|
||||
.none => ptr_info.packed_offset.host_size * 8,
|
||||
else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(pt),
|
||||
else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
|
||||
};
|
||||
const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
|
||||
.none => 0,
|
||||
.runtime => return .runtime_store,
|
||||
else => |idx| switch (zcu.getTarget().cpu.arch.endian()) {
|
||||
.little => Type.fromInterned(ptr_info.child).bitSize(pt) * @intFromEnum(idx),
|
||||
.big => host_bits - Type.fromInterned(ptr_info.child).bitSize(pt) * (@intFromEnum(idx) + 1), // element order reversed on big endian
|
||||
.little => Type.fromInterned(ptr_info.child).bitSize(zcu) * @intFromEnum(idx),
|
||||
.big => host_bits - Type.fromInterned(ptr_info.child).bitSize(zcu) * (@intFromEnum(idx) + 1), // element order reversed on big endian
|
||||
},
|
||||
};
|
||||
const pseudo_store_ty = if (host_bits > 0) t: {
|
||||
const need_bits = Type.fromInterned(ptr_info.child).bitSize(pt);
|
||||
const need_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
|
||||
if (need_bits + bit_offset > host_bits) {
|
||||
return .exceeds_host_size;
|
||||
}
|
||||
@ -166,9 +167,9 @@ pub fn storeComptimePtr(
|
||||
.direct => |direct| .{ direct.val, 0 },
|
||||
.index => |index| .{
|
||||
index.val,
|
||||
index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(pt),
|
||||
index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(zcu),
|
||||
},
|
||||
.flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(pt) },
|
||||
.flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(zcu) },
|
||||
.reinterpret => |reinterpret| .{ reinterpret.val, reinterpret.byte_offset },
|
||||
else => unreachable,
|
||||
};
|
||||
@ -347,8 +348,8 @@ fn loadComptimePtrInner(
|
||||
const load_one_ty, const load_count = load_ty.arrayBase(zcu);
|
||||
|
||||
const extra_base_index: u64 = if (ptr.byte_offset == 0) 0 else idx: {
|
||||
if (try sema.typeRequiresComptime(load_one_ty)) break :restructure_array;
|
||||
const elem_len = try sema.typeAbiSize(load_one_ty);
|
||||
if (try load_one_ty.comptimeOnlySema(pt)) break :restructure_array;
|
||||
const elem_len = try load_one_ty.abiSizeSema(pt);
|
||||
if (ptr.byte_offset % elem_len != 0) break :restructure_array;
|
||||
break :idx @divExact(ptr.byte_offset, elem_len);
|
||||
};
|
||||
@ -394,12 +395,12 @@ fn loadComptimePtrInner(
|
||||
var cur_offset = ptr.byte_offset;
|
||||
|
||||
if (load_ty.zigTypeTag(zcu) == .Array and array_offset > 0) {
|
||||
cur_offset += try sema.typeAbiSize(load_ty.childType(zcu)) * array_offset;
|
||||
cur_offset += try load_ty.childType(zcu).abiSizeSema(pt) * array_offset;
|
||||
}
|
||||
|
||||
const need_bytes = if (host_bits > 0) (host_bits + 7) / 8 else try sema.typeAbiSize(load_ty);
|
||||
const need_bytes = if (host_bits > 0) (host_bits + 7) / 8 else try load_ty.abiSizeSema(pt);
|
||||
|
||||
if (cur_offset + need_bytes > try sema.typeAbiSize(cur_val.typeOf(zcu))) {
|
||||
if (cur_offset + need_bytes > try cur_val.typeOf(zcu).abiSizeSema(pt)) {
|
||||
return .{ .out_of_bounds = cur_val.typeOf(zcu) };
|
||||
}
|
||||
|
||||
@ -434,7 +435,7 @@ fn loadComptimePtrInner(
|
||||
.Optional => break, // this can only be a pointer-like optional so is terminal
|
||||
.Array => {
|
||||
const elem_ty = cur_ty.childType(zcu);
|
||||
const elem_size = try sema.typeAbiSize(elem_ty);
|
||||
const elem_size = try elem_ty.abiSizeSema(pt);
|
||||
const elem_idx = cur_offset / elem_size;
|
||||
const next_elem_off = elem_size * (elem_idx + 1);
|
||||
if (cur_offset + need_bytes <= next_elem_off) {
|
||||
@ -449,8 +450,8 @@ fn loadComptimePtrInner(
|
||||
.auto => unreachable, // ill-defined layout
|
||||
.@"packed" => break, // let the bitcast logic handle this
|
||||
.@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
|
||||
const start_off = cur_ty.structFieldOffset(field_idx, pt);
|
||||
const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu));
|
||||
const start_off = cur_ty.structFieldOffset(field_idx, zcu);
|
||||
const end_off = start_off + try cur_ty.fieldType(field_idx, zcu).abiSizeSema(pt);
|
||||
if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
|
||||
cur_val = try cur_val.getElem(sema.pt, field_idx);
|
||||
cur_offset -= start_off;
|
||||
@ -477,7 +478,7 @@ fn loadComptimePtrInner(
|
||||
};
|
||||
// The payload always has offset 0. If it's big enough
|
||||
// to represent the whole load type, we can use it.
|
||||
if (try sema.typeAbiSize(payload.typeOf(zcu)) >= need_bytes) {
|
||||
if (try payload.typeOf(zcu).abiSizeSema(pt) >= need_bytes) {
|
||||
cur_val = payload;
|
||||
} else {
|
||||
break;
|
||||
@ -746,8 +747,8 @@ fn prepareComptimePtrStore(
|
||||
|
||||
const store_one_ty, const store_count = store_ty.arrayBase(zcu);
|
||||
const extra_base_index: u64 = if (ptr.byte_offset == 0) 0 else idx: {
|
||||
if (try sema.typeRequiresComptime(store_one_ty)) break :restructure_array;
|
||||
const elem_len = try sema.typeAbiSize(store_one_ty);
|
||||
if (try store_one_ty.comptimeOnlySema(pt)) break :restructure_array;
|
||||
const elem_len = try store_one_ty.abiSizeSema(pt);
|
||||
if (ptr.byte_offset % elem_len != 0) break :restructure_array;
|
||||
break :idx @divExact(ptr.byte_offset, elem_len);
|
||||
};
|
||||
@ -800,11 +801,11 @@ fn prepareComptimePtrStore(
|
||||
var cur_val: *MutableValue, var cur_offset: u64 = switch (base_strat) {
|
||||
.direct => |direct| .{ direct.val, 0 },
|
||||
// It's okay to do `abiSize` - the comptime-only case will be caught below.
|
||||
.index => |index| .{ index.val, index.elem_index * try sema.typeAbiSize(index.val.typeOf(zcu).childType(zcu)) },
|
||||
.index => |index| .{ index.val, index.elem_index * try index.val.typeOf(zcu).childType(zcu).abiSizeSema(pt) },
|
||||
.flat_index => |flat_index| .{
|
||||
flat_index.val,
|
||||
// It's okay to do `abiSize` - the comptime-only case will be caught below.
|
||||
flat_index.flat_elem_index * try sema.typeAbiSize(flat_index.val.typeOf(zcu).arrayBase(zcu)[0]),
|
||||
flat_index.flat_elem_index * try flat_index.val.typeOf(zcu).arrayBase(zcu)[0].abiSizeSema(pt),
|
||||
},
|
||||
.reinterpret => |r| .{ r.val, r.byte_offset },
|
||||
else => unreachable,
|
||||
@ -816,12 +817,12 @@ fn prepareComptimePtrStore(
|
||||
}
|
||||
|
||||
if (store_ty.zigTypeTag(zcu) == .Array and array_offset > 0) {
|
||||
cur_offset += try sema.typeAbiSize(store_ty.childType(zcu)) * array_offset;
|
||||
cur_offset += try store_ty.childType(zcu).abiSizeSema(pt) * array_offset;
|
||||
}
|
||||
|
||||
const need_bytes = try sema.typeAbiSize(store_ty);
|
||||
const need_bytes = try store_ty.abiSizeSema(pt);
|
||||
|
||||
if (cur_offset + need_bytes > try sema.typeAbiSize(cur_val.typeOf(zcu))) {
|
||||
if (cur_offset + need_bytes > try cur_val.typeOf(zcu).abiSizeSema(pt)) {
|
||||
return .{ .out_of_bounds = cur_val.typeOf(zcu) };
|
||||
}
|
||||
|
||||
@ -856,7 +857,7 @@ fn prepareComptimePtrStore(
|
||||
.Optional => break, // this can only be a pointer-like optional so is terminal
|
||||
.Array => {
|
||||
const elem_ty = cur_ty.childType(zcu);
|
||||
const elem_size = try sema.typeAbiSize(elem_ty);
|
||||
const elem_size = try elem_ty.abiSizeSema(pt);
|
||||
const elem_idx = cur_offset / elem_size;
|
||||
const next_elem_off = elem_size * (elem_idx + 1);
|
||||
if (cur_offset + need_bytes <= next_elem_off) {
|
||||
@ -871,8 +872,8 @@ fn prepareComptimePtrStore(
|
||||
.auto => unreachable, // ill-defined layout
|
||||
.@"packed" => break, // let the bitcast logic handle this
|
||||
.@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
|
||||
const start_off = cur_ty.structFieldOffset(field_idx, pt);
|
||||
const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu));
|
||||
const start_off = cur_ty.structFieldOffset(field_idx, zcu);
|
||||
const end_off = start_off + try cur_ty.fieldType(field_idx, zcu).abiSizeSema(pt);
|
||||
if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
|
||||
cur_val = try cur_val.elem(pt, sema.arena, field_idx);
|
||||
cur_offset -= start_off;
|
||||
@ -895,7 +896,7 @@ fn prepareComptimePtrStore(
|
||||
};
|
||||
// The payload always has offset 0. If it's big enough
|
||||
// to represent the whole load type, we can use it.
|
||||
if (try sema.typeAbiSize(payload.typeOf(zcu)) >= need_bytes) {
|
||||
if (try payload.typeOf(zcu).abiSizeSema(pt) >= need_bytes) {
|
||||
cur_val = payload;
|
||||
} else {
|
||||
break;
|
||||
|
||||
src/Type.zig: diff suppressed because it is too large (1359 lines changed)
src/Value.zig: diff suppressed because it is too large (1466 lines changed)
src/Zcu.zig (89 lines changed):
@ -2109,9 +2109,9 @@ pub const CompileError = error{
|
||||
ComptimeBreak,
|
||||
};
|
||||
|
||||
pub fn init(mod: *Zcu, thread_count: usize) !void {
|
||||
const gpa = mod.gpa;
|
||||
try mod.intern_pool.init(gpa, thread_count);
|
||||
pub fn init(zcu: *Zcu, thread_count: usize) !void {
|
||||
const gpa = zcu.gpa;
|
||||
try zcu.intern_pool.init(gpa, thread_count);
|
||||
}
|
||||
|
||||
pub fn deinit(zcu: *Zcu) void {
|
||||
@ -2204,8 +2204,8 @@ pub fn namespacePtr(zcu: *Zcu, index: Namespace.Index) *Namespace {
|
||||
return zcu.intern_pool.namespacePtr(index);
|
||||
}
|
||||
|
||||
pub fn namespacePtrUnwrap(mod: *Zcu, index: Namespace.OptionalIndex) ?*Namespace {
|
||||
return mod.namespacePtr(index.unwrap() orelse return null);
|
||||
pub fn namespacePtrUnwrap(zcu: *Zcu, index: Namespace.OptionalIndex) ?*Namespace {
|
||||
return zcu.namespacePtr(index.unwrap() orelse return null);
|
||||
}
|
||||
|
||||
// TODO https://github.com/ziglang/zig/issues/8643
|
||||
@ -2682,7 +2682,7 @@ pub fn mapOldZirToNew(
|
||||
///
|
||||
/// The caller is responsible for ensuring the function decl itself is already
|
||||
/// analyzed, and for ensuring it can exist at runtime (see
|
||||
/// `sema.fnHasRuntimeBits`). This function does *not* guarantee that the body
|
||||
/// `Type.fnHasRuntimeBitsSema`). This function does *not* guarantee that the body
|
||||
/// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`.
|
||||
pub fn ensureFuncBodyAnalysisQueued(zcu: *Zcu, func_index: InternPool.Index) !void {
|
||||
const ip = &zcu.intern_pool;
|
||||
@ -2840,22 +2840,22 @@ pub fn addTypeReference(zcu: *Zcu, src_unit: AnalUnit, referenced_type: InternPo
|
||||
gop.value_ptr.* = @intCast(ref_idx);
|
||||
}
|
||||
|
||||
pub fn errorSetBits(mod: *Zcu) u16 {
|
||||
if (mod.error_limit == 0) return 0;
|
||||
return @as(u16, std.math.log2_int(ErrorInt, mod.error_limit)) + 1;
|
||||
pub fn errorSetBits(zcu: *const Zcu) u16 {
|
||||
if (zcu.error_limit == 0) return 0;
|
||||
return @as(u16, std.math.log2_int(ErrorInt, zcu.error_limit)) + 1;
|
||||
}
|
||||
|
||||
pub fn errNote(
|
||||
mod: *Zcu,
|
||||
zcu: *Zcu,
|
||||
src_loc: LazySrcLoc,
|
||||
parent: *ErrorMsg,
|
||||
comptime format: []const u8,
|
||||
args: anytype,
|
||||
) error{OutOfMemory}!void {
|
||||
const msg = try std.fmt.allocPrint(mod.gpa, format, args);
|
||||
errdefer mod.gpa.free(msg);
|
||||
const msg = try std.fmt.allocPrint(zcu.gpa, format, args);
|
||||
errdefer zcu.gpa.free(msg);
|
||||
|
||||
parent.notes = try mod.gpa.realloc(parent.notes, parent.notes.len + 1);
|
||||
parent.notes = try zcu.gpa.realloc(parent.notes, parent.notes.len + 1);
|
||||
parent.notes[parent.notes.len - 1] = .{
|
||||
.src_loc = src_loc,
|
||||
.msg = msg,
|
||||
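Note: `errorSetBits` (unchanged above except for the `zcu: *const Zcu` parameter) computes how many bits the global error integer needs: zero when no errors exist, otherwise `log2(error_limit) + 1`. A standalone copy of that formula, using `u16` in place of the compiler's `ErrorInt` type:

```zig
const std = @import("std");

/// Standalone copy of the `errorSetBits` formula: bits needed to store any
/// error value when at most `error_limit` distinct errors exist. The real
/// function uses the compiler's `ErrorInt` type; `u16` stands in for it here.
fn errorSetBits(error_limit: u16) u16 {
    if (error_limit == 0) return 0;
    return @as(u16, std.math.log2_int(u16, error_limit)) + 1;
}

test "error set bit width" {
    try std.testing.expectEqual(@as(u16, 0), errorSetBits(0));
    try std.testing.expectEqual(@as(u16, 1), errorSetBits(1));
    try std.testing.expectEqual(@as(u16, 8), errorSetBits(255));
    try std.testing.expectEqual(@as(u16, 9), errorSetBits(256));
}
```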
@ -2876,14 +2876,14 @@ pub fn optimizeMode(zcu: *const Zcu) std.builtin.OptimizeMode {
|
||||
return zcu.root_mod.optimize_mode;
|
||||
}
|
||||
|
||||
fn lockAndClearFileCompileError(mod: *Zcu, file: *File) void {
|
||||
fn lockAndClearFileCompileError(zcu: *Zcu, file: *File) void {
|
||||
switch (file.status) {
|
||||
.success_zir, .retryable_failure => {},
|
||||
.never_loaded, .parse_failure, .astgen_failure => {
|
||||
mod.comp.mutex.lock();
|
||||
defer mod.comp.mutex.unlock();
|
||||
if (mod.failed_files.fetchSwapRemove(file)) |kv| {
|
||||
if (kv.value) |msg| msg.destroy(mod.gpa); // Delete previous error message.
|
||||
zcu.comp.mutex.lock();
|
||||
defer zcu.comp.mutex.unlock();
|
||||
if (zcu.failed_files.fetchSwapRemove(file)) |kv| {
|
||||
if (kv.value) |msg| msg.destroy(zcu.gpa); // Delete previous error message.
|
||||
}
|
||||
},
|
||||
}
|
||||
@ -2923,10 +2923,23 @@ pub fn addGlobalAssembly(zcu: *Zcu, cau: InternPool.Cau.Index, source: []const u
|
||||
}
|
||||
|
||||
pub const Feature = enum {
|
||||
/// When this feature is enabled, Sema will emit calls to `std.builtin.panic`
|
||||
/// for things like safety checks and unreachables. Otherwise traps will be emitted.
|
||||
panic_fn,
|
||||
/// When this feature is enabled, Sema will emit calls to `std.builtin.panicUnwrapError`.
|
||||
/// This error message requires more advanced formatting, hence it being separate from `panic_fn`.
|
||||
/// Otherwise traps will be emitted.
|
||||
panic_unwrap_error,
|
||||
/// When this feature is enabled, Sema will emit calls to the more complex panic functions
|
||||
/// that use formatting to add detail to error messages. Similar to `panic_unwrap_error`.
|
||||
/// Otherwise traps will be emitted.
|
||||
safety_check_formatted,
|
||||
/// When this feature is enabled, Sema will insert tracer functions for gathering a stack
|
||||
/// trace for error returns.
|
||||
error_return_trace,
|
||||
/// When this feature is enabled, Sema will emit the `is_named_enum_value` AIR instructions
|
||||
/// and use it to check for corrupt switches. Backends currently need to implement their own
|
||||
/// logic to determine whether an enum value is in the set of named values.
|
||||
is_named_enum_value,
|
||||
error_set_has_value,
|
||||
field_reordering,
|
||||
@ -2965,11 +2978,11 @@ pub const AtomicPtrAlignmentDiagnostics = struct {
|
||||
// TODO this function does not take into account CPU features, which can affect
|
||||
// this value. Audit this!
|
||||
pub fn atomicPtrAlignment(
|
||||
mod: *Zcu,
|
||||
zcu: *Zcu,
|
||||
ty: Type,
|
||||
diags: *AtomicPtrAlignmentDiagnostics,
|
||||
) AtomicPtrAlignmentError!Alignment {
|
||||
const target = mod.getTarget();
|
||||
const target = zcu.getTarget();
|
||||
const max_atomic_bits: u16 = switch (target.cpu.arch) {
|
||||
.avr,
|
||||
.msp430,
|
||||
@ -3039,8 +3052,8 @@ pub fn atomicPtrAlignment(
|
||||
}
|
||||
return .none;
|
||||
}
|
||||
if (ty.isAbiInt(mod)) {
|
||||
const bit_count = ty.intInfo(mod).bits;
|
||||
if (ty.isAbiInt(zcu)) {
|
||||
const bit_count = ty.intInfo(zcu).bits;
|
||||
if (bit_count > max_atomic_bits) {
|
||||
diags.* = .{
|
||||
.bits = bit_count,
|
||||
@ -3050,7 +3063,7 @@ pub fn atomicPtrAlignment(
|
||||
}
|
||||
return .none;
|
||||
}
|
||||
if (ty.isPtrAtRuntime(mod)) return .none;
|
||||
if (ty.isPtrAtRuntime(zcu)) return .none;
|
||||
return error.BadType;
|
||||
}
|
||||
|
||||
@ -3058,45 +3071,45 @@ pub fn atomicPtrAlignment(
|
||||
/// * `@TypeOf(.{})`
|
||||
/// * A struct which has no fields (`struct {}`).
|
||||
/// * Not a struct.
|
||||
pub fn typeToStruct(mod: *Zcu, ty: Type) ?InternPool.LoadedStructType {
|
||||
pub fn typeToStruct(zcu: *Zcu, ty: Type) ?InternPool.LoadedStructType {
|
||||
if (ty.ip_index == .none) return null;
|
||||
const ip = &mod.intern_pool;
|
||||
const ip = &zcu.intern_pool;
|
||||
return switch (ip.indexToKey(ty.ip_index)) {
|
||||
.struct_type => ip.loadStructType(ty.ip_index),
|
||||
else => null,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn typeToPackedStruct(mod: *Zcu, ty: Type) ?InternPool.LoadedStructType {
|
||||
const s = mod.typeToStruct(ty) orelse return null;
|
||||
pub fn typeToPackedStruct(zcu: *Zcu, ty: Type) ?InternPool.LoadedStructType {
|
||||
const s = zcu.typeToStruct(ty) orelse return null;
|
||||
if (s.layout != .@"packed") return null;
|
||||
return s;
|
||||
}
|
||||
|
||||
pub fn typeToUnion(mod: *Zcu, ty: Type) ?InternPool.LoadedUnionType {
|
||||
pub fn typeToUnion(zcu: *const Zcu, ty: Type) ?InternPool.LoadedUnionType {
|
||||
if (ty.ip_index == .none) return null;
|
||||
const ip = &mod.intern_pool;
|
||||
const ip = &zcu.intern_pool;
|
||||
return switch (ip.indexToKey(ty.ip_index)) {
|
||||
.union_type => ip.loadUnionType(ty.ip_index),
|
||||
else => null,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn typeToFunc(mod: *Zcu, ty: Type) ?InternPool.Key.FuncType {
|
||||
pub fn typeToFunc(zcu: *const Zcu, ty: Type) ?InternPool.Key.FuncType {
|
||||
if (ty.ip_index == .none) return null;
|
||||
return mod.intern_pool.indexToFuncType(ty.toIntern());
|
||||
return zcu.intern_pool.indexToFuncType(ty.toIntern());
|
||||
}
|
||||
|
||||
pub fn iesFuncIndex(zcu: *const Zcu, ies_index: InternPool.Index) InternPool.Index {
|
||||
return zcu.intern_pool.iesFuncIndex(ies_index);
|
||||
}
|
||||
|
||||
pub fn funcInfo(mod: *Zcu, func_index: InternPool.Index) InternPool.Key.Func {
|
||||
return mod.intern_pool.indexToKey(func_index).func;
|
||||
pub fn funcInfo(zcu: *const Zcu, func_index: InternPool.Index) InternPool.Key.Func {
|
||||
return zcu.intern_pool.indexToKey(func_index).func;
|
||||
}
|
||||
|
||||
pub fn toEnum(mod: *Zcu, comptime E: type, val: Value) E {
|
||||
return mod.intern_pool.toEnum(E, val.toIntern());
|
||||
pub fn toEnum(zcu: *const Zcu, comptime E: type, val: Value) E {
|
||||
return zcu.intern_pool.toEnum(E, val.toIntern());
|
||||
}
|
||||
|
||||
pub const UnionLayout = struct {
|
||||
@ -3121,8 +3134,8 @@ pub const UnionLayout = struct {
|
||||
};
|
||||
|
||||
/// Returns the index of the active field, given the current tag value
|
||||
pub fn unionTagFieldIndex(mod: *Zcu, loaded_union: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
|
||||
const ip = &mod.intern_pool;
|
||||
pub fn unionTagFieldIndex(zcu: *const Zcu, loaded_union: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
|
||||
const ip = &zcu.intern_pool;
|
||||
if (enum_tag.toIntern() == .none) return null;
|
||||
assert(ip.typeOf(enum_tag.toIntern()) == loaded_union.enum_tag_ty);
|
||||
return loaded_union.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
|
||||
@ -3348,7 +3361,7 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve
|
||||
return result;
|
||||
}
|
||||
|
||||
pub fn fileByIndex(zcu: *Zcu, file_index: File.Index) *File {
|
||||
pub fn fileByIndex(zcu: *const Zcu, file_index: File.Index) *File {
|
||||
return zcu.intern_pool.filePtr(file_index);
|
||||
}
|
||||
|
||||
|
||||
@ -1326,7 +1326,7 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult {
|
||||
try decl_ty.resolveFully(pt);
|
||||
}
|
||||
|
||||
if (!resolve_type or !decl_ty.hasRuntimeBits(pt)) {
|
||||
if (!resolve_type or !decl_ty.hasRuntimeBits(zcu)) {
|
||||
if (zcu.comp.config.use_llvm) break :queue_codegen;
|
||||
if (file.mod.strip) break :queue_codegen;
|
||||
}
|
||||
@ -1555,8 +1555,8 @@ pub fn embedFile(
|
||||
import_string: []const u8,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
) !InternPool.Index {
|
||||
const mod = pt.zcu;
|
||||
const gpa = mod.gpa;
|
||||
const zcu = pt.zcu;
|
||||
const gpa = zcu.gpa;
|
||||
|
||||
if (cur_file.mod.deps.get(import_string)) |pkg| {
|
||||
const resolved_path = try std.fs.path.resolve(gpa, &.{
|
||||
@ -1567,9 +1567,9 @@ pub fn embedFile(
|
||||
var keep_resolved_path = false;
|
||||
defer if (!keep_resolved_path) gpa.free(resolved_path);
|
||||
|
||||
const gop = try mod.embed_table.getOrPut(gpa, resolved_path);
|
||||
const gop = try zcu.embed_table.getOrPut(gpa, resolved_path);
|
||||
errdefer {
|
||||
assert(std.mem.eql(u8, mod.embed_table.pop().key, resolved_path));
|
||||
assert(std.mem.eql(u8, zcu.embed_table.pop().key, resolved_path));
|
||||
keep_resolved_path = false;
|
||||
}
|
||||
if (gop.found_existing) return gop.value_ptr.*.val;
|
||||
@ -1594,9 +1594,9 @@ pub fn embedFile(
|
||||
var keep_resolved_path = false;
|
||||
defer if (!keep_resolved_path) gpa.free(resolved_path);
|
||||
|
||||
const gop = try mod.embed_table.getOrPut(gpa, resolved_path);
|
||||
const gop = try zcu.embed_table.getOrPut(gpa, resolved_path);
|
||||
errdefer {
|
||||
assert(std.mem.eql(u8, mod.embed_table.pop().key, resolved_path));
|
||||
assert(std.mem.eql(u8, zcu.embed_table.pop().key, resolved_path));
|
||||
keep_resolved_path = false;
|
||||
}
|
||||
if (gop.found_existing) return gop.value_ptr.*.val;
|
||||
@ -1631,9 +1631,9 @@ fn newEmbedFile(
|
||||
result: **Zcu.EmbedFile,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
) !InternPool.Index {
|
||||
const mod = pt.zcu;
|
||||
const gpa = mod.gpa;
|
||||
const ip = &mod.intern_pool;
|
||||
const zcu = pt.zcu;
|
||||
const gpa = zcu.gpa;
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
const new_file = try gpa.create(Zcu.EmbedFile);
|
||||
errdefer gpa.destroy(new_file);
|
||||
@ -1655,7 +1655,7 @@ fn newEmbedFile(
|
||||
if (actual_read != size) return error.UnexpectedEndOfFile;
|
||||
bytes[0][size] = 0;
|
||||
|
||||
const comp = mod.comp;
|
||||
const comp = zcu.comp;
|
||||
switch (comp.cache_use) {
|
||||
.whole => |whole| if (whole.cache_manifest) |man| {
|
||||
const copied_resolved_path = try gpa.dupe(u8, resolved_path);
|
||||
@ -2756,7 +2756,7 @@ pub fn ptrType(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Allocator.Error!
|
||||
// pointee type needs to be resolved more, that needs to be done before calling
|
||||
// this ptr() function.
|
||||
if (info.flags.alignment != .none and
|
||||
info.flags.alignment == Type.fromInterned(info.child).abiAlignment(pt))
|
||||
info.flags.alignment == Type.fromInterned(info.child).abiAlignment(pt.zcu))
|
||||
{
|
||||
canon_info.flags.alignment = .none;
|
||||
}
|
||||
@ -2766,7 +2766,7 @@ pub fn ptrType(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Allocator.Error!
|
||||
// we change it to 0 here. If this causes an assertion trip, the pointee type
|
||||
// needs to be resolved before calling this ptr() function.
|
||||
.none => if (info.packed_offset.host_size != 0) {
|
||||
const elem_bit_size = Type.fromInterned(info.child).bitSize(pt);
|
||||
const elem_bit_size = Type.fromInterned(info.child).bitSize(pt.zcu);
|
||||
assert(info.packed_offset.bit_offset + elem_bit_size <= info.packed_offset.host_size * 8);
|
||||
if (info.packed_offset.host_size * 8 == elem_bit_size) {
|
||||
canon_info.packed_offset.host_size = 0;
|
||||
@ -2784,7 +2784,7 @@ pub fn ptrType(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Allocator.Error!
|
||||
/// In general, prefer this function during semantic analysis.
|
||||
pub fn ptrTypeSema(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Zcu.SemaError!Type {
|
||||
if (info.flags.alignment != .none) {
|
||||
_ = try Type.fromInterned(info.child).abiAlignmentAdvanced(pt, .sema);
|
||||
_ = try Type.fromInterned(info.child).abiAlignmentSema(pt);
|
||||
}
|
||||
return pt.ptrType(info);
|
||||
}
|
||||
@ -2857,9 +2857,9 @@ pub fn errorSetFromUnsortedNames(
|
||||
|
||||
/// Supports only pointers, not pointer-like optionals.
|
||||
pub fn ptrIntValue(pt: Zcu.PerThread, ty: Type, x: u64) Allocator.Error!Value {
|
||||
const mod = pt.zcu;
|
||||
assert(ty.zigTypeTag(mod) == .Pointer and !ty.isSlice(mod));
|
||||
assert(x != 0 or ty.isAllowzeroPtr(mod));
|
||||
const zcu = pt.zcu;
|
||||
assert(ty.zigTypeTag(zcu) == .Pointer and !ty.isSlice(zcu));
|
||||
assert(x != 0 or ty.isAllowzeroPtr(zcu));
|
||||
return Value.fromInterned(try pt.intern(.{ .ptr = .{
|
||||
.ty = ty.toIntern(),
|
||||
.base_addr = .int,
|
||||
@ -2984,15 +2984,15 @@ pub fn smallestUnsignedInt(pt: Zcu.PerThread, max: u64) Allocator.Error!Type {
|
||||
/// `max`. Asserts that neither value is undef.
|
||||
/// TODO: if #3806 is implemented, this becomes trivial
|
||||
pub fn intFittingRange(pt: Zcu.PerThread, min: Value, max: Value) !Type {
|
||||
const mod = pt.zcu;
|
||||
assert(!min.isUndef(mod));
|
||||
assert(!max.isUndef(mod));
|
||||
const zcu = pt.zcu;
|
||||
assert(!min.isUndef(zcu));
|
||||
assert(!max.isUndef(zcu));
|
||||
|
||||
if (std.debug.runtime_safety) {
|
||||
assert(Value.order(min, max, pt).compare(.lte));
|
||||
assert(Value.order(min, max, zcu).compare(.lte));
|
||||
}
|
||||
|
||||
const sign = min.orderAgainstZero(pt) == .lt;
|
||||
const sign = min.orderAgainstZero(zcu) == .lt;
|
||||
|
||||
const min_val_bits = pt.intBitsForValue(min, sign);
|
||||
const max_val_bits = pt.intBitsForValue(max, sign);
|
||||
@ -3008,10 +3008,10 @@ pub fn intFittingRange(pt: Zcu.PerThread, min: Value, max: Value) !Type {
|
||||
/// twos-complement integer; otherwise in an unsigned integer.
|
||||
/// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true.
|
||||
pub fn intBitsForValue(pt: Zcu.PerThread, val: Value, sign: bool) u16 {
|
||||
const mod = pt.zcu;
|
||||
assert(!val.isUndef(mod));
|
||||
const zcu = pt.zcu;
|
||||
assert(!val.isUndef(zcu));
|
||||
|
||||
const key = mod.intern_pool.indexToKey(val.toIntern());
|
||||
const key = zcu.intern_pool.indexToKey(val.toIntern());
|
||||
switch (key.int.storage) {
|
||||
.i64 => |x| {
|
||||
if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted) + @intFromBool(sign);
|
||||
@ -3032,154 +3032,14 @@ pub fn intBitsForValue(pt: Zcu.PerThread, val: Value, sign: bool) u16 {
|
||||
return @as(u16, @intCast(big.bitCountTwosComp()));
|
||||
},
|
||||
.lazy_align => |lazy_ty| {
|
||||
return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(pt).toByteUnits() orelse 0) + @intFromBool(sign);
|
||||
return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(pt.zcu).toByteUnits() orelse 0) + @intFromBool(sign);
|
||||
},
|
||||
.lazy_size => |lazy_ty| {
|
||||
return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiSize(pt)) + @intFromBool(sign);
|
||||
return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiSize(pt.zcu)) + @intFromBool(sign);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn getUnionLayout(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionType) Zcu.UnionLayout {
|
||||
const mod = pt.zcu;
|
||||
const ip = &mod.intern_pool;
|
||||
assert(loaded_union.haveLayout(ip));
|
||||
var most_aligned_field: u32 = undefined;
|
||||
var most_aligned_field_size: u64 = undefined;
|
||||
var biggest_field: u32 = undefined;
|
||||
var payload_size: u64 = 0;
|
||||
var payload_align: InternPool.Alignment = .@"1";
|
||||
for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| {
|
||||
if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
|
||||
|
||||
const explicit_align = loaded_union.fieldAlign(ip, field_index);
|
||||
const field_align = if (explicit_align != .none)
|
||||
explicit_align
|
||||
else
|
||||
Type.fromInterned(field_ty).abiAlignment(pt);
|
||||
const field_size = Type.fromInterned(field_ty).abiSize(pt);
|
||||
if (field_size > payload_size) {
|
||||
payload_size = field_size;
|
||||
biggest_field = @intCast(field_index);
|
||||
}
|
||||
if (field_align.compare(.gte, payload_align)) {
|
||||
payload_align = field_align;
|
||||
most_aligned_field = @intCast(field_index);
|
||||
most_aligned_field_size = field_size;
|
||||
}
|
||||
}
|
||||
const have_tag = loaded_union.flagsUnordered(ip).runtime_tag.hasTag();
|
||||
if (!have_tag or !Type.fromInterned(loaded_union.enum_tag_ty).hasRuntimeBits(pt)) {
|
||||
return .{
|
||||
.abi_size = payload_align.forward(payload_size),
|
||||
.abi_align = payload_align,
|
||||
.most_aligned_field = most_aligned_field,
|
||||
.most_aligned_field_size = most_aligned_field_size,
|
||||
.biggest_field = biggest_field,
|
||||
.payload_size = payload_size,
|
||||
.payload_align = payload_align,
|
||||
.tag_align = .none,
|
||||
.tag_size = 0,
|
||||
.padding = 0,
|
||||
};
|
||||
}
|
||||
|
||||
const tag_size = Type.fromInterned(loaded_union.enum_tag_ty).abiSize(pt);
|
||||
const tag_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(pt).max(.@"1");
|
||||
return .{
|
||||
.abi_size = loaded_union.sizeUnordered(ip),
|
||||
.abi_align = tag_align.max(payload_align),
|
||||
.most_aligned_field = most_aligned_field,
|
||||
.most_aligned_field_size = most_aligned_field_size,
|
||||
.biggest_field = biggest_field,
|
||||
.payload_size = payload_size,
|
||||
.payload_align = payload_align,
|
||||
.tag_align = tag_align,
|
||||
.tag_size = tag_size,
|
||||
.padding = loaded_union.paddingUnordered(ip),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn unionAbiSize(mod: *Module, loaded_union: InternPool.LoadedUnionType) u64 {
|
||||
return mod.getUnionLayout(loaded_union).abi_size;
|
||||
}
|
||||
|
||||
/// Returns 0 if the union is represented with 0 bits at runtime.
|
||||
pub fn unionAbiAlignment(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionType) InternPool.Alignment {
|
||||
const mod = pt.zcu;
|
||||
const ip = &mod.intern_pool;
|
||||
const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag();
|
||||
var max_align: InternPool.Alignment = .none;
|
||||
if (have_tag) max_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(pt);
|
||||
for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| {
|
||||
if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
|
||||
|
||||
const field_align = mod.unionFieldNormalAlignment(loaded_union, @intCast(field_index));
|
||||
max_align = max_align.max(field_align);
|
||||
}
|
||||
return max_align;
|
||||
}
|
||||
|
||||
/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
|
||||
pub fn unionFieldNormalAlignment(
|
||||
pt: Zcu.PerThread,
|
||||
loaded_union: InternPool.LoadedUnionType,
|
||||
field_index: u32,
|
||||
) InternPool.Alignment {
|
||||
return pt.unionFieldNormalAlignmentAdvanced(loaded_union, field_index, .normal) catch unreachable;
|
||||
}
|
||||
|
||||
/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
|
||||
/// If `strat` is `.sema`, may perform type resolution.
|
||||
pub fn unionFieldNormalAlignmentAdvanced(
|
||||
pt: Zcu.PerThread,
|
||||
loaded_union: InternPool.LoadedUnionType,
|
||||
field_index: u32,
|
||||
comptime strat: Type.ResolveStrat,
|
||||
) Zcu.SemaError!InternPool.Alignment {
|
||||
const ip = &pt.zcu.intern_pool;
|
||||
assert(loaded_union.flagsUnordered(ip).layout != .@"packed");
|
||||
const field_align = loaded_union.fieldAlign(ip, field_index);
|
||||
if (field_align != .none) return field_align;
|
||||
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
|
||||
if (field_ty.isNoReturn(pt.zcu)) return .none;
|
||||
return (try field_ty.abiAlignmentAdvanced(pt, strat.toLazy())).scalar;
|
||||
}
|
||||
|
||||
/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
|
||||
pub fn structFieldAlignment(
|
||||
pt: Zcu.PerThread,
|
||||
explicit_alignment: InternPool.Alignment,
|
||||
field_ty: Type,
|
||||
layout: std.builtin.Type.ContainerLayout,
|
||||
) InternPool.Alignment {
|
||||
return pt.structFieldAlignmentAdvanced(explicit_alignment, field_ty, layout, .normal) catch unreachable;
|
||||
}
|
||||
|
||||
/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
|
||||
/// If `strat` is `.sema`, may perform type resolution.
|
||||
pub fn structFieldAlignmentAdvanced(
|
||||
pt: Zcu.PerThread,
|
||||
explicit_alignment: InternPool.Alignment,
|
||||
field_ty: Type,
|
||||
layout: std.builtin.Type.ContainerLayout,
|
||||
comptime strat: Type.ResolveStrat,
|
||||
) Zcu.SemaError!InternPool.Alignment {
|
||||
assert(layout != .@"packed");
|
||||
if (explicit_alignment != .none) return explicit_alignment;
|
||||
const ty_abi_align = (try field_ty.abiAlignmentAdvanced(pt, strat.toLazy())).scalar;
|
||||
switch (layout) {
|
||||
.@"packed" => unreachable,
|
||||
.auto => if (pt.zcu.getTarget().ofmt != .c) return ty_abi_align,
|
||||
.@"extern" => {},
|
||||
}
|
||||
// extern
|
||||
if (field_ty.isAbiInt(pt.zcu) and field_ty.intInfo(pt.zcu).bits >= 128) {
|
||||
return ty_abi_align.maxStrict(.@"16");
|
||||
}
|
||||
return ty_abi_align;
|
||||
}
|
||||
|
||||
/// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
|
||||
/// into the packed struct InternPool data rather than computing this on the
|
||||
/// fly, however it was found to perform worse when measured on real world
|
||||
@ -3189,8 +3049,8 @@ pub fn structPackedFieldBitOffset(
|
||||
struct_type: InternPool.LoadedStructType,
|
||||
field_index: u32,
|
||||
) u16 {
|
||||
const mod = pt.zcu;
|
||||
const ip = &mod.intern_pool;
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
assert(struct_type.layout == .@"packed");
|
||||
assert(struct_type.haveLayout(ip));
|
||||
var bit_sum: u64 = 0;
|
||||
@ -3199,7 +3059,7 @@ pub fn structPackedFieldBitOffset(
|
||||
return @intCast(bit_sum);
|
||||
}
|
||||
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
|
||||
bit_sum += field_ty.bitSize(pt);
|
||||
bit_sum += field_ty.bitSize(zcu);
|
||||
}
|
||||
unreachable; // index out of bounds
|
||||
}
|
||||
@ -3244,7 +3104,7 @@ pub fn navPtrType(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) Allocator.
|
||||
return pt.ptrType(.{
|
||||
.child = ty.toIntern(),
|
||||
.flags = .{
|
||||
.alignment = if (r.alignment == ty.abiAlignment(pt))
|
||||
.alignment = if (r.alignment == ty.abiAlignment(zcu))
|
||||
.none
|
||||
else
|
||||
r.alignment,
|
||||
@ -3274,7 +3134,7 @@ pub fn navAlignment(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) InternPo
|
||||
const zcu = pt.zcu;
|
||||
const r = zcu.intern_pool.getNav(nav_index).status.resolved;
|
||||
if (r.alignment != .none) return r.alignment;
|
||||
return Value.fromInterned(r.val).typeOf(zcu).abiAlignment(pt);
|
||||
return Value.fromInterned(r.val).typeOf(zcu).abiAlignment(zcu);
|
||||
}
|
||||
|
||||
/// Given a container type requiring resolution, ensures that it is up-to-date.
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@ -15,44 +15,44 @@ pub const Class = union(enum) {
|
||||
};
|
||||
|
||||
/// For `float_array` the second element will be the amount of floats.
|
||||
pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
|
||||
std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(pt));
|
||||
pub fn classifyType(ty: Type, zcu: *Zcu) Class {
|
||||
std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
|
||||
|
||||
var maybe_float_bits: ?u16 = null;
|
||||
switch (ty.zigTypeTag(pt.zcu)) {
|
||||
switch (ty.zigTypeTag(zcu)) {
|
||||
.Struct => {
|
||||
if (ty.containerLayout(pt.zcu) == .@"packed") return .byval;
|
||||
const float_count = countFloats(ty, pt.zcu, &maybe_float_bits);
|
||||
if (ty.containerLayout(zcu) == .@"packed") return .byval;
|
||||
const float_count = countFloats(ty, zcu, &maybe_float_bits);
|
||||
if (float_count <= sret_float_count) return .{ .float_array = float_count };
|
||||
|
||||
const bit_size = ty.bitSize(pt);
|
||||
const bit_size = ty.bitSize(zcu);
|
||||
if (bit_size > 128) return .memory;
|
||||
if (bit_size > 64) return .double_integer;
|
||||
return .integer;
|
||||
},
|
||||
.Union => {
|
||||
if (ty.containerLayout(pt.zcu) == .@"packed") return .byval;
|
||||
const float_count = countFloats(ty, pt.zcu, &maybe_float_bits);
|
||||
if (ty.containerLayout(zcu) == .@"packed") return .byval;
|
||||
const float_count = countFloats(ty, zcu, &maybe_float_bits);
|
||||
if (float_count <= sret_float_count) return .{ .float_array = float_count };
|
||||
|
||||
const bit_size = ty.bitSize(pt);
|
||||
const bit_size = ty.bitSize(zcu);
|
||||
if (bit_size > 128) return .memory;
|
||||
if (bit_size > 64) return .double_integer;
|
||||
return .integer;
|
||||
},
|
||||
.Int, .Enum, .ErrorSet, .Float, .Bool => return .byval,
|
||||
.Vector => {
|
||||
const bit_size = ty.bitSize(pt);
|
||||
const bit_size = ty.bitSize(zcu);
|
||||
// TODO is this controlled by a cpu feature?
|
||||
if (bit_size > 128) return .memory;
|
||||
return .byval;
|
||||
},
|
||||
.Optional => {
|
||||
std.debug.assert(ty.isPtrLikeOptional(pt.zcu));
|
||||
std.debug.assert(ty.isPtrLikeOptional(zcu));
|
||||
return .byval;
|
||||
},
|
||||
.Pointer => {
|
||||
std.debug.assert(!ty.isSlice(pt.zcu));
|
||||
std.debug.assert(!ty.isSlice(zcu));
|
||||
return .byval;
|
||||
},
|
||||
.ErrorUnion,
|
||||
@ -95,7 +95,7 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u8 {
|
||||
var count: u8 = 0;
|
||||
var i: u32 = 0;
|
||||
while (i < fields_len) : (i += 1) {
|
||||
const field_ty = ty.structFieldType(i, zcu);
|
||||
const field_ty = ty.fieldType(i, zcu);
|
||||
const field_count = countFloats(field_ty, zcu, maybe_float_bits);
|
||||
if (field_count == invalid) return invalid;
|
||||
count += field_count;
|
||||
@ -130,7 +130,7 @@ pub fn getFloatArrayType(ty: Type, zcu: *Zcu) ?Type {
|
||||
const fields_len = ty.structFieldCount(zcu);
|
||||
var i: u32 = 0;
|
||||
while (i < fields_len) : (i += 1) {
|
||||
const field_ty = ty.structFieldType(i, zcu);
|
||||
const field_ty = ty.fieldType(i, zcu);
|
||||
if (getFloatArrayType(field_ty, zcu)) |some| return some;
|
||||
}
|
||||
return null;
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@ -24,29 +24,29 @@ pub const Class = union(enum) {
|
||||
|
||||
pub const Context = enum { ret, arg };
|
||||
|
||||
pub fn classifyType(ty: Type, pt: Zcu.PerThread, ctx: Context) Class {
|
||||
assert(ty.hasRuntimeBitsIgnoreComptime(pt));
|
||||
pub fn classifyType(ty: Type, zcu: *Zcu, ctx: Context) Class {
|
||||
assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
|
||||
|
||||
var maybe_float_bits: ?u16 = null;
|
||||
const max_byval_size = 512;
|
||||
const ip = &pt.zcu.intern_pool;
|
||||
switch (ty.zigTypeTag(pt.zcu)) {
|
||||
const ip = &zcu.intern_pool;
|
||||
switch (ty.zigTypeTag(zcu)) {
|
||||
.Struct => {
|
||||
const bit_size = ty.bitSize(pt);
|
||||
if (ty.containerLayout(pt.zcu) == .@"packed") {
|
||||
const bit_size = ty.bitSize(zcu);
|
||||
if (ty.containerLayout(zcu) == .@"packed") {
|
||||
if (bit_size > 64) return .memory;
|
||||
return .byval;
|
||||
}
|
||||
if (bit_size > max_byval_size) return .memory;
|
||||
const float_count = countFloats(ty, pt.zcu, &maybe_float_bits);
|
||||
const float_count = countFloats(ty, zcu, &maybe_float_bits);
|
||||
if (float_count <= byval_float_count) return .byval;
|
||||
|
||||
const fields = ty.structFieldCount(pt.zcu);
|
||||
const fields = ty.structFieldCount(zcu);
|
||||
var i: u32 = 0;
|
||||
while (i < fields) : (i += 1) {
|
||||
const field_ty = ty.structFieldType(i, pt.zcu);
|
||||
const field_alignment = ty.structFieldAlign(i, pt);
|
||||
const field_size = field_ty.bitSize(pt);
|
||||
const field_ty = ty.fieldType(i, zcu);
|
||||
const field_alignment = ty.fieldAlignment(i, zcu);
|
||||
const field_size = field_ty.bitSize(zcu);
|
||||
if (field_size > 32 or field_alignment.compare(.gt, .@"32")) {
|
||||
return Class.arrSize(bit_size, 64);
|
||||
}
|
||||
@ -54,19 +54,19 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread, ctx: Context) Class {
|
||||
return Class.arrSize(bit_size, 32);
|
||||
},
|
||||
.Union => {
|
||||
const bit_size = ty.bitSize(pt);
|
||||
const union_obj = pt.zcu.typeToUnion(ty).?;
|
||||
const bit_size = ty.bitSize(zcu);
|
||||
const union_obj = zcu.typeToUnion(ty).?;
|
||||
if (union_obj.flagsUnordered(ip).layout == .@"packed") {
|
||||
if (bit_size > 64) return .memory;
|
||||
return .byval;
|
||||
}
|
||||
if (bit_size > max_byval_size) return .memory;
|
||||
const float_count = countFloats(ty, pt.zcu, &maybe_float_bits);
|
||||
const float_count = countFloats(ty, zcu, &maybe_float_bits);
|
||||
if (float_count <= byval_float_count) return .byval;
|
||||
|
||||
for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
|
||||
if (Type.fromInterned(field_ty).bitSize(pt) > 32 or
|
||||
pt.unionFieldNormalAlignment(union_obj, @intCast(field_index)).compare(.gt, .@"32"))
|
||||
if (Type.fromInterned(field_ty).bitSize(zcu) > 32 or
|
||||
ty.fieldAlignment(field_index, zcu).compare(.gt, .@"32"))
|
||||
{
|
||||
return Class.arrSize(bit_size, 64);
|
||||
}
|
||||
@ -77,28 +77,28 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread, ctx: Context) Class {
|
||||
.Int => {
|
||||
// TODO this is incorrect for _BitInt(128) but implementing
|
||||
// this correctly makes implementing compiler-rt impossible.
|
||||
// const bit_size = ty.bitSize(pt);
|
||||
// const bit_size = ty.bitSize(zcu);
|
||||
// if (bit_size > 64) return .memory;
|
||||
return .byval;
|
||||
},
|
||||
.Enum, .ErrorSet => {
|
||||
const bit_size = ty.bitSize(pt);
|
||||
const bit_size = ty.bitSize(zcu);
|
||||
if (bit_size > 64) return .memory;
|
||||
return .byval;
|
||||
},
|
||||
.Vector => {
|
||||
const bit_size = ty.bitSize(pt);
|
||||
const bit_size = ty.bitSize(zcu);
|
||||
// TODO is this controlled by a cpu feature?
|
||||
if (ctx == .ret and bit_size > 128) return .memory;
|
||||
if (bit_size > 512) return .memory;
|
||||
return .byval;
|
||||
},
|
||||
.Optional => {
|
||||
assert(ty.isPtrLikeOptional(pt.zcu));
|
||||
assert(ty.isPtrLikeOptional(zcu));
|
||||
return .byval;
|
||||
},
|
||||
.Pointer => {
|
||||
assert(!ty.isSlice(pt.zcu));
|
||||
assert(!ty.isSlice(zcu));
|
||||
return .byval;
|
||||
},
|
||||
.ErrorUnion,
|
||||
@ -141,7 +141,7 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u32 {
|
||||
var count: u32 = 0;
|
||||
var i: u32 = 0;
|
||||
while (i < fields_len) : (i += 1) {
|
||||
const field_ty = ty.structFieldType(i, zcu);
|
||||
const field_ty = ty.fieldType(i, zcu);
|
||||
const field_count = countFloats(field_ty, zcu, maybe_float_bits);
|
||||
if (field_count == invalid) return invalid;
|
||||
count += field_count;
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@ -49,6 +49,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index, options: struct {
|
||||
relocs: []const Reloc,
|
||||
} {
|
||||
const pt = lower.pt;
|
||||
const zcu = pt.zcu;
|
||||
|
||||
lower.result_insts = undefined;
|
||||
lower.result_relocs = undefined;
|
||||
@ -308,11 +309,11 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index, options: struct {
|
||||
|
||||
const class = rs1.class();
|
||||
const ty = compare.ty;
|
||||
const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(pt)) catch {
|
||||
return lower.fail("pseudo_compare size {}", .{ty.bitSize(pt)});
|
||||
const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(zcu)) catch {
|
||||
return lower.fail("pseudo_compare size {}", .{ty.bitSize(zcu)});
|
||||
};
|
||||
|
||||
const is_unsigned = ty.isUnsignedInt(pt.zcu);
|
||||
const is_unsigned = ty.isUnsignedInt(zcu);
|
||||
const less_than: Mnemonic = if (is_unsigned) .sltu else .slt;
|
||||
|
||||
switch (class) {
|
||||
|
||||
@ -9,15 +9,15 @@ const assert = std.debug.assert;
|
||||
|
||||
pub const Class = enum { memory, byval, integer, double_integer, fields };
|
||||
|
||||
pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
|
||||
const target = pt.zcu.getTarget();
|
||||
std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(pt));
|
||||
pub fn classifyType(ty: Type, zcu: *Zcu) Class {
|
||||
const target = zcu.getTarget();
|
||||
std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
|
||||
|
||||
const max_byval_size = target.ptrBitWidth() * 2;
|
||||
switch (ty.zigTypeTag(pt.zcu)) {
|
||||
switch (ty.zigTypeTag(zcu)) {
|
||||
.Struct => {
|
||||
const bit_size = ty.bitSize(pt);
|
||||
if (ty.containerLayout(pt.zcu) == .@"packed") {
|
||||
const bit_size = ty.bitSize(zcu);
|
||||
if (ty.containerLayout(zcu) == .@"packed") {
|
||||
if (bit_size > max_byval_size) return .memory;
|
||||
return .byval;
|
||||
}
|
||||
@ -25,12 +25,12 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
|
||||
if (std.Target.riscv.featureSetHas(target.cpu.features, .d)) fields: {
|
||||
var any_fp = false;
|
||||
var field_count: usize = 0;
|
||||
for (0..ty.structFieldCount(pt.zcu)) |field_index| {
|
||||
const field_ty = ty.structFieldType(field_index, pt.zcu);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
|
||||
for (0..ty.structFieldCount(zcu)) |field_index| {
|
||||
const field_ty = ty.fieldType(field_index, zcu);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
if (field_ty.isRuntimeFloat())
|
||||
any_fp = true
|
||||
else if (!field_ty.isAbiInt(pt.zcu))
|
||||
else if (!field_ty.isAbiInt(zcu))
|
||||
break :fields;
|
||||
field_count += 1;
|
||||
if (field_count > 2) break :fields;
|
||||
@ -45,8 +45,8 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
|
||||
return .integer;
|
||||
},
|
||||
.Union => {
|
||||
const bit_size = ty.bitSize(pt);
|
||||
if (ty.containerLayout(pt.zcu) == .@"packed") {
|
||||
const bit_size = ty.bitSize(zcu);
|
||||
if (ty.containerLayout(zcu) == .@"packed") {
|
||||
if (bit_size > max_byval_size) return .memory;
|
||||
return .byval;
|
||||
}
|
||||
@ -58,21 +58,21 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
|
||||
.Bool => return .integer,
|
||||
.Float => return .byval,
|
||||
.Int, .Enum, .ErrorSet => {
|
||||
const bit_size = ty.bitSize(pt);
|
||||
const bit_size = ty.bitSize(zcu);
|
||||
if (bit_size > max_byval_size) return .memory;
|
||||
return .byval;
|
||||
},
|
||||
.Vector => {
|
||||
const bit_size = ty.bitSize(pt);
|
||||
const bit_size = ty.bitSize(zcu);
|
||||
if (bit_size > max_byval_size) return .memory;
|
||||
return .integer;
|
||||
},
|
||||
.Optional => {
|
||||
std.debug.assert(ty.isPtrLikeOptional(pt.zcu));
|
||||
std.debug.assert(ty.isPtrLikeOptional(zcu));
|
||||
return .byval;
|
||||
},
|
||||
.Pointer => {
|
||||
std.debug.assert(!ty.isSlice(pt.zcu));
|
||||
std.debug.assert(!ty.isSlice(zcu));
|
||||
return .byval;
|
||||
},
|
||||
.ErrorUnion,
|
||||
@ -97,19 +97,18 @@ pub const SystemClass = enum { integer, float, memory, none };
|
||||
|
||||
/// There are a maximum of 8 possible return slots. Returned values are in
|
||||
/// the beginning of the array; unused slots are filled with .none.
|
||||
pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
|
||||
const zcu = pt.zcu;
|
||||
pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
|
||||
var result = [1]SystemClass{.none} ** 8;
|
||||
const memory_class = [_]SystemClass{
|
||||
.memory, .none, .none, .none,
|
||||
.none, .none, .none, .none,
|
||||
};
|
||||
switch (ty.zigTypeTag(pt.zcu)) {
|
||||
switch (ty.zigTypeTag(zcu)) {
|
||||
.Bool, .Void, .NoReturn => {
|
||||
result[0] = .integer;
|
||||
return result;
|
||||
},
|
||||
.Pointer => switch (ty.ptrSize(pt.zcu)) {
|
||||
.Pointer => switch (ty.ptrSize(zcu)) {
|
||||
.Slice => {
|
||||
result[0] = .integer;
|
||||
result[1] = .integer;
|
||||
@ -121,14 +120,14 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
|
||||
},
|
||||
},
|
||||
.Optional => {
|
||||
if (ty.isPtrLikeOptional(pt.zcu)) {
|
||||
if (ty.isPtrLikeOptional(zcu)) {
|
||||
result[0] = .integer;
|
||||
return result;
|
||||
}
|
||||
return memory_class;
|
||||
},
|
||||
.Int, .Enum, .ErrorSet => {
|
||||
const int_bits = ty.intInfo(pt.zcu).bits;
|
||||
const int_bits = ty.intInfo(zcu).bits;
|
||||
if (int_bits <= 64) {
|
||||
result[0] = .integer;
|
||||
return result;
|
||||
@ -153,8 +152,8 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
|
||||
unreachable; // support split float args
|
||||
},
|
||||
.ErrorUnion => {
|
||||
const payload_ty = ty.errorUnionPayload(pt.zcu);
|
||||
const payload_bits = payload_ty.bitSize(pt);
|
||||
const payload_ty = ty.errorUnionPayload(zcu);
|
||||
const payload_bits = payload_ty.bitSize(zcu);
|
||||
|
||||
// the error union itself
|
||||
result[0] = .integer;
|
||||
@ -165,8 +164,8 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
|
||||
return memory_class;
|
||||
},
|
||||
.Struct, .Union => {
|
||||
const layout = ty.containerLayout(pt.zcu);
|
||||
const ty_size = ty.abiSize(pt);
|
||||
const layout = ty.containerLayout(zcu);
|
||||
const ty_size = ty.abiSize(zcu);
|
||||
|
||||
if (layout == .@"packed") {
|
||||
assert(ty_size <= 16);
|
||||
@ -178,7 +177,7 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
|
||||
return memory_class;
|
||||
},
|
||||
.Array => {
|
||||
const ty_size = ty.abiSize(pt);
|
||||
const ty_size = ty.abiSize(zcu);
|
||||
if (ty_size <= 8) {
|
||||
result[0] = .integer;
|
||||
return result;
|
||||
@ -192,7 +191,7 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
|
||||
},
|
||||
.Vector => {
|
||||
// we pass vectors through integer registers if they are small enough to fit.
|
||||
const vec_bits = ty.totalVectorBits(pt);
|
||||
const vec_bits = ty.totalVectorBits(zcu);
|
||||
if (vec_bits <= 64) {
|
||||
result[0] = .integer;
|
||||
return result;
|
||||
|
||||
@ -365,8 +365,8 @@ pub fn generate(
|
||||
|
||||
fn gen(self: *Self) !void {
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const cc = self.fn_type.fnCallingConvention(mod);
|
||||
const zcu = pt.zcu;
|
||||
const cc = self.fn_type.fnCallingConvention(zcu);
|
||||
if (cc != .Naked) {
|
||||
// TODO Finish function prologue and epilogue for sparc64.
|
||||
|
||||
@ -494,8 +494,8 @@ fn gen(self: *Self) !void {
|
||||
|
||||
fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const ip = &mod.intern_pool;
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const air_tags = self.air.instructions.items(.tag);
|
||||
|
||||
for (body) |inst| {
|
||||
@ -760,18 +760,18 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
|
||||
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const zcu = pt.zcu;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const lhs = try self.resolveInst(extra.lhs);
|
||||
const rhs = try self.resolveInst(extra.rhs);
|
||||
const lhs_ty = self.typeOf(extra.lhs);
|
||||
const rhs_ty = self.typeOf(extra.rhs);
|
||||
|
||||
switch (lhs_ty.zigTypeTag(mod)) {
|
||||
switch (lhs_ty.zigTypeTag(zcu)) {
|
||||
.Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
|
||||
.Int => {
|
||||
assert(lhs_ty.eql(rhs_ty, mod));
|
||||
const int_info = lhs_ty.intInfo(mod);
|
||||
assert(lhs_ty.eql(rhs_ty, zcu));
|
||||
const int_info = lhs_ty.intInfo(zcu);
|
||||
switch (int_info.bits) {
|
||||
32, 64 => {
|
||||
// Only say yes if the operation is
|
||||
@ -839,9 +839,9 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
|
||||
|
||||
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const zcu = pt.zcu;
|
||||
const vector_ty = self.typeOfIndex(inst);
|
||||
const len = vector_ty.vectorLen(mod);
|
||||
const len = vector_ty.vectorLen(zcu);
|
||||
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
|
||||
const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
|
||||
const result: MCValue = res: {
|
||||
@ -874,13 +874,13 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
|
||||
|
||||
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const zcu = pt.zcu;
|
||||
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const ptr_ty = self.typeOf(ty_op.operand);
|
||||
const ptr = try self.resolveInst(ty_op.operand);
|
||||
const array_ty = ptr_ty.childType(mod);
|
||||
const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
|
||||
const array_ty = ptr_ty.childType(zcu);
|
||||
const array_len = @as(u32, @intCast(array_ty.arrayLen(zcu)));
|
||||
const ptr_bytes = 8;
|
||||
const stack_offset = try self.allocMem(inst, ptr_bytes * 2, .@"8");
|
||||
try self.genSetStack(ptr_ty, stack_offset, ptr);
|
||||
@ -1012,6 +1012,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
|
||||
|
||||
fn airArg(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const pt = self.pt;
|
||||
const zcu = pt.zcu;
|
||||
const arg_index = self.arg_index;
|
||||
self.arg_index += 1;
|
||||
|
||||
@ -1021,7 +1022,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const mcv = blk: {
|
||||
switch (arg) {
|
||||
.stack_offset => |off| {
|
||||
const abi_size = math.cast(u32, ty.abiSize(pt)) orelse {
|
||||
const abi_size = math.cast(u32, ty.abiSize(zcu)) orelse {
|
||||
return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(pt)});
|
||||
};
|
||||
const offset = off + abi_size;
|
||||
@ -1211,7 +1212,7 @@ fn airBreakpoint(self: *Self) !void {
|
||||
|
||||
fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const zcu = pt.zcu;
|
||||
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
|
||||
|
||||
// We have hardware byteswapper in SPARCv9, don't let mainstream compilers mislead you.
|
||||
@ -1227,14 +1228,14 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const operand = try self.resolveInst(ty_op.operand);
|
||||
const operand_ty = self.typeOf(ty_op.operand);
|
||||
switch (operand_ty.zigTypeTag(mod)) {
|
||||
switch (operand_ty.zigTypeTag(zcu)) {
|
||||
.Vector => return self.fail("TODO byteswap for vectors", .{}),
|
||||
.Int => {
|
||||
const int_info = operand_ty.intInfo(mod);
|
||||
const int_info = operand_ty.intInfo(zcu);
|
||||
if (int_info.bits == 8) break :result operand;
|
||||
|
||||
const abi_size = int_info.bits >> 3;
|
||||
const abi_align = operand_ty.abiAlignment(pt);
|
||||
const abi_align = operand_ty.abiAlignment(zcu);
|
||||
const opposite_endian_asi = switch (self.target.cpu.arch.endian()) {
|
||||
Endian.big => ASI.asi_primary_little,
|
||||
Endian.little => ASI.asi_primary,
|
||||
@ -1304,11 +1305,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
|
||||
const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end .. extra.end + extra.data.args_len]));
|
||||
const ty = self.typeOf(callee);
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const ip = &mod.intern_pool;
|
||||
const fn_ty = switch (ty.zigTypeTag(mod)) {
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const fn_ty = switch (ty.zigTypeTag(zcu)) {
|
||||
.Fn => ty,
|
||||
.Pointer => ty.childType(mod),
|
||||
.Pointer => ty.childType(zcu),
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
@ -1360,7 +1361,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
|
||||
return self.fail("TODO implement calling bitcasted functions", .{});
|
||||
},
|
||||
} else {
|
||||
assert(ty.zigTypeTag(mod) == .Pointer);
|
||||
assert(ty.zigTypeTag(zcu) == .Pointer);
|
||||
const mcv = try self.resolveInst(callee);
|
||||
try self.genSetReg(ty, .o7, mcv);
|
||||
|
||||
@ -1409,24 +1410,24 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
|
||||
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
|
||||
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const zcu = pt.zcu;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const lhs = try self.resolveInst(bin_op.lhs);
|
||||
const rhs = try self.resolveInst(bin_op.rhs);
|
||||
const lhs_ty = self.typeOf(bin_op.lhs);
|
||||
|
||||
const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
|
||||
const int_ty = switch (lhs_ty.zigTypeTag(zcu)) {
|
||||
.Vector => unreachable, // Handled by cmp_vector.
|
||||
.Enum => lhs_ty.intTagType(mod),
|
||||
.Enum => lhs_ty.intTagType(zcu),
|
||||
.Int => lhs_ty,
|
||||
.Bool => Type.u1,
|
||||
.Pointer => Type.usize,
|
||||
.ErrorSet => Type.u16,
|
||||
.Optional => blk: {
|
||||
const payload_ty = lhs_ty.optionalChild(mod);
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
|
||||
const payload_ty = lhs_ty.optionalChild(zcu);
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
break :blk Type.u1;
|
||||
} else if (lhs_ty.isPtrLikeOptional(mod)) {
|
||||
} else if (lhs_ty.isPtrLikeOptional(zcu)) {
|
||||
break :blk Type.usize;
|
||||
} else {
|
||||
return self.fail("TODO SPARCv9 cmp non-pointer optionals", .{});
|
||||
@ -1436,7 +1437,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
const int_info = int_ty.intInfo(mod);
|
||||
const int_info = int_ty.intInfo(zcu);
|
||||
if (int_info.bits <= 64) {
|
||||
_ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, BinOpMetadata{
|
||||
.lhs = bin_op.lhs,
|
||||
@ -1635,13 +1636,9 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
|
||||
}
|
||||
|
||||
fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
|
||||
const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
|
||||
const func = mod.funcInfo(extra.data.func);
|
||||
// TODO emit debug info for function change
|
||||
_ = func;
|
||||
try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
|
||||
}
|
||||
|
||||
@ -1735,11 +1732,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
|
||||
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
|
||||
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const zcu = pt.zcu;
|
||||
const operand_ty = self.typeOf(ty_op.operand);
|
||||
const operand = try self.resolveInst(ty_op.operand);
|
||||
const info_a = operand_ty.intInfo(mod);
|
||||
const info_b = self.typeOfIndex(inst).intInfo(mod);
|
||||
const info_a = operand_ty.intInfo(zcu);
|
||||
const info_b = self.typeOfIndex(inst).intInfo(zcu);
|
||||
if (info_a.signedness != info_b.signedness)
|
||||
return self.fail("TODO gen intcast sign safety in semantic analysis", .{});
|
||||
|
||||
@ -1797,16 +1794,16 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
|
||||
|
||||
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const zcu = pt.zcu;
|
||||
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
|
||||
const elem_ty = self.typeOfIndex(inst);
|
||||
const elem_size = elem_ty.abiSize(pt);
|
||||
const elem_size = elem_ty.abiSize(zcu);
|
||||
const result: MCValue = result: {
|
||||
if (!elem_ty.hasRuntimeBits(pt))
|
||||
if (!elem_ty.hasRuntimeBits(zcu))
|
||||
break :result MCValue.none;
|
||||
|
||||
const ptr = try self.resolveInst(ty_op.operand);
|
||||
const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod);
|
||||
const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(zcu);
|
||||
if (self.liveness.isUnused(inst) and !is_volatile)
|
||||
break :result MCValue.dead;
|
||||
|
||||
@ -2024,18 +2021,18 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
|
||||
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const zcu = pt.zcu;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const lhs = try self.resolveInst(extra.lhs);
|
||||
const rhs = try self.resolveInst(extra.rhs);
|
||||
const lhs_ty = self.typeOf(extra.lhs);
|
||||
const rhs_ty = self.typeOf(extra.rhs);
|
||||
|
||||
switch (lhs_ty.zigTypeTag(mod)) {
|
||||
switch (lhs_ty.zigTypeTag(zcu)) {
|
||||
.Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
|
||||
.Int => {
|
||||
assert(lhs_ty.eql(rhs_ty, mod));
|
||||
const int_info = lhs_ty.intInfo(mod);
|
||||
assert(lhs_ty.eql(rhs_ty, zcu));
|
||||
const int_info = lhs_ty.intInfo(zcu);
|
||||
switch (int_info.bits) {
|
||||
1...32 => {
|
||||
try self.spillConditionFlagsIfOccupied();
|
||||
@ -2089,7 +2086,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
|
||||
fn airNot(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const zcu = pt.zcu;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const operand = try self.resolveInst(ty_op.operand);
|
||||
const operand_ty = self.typeOf(ty_op.operand);
|
||||
@ -2105,7 +2102,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
|
||||
};
|
||||
},
|
||||
else => {
|
||||
switch (operand_ty.zigTypeTag(mod)) {
|
||||
switch (operand_ty.zigTypeTag(zcu)) {
|
||||
.Bool => {
|
||||
const op_reg = switch (operand) {
|
||||
.register => |r| r,
|
||||
@ -2139,7 +2136,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
|
||||
},
|
||||
.Vector => return self.fail("TODO bitwise not for vectors", .{}),
|
||||
.Int => {
|
||||
const int_info = operand_ty.intInfo(mod);
|
||||
const int_info = operand_ty.intInfo(zcu);
|
||||
if (int_info.bits <= 64) {
|
||||
const op_reg = switch (operand) {
|
||||
.register => |r| r,
|
||||
@ -2322,17 +2319,17 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
|
||||
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const zcu = pt.zcu;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const lhs = try self.resolveInst(extra.lhs);
|
||||
const rhs = try self.resolveInst(extra.rhs);
|
||||
const lhs_ty = self.typeOf(extra.lhs);
|
||||
const rhs_ty = self.typeOf(extra.rhs);
|
||||
|
||||
switch (lhs_ty.zigTypeTag(mod)) {
|
||||
switch (lhs_ty.zigTypeTag(zcu)) {
|
||||
.Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
|
||||
.Int => {
|
||||
const int_info = lhs_ty.intInfo(mod);
|
||||
const int_info = lhs_ty.intInfo(zcu);
|
||||
if (int_info.bits <= 64) {
|
||||
try self.spillConditionFlagsIfOccupied();
|
||||
|
||||
@ -2428,7 +2425,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
|
||||
|
||||
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const zcu = pt.zcu;
|
||||
const is_volatile = false; // TODO
|
||||
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
|
||||
|
||||
@ -2438,10 +2435,10 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const index_mcv = try self.resolveInst(bin_op.rhs);
|
||||
|
||||
const slice_ty = self.typeOf(bin_op.lhs);
|
||||
const elem_ty = slice_ty.childType(mod);
|
||||
const elem_size = elem_ty.abiSize(pt);
|
||||
const elem_ty = slice_ty.childType(zcu);
|
||||
const elem_size = elem_ty.abiSize(zcu);
|
||||
|
||||
const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod);
|
||||
const slice_ptr_field_type = slice_ty.slicePtrFieldType(zcu);
|
||||
|
||||
const index_lock: ?RegisterLock = if (index_mcv == .register)
|
||||
self.register_manager.lockRegAssumeUnused(index_mcv.register)
|
||||
@ -2553,10 +2550,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const operand = extra.struct_operand;
|
||||
const index = extra.field_index;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const pt = self.pt;
|
||||
const zcu = self.pt.zcu;
|
||||
const mcv = try self.resolveInst(operand);
|
||||
const struct_ty = self.typeOf(operand);
|
||||
const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt)));
|
||||
const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, zcu)));
|
||||
|
||||
switch (mcv) {
|
||||
.dead, .unreach => unreachable,
|
||||
@ -2687,13 +2684,13 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
|
||||
|
||||
fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const zcu = pt.zcu;
|
||||
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const error_union_ty = self.typeOf(ty_op.operand);
|
||||
const payload_ty = error_union_ty.errorUnionPayload(mod);
|
||||
const payload_ty = error_union_ty.errorUnionPayload(zcu);
|
||||
const mcv = try self.resolveInst(ty_op.operand);
|
||||
if (!payload_ty.hasRuntimeBits(pt)) break :result mcv;
|
||||
if (!payload_ty.hasRuntimeBits(zcu)) break :result mcv;
|
||||
|
||||
return self.fail("TODO implement unwrap error union error for non-empty payloads", .{});
|
||||
};
|
||||
@ -2702,12 +2699,12 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
|
||||
|
||||
fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const zcu = pt.zcu;
|
||||
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const error_union_ty = self.typeOf(ty_op.operand);
|
||||
const payload_ty = error_union_ty.errorUnionPayload(mod);
|
||||
if (!payload_ty.hasRuntimeBits(pt)) break :result MCValue.none;
|
||||
const payload_ty = error_union_ty.errorUnionPayload(zcu);
|
||||
if (!payload_ty.hasRuntimeBits(zcu)) break :result MCValue.none;
|
||||
|
||||
return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{});
|
||||
};
|
||||
@ -2717,13 +2714,13 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
|
||||
/// E to E!T
|
||||
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const zcu = pt.zcu;
|
||||
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const error_union_ty = ty_op.ty.toType();
|
||||
const payload_ty = error_union_ty.errorUnionPayload(mod);
|
||||
const payload_ty = error_union_ty.errorUnionPayload(zcu);
|
||||
const mcv = try self.resolveInst(ty_op.operand);
|
||||
if (!payload_ty.hasRuntimeBits(pt)) break :result mcv;
|
||||
if (!payload_ty.hasRuntimeBits(zcu)) break :result mcv;
|
||||
|
||||
return self.fail("TODO implement wrap errunion error for non-empty payloads", .{});
|
||||
};
|
||||
@ -2744,7 +2741,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const optional_ty = self.typeOfIndex(inst);
|
||||
|
||||
// Optional with a zero-bit payload type is just a boolean true
|
||||
if (optional_ty.abiSize(pt) == 1)
|
||||
if (optional_ty.abiSize(pt.zcu) == 1)
|
||||
break :result MCValue{ .immediate = 1 };
|
||||
|
||||
return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch});
|
||||
@ -2779,10 +2776,10 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: Alignme
|
||||
/// Use a pointer instruction as the basis for allocating stack memory.
|
||||
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const elem_ty = self.typeOfIndex(inst).childType(mod);
|
||||
const zcu = pt.zcu;
|
||||
const elem_ty = self.typeOfIndex(inst).childType(zcu);
|
||||
|
||||
if (!elem_ty.hasRuntimeBits(pt)) {
|
||||
if (!elem_ty.hasRuntimeBits(zcu)) {
|
||||
// As this stack item will never be dereferenced at runtime,
|
||||
// return the stack offset 0. Stack offset 0 will be where all
|
||||
// zero-sized stack allocations live as non-zero-sized
|
||||
@ -2790,21 +2787,22 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
|
||||
return @as(u32, 0);
|
||||
}
|
||||
|
||||
const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
|
||||
const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
|
||||
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
|
||||
};
|
||||
// TODO swap this for inst.ty.ptrAlign
|
||||
const abi_align = elem_ty.abiAlignment(pt);
|
||||
const abi_align = elem_ty.abiAlignment(zcu);
|
||||
return self.allocMem(inst, abi_size, abi_align);
|
||||
}
|
||||
|
||||
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
|
||||
const pt = self.pt;
|
||||
const zcu = pt.zcu;
|
||||
const elem_ty = self.typeOfIndex(inst);
|
||||
const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
|
||||
const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
|
||||
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
|
||||
};
|
||||
const abi_align = elem_ty.abiAlignment(pt);
|
||||
const abi_align = elem_ty.abiAlignment(zcu);
|
||||
self.stack_align = self.stack_align.max(abi_align);
|
||||
|
||||
if (reg_ok) {
|
||||
@ -2847,7 +2845,7 @@ fn binOp(
|
||||
metadata: ?BinOpMetadata,
|
||||
) InnerError!MCValue {
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const zcu = pt.zcu;
|
||||
switch (tag) {
|
||||
.add,
|
||||
.sub,
|
||||
@ -2857,12 +2855,12 @@ fn binOp(
|
||||
.xor,
|
||||
.cmp_eq,
|
||||
=> {
|
||||
switch (lhs_ty.zigTypeTag(mod)) {
|
||||
switch (lhs_ty.zigTypeTag(zcu)) {
|
||||
.Float => return self.fail("TODO binary operations on floats", .{}),
|
||||
.Vector => return self.fail("TODO binary operations on vectors", .{}),
|
||||
.Int => {
|
||||
assert(lhs_ty.eql(rhs_ty, mod));
|
||||
const int_info = lhs_ty.intInfo(mod);
|
||||
assert(lhs_ty.eql(rhs_ty, zcu));
|
||||
const int_info = lhs_ty.intInfo(zcu);
|
||||
if (int_info.bits <= 64) {
|
||||
// Only say yes if the operation is
|
||||
// commutative, i.e. we can swap both of the
|
||||
@ -2931,10 +2929,10 @@ fn binOp(
|
||||
const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
|
||||
|
||||
// Truncate if necessary
|
||||
switch (lhs_ty.zigTypeTag(mod)) {
|
||||
switch (lhs_ty.zigTypeTag(zcu)) {
|
||||
.Vector => return self.fail("TODO binary operations on vectors", .{}),
|
||||
.Int => {
|
||||
const int_info = lhs_ty.intInfo(mod);
|
||||
const int_info = lhs_ty.intInfo(zcu);
|
||||
if (int_info.bits <= 64) {
|
||||
const result_reg = result.register;
|
||||
try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits);
|
||||
@ -2948,11 +2946,11 @@ fn binOp(
|
||||
},
|
||||
|
||||
.div_trunc => {
|
||||
switch (lhs_ty.zigTypeTag(mod)) {
|
||||
switch (lhs_ty.zigTypeTag(zcu)) {
|
||||
.Vector => return self.fail("TODO binary operations on vectors", .{}),
|
||||
.Int => {
|
||||
assert(lhs_ty.eql(rhs_ty, mod));
|
||||
const int_info = lhs_ty.intInfo(mod);
|
||||
assert(lhs_ty.eql(rhs_ty, zcu));
|
||||
const int_info = lhs_ty.intInfo(zcu);
|
||||
if (int_info.bits <= 64) {
|
||||
const rhs_immediate_ok = switch (tag) {
|
||||
.div_trunc => rhs == .immediate and rhs.immediate <= std.math.maxInt(u12),
|
||||
@ -2981,14 +2979,14 @@ fn binOp(
|
||||
},
|
||||
|
||||
.ptr_add => {
|
||||
switch (lhs_ty.zigTypeTag(mod)) {
|
||||
switch (lhs_ty.zigTypeTag(zcu)) {
|
||||
.Pointer => {
|
||||
const ptr_ty = lhs_ty;
|
||||
const elem_ty = switch (ptr_ty.ptrSize(mod)) {
|
||||
.One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
|
||||
else => ptr_ty.childType(mod),
|
||||
const elem_ty = switch (ptr_ty.ptrSize(zcu)) {
|
||||
.One => ptr_ty.childType(zcu).childType(zcu), // ptr to array, so get array element type
|
||||
else => ptr_ty.childType(zcu),
|
||||
};
|
||||
const elem_size = elem_ty.abiSize(pt);
|
||||
const elem_size = elem_ty.abiSize(zcu);
|
||||
|
||||
if (elem_size == 1) {
|
||||
const base_tag: Mir.Inst.Tag = switch (tag) {
|
||||
@ -3013,7 +3011,7 @@ fn binOp(
|
||||
.bool_and,
|
||||
.bool_or,
|
||||
=> {
|
||||
switch (lhs_ty.zigTypeTag(mod)) {
|
||||
switch (lhs_ty.zigTypeTag(zcu)) {
|
||||
.Bool => {
|
||||
assert(lhs != .immediate); // should have been handled by Sema
|
||||
assert(rhs != .immediate); // should have been handled by Sema
|
||||
@ -3043,10 +3041,10 @@ fn binOp(
|
||||
const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
|
||||
|
||||
// Truncate if necessary
|
||||
switch (lhs_ty.zigTypeTag(mod)) {
|
||||
switch (lhs_ty.zigTypeTag(zcu)) {
|
||||
.Vector => return self.fail("TODO binary operations on vectors", .{}),
|
||||
.Int => {
|
||||
const int_info = lhs_ty.intInfo(mod);
|
||||
const int_info = lhs_ty.intInfo(zcu);
|
||||
if (int_info.bits <= 64) {
|
||||
// 32 and 64 bit operands doesn't need truncating
|
||||
if (int_info.bits == 32 or int_info.bits == 64) return result;
|
||||
@ -3065,10 +3063,10 @@ fn binOp(
|
||||
.shl_exact,
|
||||
.shr_exact,
|
||||
=> {
|
||||
switch (lhs_ty.zigTypeTag(mod)) {
|
||||
switch (lhs_ty.zigTypeTag(zcu)) {
|
||||
.Vector => return self.fail("TODO binary operations on vectors", .{}),
|
||||
.Int => {
|
||||
const int_info = lhs_ty.intInfo(mod);
|
||||
const int_info = lhs_ty.intInfo(zcu);
|
||||
if (int_info.bits <= 64) {
|
||||
const rhs_immediate_ok = rhs == .immediate;
|
||||
|
||||
@ -3388,8 +3386,8 @@ fn binOpRegister(
|
||||
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
|
||||
const block_data = self.blocks.getPtr(block).?;
|
||||
|
||||
const pt = self.pt;
|
||||
if (self.typeOf(operand).hasRuntimeBits(pt)) {
|
||||
const zcu = self.pt.zcu;
|
||||
if (self.typeOf(operand).hasRuntimeBits(zcu)) {
|
||||
const operand_mcv = try self.resolveInst(operand);
|
||||
const block_mcv = block_data.mcv;
|
||||
if (block_mcv == .none) {
|
||||
@ -3509,17 +3507,17 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
|
||||
/// Given an error union, returns the payload
|
||||
fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue {
|
||||
const pt = self.pt;
|
||||
const mod = pt.zcu;
|
||||
const err_ty = error_union_ty.errorUnionSet(mod);
|
||||
const payload_ty = error_union_ty.errorUnionPayload(mod);
|
||||
if (err_ty.errorSetIsEmpty(mod)) {
|
||||
const zcu = pt.zcu;
|
||||
const err_ty = error_union_ty.errorUnionSet(zcu);
|
||||
const payload_ty = error_union_ty.errorUnionPayload(zcu);
|
||||
if (err_ty.errorSetIsEmpty(zcu)) {
|
||||
return error_union_mcv;
|
||||
}
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
return MCValue.none;
|
||||
}
|
||||
|
||||
const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt)));
|
||||
const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu)));
|
||||
switch (error_union_mcv) {
|
||||
.register => return self.fail("TODO errUnionPayload for registers", .{}),
|
||||
.stack_offset => |off| {
|
||||
@ -3731,6 +3729,7 @@ fn genLoadASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Reg
|
||||
|
||||
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
|
||||
const pt = self.pt;
|
||||
const zcu = pt.zcu;
|
||||
switch (mcv) {
|
||||
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@ -3929,21 +3928,21 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// The value is in memory at a hard-coded address.
// If the type is a pointer, it means the pointer address is at this memory location.
try self.genSetReg(ty, reg, .{ .immediate = addr });
try self.genLoad(reg, reg, i13, 0, ty.abiSize(pt));
try self.genLoad(reg, reg, i13, 0, ty.abiSize(zcu));
},
.stack_offset => |off| {
const real_offset = realStackOffset(off);
const simm13 = math.cast(i13, real_offset) orelse
return self.fail("TODO larger stack offsets: {}", .{real_offset});
try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(pt));
try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(zcu));
},
}
}

fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
const pt = self.pt;
const mod = pt.zcu;
const abi_size = ty.abiSize(pt);
const zcu = pt.zcu;
const abi_size = ty.abiSize(zcu);
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@ -3951,7 +3950,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
if (!self.wantSafety())
return; // The already existing value will do just fine.
// TODO Upgrade this to a memset call when we have that available.
switch (ty.abiSize(pt)) {
switch (ty.abiSize(zcu)) {
1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
@ -3977,11 +3976,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
const reg_lock = self.register_manager.lockReg(rwo.reg);
defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);

const wrapped_ty = ty.structFieldType(0, mod);
const wrapped_ty = ty.fieldType(0, zcu);
try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });

const overflow_bit_ty = ty.structFieldType(1, mod);
const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, pt)));
const overflow_bit_ty = ty.fieldType(1, zcu);
const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, zcu)));
const cond_reg = try self.register_manager.allocReg(null, gp);

// TODO handle floating point CCRs
@ -4154,14 +4153,14 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {

fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
const pt = self.pt;
const mod = pt.zcu;
const error_type = ty.errorUnionSet(mod);
const payload_type = ty.errorUnionPayload(mod);
const zcu = pt.zcu;
const error_type = ty.errorUnionSet(zcu);
const payload_type = ty.errorUnionPayload(zcu);

if (!error_type.hasRuntimeBits(pt)) {
if (!error_type.hasRuntimeBits(zcu)) {
return MCValue{ .immediate = 0 }; // always false
} else if (!payload_type.hasRuntimeBits(pt)) {
if (error_type.abiSize(pt) <= 8) {
} else if (!payload_type.hasRuntimeBits(zcu)) {
if (error_type.abiSize(zcu) <= 8) {
const reg_mcv: MCValue = switch (operand) {
.register => operand,
else => .{ .register = try self.copyToTmpRegister(error_type, operand) },
@ -4253,9 +4252,9 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void {

fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
const pt = self.pt;
const mod = pt.zcu;
const elem_ty = ptr_ty.childType(mod);
const elem_size = elem_ty.abiSize(pt);
const zcu = pt.zcu;
const elem_ty = ptr_ty.childType(zcu);
const elem_size = elem_ty.abiSize(zcu);

switch (ptr) {
.none => unreachable,
@ -4325,13 +4324,13 @@ fn minMax(
rhs_ty: Type,
) InnerError!MCValue {
const pt = self.pt;
const mod = pt.zcu;
assert(lhs_ty.eql(rhs_ty, mod));
switch (lhs_ty.zigTypeTag(mod)) {
const zcu = pt.zcu;
assert(lhs_ty.eql(rhs_ty, zcu));
switch (lhs_ty.zigTypeTag(zcu)) {
.Float => return self.fail("TODO min/max on floats", .{}),
.Vector => return self.fail("TODO min/max on vectors", .{}),
.Int => {
const int_info = lhs_ty.intInfo(mod);
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
// TODO skip register setting when one of the operands
// is a small (fits in i13) immediate.
@ -4446,9 +4445,9 @@ fn realStackOffset(off: u32) u32 {
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues {
const pt = self.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(fn_ty).?;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const fn_info = zcu.typeToFunc(fn_ty).?;
const cc = fn_info.cc;
var result: CallMCValues = .{
.args = try self.gpa.alloc(MCValue, fn_info.param_types.len),
@ -4459,7 +4458,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
};
errdefer self.gpa.free(result.args);

const ret_ty = fn_ty.fnReturnType(mod);
const ret_ty = fn_ty.fnReturnType(zcu);

switch (cc) {
.Naked => {
@ -4487,7 +4486,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
};

for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(pt)));
const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(zcu)));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
result_arg.* = .{ .register = argument_registers[next_register] };
@ -4514,12 +4513,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
result.stack_byte_count = next_stack_offset;
result.stack_align = .@"16";

if (ret_ty.zigTypeTag(mod) == .NoReturn) {
if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasRuntimeBits(pt)) {
} else if (!ret_ty.hasRuntimeBits(zcu)) {
result.return_value = .{ .none = {} };
} else {
const ret_ty_size: u32 = @intCast(ret_ty.abiSize(pt));
const ret_ty_size: u32 = @intCast(ret_ty.abiSize(zcu));
// The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller.
if (ret_ty_size <= 8) {
result.return_value = switch (role) {
@ -4542,7 +4541,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
const ty = self.typeOf(ref);

// If the type has no codegen bits, no need to store it.
if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
if (!ty.hasRuntimeBitsIgnoreComptime(pt.zcu)) return .none;

if (ref.toIndex()) |inst| {
return self.getResolvedInstValue(inst);
@ -4553,8 +4552,8 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {

fn ret(self: *Self, mcv: MCValue) !void {
const pt = self.pt;
const mod = pt.zcu;
const ret_ty = self.fn_type.fnReturnType(mod);
const zcu = pt.zcu;
const ret_ty = self.fn_type.fnReturnType(zcu);
try self.setRegOrMem(ret_ty, self.ret_mcv, mcv);

// Just add space for a branch instruction, patch this later
@ -4656,7 +4655,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void

fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
const pt = self.pt;
const abi_size = value_ty.abiSize(pt);
const abi_size = value_ty.abiSize(pt.zcu);

switch (ptr) {
.none => unreachable,
@ -4698,11 +4697,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
return if (self.liveness.isUnused(inst)) .dead else result: {
const pt = self.pt;
const mod = pt.zcu;
const zcu = pt.zcu;
const mcv = try self.resolveInst(operand);
const ptr_ty = self.typeOf(operand);
const struct_ty = ptr_ty.childType(mod);
const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt)));
const struct_ty = ptr_ty.childType(zcu);
const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, zcu)));
switch (mcv) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@ -4741,9 +4740,9 @@ fn trunc(
dest_ty: Type,
) !MCValue {
const pt = self.pt;
const mod = pt.zcu;
const info_a = operand_ty.intInfo(mod);
const info_b = dest_ty.intInfo(mod);
const zcu = pt.zcu;
const info_a = operand_ty.intInfo(zcu);
const info_b = dest_ty.intInfo(zcu);

if (info_b.bits <= 64) {
const operand_reg = switch (operand) {
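A minimal usage sketch of the calling-convention resolver changed above, assuming the sparc64 RegisterView has a .caller role and that CallMCValues.deinit frees the args slice, as the errdefer in this hunk suggests:

// Hypothetical call site; per the doc comment, the caller owns the result.
var call_values = try self.resolveCallingConventionValues(callee_fn_ty, .caller);
defer call_values.deinit(self.gpa); // assumed to free call_values.args
for (call_values.args) |arg_mcv| {
    // move each argument into the register or stack slot chosen by the resolver
    _ = arg_mcv;
}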
File diff suppressed because it is too large
@ -255,7 +255,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
@setCold(true);
std.debug.assert(emit.error_msg == null);
const comp = emit.bin_file.base.comp;
const zcu = comp.module.?;
const zcu = comp.zcu.?;
const gpa = comp.gpa;
emit.error_msg = try Zcu.ErrorMsg.create(gpa, zcu.navSrcLoc(emit.owner_nav), format, args);
return error.EmitFail;

@ -22,16 +22,15 @@ const direct: [2]Class = .{ .direct, .none };
/// Classifies a given Zig type to determine how they must be passed
/// or returned as value within a wasm function.
/// When all elements result in `.none`, no value must be passed in or returned.
pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
const mod = pt.zcu;
const ip = &mod.intern_pool;
const target = mod.getTarget();
if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return none;
switch (ty.zigTypeTag(mod)) {
pub fn classifyType(ty: Type, zcu: *Zcu) [2]Class {
const ip = &zcu.intern_pool;
const target = zcu.getTarget();
if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return none;
switch (ty.zigTypeTag(zcu)) {
.Struct => {
const struct_type = pt.zcu.typeToStruct(ty).?;
const struct_type = zcu.typeToStruct(ty).?;
if (struct_type.layout == .@"packed") {
if (ty.bitSize(pt) <= 64) return direct;
if (ty.bitSize(zcu) <= 64) return direct;
return .{ .direct, .direct };
}
if (struct_type.field_types.len > 1) {
@ -41,13 +40,13 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[0]);
const explicit_align = struct_type.fieldAlign(ip, 0);
if (explicit_align != .none) {
if (explicit_align.compareStrict(.gt, field_ty.abiAlignment(pt)))
if (explicit_align.compareStrict(.gt, field_ty.abiAlignment(zcu)))
return memory;
}
return classifyType(field_ty, pt);
return classifyType(field_ty, zcu);
},
.Int, .Enum, .ErrorSet => {
const int_bits = ty.intInfo(pt.zcu).bits;
const int_bits = ty.intInfo(zcu).bits;
if (int_bits <= 64) return direct;
if (int_bits <= 128) return .{ .direct, .direct };
return memory;
@ -62,24 +61,24 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
.Vector => return direct,
.Array => return memory,
.Optional => {
assert(ty.isPtrLikeOptional(pt.zcu));
assert(ty.isPtrLikeOptional(zcu));
return direct;
},
.Pointer => {
assert(!ty.isSlice(pt.zcu));
assert(!ty.isSlice(zcu));
return direct;
},
.Union => {
const union_obj = pt.zcu.typeToUnion(ty).?;
const union_obj = zcu.typeToUnion(ty).?;
if (union_obj.flagsUnordered(ip).layout == .@"packed") {
if (ty.bitSize(pt) <= 64) return direct;
if (ty.bitSize(zcu) <= 64) return direct;
return .{ .direct, .direct };
}
const layout = ty.unionGetLayout(pt);
const layout = ty.unionGetLayout(zcu);
assert(layout.tag_size == 0);
if (union_obj.field_types.len > 1) return memory;
const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
return classifyType(first_field_ty, pt);
return classifyType(first_field_ty, zcu);
},
.ErrorUnion,
.Frame,
@ -101,29 +100,28 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
/// Returns the scalar type a given type can represent.
/// Asserts given type can be represented as scalar, such as
/// a struct with a single scalar field.
pub fn scalarType(ty: Type, pt: Zcu.PerThread) Type {
const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (ty.zigTypeTag(mod)) {
pub fn scalarType(ty: Type, zcu: *Zcu) Type {
const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(zcu)) {
.Struct => {
if (mod.typeToPackedStruct(ty)) |packed_struct| {
return scalarType(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt);
if (zcu.typeToPackedStruct(ty)) |packed_struct| {
return scalarType(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), zcu);
} else {
assert(ty.structFieldCount(mod) == 1);
return scalarType(ty.structFieldType(0, mod), pt);
assert(ty.structFieldCount(zcu) == 1);
return scalarType(ty.fieldType(0, zcu), zcu);
}
},
.Union => {
const union_obj = mod.typeToUnion(ty).?;
const union_obj = zcu.typeToUnion(ty).?;
if (union_obj.flagsUnordered(ip).layout != .@"packed") {
const layout = pt.getUnionLayout(union_obj);
const layout = Type.getUnionLayout(union_obj, zcu);
if (layout.payload_size == 0 and layout.tag_size != 0) {
return scalarType(ty.unionTagTypeSafety(mod).?, pt);
return scalarType(ty.unionTagTypeSafety(zcu).?, zcu);
}
assert(union_obj.field_types.len == 1);
}
const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
return scalarType(first_field_ty, pt);
return scalarType(first_field_ty, zcu);
},
else => return ty,
}
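A short sketch of how a caller reads the wasm classification after this rename, assuming the file above is imported as `abi` and a `*Zcu` named `zcu` is in scope:

// Hypothetical caller: decide how a parameter is lowered.
const classes = abi.classifyType(param_ty, zcu);
if (classes[0] == .none) {
    // zero-bit value: nothing is passed
} else if (classes[0] == .memory) {
    // passed by reference to stack memory
} else if (classes[1] == .direct) {
    // wide scalar split across two direct slots
}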
File diff suppressed because it is too large
@ -357,7 +357,7 @@ pub fn emitMir(emit: *Emit) Error!void {
} } };
},
};
const ip = &emit.lower.bin_file.comp.module.?.intern_pool;
const ip = &emit.lower.bin_file.comp.zcu.?.intern_pool;
const air_inst = emit.air.instructions.get(@intFromEnum(air_inst_index));
const name: Air.NullTerminatedString = switch (air_inst.tag) {
else => unreachable,

@ -44,7 +44,7 @@ pub const Class = enum {
}
};

pub fn classifyWindows(ty: Type, pt: Zcu.PerThread) Class {
pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
// https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
// "There's a strict one-to-one correspondence between a function call's arguments
// and the registers used for those arguments. Any argument that doesn't fit in 8
@ -53,7 +53,7 @@ pub fn classifyWindows(ty: Type, pt: Zcu.PerThread) Class {
// "All floating point operations are done using the 16 XMM registers."
// "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed
// as if they were integers of the same size."
switch (ty.zigTypeTag(pt.zcu)) {
switch (ty.zigTypeTag(zcu)) {
.Pointer,
.Int,
.Bool,
@ -68,12 +68,12 @@ pub fn classifyWindows(ty: Type, pt: Zcu.PerThread) Class {
.ErrorUnion,
.AnyFrame,
.Frame,
=> switch (ty.abiSize(pt)) {
=> switch (ty.abiSize(zcu)) {
0 => unreachable,
1, 2, 4, 8 => return .integer,
else => switch (ty.zigTypeTag(pt.zcu)) {
else => switch (ty.zigTypeTag(zcu)) {
.Int => return .win_i128,
.Struct, .Union => if (ty.containerLayout(pt.zcu) == .@"packed") {
.Struct, .Union => if (ty.containerLayout(zcu) == .@"packed") {
return .win_i128;
} else {
return .memory;
@ -100,14 +100,14 @@ pub const Context = enum { ret, arg, field, other };

/// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Context) [8]Class {
pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8]Class {
const memory_class = [_]Class{
.memory, .none, .none, .none,
.none, .none, .none, .none,
};
var result = [1]Class{.none} ** 8;
switch (ty.zigTypeTag(pt.zcu)) {
.Pointer => switch (ty.ptrSize(pt.zcu)) {
switch (ty.zigTypeTag(zcu)) {
.Pointer => switch (ty.ptrSize(zcu)) {
.Slice => {
result[0] = .integer;
result[1] = .integer;
@ -119,7 +119,7 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
},
},
.Int, .Enum, .ErrorSet => {
const bits = ty.intInfo(pt.zcu).bits;
const bits = ty.intInfo(zcu).bits;
if (bits <= 64) {
result[0] = .integer;
return result;
@ -185,8 +185,8 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
else => unreachable,
},
.Vector => {
const elem_ty = ty.childType(pt.zcu);
const bits = elem_ty.bitSize(pt) * ty.arrayLen(pt.zcu);
const elem_ty = ty.childType(zcu);
const bits = elem_ty.bitSize(zcu) * ty.arrayLen(zcu);
if (elem_ty.toIntern() == .bool_type) {
if (bits <= 32) return .{
.integer, .none, .none, .none,
@ -250,7 +250,7 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
return memory_class;
},
.Optional => {
if (ty.isPtrLikeOptional(pt.zcu)) {
if (ty.isPtrLikeOptional(zcu)) {
result[0] = .integer;
return result;
}
@ -261,8 +261,8 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
// it contains unaligned fields, it has class MEMORY"
// "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.".
const ty_size = ty.abiSize(pt);
switch (ty.containerLayout(pt.zcu)) {
const ty_size = ty.abiSize(zcu);
switch (ty.containerLayout(zcu)) {
.auto, .@"extern" => {},
.@"packed" => {
assert(ty_size <= 16);
@ -274,10 +274,10 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
if (ty_size > 64)
return memory_class;

_ = if (pt.zcu.typeToStruct(ty)) |loaded_struct|
classifySystemVStruct(&result, 0, loaded_struct, pt, target)
else if (pt.zcu.typeToUnion(ty)) |loaded_union|
classifySystemVUnion(&result, 0, loaded_union, pt, target)
_ = if (zcu.typeToStruct(ty)) |loaded_struct|
classifySystemVStruct(&result, 0, loaded_struct, zcu, target)
else if (zcu.typeToUnion(ty)) |loaded_union|
classifySystemVUnion(&result, 0, loaded_union, zcu, target)
else
unreachable;

@ -306,7 +306,7 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
return result;
},
.Array => {
const ty_size = ty.abiSize(pt);
const ty_size = ty.abiSize(zcu);
if (ty_size <= 8) {
result[0] = .integer;
return result;
@ -326,10 +326,10 @@ fn classifySystemVStruct(
result: *[8]Class,
starting_byte_offset: u64,
loaded_struct: InternPool.LoadedStructType,
pt: Zcu.PerThread,
zcu: *Zcu,
target: std.Target,
) u64 {
const ip = &pt.zcu.intern_pool;
const ip = &zcu.intern_pool;
var byte_offset = starting_byte_offset;
var field_it = loaded_struct.iterateRuntimeOrder(ip);
while (field_it.next()) |field_index| {
@ -338,29 +338,29 @@ fn classifySystemVStruct(
byte_offset = std.mem.alignForward(
u64,
byte_offset,
field_align.toByteUnits() orelse field_ty.abiAlignment(pt).toByteUnits().?,
field_align.toByteUnits() orelse field_ty.abiAlignment(zcu).toByteUnits().?,
);
if (pt.zcu.typeToStruct(field_ty)) |field_loaded_struct| {
if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
switch (field_loaded_struct.layout) {
.auto, .@"extern" => {
byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, pt, target);
byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, zcu, target);
continue;
},
.@"packed" => {},
}
} else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| {
} else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
switch (field_loaded_union.flagsUnordered(ip).layout) {
.auto, .@"extern" => {
byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, pt, target);
byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, zcu, target);
continue;
},
.@"packed" => {},
}
}
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, pt, target, .field), .none);
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
for (result[@intCast(byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
result_class.* = result_class.combineSystemV(field_class);
byte_offset += field_ty.abiSize(pt);
byte_offset += field_ty.abiSize(zcu);
}
const final_byte_offset = starting_byte_offset + loaded_struct.sizeUnordered(ip);
std.debug.assert(final_byte_offset == std.mem.alignForward(
@ -375,30 +375,30 @@ fn classifySystemVUnion(
result: *[8]Class,
starting_byte_offset: u64,
loaded_union: InternPool.LoadedUnionType,
pt: Zcu.PerThread,
zcu: *Zcu,
target: std.Target,
) u64 {
const ip = &pt.zcu.intern_pool;
const ip = &zcu.intern_pool;
for (0..loaded_union.field_types.len) |field_index| {
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
if (pt.zcu.typeToStruct(field_ty)) |field_loaded_struct| {
if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
switch (field_loaded_struct.layout) {
.auto, .@"extern" => {
_ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, pt, target);
_ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, zcu, target);
continue;
},
.@"packed" => {},
}
} else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| {
} else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
switch (field_loaded_union.flagsUnordered(ip).layout) {
.auto, .@"extern" => {
_ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, pt, target);
_ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, zcu, target);
continue;
},
.@"packed" => {},
}
}
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, pt, target, .field), .none);
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
result_class.* = result_class.combineSystemV(field_class);
}
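A sketch of consuming the SysV classification with the new *Zcu parameter; the eight-slot semantics come from the doc comment above, while the surrounding names (arg_ty, zcu, target) are assumed to be in scope:

// Hypothetical: count how many eightbyte slots an argument occupies.
const classes = classifySystemV(arg_ty, zcu, target, .arg);
var slots: usize = 0;
for (classes) |c| {
    if (c == .none) break; // unused slots are filled with .none
    if (c == .memory) {
        slots = 0; // memory-class arguments are passed on the stack instead
        break;
    }
    slots += 1;
}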
src/codegen.zig (131 changed lines)
@ -198,17 +198,17 @@ pub fn generateSymbol(
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
const mod = pt.zcu;
|
||||
const ip = &mod.intern_pool;
|
||||
const ty = val.typeOf(mod);
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const ty = val.typeOf(zcu);
|
||||
|
||||
const target = mod.getTarget();
|
||||
const target = zcu.getTarget();
|
||||
const endian = target.cpu.arch.endian();
|
||||
|
||||
log.debug("generateSymbol: val = {}", .{val.fmtValue(pt)});
|
||||
|
||||
if (val.isUndefDeep(mod)) {
|
||||
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
|
||||
if (val.isUndefDeep(zcu)) {
|
||||
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
|
||||
try code.appendNTimes(0xaa, abi_size);
|
||||
return .ok;
|
||||
}
|
||||
@ -254,9 +254,9 @@ pub fn generateSymbol(
|
||||
.empty_enum_value,
|
||||
=> unreachable, // non-runtime values
|
||||
.int => {
|
||||
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
|
||||
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
|
||||
var space: Value.BigIntSpace = undefined;
|
||||
const int_val = val.toBigInt(&space, pt);
|
||||
const int_val = val.toBigInt(&space, zcu);
|
||||
int_val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian);
|
||||
},
|
||||
.err => |err| {
|
||||
@ -264,20 +264,20 @@ pub fn generateSymbol(
|
||||
try code.writer().writeInt(u16, @intCast(int), endian);
|
||||
},
|
||||
.error_union => |error_union| {
|
||||
const payload_ty = ty.errorUnionPayload(mod);
|
||||
const payload_ty = ty.errorUnionPayload(zcu);
|
||||
const err_val: u16 = switch (error_union.val) {
|
||||
.err_name => |err_name| @intCast(try pt.getErrorValue(err_name)),
|
||||
.payload => 0,
|
||||
};
|
||||
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
try code.writer().writeInt(u16, err_val, endian);
|
||||
return .ok;
|
||||
}
|
||||
|
||||
const payload_align = payload_ty.abiAlignment(pt);
|
||||
const error_align = Type.anyerror.abiAlignment(pt);
|
||||
const abi_align = ty.abiAlignment(pt);
|
||||
const payload_align = payload_ty.abiAlignment(zcu);
|
||||
const error_align = Type.anyerror.abiAlignment(zcu);
|
||||
const abi_align = ty.abiAlignment(zcu);
|
||||
|
||||
// error value first when its type is larger than the error union's payload
|
||||
if (error_align.order(payload_align) == .gt) {
|
||||
@ -317,7 +317,7 @@ pub fn generateSymbol(
|
||||
}
|
||||
},
|
||||
.enum_tag => |enum_tag| {
|
||||
const int_tag_ty = ty.intTagType(mod);
|
||||
const int_tag_ty = ty.intTagType(zcu);
|
||||
switch (try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, debug_output, reloc_info)) {
|
||||
.ok => {},
|
||||
.fail => |em| return .{ .fail = em },
|
||||
@ -329,7 +329,7 @@ pub fn generateSymbol(
|
||||
.f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(8)),
|
||||
.f80 => |f80_val| {
|
||||
writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(10));
|
||||
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
|
||||
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
|
||||
try code.appendNTimes(0, abi_size - 10);
|
||||
},
|
||||
.f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)),
|
||||
@ -349,11 +349,11 @@ pub fn generateSymbol(
|
||||
}
|
||||
},
|
||||
.opt => {
|
||||
const payload_type = ty.optionalChild(mod);
|
||||
const payload_val = val.optionalValue(mod);
|
||||
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
|
||||
const payload_type = ty.optionalChild(zcu);
|
||||
const payload_val = val.optionalValue(zcu);
|
||||
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
|
||||
|
||||
if (ty.optionalReprIsPayload(mod)) {
|
||||
if (ty.optionalReprIsPayload(zcu)) {
|
||||
if (payload_val) |value| {
|
||||
switch (try generateSymbol(bin_file, pt, src_loc, value, code, debug_output, reloc_info)) {
|
||||
.ok => {},
|
||||
@ -363,8 +363,8 @@ pub fn generateSymbol(
|
||||
try code.appendNTimes(0, abi_size);
|
||||
}
|
||||
} else {
|
||||
const padding = abi_size - (math.cast(usize, payload_type.abiSize(pt)) orelse return error.Overflow) - 1;
|
||||
if (payload_type.hasRuntimeBits(pt)) {
|
||||
const padding = abi_size - (math.cast(usize, payload_type.abiSize(zcu)) orelse return error.Overflow) - 1;
|
||||
if (payload_type.hasRuntimeBits(zcu)) {
|
||||
const value = payload_val orelse Value.fromInterned(try pt.intern(.{
|
||||
.undef = payload_type.toIntern(),
|
||||
}));
|
||||
@ -398,7 +398,7 @@ pub fn generateSymbol(
|
||||
},
|
||||
},
|
||||
.vector_type => |vector_type| {
|
||||
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
|
||||
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
|
||||
if (vector_type.child == .bool_type) {
|
||||
const bytes = try code.addManyAsSlice(abi_size);
|
||||
@memset(bytes, 0xaa);
|
||||
@ -458,7 +458,7 @@ pub fn generateSymbol(
|
||||
}
|
||||
|
||||
const padding = abi_size -
|
||||
(math.cast(usize, Type.fromInterned(vector_type.child).abiSize(pt) * vector_type.len) orelse
|
||||
(math.cast(usize, Type.fromInterned(vector_type.child).abiSize(zcu) * vector_type.len) orelse
|
||||
return error.Overflow);
|
||||
if (padding > 0) try code.appendNTimes(0, padding);
|
||||
}
|
||||
@ -471,7 +471,7 @@ pub fn generateSymbol(
|
||||
0..,
|
||||
) |field_ty, comptime_val, index| {
|
||||
if (comptime_val != .none) continue;
|
||||
if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
|
||||
if (!Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
|
||||
|
||||
const field_val = switch (aggregate.storage) {
|
||||
.bytes => |bytes| try pt.intern(.{ .int = .{
|
||||
@ -489,7 +489,7 @@ pub fn generateSymbol(
|
||||
const unpadded_field_end = code.items.len - struct_begin;
|
||||
|
||||
// Pad struct members if required
|
||||
const padded_field_end = ty.structFieldOffset(index + 1, pt);
|
||||
const padded_field_end = ty.structFieldOffset(index + 1, zcu);
|
||||
const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse
|
||||
return error.Overflow;
|
||||
|
||||
@ -502,7 +502,7 @@ pub fn generateSymbol(
|
||||
const struct_type = ip.loadStructType(ty.toIntern());
|
||||
switch (struct_type.layout) {
|
||||
.@"packed" => {
|
||||
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
|
||||
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
|
||||
const current_pos = code.items.len;
|
||||
try code.appendNTimes(0, abi_size);
|
||||
var bits: u16 = 0;
|
||||
@ -519,8 +519,8 @@ pub fn generateSymbol(
|
||||
|
||||
// pointer may point to a decl which must be marked used
|
||||
// but can also result in a relocation. Therefore we handle those separately.
|
||||
if (Type.fromInterned(field_ty).zigTypeTag(mod) == .Pointer) {
|
||||
const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(pt)) orelse
|
||||
if (Type.fromInterned(field_ty).zigTypeTag(zcu) == .Pointer) {
|
||||
const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(zcu)) orelse
|
||||
return error.Overflow;
|
||||
var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
|
||||
defer tmp_list.deinit();
|
||||
@ -531,7 +531,7 @@ pub fn generateSymbol(
|
||||
} else {
|
||||
Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), pt, code.items[current_pos..], bits) catch unreachable;
|
||||
}
|
||||
bits += @intCast(Type.fromInterned(field_ty).bitSize(pt));
|
||||
bits += @intCast(Type.fromInterned(field_ty).bitSize(zcu));
|
||||
}
|
||||
},
|
||||
.auto, .@"extern" => {
|
||||
@ -542,7 +542,7 @@ pub fn generateSymbol(
|
||||
var it = struct_type.iterateRuntimeOrder(ip);
|
||||
while (it.next()) |field_index| {
|
||||
const field_ty = field_types[field_index];
|
||||
if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
|
||||
if (!Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
|
||||
|
||||
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
|
||||
.bytes => |bytes| try pt.intern(.{ .int = .{
|
||||
@ -580,7 +580,7 @@ pub fn generateSymbol(
|
||||
else => unreachable,
|
||||
},
|
||||
.un => |un| {
|
||||
const layout = ty.unionGetLayout(pt);
|
||||
const layout = ty.unionGetLayout(zcu);
|
||||
|
||||
if (layout.payload_size == 0) {
|
||||
return generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info);
|
||||
@ -594,11 +594,11 @@ pub fn generateSymbol(
|
||||
}
|
||||
}
|
||||
|
||||
const union_obj = mod.typeToUnion(ty).?;
|
||||
const union_obj = zcu.typeToUnion(ty).?;
|
||||
if (un.tag != .none) {
|
||||
const field_index = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?;
|
||||
const field_index = ty.unionTagFieldIndex(Value.fromInterned(un.tag), zcu).?;
|
||||
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBits(pt)) {
|
||||
if (!field_ty.hasRuntimeBits(zcu)) {
|
||||
try code.appendNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
|
||||
} else {
|
||||
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) {
|
||||
@ -606,7 +606,7 @@ pub fn generateSymbol(
|
||||
.fail => |em| return Result{ .fail = em },
|
||||
}
|
||||
|
||||
const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(pt)) orelse return error.Overflow;
|
||||
const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(zcu)) orelse return error.Overflow;
|
||||
if (padding > 0) {
|
||||
try code.appendNTimes(0, padding);
|
||||
}
|
||||
@ -661,7 +661,7 @@ fn lowerPtr(
|
||||
reloc_info,
|
||||
offset + errUnionPayloadOffset(
|
||||
Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu).errorUnionPayload(zcu),
|
||||
pt,
|
||||
zcu,
|
||||
),
|
||||
),
|
||||
.opt_payload => |opt_ptr| try lowerPtr(
|
||||
@ -687,7 +687,7 @@ fn lowerPtr(
|
||||
};
|
||||
},
|
||||
.Struct, .Union => switch (base_ty.containerLayout(zcu)) {
|
||||
.auto => base_ty.structFieldOffset(@intCast(field.index), pt),
|
||||
.auto => base_ty.structFieldOffset(@intCast(field.index), zcu),
|
||||
.@"extern", .@"packed" => unreachable,
|
||||
},
|
||||
else => unreachable,
|
||||
@ -713,15 +713,16 @@ fn lowerUavRef(
|
||||
offset: u64,
|
||||
) CodeGenError!Result {
|
||||
_ = debug_output;
|
||||
const ip = &pt.zcu.intern_pool;
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const target = lf.comp.root_mod.resolved_target.result;
|
||||
|
||||
const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
|
||||
const uav_val = uav.val;
|
||||
const uav_ty = Type.fromInterned(ip.typeOf(uav_val));
|
||||
log.debug("lowerUavRef: ty = {}", .{uav_ty.fmt(pt)});
|
||||
const is_fn_body = uav_ty.zigTypeTag(pt.zcu) == .Fn;
|
||||
if (!is_fn_body and !uav_ty.hasRuntimeBits(pt)) {
|
||||
const is_fn_body = uav_ty.zigTypeTag(zcu) == .Fn;
|
||||
if (!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) {
|
||||
try code.appendNTimes(0xaa, ptr_width_bytes);
|
||||
return Result.ok;
|
||||
}
|
||||
@ -768,7 +769,7 @@ fn lowerNavRef(
|
||||
const ptr_width = target.ptrBitWidth();
|
||||
const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip));
|
||||
const is_fn_body = nav_ty.zigTypeTag(zcu) == .Fn;
|
||||
if (!is_fn_body and !nav_ty.hasRuntimeBits(pt)) {
|
||||
if (!is_fn_body and !nav_ty.hasRuntimeBits(zcu)) {
|
||||
try code.appendNTimes(0xaa, @divExact(ptr_width, 8));
|
||||
return Result.ok;
|
||||
}
|
||||
@ -860,7 +861,7 @@ fn genNavRef(
|
||||
const ty = val.typeOf(zcu);
|
||||
log.debug("genNavRef: val = {}", .{val.fmtValue(pt)});
|
||||
|
||||
if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
|
||||
if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
const imm: u64 = switch (@divExact(target.ptrBitWidth(), 8)) {
|
||||
1 => 0xaa,
|
||||
2 => 0xaaaa,
|
||||
@ -877,12 +878,12 @@ fn genNavRef(
|
||||
// TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
|
||||
if (ty.castPtrToFn(zcu)) |fn_ty| {
|
||||
if (zcu.typeToFunc(fn_ty).?.is_generic) {
|
||||
return .{ .mcv = .{ .immediate = fn_ty.abiAlignment(pt).toByteUnits().? } };
|
||||
return .{ .mcv = .{ .immediate = fn_ty.abiAlignment(zcu).toByteUnits().? } };
|
||||
}
|
||||
} else if (ty.zigTypeTag(zcu) == .Pointer) {
|
||||
const elem_ty = ty.elemType2(zcu);
|
||||
if (!elem_ty.hasRuntimeBits(pt)) {
|
||||
return .{ .mcv = .{ .immediate = elem_ty.abiAlignment(pt).toByteUnits().? } };
|
||||
if (!elem_ty.hasRuntimeBits(zcu)) {
|
||||
return .{ .mcv = .{ .immediate = elem_ty.abiAlignment(zcu).toByteUnits().? } };
|
||||
}
|
||||
}
|
||||
|
||||
@ -963,15 +964,15 @@ pub fn genTypedValue(
|
||||
},
|
||||
else => switch (ip.indexToKey(val.toIntern())) {
|
||||
.int => {
|
||||
return .{ .mcv = .{ .immediate = val.toUnsignedInt(pt) } };
|
||||
return .{ .mcv = .{ .immediate = val.toUnsignedInt(zcu) } };
|
||||
},
|
||||
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
|
||||
.nav => |nav| return genNavRef(lf, pt, src_loc, val, nav, target),
|
||||
.uav => |uav| if (Value.fromInterned(uav.val).typeOf(zcu).hasRuntimeBits(pt))
|
||||
.uav => |uav| if (Value.fromInterned(uav.val).typeOf(zcu).hasRuntimeBits(zcu))
|
||||
return switch (try lf.lowerUav(
|
||||
pt,
|
||||
uav.val,
|
||||
Type.fromInterned(uav.orig_ty).ptrAlignment(pt),
|
||||
Type.fromInterned(uav.orig_ty).ptrAlignment(zcu),
|
||||
src_loc,
|
||||
)) {
|
||||
.mcv => |mcv| return .{ .mcv = switch (mcv) {
|
||||
@ -982,7 +983,7 @@ pub fn genTypedValue(
|
||||
.fail => |em| return .{ .fail = em },
|
||||
}
|
||||
else
|
||||
return .{ .mcv = .{ .immediate = Type.fromInterned(uav.orig_ty).ptrAlignment(pt)
|
||||
return .{ .mcv = .{ .immediate = Type.fromInterned(uav.orig_ty).ptrAlignment(zcu)
|
||||
.forward(@intCast((@as(u66, 1) << @intCast(target.ptrBitWidth() | 1)) / 3)) } },
|
||||
else => {},
|
||||
},
|
||||
@ -994,8 +995,8 @@ pub fn genTypedValue(
|
||||
const info = ty.intInfo(zcu);
|
||||
if (info.bits <= target.ptrBitWidth()) {
|
||||
const unsigned: u64 = switch (info.signedness) {
|
||||
.signed => @bitCast(val.toSignedInt(pt)),
|
||||
.unsigned => val.toUnsignedInt(pt),
|
||||
.signed => @bitCast(val.toSignedInt(zcu)),
|
||||
.unsigned => val.toUnsignedInt(zcu),
|
||||
};
|
||||
return .{ .mcv = .{ .immediate = unsigned } };
|
||||
}
|
||||
@ -1012,7 +1013,7 @@ pub fn genTypedValue(
|
||||
val.optionalValue(zcu) orelse return .{ .mcv = .{ .immediate = 0 } },
|
||||
target,
|
||||
);
|
||||
} else if (ty.abiSize(pt) == 1) {
|
||||
} else if (ty.abiSize(zcu) == 1) {
|
||||
return .{ .mcv = .{ .immediate = @intFromBool(!val.isNull(zcu)) } };
|
||||
}
|
||||
},
|
||||
@ -1034,7 +1035,7 @@ pub fn genTypedValue(
|
||||
.ErrorUnion => {
|
||||
const err_type = ty.errorUnionSet(zcu);
|
||||
const payload_type = ty.errorUnionPayload(zcu);
|
||||
if (!payload_type.hasRuntimeBitsIgnoreComptime(pt)) {
|
||||
if (!payload_type.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
// We use the error type directly as the type.
|
||||
const err_int_ty = try pt.errorIntType();
|
||||
switch (ip.indexToKey(val.toIntern()).error_union.val) {
|
||||
@ -1074,23 +1075,23 @@ pub fn genTypedValue(
|
||||
return lf.lowerUav(pt, val.toIntern(), .none, src_loc);
|
||||
}
|
||||
|
||||
pub fn errUnionPayloadOffset(payload_ty: Type, pt: Zcu.PerThread) u64 {
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0;
|
||||
const payload_align = payload_ty.abiAlignment(pt);
|
||||
const error_align = Type.anyerror.abiAlignment(pt);
|
||||
if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
|
||||
pub fn errUnionPayloadOffset(payload_ty: Type, zcu: *Zcu) u64 {
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return 0;
|
||||
const payload_align = payload_ty.abiAlignment(zcu);
|
||||
const error_align = Type.anyerror.abiAlignment(zcu);
|
||||
if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
return 0;
|
||||
} else {
|
||||
return payload_align.forward(Type.anyerror.abiSize(pt));
|
||||
return payload_align.forward(Type.anyerror.abiSize(zcu));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn errUnionErrorOffset(payload_ty: Type, pt: Zcu.PerThread) u64 {
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0;
|
||||
const payload_align = payload_ty.abiAlignment(pt);
|
||||
const error_align = Type.anyerror.abiAlignment(pt);
|
||||
if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
|
||||
return error_align.forward(payload_ty.abiSize(pt));
|
||||
pub fn errUnionErrorOffset(payload_ty: Type, zcu: *Zcu) u64 {
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return 0;
|
||||
const payload_align = payload_ty.abiAlignment(zcu);
|
||||
const error_align = Type.anyerror.abiAlignment(zcu);
|
||||
if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
return error_align.forward(payload_ty.abiSize(zcu));
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -334,7 +334,7 @@ pub const Function = struct {
|
||||
const writer = f.object.codeHeaderWriter();
|
||||
const decl_c_value = try f.allocLocalValue(.{
|
||||
.ctype = try f.ctypeFromType(ty, .complete),
|
||||
.alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(pt)),
|
||||
.alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(pt.zcu)),
|
||||
});
|
||||
const gpa = f.object.dg.gpa;
|
||||
try f.allocs.put(gpa, decl_c_value.new_local, false);
|
||||
@ -372,7 +372,7 @@ pub const Function = struct {
|
||||
fn allocLocal(f: *Function, inst: ?Air.Inst.Index, ty: Type) !CValue {
|
||||
return f.allocAlignedLocal(inst, .{
|
||||
.ctype = try f.ctypeFromType(ty, .complete),
|
||||
.alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.pt)),
|
||||
.alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.pt.zcu)),
|
||||
});
|
||||
}
|
||||
|
||||
@ -648,7 +648,7 @@ pub const DeclGen = struct {
|
||||
|
||||
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
|
||||
const ptr_ty = Type.fromInterned(uav.orig_ty);
|
||||
if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isFnOrHasRuntimeBits(pt)) {
|
||||
if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isFnOrHasRuntimeBits(zcu)) {
|
||||
return dg.writeCValue(writer, .{ .undef = ptr_ty });
|
||||
}
|
||||
|
||||
@ -688,7 +688,7 @@ pub const DeclGen = struct {
|
||||
// alignment. If there is already an entry, keep the greater alignment.
|
||||
const explicit_alignment = ptr_type.flags.alignment;
|
||||
if (explicit_alignment != .none) {
|
||||
const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(pt);
|
||||
const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(zcu);
|
||||
if (explicit_alignment.order(abi_alignment).compare(.gt)) {
|
||||
const aligned_gop = try dg.aligned_uavs.getOrPut(dg.gpa, uav.val);
|
||||
aligned_gop.value_ptr.* = if (aligned_gop.found_existing)
|
||||
@ -722,7 +722,7 @@ pub const DeclGen = struct {
|
||||
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
|
||||
const nav_ty = Type.fromInterned(ip.getNav(owner_nav).typeOf(ip));
|
||||
const ptr_ty = try pt.navPtrType(owner_nav);
|
||||
if (!nav_ty.isFnOrHasRuntimeBits(pt)) {
|
||||
if (!nav_ty.isFnOrHasRuntimeBits(zcu)) {
|
||||
return dg.writeCValue(writer, .{ .undef = ptr_ty });
|
||||
}
|
||||
|
||||
@ -805,7 +805,7 @@ pub const DeclGen = struct {
|
||||
}
|
||||
},
|
||||
|
||||
.elem_ptr => |elem| if (!(try elem.parent.ptrType(pt)).childType(zcu).hasRuntimeBits(pt)) {
|
||||
.elem_ptr => |elem| if (!(try elem.parent.ptrType(pt)).childType(zcu).hasRuntimeBits(zcu)) {
|
||||
// Element type is zero-bit, so lowers to `void`. The index is irrelevant; just cast the pointer.
|
||||
const ptr_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete);
|
||||
try writer.writeByte('(');
|
||||
@ -923,7 +923,7 @@ pub const DeclGen = struct {
|
||||
try writer.writeAll("((");
|
||||
try dg.renderCType(writer, ctype);
|
||||
try writer.print("){x})", .{try dg.fmtIntLiteral(
|
||||
try pt.intValue(Type.usize, val.toUnsignedInt(pt)),
|
||||
try pt.intValue(Type.usize, val.toUnsignedInt(zcu)),
|
||||
.Other,
|
||||
)});
|
||||
},
|
||||
@ -970,7 +970,7 @@ pub const DeclGen = struct {
|
||||
.enum_tag => |enum_tag| try dg.renderValue(writer, Value.fromInterned(enum_tag.int), location),
|
||||
.float => {
|
||||
const bits = ty.floatBits(target.*);
|
||||
const f128_val = val.toFloat(f128, pt);
|
||||
const f128_val = val.toFloat(f128, zcu);
|
||||
|
||||
// All unsigned ints matching float types are pre-allocated.
|
||||
const repr_ty = pt.intType(.unsigned, bits) catch unreachable;
|
||||
@ -984,10 +984,10 @@ pub const DeclGen = struct {
|
||||
};
|
||||
|
||||
switch (bits) {
|
||||
16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, pt)))),
|
||||
32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, pt)))),
|
||||
64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, pt)))),
|
||||
80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, pt)))),
|
||||
16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, zcu)))),
|
||||
32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, zcu)))),
|
||||
64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, zcu)))),
|
||||
80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, zcu)))),
|
||||
128 => repr_val_big.set(@as(u128, @bitCast(f128_val))),
|
||||
else => unreachable,
|
||||
}
|
||||
@ -998,10 +998,10 @@ pub const DeclGen = struct {
|
||||
try dg.renderTypeForBuiltinFnName(writer, ty);
|
||||
try writer.writeByte('(');
|
||||
switch (bits) {
|
||||
16 => try writer.print("{x}", .{val.toFloat(f16, pt)}),
|
||||
32 => try writer.print("{x}", .{val.toFloat(f32, pt)}),
|
||||
64 => try writer.print("{x}", .{val.toFloat(f64, pt)}),
|
||||
80 => try writer.print("{x}", .{val.toFloat(f80, pt)}),
|
||||
16 => try writer.print("{x}", .{val.toFloat(f16, zcu)}),
|
||||
32 => try writer.print("{x}", .{val.toFloat(f32, zcu)}),
|
||||
64 => try writer.print("{x}", .{val.toFloat(f64, zcu)}),
|
||||
80 => try writer.print("{x}", .{val.toFloat(f80, zcu)}),
|
||||
128 => try writer.print("{x}", .{f128_val}),
|
||||
else => unreachable,
|
||||
}
|
||||
@ -1041,10 +1041,10 @@ pub const DeclGen = struct {
|
||||
if (std.math.isNan(f128_val)) switch (bits) {
|
||||
// We only actually need to pass the significand, but it will get
|
||||
// properly masked anyway, so just pass the whole value.
|
||||
16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, pt)))}),
|
||||
32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, pt)))}),
|
||||
64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, pt)))}),
|
||||
80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, pt)))}),
|
||||
16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, zcu)))}),
|
||||
32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, zcu)))}),
|
||||
64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, zcu)))}),
|
||||
80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, zcu)))}),
|
||||
128 => try writer.print("\"0x{x}\"", .{@as(u128, @bitCast(f128_val))}),
|
||||
else => unreachable,
|
||||
};
|
||||
@ -1167,11 +1167,11 @@ pub const DeclGen = struct {
|
||||
const elem_val_u8: u8 = if (elem_val.isUndef(zcu))
|
||||
undefPattern(u8)
|
||||
else
|
||||
@intCast(elem_val.toUnsignedInt(pt));
|
||||
@intCast(elem_val.toUnsignedInt(zcu));
|
||||
try literal.writeChar(elem_val_u8);
|
||||
}
|
||||
if (ai.sentinel) |s| {
|
||||
const s_u8: u8 = @intCast(s.toUnsignedInt(pt));
|
||||
const s_u8: u8 = @intCast(s.toUnsignedInt(zcu));
|
||||
if (s_u8 != 0) try literal.writeChar(s_u8);
|
||||
}
|
||||
try literal.end();
|
||||
@ -1203,7 +1203,7 @@ pub const DeclGen = struct {
|
||||
const comptime_val = tuple.values.get(ip)[field_index];
|
||||
if (comptime_val != .none) continue;
|
||||
const field_ty = Type.fromInterned(tuple.types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
if (!empty) try writer.writeByte(',');
|
||||
|
||||
@ -1238,7 +1238,7 @@ pub const DeclGen = struct {
|
||||
var need_comma = false;
|
||||
while (field_it.next()) |field_index| {
|
||||
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
if (need_comma) try writer.writeByte(',');
|
||||
need_comma = true;
|
||||
@ -1265,7 +1265,7 @@ pub const DeclGen = struct {
|
||||
|
||||
for (0..loaded_struct.field_types.len) |field_index| {
|
||||
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
eff_num_fields += 1;
|
||||
}
|
||||
|
||||
@ -1273,7 +1273,7 @@ pub const DeclGen = struct {
|
||||
try writer.writeByte('(');
|
||||
try dg.renderUndefValue(writer, ty, location);
|
||||
try writer.writeByte(')');
|
||||
} else if (ty.bitSize(pt) > 64) {
|
||||
} else if (ty.bitSize(zcu) > 64) {
|
||||
// zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
|
||||
var num_or = eff_num_fields - 1;
|
||||
while (num_or > 0) : (num_or -= 1) {
|
||||
@ -1286,7 +1286,7 @@ pub const DeclGen = struct {
|
||||
var needs_closing_paren = false;
|
||||
for (0..loaded_struct.field_types.len) |field_index| {
|
||||
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
|
||||
.bytes => |bytes| try pt.intern(.{ .int = .{
|
||||
@ -1312,7 +1312,7 @@ pub const DeclGen = struct {
|
||||
if (needs_closing_paren) try writer.writeByte(')');
|
||||
if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
|
||||
|
||||
bit_offset += field_ty.bitSize(pt);
|
||||
bit_offset += field_ty.bitSize(zcu);
|
||||
needs_closing_paren = true;
|
||||
eff_index += 1;
|
||||
}
|
||||
@ -1322,7 +1322,7 @@ pub const DeclGen = struct {
|
||||
var empty = true;
|
||||
for (0..loaded_struct.field_types.len) |field_index| {
|
||||
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
if (!empty) try writer.writeAll(" | ");
|
||||
try writer.writeByte('(');
|
||||
@ -1346,7 +1346,7 @@ pub const DeclGen = struct {
|
||||
try dg.renderValue(writer, Value.fromInterned(field_val), .Other);
|
||||
}
|
||||
|
||||
bit_offset += field_ty.bitSize(pt);
|
||||
bit_offset += field_ty.bitSize(zcu);
|
||||
empty = false;
|
||||
}
|
||||
try writer.writeByte(')');
|
||||
@ -1396,7 +1396,7 @@ pub const DeclGen = struct {
|
||||
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
|
||||
const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index];
|
||||
if (loaded_union.flagsUnordered(ip).layout == .@"packed") {
|
||||
if (field_ty.hasRuntimeBits(pt)) {
|
||||
if (field_ty.hasRuntimeBits(zcu)) {
|
||||
if (field_ty.isPtrAtRuntime(zcu)) {
|
||||
try writer.writeByte('(');
|
||||
try dg.renderCType(writer, ctype);
|
||||
@ -1427,7 +1427,7 @@ pub const DeclGen = struct {
|
||||
),
|
||||
.payload => {
|
||||
try writer.writeByte('{');
|
||||
if (field_ty.hasRuntimeBits(pt)) {
|
||||
if (field_ty.hasRuntimeBits(zcu)) {
|
||||
try writer.print(" .{ } = ", .{fmtIdent(field_name.toSlice(ip))});
|
||||
try dg.renderValue(
|
||||
writer,
|
||||
@ -1439,7 +1439,7 @@ pub const DeclGen = struct {
|
||||
const inner_field_ty = Type.fromInterned(
|
||||
loaded_union.field_types.get(ip)[inner_field_index],
|
||||
);
|
||||
if (!inner_field_ty.hasRuntimeBits(pt)) continue;
|
||||
if (!inner_field_ty.hasRuntimeBits(zcu)) continue;
|
||||
try dg.renderUndefValue(writer, inner_field_ty, initializer_type);
|
||||
break;
|
||||
}
|
||||
@ -1588,7 +1588,7 @@ pub const DeclGen = struct {
|
||||
var need_comma = false;
|
||||
while (field_it.next()) |field_index| {
|
||||
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
if (need_comma) try writer.writeByte(',');
|
||||
need_comma = true;
|
||||
@ -1613,7 +1613,7 @@ pub const DeclGen = struct {
|
||||
for (0..anon_struct_info.types.len) |field_index| {
|
||||
if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
|
||||
const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
if (need_comma) try writer.writeByte(',');
|
||||
need_comma = true;
|
||||
@ -1651,7 +1651,7 @@ pub const DeclGen = struct {
|
||||
const inner_field_ty = Type.fromInterned(
|
||||
loaded_union.field_types.get(ip)[inner_field_index],
|
||||
);
|
||||
if (!inner_field_ty.hasRuntimeBits(pt)) continue;
|
||||
if (!inner_field_ty.hasRuntimeBits(pt.zcu)) continue;
|
||||
try dg.renderUndefValue(
|
||||
writer,
|
||||
inner_field_ty,
|
||||
@ -1902,7 +1902,8 @@ pub const DeclGen = struct {
|
||||
};
|
||||
fn intCastIsNoop(dg: *DeclGen, dest_ty: Type, src_ty: Type) bool {
|
||||
const pt = dg.pt;
|
||||
const dest_bits = dest_ty.bitSize(pt);
|
||||
const zcu = pt.zcu;
|
||||
const dest_bits = dest_ty.bitSize(zcu);
|
||||
const dest_int_info = dest_ty.intInfo(pt.zcu);
|
||||
|
||||
const src_is_ptr = src_ty.isPtrAtRuntime(pt.zcu);
|
||||
@ -1911,7 +1912,7 @@ pub const DeclGen = struct {
|
||||
.signed => Type.isize,
|
||||
} else src_ty;
|
||||
|
||||
const src_bits = src_eff_ty.bitSize(pt);
|
||||
const src_bits = src_eff_ty.bitSize(zcu);
|
||||
const src_int_info = if (src_eff_ty.isAbiInt(pt.zcu)) src_eff_ty.intInfo(pt.zcu) else null;
|
||||
if (dest_bits <= 64 and src_bits <= 64) {
|
||||
const needs_cast = src_int_info == null or
|
||||
@ -1943,7 +1944,7 @@ pub const DeclGen = struct {
|
||||
) !void {
|
||||
const pt = dg.pt;
|
||||
const zcu = pt.zcu;
|
||||
const dest_bits = dest_ty.bitSize(pt);
|
||||
const dest_bits = dest_ty.bitSize(zcu);
|
||||
const dest_int_info = dest_ty.intInfo(zcu);
|
||||
|
||||
const src_is_ptr = src_ty.isPtrAtRuntime(zcu);
|
||||
@ -1952,7 +1953,7 @@ pub const DeclGen = struct {
|
||||
.signed => Type.isize,
|
||||
} else src_ty;
|
||||
|
||||
const src_bits = src_eff_ty.bitSize(pt);
|
||||
const src_bits = src_eff_ty.bitSize(zcu);
|
||||
const src_int_info = if (src_eff_ty.isAbiInt(zcu)) src_eff_ty.intInfo(zcu) else null;
|
||||
if (dest_bits <= 64 and src_bits <= 64) {
|
||||
const needs_cast = src_int_info == null or
|
||||
@ -2033,7 +2034,7 @@ pub const DeclGen = struct {
|
||||
qualifiers,
|
||||
CType.AlignAs.fromAlignment(.{
|
||||
.@"align" = alignment,
|
||||
.abi = ty.abiAlignment(dg.pt),
|
||||
.abi = ty.abiAlignment(dg.pt.zcu),
|
||||
}),
|
||||
);
|
||||
}
|
||||
@ -2239,9 +2240,10 @@ pub const DeclGen = struct {
|
||||
}
|
||||
|
||||
const pt = dg.pt;
|
||||
const int_info = if (ty.isAbiInt(pt.zcu)) ty.intInfo(pt.zcu) else std.builtin.Type.Int{
|
||||
const zcu = pt.zcu;
|
||||
const int_info = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else std.builtin.Type.Int{
|
||||
.signedness = .unsigned,
|
||||
.bits = @as(u16, @intCast(ty.bitSize(pt))),
|
||||
.bits = @as(u16, @intCast(ty.bitSize(zcu))),
|
||||
};
|
||||
|
||||
if (is_big) try writer.print(", {}", .{int_info.signedness == .signed});
|
||||
@ -2891,7 +2893,7 @@ pub fn genDecl(o: *Object) !void {
|
||||
const nav = ip.getNav(o.dg.pass.nav);
|
||||
const nav_ty = Type.fromInterned(nav.typeOf(ip));
|
||||
|
||||
if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return;
|
||||
if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return;
|
||||
switch (ip.indexToKey(nav.status.resolved.val)) {
|
||||
.@"extern" => |@"extern"| {
|
||||
if (!ip.isFunctionType(nav_ty.toIntern())) return o.dg.renderFwdDecl(o.dg.pass.nav, .{
|
||||
@ -3420,10 +3422,10 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [
|
||||
}
|
||||
|
||||
fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
const pt = f.object.dg.pt;
|
||||
const zcu = f.object.dg.pt.zcu;
|
||||
const inst_ty = f.typeOfIndex(inst);
|
||||
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
|
||||
if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) {
|
||||
if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
|
||||
return .none;
|
||||
}
|
||||
@ -3453,7 +3455,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
|
||||
const inst_ty = f.typeOfIndex(inst);
|
||||
const ptr_ty = f.typeOf(bin_op.lhs);
|
||||
const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(pt);
|
||||
const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(zcu);
|
||||
|
||||
const ptr = try f.resolveInst(bin_op.lhs);
|
||||
const index = try f.resolveInst(bin_op.rhs);
|
||||
@ -3482,10 +3484,10 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
}
|
||||
|
||||
fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
const pt = f.object.dg.pt;
|
||||
const zcu = f.object.dg.pt.zcu;
|
||||
const inst_ty = f.typeOfIndex(inst);
|
||||
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
|
||||
if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) {
|
||||
if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
|
||||
return .none;
|
||||
}
|
||||
@ -3516,7 +3518,7 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
const inst_ty = f.typeOfIndex(inst);
|
||||
const slice_ty = f.typeOf(bin_op.lhs);
|
||||
const elem_ty = slice_ty.elemType2(zcu);
|
||||
const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(pt);
|
||||
const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(zcu);
|
||||
|
||||
const slice = try f.resolveInst(bin_op.lhs);
|
||||
const index = try f.resolveInst(bin_op.rhs);
|
||||
@ -3539,10 +3541,10 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
}

fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.object.dg.pt;
const zcu = f.object.dg.pt.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const inst_ty = f.typeOfIndex(inst);
if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) {
if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@ -3569,13 +3571,13 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
const zcu = pt.zcu;
const inst_ty = f.typeOfIndex(inst);
const elem_ty = inst_ty.childType(zcu);
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .{ .undef = inst_ty };
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty };

const local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(elem_ty, .complete),
.alignas = CType.AlignAs.fromAlignment(.{
.@"align" = inst_ty.ptrInfo(zcu).flags.alignment,
.abi = elem_ty.abiAlignment(pt),
.abi = elem_ty.abiAlignment(zcu),
}),
});
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
@ -3588,13 +3590,13 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
const zcu = pt.zcu;
|
||||
const inst_ty = f.typeOfIndex(inst);
|
||||
const elem_ty = inst_ty.childType(zcu);
|
||||
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .{ .undef = inst_ty };
|
||||
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty };
|
||||
|
||||
const local = try f.allocLocalValue(.{
|
||||
.ctype = try f.ctypeFromType(elem_ty, .complete),
|
||||
.alignas = CType.AlignAs.fromAlignment(.{
|
||||
.@"align" = inst_ty.ptrInfo(zcu).flags.alignment,
|
||||
.abi = elem_ty.abiAlignment(pt),
|
||||
.abi = elem_ty.abiAlignment(zcu),
|
||||
}),
|
||||
});
|
||||
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
|
||||
@ -3636,7 +3638,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
const ptr_info = ptr_scalar_ty.ptrInfo(zcu);
|
||||
const src_ty = Type.fromInterned(ptr_info.child);
|
||||
|
||||
if (!src_ty.hasRuntimeBitsIgnoreComptime(pt)) {
|
||||
if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
try reap(f, inst, &.{ty_op.operand});
|
||||
return .none;
|
||||
}
|
||||
@ -3646,7 +3648,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
try reap(f, inst, &.{ty_op.operand});
|
||||
|
||||
const is_aligned = if (ptr_info.flags.alignment != .none)
|
||||
ptr_info.flags.alignment.order(src_ty.abiAlignment(pt)).compare(.gte)
|
||||
ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
|
||||
else
|
||||
true;
|
||||
const is_array = lowersToArray(src_ty, pt);
|
||||
@ -3674,7 +3676,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
|
||||
const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
|
||||
|
||||
const field_ty = try pt.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(pt))));
|
||||
const field_ty = try pt.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(zcu))));
|
||||
|
||||
try f.writeCValue(writer, local, .Other);
|
||||
try v.elem(f, writer);
|
||||
@ -3685,9 +3687,9 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
try writer.writeAll("((");
|
||||
try f.renderType(writer, field_ty);
|
||||
try writer.writeByte(')');
|
||||
const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(pt) > 64;
|
||||
const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64;
|
||||
if (cant_cast) {
|
||||
if (field_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
|
||||
if (field_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
|
||||
try writer.writeAll("zig_lo_");
|
||||
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
|
||||
try writer.writeByte('(');
|
||||
@ -3735,7 +3737,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
|
||||
const ret_val = if (is_array) ret_val: {
|
||||
const array_local = try f.allocAlignedLocal(inst, .{
|
||||
.ctype = ret_ctype,
|
||||
.alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(pt)),
|
||||
.alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)),
|
||||
});
|
||||
try writer.writeAll("memcpy(");
|
||||
try f.writeCValueMember(writer, array_local, .{ .identifier = "array" });
|
||||
@ -3926,7 +3928,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
|
||||
}
|
||||
|
||||
const is_aligned = if (ptr_info.flags.alignment != .none)
|
||||
ptr_info.flags.alignment.order(src_ty.abiAlignment(pt)).compare(.gte)
|
||||
ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
|
||||
else
|
||||
true;
|
||||
const is_array = lowersToArray(Type.fromInterned(ptr_info.child), pt);
|
||||
@ -3976,7 +3978,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
|
||||
const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
|
||||
const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
|
||||
|
||||
const src_bits = src_ty.bitSize(pt);
|
||||
const src_bits = src_ty.bitSize(zcu);
|
||||
|
||||
const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb;
|
||||
var stack align(@alignOf(ExpectedContents)) =
|
||||
@ -4006,9 +4008,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
|
||||
try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(mask_val)});
|
||||
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
|
||||
try writer.writeByte('(');
|
||||
const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(pt) > 64;
|
||||
const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64;
|
||||
if (cant_cast) {
|
||||
if (src_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
|
||||
if (src_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
|
||||
try writer.writeAll("zig_make_");
|
||||
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
|
||||
try writer.writeAll("(0, ");
|
||||
@ -4130,7 +4132,7 @@ fn airBinOp(
|
||||
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
|
||||
const operand_ty = f.typeOf(bin_op.lhs);
|
||||
const scalar_ty = operand_ty.scalarType(zcu);
|
||||
if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(pt) > 64) or scalar_ty.isRuntimeFloat())
|
||||
if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(zcu) > 64) or scalar_ty.isRuntimeFloat())
|
||||
return try airBinBuiltinCall(f, inst, operation, info);
|
||||
|
||||
const lhs = try f.resolveInst(bin_op.lhs);
|
||||
@ -4169,7 +4171,7 @@ fn airCmpOp(
|
||||
const lhs_ty = f.typeOf(data.lhs);
|
||||
const scalar_ty = lhs_ty.scalarType(zcu);
|
||||
|
||||
const scalar_bits = scalar_ty.bitSize(pt);
|
||||
const scalar_bits = scalar_ty.bitSize(zcu);
|
||||
if (scalar_ty.isInt(zcu) and scalar_bits > 64)
|
||||
return airCmpBuiltinCall(
|
||||
f,
|
||||
@ -4219,7 +4221,7 @@ fn airEquality(
|
||||
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
|
||||
|
||||
const operand_ty = f.typeOf(bin_op.lhs);
|
||||
const operand_bits = operand_ty.bitSize(pt);
|
||||
const operand_bits = operand_ty.bitSize(zcu);
|
||||
if (operand_ty.isAbiInt(zcu) and operand_bits > 64)
|
||||
return airCmpBuiltinCall(
|
||||
f,
|
||||
@ -4312,7 +4314,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
|
||||
const inst_ty = f.typeOfIndex(inst);
|
||||
const inst_scalar_ty = inst_ty.scalarType(zcu);
|
||||
const elem_ty = inst_scalar_ty.elemType2(zcu);
|
||||
if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return f.moveCValue(inst, inst_ty, lhs);
|
||||
if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return f.moveCValue(inst, inst_ty, lhs);
|
||||
const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete);
|
||||
|
||||
const local = try f.allocLocal(inst, inst_ty);
|
||||
@ -4351,7 +4353,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons
|
||||
const inst_ty = f.typeOfIndex(inst);
|
||||
const inst_scalar_ty = inst_ty.scalarType(zcu);
|
||||
|
||||
if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(pt) > 64) or inst_scalar_ty.isRuntimeFloat())
|
||||
if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(zcu) > 64) or inst_scalar_ty.isRuntimeFloat())
|
||||
return try airBinBuiltinCall(f, inst, operation, .none);
|
||||
|
||||
const lhs = try f.resolveInst(bin_op.lhs);
|
||||
@ -4446,7 +4448,7 @@ fn airCall(
|
||||
if (!arg_ctype.eql(try f.ctypeFromType(arg_ty, .complete))) {
|
||||
const array_local = try f.allocAlignedLocal(inst, .{
|
||||
.ctype = arg_ctype,
|
||||
.alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(pt)),
|
||||
.alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(zcu)),
|
||||
});
|
||||
try writer.writeAll("memcpy(");
|
||||
try f.writeCValueMember(writer, array_local, .{ .identifier = "array" });
|
||||
@ -4493,7 +4495,7 @@ fn airCall(
|
||||
} else {
|
||||
const local = try f.allocAlignedLocal(inst, .{
|
||||
.ctype = ret_ctype,
|
||||
.alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(pt)),
|
||||
.alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)),
|
||||
});
|
||||
try f.writeCValue(writer, local, .Other);
|
||||
try writer.writeAll(" = ");
|
||||
@ -4618,7 +4620,7 @@ fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index)
|
||||
const writer = f.object.writer();
|
||||
|
||||
const inst_ty = f.typeOfIndex(inst);
|
||||
const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(pt) and !f.liveness.isUnused(inst))
|
||||
const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu) and !f.liveness.isUnused(inst))
|
||||
try f.allocLocal(inst, inst_ty)
|
||||
else
|
||||
.none;
|
||||
@ -4681,7 +4683,7 @@ fn lowerTry(
|
||||
const liveness_condbr = f.liveness.getCondBr(inst);
|
||||
const writer = f.object.writer();
|
||||
const payload_ty = err_union_ty.errorUnionPayload(zcu);
|
||||
const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt);
|
||||
const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
|
||||
|
||||
if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
|
||||
try writer.writeAll("if (");
|
||||
@ -4820,7 +4822,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CVal
|
||||
try writer.writeAll(", sizeof(");
|
||||
try f.renderType(
|
||||
writer,
|
||||
if (dest_ty.abiSize(pt) <= operand_ty.abiSize(pt)) dest_ty else operand_ty,
|
||||
if (dest_ty.abiSize(zcu) <= operand_ty.abiSize(zcu)) dest_ty else operand_ty,
|
||||
);
|
||||
try writer.writeAll("));\n");
|
||||
|
||||
@ -5030,7 +5032,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
try f.object.indent_writer.insertNewline();
|
||||
try writer.writeAll("case ");
|
||||
const item_value = try f.air.value(item, pt);
|
||||
if (item_value.?.getUnsignedInt(pt)) |item_int| try writer.print("{}\n", .{
|
||||
if (item_value.?.getUnsignedInt(zcu)) |item_int| try writer.print("{}\n", .{
|
||||
try f.fmtIntLiteral(try pt.intValue(lowered_condition_ty, item_int)),
|
||||
}) else {
|
||||
if (condition_ty.isPtrAtRuntime(zcu)) {
|
||||
@ -5112,10 +5114,10 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
const result = result: {
|
||||
const writer = f.object.writer();
|
||||
const inst_ty = f.typeOfIndex(inst);
|
||||
const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(pt)) local: {
|
||||
const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) local: {
|
||||
const inst_local = try f.allocLocalValue(.{
|
||||
.ctype = try f.ctypeFromType(inst_ty, .complete),
|
||||
.alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(pt)),
|
||||
.alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(zcu)),
|
||||
});
|
||||
if (f.wantSafety()) {
|
||||
try f.writeCValue(writer, inst_local, .Other);
|
||||
@ -5148,7 +5150,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
try writer.writeAll("register ");
|
||||
const output_local = try f.allocLocalValue(.{
|
||||
.ctype = try f.ctypeFromType(output_ty, .complete),
|
||||
.alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(pt)),
|
||||
.alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(zcu)),
|
||||
});
|
||||
try f.allocs.put(gpa, output_local.new_local, false);
|
||||
try f.object.dg.renderTypeAndName(writer, output_ty, output_local, .{}, .none, .complete);
|
||||
@ -5183,7 +5185,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
if (is_reg) try writer.writeAll("register ");
|
||||
const input_local = try f.allocLocalValue(.{
|
||||
.ctype = try f.ctypeFromType(input_ty, .complete),
|
||||
.alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(pt)),
|
||||
.alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(zcu)),
|
||||
});
|
||||
try f.allocs.put(gpa, input_local.new_local, false);
|
||||
try f.object.dg.renderTypeAndName(writer, input_ty, input_local, Const, .none, .complete);
|
||||
@ -5526,9 +5528,9 @@ fn fieldLocation(
|
||||
.struct_type => {
|
||||
const loaded_struct = ip.loadStructType(container_ty.toIntern());
|
||||
return switch (loaded_struct.layout) {
|
||||
.auto, .@"extern" => if (!container_ty.hasRuntimeBitsIgnoreComptime(pt))
|
||||
.auto, .@"extern" => if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu))
|
||||
.begin
|
||||
else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(pt))
|
||||
else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu))
|
||||
.{ .byte_offset = loaded_struct.offsets.get(ip)[field_index] }
|
||||
else
|
||||
.{ .field = if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
|
||||
@ -5542,10 +5544,10 @@ fn fieldLocation(
|
||||
.begin,
|
||||
};
|
||||
},
|
||||
.anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(pt))
|
||||
.anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu))
|
||||
.begin
|
||||
else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(pt))
|
||||
.{ .byte_offset = container_ty.structFieldOffset(field_index, pt) }
|
||||
else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu))
|
||||
.{ .byte_offset = container_ty.structFieldOffset(field_index, zcu) }
|
||||
else
|
||||
.{ .field = if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name|
|
||||
.{ .identifier = field_name.toSlice(ip) }
|
||||
@ -5556,8 +5558,8 @@ fn fieldLocation(
|
||||
switch (loaded_union.flagsUnordered(ip).layout) {
|
||||
.auto, .@"extern" => {
|
||||
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt))
|
||||
return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(pt))
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu))
|
||||
return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(zcu))
|
||||
.{ .field = .{ .identifier = "payload" } }
|
||||
else
|
||||
.begin;
|
||||
@ -5706,7 +5708,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
const extra = f.air.extraData(Air.StructField, ty_pl.payload).data;
|
||||
|
||||
const inst_ty = f.typeOfIndex(inst);
|
||||
if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) {
|
||||
if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
try reap(f, inst, &.{extra.struct_operand});
|
||||
return .none;
|
||||
}
|
||||
@ -5738,7 +5740,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
inst_ty.intInfo(zcu).signedness
|
||||
else
|
||||
.unsigned;
|
||||
const field_int_ty = try pt.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(pt))));
|
||||
const field_int_ty = try pt.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(zcu))));
|
||||
|
||||
const temp_local = try f.allocLocal(inst, field_int_ty);
|
||||
try f.writeCValue(writer, temp_local, .Other);
|
||||
@ -5749,7 +5751,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
try writer.writeByte(')');
|
||||
const cant_cast = int_info.bits > 64;
|
||||
if (cant_cast) {
|
||||
if (field_int_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
|
||||
if (field_int_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
|
||||
try writer.writeAll("zig_lo_");
|
||||
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
|
||||
try writer.writeByte('(');
|
||||
@ -5857,7 +5859,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
const payload_ty = error_union_ty.errorUnionPayload(zcu);
|
||||
const local = try f.allocLocal(inst, inst_ty);
|
||||
|
||||
if (!payload_ty.hasRuntimeBits(pt) and operand == .local and operand.local == local.new_local) {
|
||||
if (!payload_ty.hasRuntimeBits(zcu) and operand == .local and operand.local == local.new_local) {
|
||||
// The store will be 'x = x'; elide it.
|
||||
return local;
|
||||
}
|
||||
@ -5866,7 +5868,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
try f.writeCValue(writer, local, .Other);
|
||||
try writer.writeAll(" = ");
|
||||
|
||||
if (!payload_ty.hasRuntimeBits(pt))
|
||||
if (!payload_ty.hasRuntimeBits(zcu))
|
||||
try f.writeCValue(writer, operand, .Other)
|
||||
else if (error_ty.errorSetIsEmpty(zcu))
|
||||
try writer.print("{}", .{
|
||||
@ -5892,7 +5894,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
|
||||
const error_union_ty = if (is_ptr) operand_ty.childType(zcu) else operand_ty;
|
||||
|
||||
const writer = f.object.writer();
|
||||
if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(pt)) {
|
||||
if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(zcu)) {
|
||||
if (!is_ptr) return .none;
|
||||
|
||||
const local = try f.allocLocal(inst, inst_ty);
|
||||
@ -5963,7 +5965,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
|
||||
const inst_ty = f.typeOfIndex(inst);
|
||||
const payload_ty = inst_ty.errorUnionPayload(zcu);
|
||||
const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(pt);
|
||||
const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
|
||||
const err_ty = inst_ty.errorUnionSet(zcu);
|
||||
const err = try f.resolveInst(ty_op.operand);
|
||||
try reap(f, inst, &.{ty_op.operand});
|
||||
@ -6012,7 +6014,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
try reap(f, inst, &.{ty_op.operand});
|
||||
|
||||
// First, set the non-error value.
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
const a = try Assignment.start(f, writer, try f.ctypeFromType(operand_ty, .complete));
|
||||
try f.writeCValueDeref(writer, operand);
|
||||
try a.assign(f, writer);
|
||||
@ -6064,7 +6066,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
const inst_ty = f.typeOfIndex(inst);
|
||||
const payload_ty = inst_ty.errorUnionPayload(zcu);
|
||||
const payload = try f.resolveInst(ty_op.operand);
|
||||
const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(pt);
|
||||
const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
|
||||
const err_ty = inst_ty.errorUnionSet(zcu);
|
||||
try reap(f, inst, &.{ty_op.operand});
|
||||
|
||||
@ -6109,7 +6111,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
|
||||
try a.assign(f, writer);
|
||||
const err_int_ty = try pt.errorIntType();
|
||||
if (!error_ty.errorSetIsEmpty(zcu))
|
||||
if (payload_ty.hasRuntimeBits(pt))
|
||||
if (payload_ty.hasRuntimeBits(zcu))
|
||||
if (is_ptr)
|
||||
try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" })
|
||||
else
|
||||
@ -6430,7 +6432,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
|
||||
try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value });
|
||||
|
||||
const repr_ty = if (ty.isRuntimeFloat())
|
||||
pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable
|
||||
pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
|
||||
else
|
||||
ty;
|
||||
|
||||
@ -6534,7 +6536,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
const operand_mat = try Materialize.start(f, inst, ty, operand);
|
||||
try reap(f, inst, &.{ pl_op.operand, extra.operand });
|
||||
|
||||
const repr_bits = @as(u16, @intCast(ty.abiSize(pt) * 8));
|
||||
const repr_bits = @as(u16, @intCast(ty.abiSize(zcu) * 8));
|
||||
const is_float = ty.isRuntimeFloat();
|
||||
const is_128 = repr_bits == 128;
|
||||
const repr_ty = if (is_float) pt.intType(.unsigned, repr_bits) catch unreachable else ty;
|
||||
@ -6585,7 +6587,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
const ty = ptr_ty.childType(zcu);
|
||||
|
||||
const repr_ty = if (ty.isRuntimeFloat())
|
||||
pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable
|
||||
pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
|
||||
else
|
||||
ty;
|
||||
|
||||
@ -6626,7 +6628,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
|
||||
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
|
||||
|
||||
const repr_ty = if (ty.isRuntimeFloat())
|
||||
pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable
|
||||
pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
|
||||
else
|
||||
ty;
|
||||
|
||||
@ -6666,7 +6668,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
|
||||
const dest_slice = try f.resolveInst(bin_op.lhs);
|
||||
const value = try f.resolveInst(bin_op.rhs);
|
||||
const elem_ty = f.typeOf(bin_op.rhs);
|
||||
const elem_abi_size = elem_ty.abiSize(pt);
|
||||
const elem_abi_size = elem_ty.abiSize(zcu);
|
||||
const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(zcu) else false;
|
||||
const writer = f.object.writer();
|
||||
|
||||
@ -6831,7 +6833,7 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
|
||||
|
||||
const union_ty = f.typeOf(bin_op.lhs).childType(zcu);
|
||||
const layout = union_ty.unionGetLayout(pt);
|
||||
const layout = union_ty.unionGetLayout(zcu);
|
||||
if (layout.tag_size == 0) return .none;
|
||||
const tag_ty = union_ty.unionTagTypeSafety(zcu).?;
|
||||
|
||||
@ -6846,13 +6848,14 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
|
||||
fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
const pt = f.object.dg.pt;
|
||||
const zcu = pt.zcu;
|
||||
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
|
||||
|
||||
const operand = try f.resolveInst(ty_op.operand);
|
||||
try reap(f, inst, &.{ty_op.operand});
|
||||
|
||||
const union_ty = f.typeOf(ty_op.operand);
|
||||
const layout = union_ty.unionGetLayout(pt);
|
||||
const layout = union_ty.unionGetLayout(zcu);
|
||||
if (layout.tag_size == 0) return .none;
|
||||
|
||||
const inst_ty = f.typeOfIndex(inst);
|
||||
@ -6960,6 +6963,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
|
||||
fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
const pt = f.object.dg.pt;
|
||||
const zcu = pt.zcu;
|
||||
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
|
||||
const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data;
|
||||
|
||||
@ -6978,7 +6982,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
try f.object.dg.renderValue(writer, try pt.intValue(Type.usize, index), .Other);
|
||||
try writer.writeAll("] = ");
|
||||
|
||||
const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(pt);
|
||||
const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(zcu);
|
||||
const src_val = try pt.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63)));
|
||||
|
||||
try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other);
|
||||
@ -7001,7 +7005,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
const operand_ty = f.typeOf(reduce.operand);
|
||||
const writer = f.object.writer();
|
||||
|
||||
const use_operator = scalar_ty.bitSize(pt) <= 64;
|
||||
const use_operator = scalar_ty.bitSize(zcu) <= 64;
|
||||
const op: union(enum) {
|
||||
const Func = struct { operation: []const u8, info: BuiltinInfo = .none };
|
||||
builtin: Func,
|
||||
@ -7178,7 +7182,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
var field_it = loaded_struct.iterateRuntimeOrder(ip);
|
||||
while (field_it.next()) |field_index| {
|
||||
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete));
|
||||
try f.writeCValueMember(writer, local, if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
|
||||
@ -7202,8 +7206,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
var empty = true;
|
||||
for (0..elements.len) |field_index| {
|
||||
if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
|
||||
const field_ty = inst_ty.structFieldType(field_index, zcu);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
|
||||
const field_ty = inst_ty.fieldType(field_index, zcu);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
if (!empty) {
|
||||
try writer.writeAll("zig_or_");
|
||||
@ -7215,8 +7219,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
empty = true;
|
||||
for (resolved_elements, 0..) |element, field_index| {
|
||||
if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
|
||||
const field_ty = inst_ty.structFieldType(field_index, zcu);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
|
||||
const field_ty = inst_ty.fieldType(field_index, zcu);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
if (!empty) try writer.writeAll(", ");
|
||||
// TODO: Skip this entire shift if val is 0?
|
||||
@ -7248,7 +7252,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
try writer.writeByte(')');
|
||||
if (!empty) try writer.writeByte(')');
|
||||
|
||||
bit_offset += field_ty.bitSize(pt);
|
||||
bit_offset += field_ty.bitSize(zcu);
|
||||
empty = false;
|
||||
}
|
||||
try writer.writeAll(";\n");
|
||||
@ -7258,7 +7262,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
.anon_struct_type => |anon_struct_info| for (0..anon_struct_info.types.len) |field_index| {
|
||||
if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
|
||||
const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete));
|
||||
try f.writeCValueMember(writer, local, if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name|
|
||||
@ -7294,7 +7298,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
if (loaded_union.flagsUnordered(ip).layout == .@"packed") return f.moveCValue(inst, union_ty, payload);
|
||||
|
||||
const field: CValue = if (union_ty.unionTagTypeSafety(zcu)) |tag_ty| field: {
|
||||
const layout = union_ty.unionGetLayout(pt);
|
||||
const layout = union_ty.unionGetLayout(zcu);
|
||||
if (layout.tag_size != 0) {
|
||||
const field_index = tag_ty.enumFieldIndex(field_name, zcu).?;
|
||||
const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
|
||||
@ -7818,7 +7822,7 @@ fn formatIntLiteral(
|
||||
};
|
||||
undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits);
|
||||
break :blk undef_int.toConst();
|
||||
} else data.val.toBigInt(&int_buf, pt);
|
||||
} else data.val.toBigInt(&int_buf, zcu);
|
||||
assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));
|
||||
|
||||
const c_bits: usize = @intCast(data.ctype.byteSize(ctype_pool, data.dg.mod) * 8);
|
||||
@ -8062,9 +8066,10 @@ const Vectorize = struct {
};

fn lowersToArray(ty: Type, pt: Zcu.PerThread) bool {
return switch (ty.zigTypeTag(pt.zcu)) {
const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) {
.Array, .Vector => return true,
else => return ty.isAbiInt(pt.zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(pt)))) == null,
else => return ty.isAbiInt(zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(zcu)))) == null,
};
}

@ -1344,6 +1344,7 @@ pub const Pool = struct {
|
||||
kind: Kind,
|
||||
) !CType {
|
||||
const ip = &pt.zcu.intern_pool;
|
||||
const zcu = pt.zcu;
|
||||
switch (ty.toIntern()) {
|
||||
.u0_type,
|
||||
.i0_type,
|
||||
@ -1476,7 +1477,7 @@ pub const Pool = struct {
|
||||
),
|
||||
.alignas = AlignAs.fromAlignment(.{
|
||||
.@"align" = ptr_info.flags.alignment,
|
||||
.abi = Type.fromInterned(ptr_info.child).abiAlignment(pt),
|
||||
.abi = Type.fromInterned(ptr_info.child).abiAlignment(zcu),
|
||||
}),
|
||||
};
|
||||
break :elem_ctype if (elem.alignas.abiOrder().compare(.gte))
|
||||
@ -1552,7 +1553,7 @@ pub const Pool = struct {
|
||||
.{
|
||||
.name = .{ .index = .array },
|
||||
.ctype = array_ctype,
|
||||
.alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(pt)),
|
||||
.alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)),
|
||||
},
|
||||
};
|
||||
return pool.fromFields(allocator, .@"struct", &fields, kind);
|
||||
@ -1578,7 +1579,7 @@ pub const Pool = struct {
|
||||
.{
|
||||
.name = .{ .index = .array },
|
||||
.ctype = vector_ctype,
|
||||
.alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(pt)),
|
||||
.alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)),
|
||||
},
|
||||
};
|
||||
return pool.fromFields(allocator, .@"struct", &fields, kind);
|
||||
@ -1613,7 +1614,7 @@ pub const Pool = struct {
|
||||
.name = .{ .index = .payload },
|
||||
.ctype = payload_ctype,
|
||||
.alignas = AlignAs.fromAbiAlignment(
|
||||
Type.fromInterned(payload_type).abiAlignment(pt),
|
||||
Type.fromInterned(payload_type).abiAlignment(zcu),
|
||||
),
|
||||
},
|
||||
};
|
||||
@ -1649,7 +1650,7 @@ pub const Pool = struct {
|
||||
.{
|
||||
.name = .{ .index = .payload },
|
||||
.ctype = payload_ctype,
|
||||
.alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(pt)),
|
||||
.alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(zcu)),
|
||||
},
|
||||
};
|
||||
return pool.fromFields(allocator, .@"struct", &fields, kind);
|
||||
@ -1663,7 +1664,7 @@ pub const Pool = struct {
|
||||
.tag = .@"struct",
|
||||
.name = .{ .index = ip_index },
|
||||
});
|
||||
if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt))
|
||||
if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu))
|
||||
fwd_decl
|
||||
else
|
||||
CType.void;
|
||||
@ -1696,7 +1697,7 @@ pub const Pool = struct {
|
||||
String.fromUnnamed(@intCast(field_index));
|
||||
const field_alignas = AlignAs.fromAlignment(.{
|
||||
.@"align" = loaded_struct.fieldAlign(ip, field_index),
|
||||
.abi = field_type.abiAlignment(pt),
|
||||
.abi = field_type.abiAlignment(zcu),
|
||||
});
|
||||
pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
|
||||
.name = field_name.index,
|
||||
@ -1758,7 +1759,7 @@ pub const Pool = struct {
|
||||
.name = field_name.index,
|
||||
.ctype = field_ctype.index,
|
||||
.flags = .{ .alignas = AlignAs.fromAbiAlignment(
|
||||
field_type.abiAlignment(pt),
|
||||
field_type.abiAlignment(zcu),
|
||||
) },
|
||||
});
|
||||
}
|
||||
@ -1802,7 +1803,7 @@ pub const Pool = struct {
|
||||
.tag = if (has_tag) .@"struct" else .@"union",
|
||||
.name = .{ .index = ip_index },
|
||||
});
|
||||
if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt))
|
||||
if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu))
|
||||
fwd_decl
|
||||
else
|
||||
CType.void;
|
||||
@ -1836,7 +1837,7 @@ pub const Pool = struct {
|
||||
);
|
||||
const field_alignas = AlignAs.fromAlignment(.{
|
||||
.@"align" = loaded_union.fieldAlign(ip, field_index),
|
||||
.abi = field_type.abiAlignment(pt),
|
||||
.abi = field_type.abiAlignment(zcu),
|
||||
});
|
||||
pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
|
||||
.name = field_name.index,
|
||||
@ -1881,7 +1882,7 @@ pub const Pool = struct {
|
||||
struct_fields[struct_fields_len] = .{
|
||||
.name = .{ .index = .tag },
|
||||
.ctype = tag_ctype,
|
||||
.alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(pt)),
|
||||
.alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(zcu)),
|
||||
};
|
||||
struct_fields_len += 1;
|
||||
}
|
||||
@ -1929,7 +1930,7 @@ pub const Pool = struct {
|
||||
},
|
||||
.@"packed" => return pool.fromIntInfo(allocator, .{
|
||||
.signedness = .unsigned,
|
||||
.bits = @intCast(ty.bitSize(pt)),
|
||||
.bits = @intCast(ty.bitSize(zcu)),
|
||||
}, mod, kind),
|
||||
}
|
||||
},
|
||||
|
||||
src/codegen/llvm.zig (1697 lines changed): File diff suppressed because it is too large
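The hunks in this commit repeat one mechanical pattern: layout queries such as abiAlignment, bitSize, and hasRuntimeBitsIgnoreComptime now take the *Zcu directly instead of a Zcu.PerThread, and Compilation's module field becomes zcu. A minimal sketch of the call-site change, using a hypothetical emitAlign helper that is not part of this patch and exists only to illustrate the signature shift:

    // Before this change, layout queries threaded the per-thread handle through:
    fn emitAlign(pt: Zcu.PerThread, ty: Type) u64 {
        return ty.abiAlignment(pt).toByteUnits().?;
    }

    // After the rename, the same query needs only the zcu pointer:
    fn emitAlign(zcu: *Zcu, ty: Type) u64 {
        return ty.abiAlignment(zcu).toByteUnits().?;
    }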
@ -755,7 +755,7 @@ pub const File = struct {
const directory = base.emit.root_dir; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{base.emit.sub_path});
const full_out_path_z = try arena.dupeZ(u8, full_out_path);
const opt_zcu = comp.module;
const opt_zcu = comp.zcu;

// If there is no Zig code to compile, then we should skip flushing the output file
// because it will not be part of the linker line anyway.

@ -327,7 +327,7 @@ pub fn updateNav(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !
.variable => |variable| variable.init,
else => nav.status.resolved.val,
};
if (nav_init != .none and !Value.fromInterned(nav_init).typeOf(zcu).hasRuntimeBits(pt)) return;
if (nav_init != .none and !Value.fromInterned(nav_init).typeOf(zcu).hasRuntimeBits(zcu)) return;

const gop = try self.navs.getOrPut(gpa, nav_index);
errdefer _ = self.navs.pop();
@ -418,7 +418,7 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:

const comp = self.base.comp;
const gpa = comp.gpa;
const zcu = self.base.comp.module.?;
const zcu = self.base.comp.zcu.?;
const ip = &zcu.intern_pool;
const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = tid };

@ -1141,7 +1141,7 @@ pub fn updateFunc(self: *Coff, pt: Zcu.PerThread, func_index: InternPool.Index,
|
||||
|
||||
const LowerConstResult = union(enum) {
|
||||
ok: Atom.Index,
|
||||
fail: *Module.ErrorMsg,
|
||||
fail: *Zcu.ErrorMsg,
|
||||
};
|
||||
|
||||
fn lowerConst(
|
||||
@ -1151,7 +1151,7 @@ fn lowerConst(
|
||||
val: Value,
|
||||
required_alignment: InternPool.Alignment,
|
||||
sect_id: u16,
|
||||
src_loc: Module.LazySrcLoc,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
) !LowerConstResult {
|
||||
const gpa = self.base.comp.gpa;
|
||||
|
||||
@ -1221,7 +1221,7 @@ pub fn updateNav(
|
||||
else => nav_val,
|
||||
};
|
||||
|
||||
if (nav_init.typeOf(zcu).hasRuntimeBits(pt)) {
|
||||
if (nav_init.typeOf(zcu).hasRuntimeBits(zcu)) {
|
||||
const atom_index = try self.getOrCreateAtomForNav(nav_index);
|
||||
Atom.freeRelocations(self, atom_index);
|
||||
const atom = self.getAtom(atom_index);
|
||||
@ -1259,8 +1259,8 @@ fn updateLazySymbolAtom(
|
||||
atom_index: Atom.Index,
|
||||
section_index: u16,
|
||||
) !void {
|
||||
const mod = pt.zcu;
|
||||
const gpa = mod.gpa;
|
||||
const zcu = pt.zcu;
|
||||
const gpa = zcu.gpa;
|
||||
|
||||
var required_alignment: InternPool.Alignment = .none;
|
||||
var code_buffer = std.ArrayList(u8).init(gpa);
|
||||
@ -1275,7 +1275,7 @@ fn updateLazySymbolAtom(
|
||||
const atom = self.getAtomPtr(atom_index);
|
||||
const local_sym_index = atom.getSymbolIndex().?;
|
||||
|
||||
const src = Type.fromInterned(sym.ty).srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded;
|
||||
const src = Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded;
|
||||
const res = try codegen.generateLazySymbol(
|
||||
&self.base,
|
||||
pt,
|
||||
@ -1354,7 +1354,7 @@ pub fn getOrCreateAtomForNav(self: *Coff, nav_index: InternPool.Nav.Index) !Atom
|
||||
}
|
||||
|
||||
fn getNavOutputSection(self: *Coff, nav_index: InternPool.Nav.Index) u16 {
|
||||
const zcu = self.base.comp.module.?;
|
||||
const zcu = self.base.comp.zcu.?;
|
||||
const ip = &zcu.intern_pool;
|
||||
const nav = ip.getNav(nav_index);
|
||||
const ty = Type.fromInterned(nav.typeOf(ip));
|
||||
@ -1462,15 +1462,15 @@ pub fn freeNav(self: *Coff, nav_index: InternPool.NavIndex) void {
|
||||
pub fn updateExports(
|
||||
self: *Coff,
|
||||
pt: Zcu.PerThread,
|
||||
exported: Module.Exported,
|
||||
exported: Zcu.Exported,
|
||||
export_indices: []const u32,
|
||||
) link.File.UpdateExportsError!void {
|
||||
if (build_options.skip_non_native and builtin.object_format != .coff) {
|
||||
@panic("Attempted to compile for object format that was disabled by build configuration");
|
||||
}
|
||||
|
||||
const mod = pt.zcu;
|
||||
const ip = &mod.intern_pool;
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const comp = self.base.comp;
|
||||
const target = comp.root_mod.resolved_target.result;
|
||||
|
||||
@ -1478,7 +1478,7 @@ pub fn updateExports(
|
||||
// Even in the case of LLVM, we need to notice certain exported symbols in order to
|
||||
// detect the default subsystem.
|
||||
for (export_indices) |export_idx| {
|
||||
const exp = mod.all_exports.items[export_idx];
|
||||
const exp = zcu.all_exports.items[export_idx];
|
||||
const exported_nav_index = switch (exp.exported) {
|
||||
.nav => |nav| nav,
|
||||
.uav => continue,
|
||||
@ -1490,20 +1490,20 @@ pub fn updateExports(
|
||||
.x86 => .Stdcall,
|
||||
else => .C,
|
||||
};
|
||||
const exported_cc = Type.fromInterned(exported_ty).fnCallingConvention(mod);
|
||||
const exported_cc = Type.fromInterned(exported_ty).fnCallingConvention(zcu);
|
||||
if (exported_cc == .C and exp.opts.name.eqlSlice("main", ip) and comp.config.link_libc) {
|
||||
mod.stage1_flags.have_c_main = true;
|
||||
zcu.stage1_flags.have_c_main = true;
|
||||
} else if (exported_cc == winapi_cc and target.os.tag == .windows) {
|
||||
if (exp.opts.name.eqlSlice("WinMain", ip)) {
|
||||
mod.stage1_flags.have_winmain = true;
|
||||
zcu.stage1_flags.have_winmain = true;
|
||||
} else if (exp.opts.name.eqlSlice("wWinMain", ip)) {
|
||||
mod.stage1_flags.have_wwinmain = true;
|
||||
zcu.stage1_flags.have_wwinmain = true;
|
||||
} else if (exp.opts.name.eqlSlice("WinMainCRTStartup", ip)) {
|
||||
mod.stage1_flags.have_winmain_crt_startup = true;
|
||||
zcu.stage1_flags.have_winmain_crt_startup = true;
|
||||
} else if (exp.opts.name.eqlSlice("wWinMainCRTStartup", ip)) {
|
||||
mod.stage1_flags.have_wwinmain_crt_startup = true;
|
||||
zcu.stage1_flags.have_wwinmain_crt_startup = true;
|
||||
} else if (exp.opts.name.eqlSlice("DllMainCRTStartup", ip)) {
|
||||
mod.stage1_flags.have_dllmain_crt_startup = true;
|
||||
zcu.stage1_flags.have_dllmain_crt_startup = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1519,15 +1519,15 @@ pub fn updateExports(
|
||||
break :blk self.navs.getPtr(nav).?;
|
||||
},
|
||||
.uav => |uav| self.uavs.getPtr(uav) orelse blk: {
|
||||
const first_exp = mod.all_exports.items[export_indices[0]];
|
||||
const first_exp = zcu.all_exports.items[export_indices[0]];
|
||||
const res = try self.lowerUav(pt, uav, .none, first_exp.src);
|
||||
switch (res) {
|
||||
.mcv => {},
|
||||
.fail => |em| {
|
||||
// TODO maybe it's enough to return an error here and let Module.processExportsInner
|
||||
// handle the error?
|
||||
try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1);
|
||||
mod.failed_exports.putAssumeCapacityNoClobber(export_indices[0], em);
|
||||
try zcu.failed_exports.ensureUnusedCapacity(zcu.gpa, 1);
|
||||
zcu.failed_exports.putAssumeCapacityNoClobber(export_indices[0], em);
|
||||
return;
|
||||
},
|
||||
}
|
||||
@ -1538,12 +1538,12 @@ pub fn updateExports(
|
||||
const atom = self.getAtom(atom_index);
|
||||
|
||||
for (export_indices) |export_idx| {
|
||||
const exp = mod.all_exports.items[export_idx];
|
||||
log.debug("adding new export '{}'", .{exp.opts.name.fmt(&mod.intern_pool)});
|
||||
const exp = zcu.all_exports.items[export_idx];
|
||||
log.debug("adding new export '{}'", .{exp.opts.name.fmt(&zcu.intern_pool)});
|
||||
|
||||
if (exp.opts.section.toSlice(&mod.intern_pool)) |section_name| {
|
||||
if (exp.opts.section.toSlice(&zcu.intern_pool)) |section_name| {
|
||||
if (!mem.eql(u8, section_name, ".text")) {
|
||||
try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create(
|
||||
try zcu.failed_exports.putNoClobber(gpa, export_idx, try Zcu.ErrorMsg.create(
|
||||
gpa,
|
||||
exp.src,
|
||||
"Unimplemented: ExportOptions.section",
|
||||
@ -1554,7 +1554,7 @@ pub fn updateExports(
|
||||
}
|
||||
|
||||
if (exp.opts.linkage == .link_once) {
|
||||
try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create(
|
||||
try zcu.failed_exports.putNoClobber(gpa, export_idx, try Zcu.ErrorMsg.create(
|
||||
gpa,
|
||||
exp.src,
|
||||
"Unimplemented: GlobalLinkage.link_once",
|
||||
@ -1563,7 +1563,7 @@ pub fn updateExports(
|
||||
continue;
|
||||
}
|
||||
|
||||
const exp_name = exp.opts.name.toSlice(&mod.intern_pool);
|
||||
const exp_name = exp.opts.name.toSlice(&zcu.intern_pool);
|
||||
const sym_index = metadata.getExport(self, exp_name) orelse blk: {
|
||||
const sym_index = if (self.getGlobalIndex(exp_name)) |global_index| ind: {
|
||||
const global = self.globals.items[global_index];
|
||||
@ -1609,14 +1609,14 @@ pub fn deleteExport(
|
||||
.nav => |nav| self.navs.getPtr(nav),
|
||||
.uav => |uav| self.uavs.getPtr(uav),
|
||||
} orelse return;
|
||||
const mod = self.base.comp.module.?;
|
||||
const name_slice = name.toSlice(&mod.intern_pool);
|
||||
const zcu = self.base.comp.zcu.?;
|
||||
const name_slice = name.toSlice(&zcu.intern_pool);
|
||||
const sym_index = metadata.getExportPtr(self, name_slice) orelse return;
|
||||
|
||||
const gpa = self.base.comp.gpa;
|
||||
const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
|
||||
const sym = self.getSymbolPtr(sym_loc);
|
||||
log.debug("deleting export '{}'", .{name.fmt(&mod.intern_pool)});
|
||||
log.debug("deleting export '{}'", .{name.fmt(&zcu.intern_pool)});
|
||||
assert(sym.storage_class == .EXTERNAL and sym.section_number != .UNDEFINED);
|
||||
sym.* = .{
|
||||
.name = [_]u8{0} ** 8,
|
||||
@ -1691,7 +1691,7 @@ pub fn flushModule(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
|
||||
defer sub_prog_node.end();
|
||||
|
||||
const pt: Zcu.PerThread = .{
|
||||
.zcu = comp.module orelse return error.LinkingWithoutZigSourceUnimplemented,
|
||||
.zcu = comp.zcu orelse return error.LinkingWithoutZigSourceUnimplemented,
|
||||
.tid = tid,
|
||||
};
|
||||
|
||||
@ -1843,13 +1843,13 @@ pub fn lowerUav(
|
||||
pt: Zcu.PerThread,
|
||||
uav: InternPool.Index,
|
||||
explicit_alignment: InternPool.Alignment,
|
||||
src_loc: Module.LazySrcLoc,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
) !codegen.GenResult {
|
||||
const zcu = pt.zcu;
|
||||
const gpa = zcu.gpa;
|
||||
const val = Value.fromInterned(uav);
|
||||
const uav_alignment = switch (explicit_alignment) {
|
||||
.none => val.typeOf(zcu).abiAlignment(pt),
|
||||
.none => val.typeOf(zcu).abiAlignment(zcu),
|
||||
else => explicit_alignment,
|
||||
};
|
||||
if (self.uavs.get(uav)) |metadata| {
|
||||
@ -1872,7 +1872,7 @@ pub fn lowerUav(
|
||||
src_loc,
|
||||
) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
else => |e| return .{ .fail = try Module.ErrorMsg.create(
|
||||
else => |e| return .{ .fail = try Zcu.ErrorMsg.create(
|
||||
gpa,
|
||||
src_loc,
|
||||
"lowerAnonDecl failed with error: {s}",
|
||||
@ -2730,8 +2730,6 @@ const ImportTable = @import("Coff/ImportTable.zig");
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const Zcu = @import("../Zcu.zig");
/// Deprecated.
const Module = Zcu;
const InternPool = @import("../InternPool.zig");
const Object = @import("Coff/Object.zig");
const Relocation = @import("Coff/Relocation.zig");

@ -32,7 +32,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
|
||||
|
||||
// If there is no Zig code to compile, then we should skip flushing the output file because it
|
||||
// will not be part of the linker line anyway.
|
||||
const module_obj_path: ?[]const u8 = if (comp.module != null) blk: {
|
||||
const module_obj_path: ?[]const u8 = if (comp.zcu != null) blk: {
|
||||
try self.flushModule(arena, tid, prog_node);
|
||||
|
||||
if (fs.path.dirname(full_out_path)) |dirname| {
|
||||
@ -296,7 +296,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
|
||||
if (self.subsystem) |explicit| break :blk explicit;
|
||||
switch (target.os.tag) {
|
||||
.windows => {
|
||||
if (comp.module) |module| {
|
||||
if (comp.zcu) |module| {
|
||||
if (module.stage1_flags.have_dllmain_crt_startup or is_dyn_lib)
|
||||
break :blk null;
|
||||
if (module.stage1_flags.have_c_main or comp.config.is_test or
|
||||
@ -440,7 +440,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
|
||||
} else {
|
||||
try argv.append("-NODEFAULTLIB");
|
||||
if (!is_lib and entry_name == null) {
|
||||
if (comp.module) |module| {
|
||||
if (comp.zcu) |module| {
|
||||
if (module.stage1_flags.have_winmain_crt_startup) {
|
||||
try argv.append("-ENTRY:WinMainCRTStartup");
|
||||
} else {
|
||||
|
||||
@ -780,7 +780,7 @@ const Entry = struct {
|
||||
else
|
||||
"?", 0),
|
||||
});
|
||||
const zcu = dwarf.bin_file.comp.module.?;
|
||||
const zcu = dwarf.bin_file.comp.zcu.?;
|
||||
const ip = &zcu.intern_pool;
|
||||
for (dwarf.types.keys(), dwarf.types.values()) |ty, other_entry| {
|
||||
const ty_unit: Unit.Index = if (Type.fromInterned(ty).typeDeclInst(zcu)) |inst_index|
|
||||
@ -1429,7 +1429,7 @@ pub const WipNav = struct {
|
||||
}
|
||||
} else {
|
||||
try wip_nav.abbrevCode(abbrev_code.block);
|
||||
const bytes = Type.fromInterned(loaded_enum.tag_ty).abiSize(wip_nav.pt);
|
||||
const bytes = Type.fromInterned(loaded_enum.tag_ty).abiSize(wip_nav.pt.zcu);
|
||||
try uleb128(diw, bytes);
|
||||
big_int.writeTwosComplement(try wip_nav.debug_info.addManyAsSlice(wip_nav.dwarf.gpa, @intCast(bytes)), wip_nav.dwarf.endian);
|
||||
}
|
||||
@ -1770,7 +1770,7 @@ pub fn initWipNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.In
|
||||
const ty_reloc_index = try wip_nav.refForward();
|
||||
try wip_nav.exprloc(.{ .addr = .{ .sym = sym_index } });
|
||||
try uleb128(diw, nav.status.resolved.alignment.toByteUnits() orelse
|
||||
ty.abiAlignment(pt).toByteUnits().?);
|
||||
ty.abiAlignment(zcu).toByteUnits().?);
|
||||
try diw.writeByte(@intFromBool(false));
|
||||
wip_nav.finishForward(ty_reloc_index);
|
||||
try wip_nav.abbrevCode(.is_const);
|
||||
@ -1821,7 +1821,7 @@ pub fn initWipNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.In
|
||||
const addr: Loc = .{ .addr = .{ .sym = sym_index } };
|
||||
try wip_nav.exprloc(if (variable.is_threadlocal) .{ .form_tls_address = &addr } else addr);
|
||||
try uleb128(diw, nav.status.resolved.alignment.toByteUnits() orelse
|
||||
ty.abiAlignment(pt).toByteUnits().?);
|
||||
ty.abiAlignment(zcu).toByteUnits().?);
|
||||
try diw.writeByte(@intFromBool(false));
|
||||
},
|
||||
.func => |func| {
|
||||
@ -2158,8 +2158,8 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
|
||||
try diw.writeByte(accessibility);
|
||||
try wip_nav.strp(nav.name.toSlice(ip));
|
||||
if (loaded_struct.field_types.len == 0) try diw.writeByte(@intFromBool(false)) else {
|
||||
try uleb128(diw, nav_val.toType().abiSize(pt));
|
||||
try uleb128(diw, nav_val.toType().abiAlignment(pt).toByteUnits().?);
|
||||
try uleb128(diw, nav_val.toType().abiSize(zcu));
|
||||
try uleb128(diw, nav_val.toType().abiAlignment(zcu).toByteUnits().?);
|
||||
for (0..loaded_struct.field_types.len) |field_index| {
|
||||
const is_comptime = loaded_struct.fieldIsComptime(ip, field_index);
|
||||
try wip_nav.abbrevCode(if (is_comptime) .struct_field_comptime else .struct_field);
|
||||
@ -2173,7 +2173,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
|
||||
if (!is_comptime) {
|
||||
try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]);
|
||||
try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse
|
||||
field_type.abiAlignment(pt).toByteUnits().?);
|
||||
field_type.abiAlignment(zcu).toByteUnits().?);
|
||||
}
|
||||
}
|
||||
try uleb128(diw, @intFromEnum(AbbrevCode.null));
|
||||
@ -2195,7 +2195,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
|
||||
const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
|
||||
try wip_nav.refType(field_type);
|
||||
try uleb128(diw, field_bit_offset);
|
||||
field_bit_offset += @intCast(field_type.bitSize(pt));
|
||||
field_bit_offset += @intCast(field_type.bitSize(zcu));
|
||||
}
|
||||
try uleb128(diw, @intFromEnum(AbbrevCode.null));
|
||||
},
|
||||
@ -2360,7 +2360,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
|
||||
try uleb128(diw, loc.column + 1);
|
||||
try diw.writeByte(accessibility);
|
||||
try wip_nav.strp(nav.name.toSlice(ip));
|
||||
const union_layout = pt.getUnionLayout(loaded_union);
|
||||
const union_layout = Type.getUnionLayout(loaded_union, zcu);
|
||||
try uleb128(diw, union_layout.abi_size);
|
||||
try uleb128(diw, union_layout.abi_align.toByteUnits().?);
|
||||
const loaded_tag = loaded_union.loadTagType(ip);
|
||||
@ -2391,7 +2391,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
|
||||
try wip_nav.refType(field_type);
|
||||
try uleb128(diw, union_layout.payloadOffset());
|
||||
try uleb128(diw, loaded_union.fieldAlign(ip, field_index).toByteUnits() orelse
|
||||
if (field_type.isNoReturn(zcu)) 1 else field_type.abiAlignment(pt).toByteUnits().?);
|
||||
if (field_type.isNoReturn(zcu)) 1 else field_type.abiAlignment(zcu).toByteUnits().?);
|
||||
}
|
||||
try uleb128(diw, @intFromEnum(AbbrevCode.null));
|
||||
}
|
||||
@ -2406,7 +2406,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
|
||||
const field_type = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
|
||||
try wip_nav.refType(field_type);
|
||||
try uleb128(diw, loaded_union.fieldAlign(ip, field_index).toByteUnits() orelse
|
||||
field_type.abiAlignment(pt).toByteUnits().?);
|
||||
field_type.abiAlignment(zcu).toByteUnits().?);
|
||||
}
|
||||
try uleb128(diw, @intFromEnum(AbbrevCode.null));
|
||||
break :done;
|
||||
@ -2560,8 +2560,8 @@ fn updateType(
|
||||
inline .signed, .unsigned => |signedness| @field(DW.ATE, @tagName(signedness)),
|
||||
});
|
||||
try uleb128(diw, int_type.bits);
|
||||
try uleb128(diw, ty.abiSize(pt));
|
||||
try uleb128(diw, ty.abiAlignment(pt).toByteUnits().?);
|
||||
try uleb128(diw, ty.abiSize(zcu));
|
||||
try uleb128(diw, ty.abiAlignment(zcu).toByteUnits().?);
|
||||
},
|
||||
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
|
||||
.One, .Many, .C => {
|
||||
@ -2569,7 +2569,7 @@ fn updateType(
|
||||
try wip_nav.abbrevCode(.ptr_type);
|
||||
try wip_nav.strp(name);
|
||||
try uleb128(diw, ptr_type.flags.alignment.toByteUnits() orelse
|
||||
ptr_child_type.abiAlignment(pt).toByteUnits().?);
|
||||
ptr_child_type.abiAlignment(zcu).toByteUnits().?);
|
||||
try diw.writeByte(@intFromEnum(ptr_type.flags.address_space));
|
||||
if (ptr_type.flags.is_const or ptr_type.flags.is_volatile) try wip_nav.infoSectionOffset(
|
||||
.debug_info,
|
||||
@ -2594,8 +2594,8 @@ fn updateType(
|
||||
.Slice => {
|
||||
try wip_nav.abbrevCode(.struct_type);
|
||||
try wip_nav.strp(name);
|
||||
try uleb128(diw, ty.abiSize(pt));
|
||||
try uleb128(diw, ty.abiAlignment(pt).toByteUnits().?);
|
||||
try uleb128(diw, ty.abiSize(zcu));
|
||||
try uleb128(diw, ty.abiAlignment(zcu).toByteUnits().?);
|
||||
try wip_nav.abbrevCode(.generated_field);
try wip_nav.strp("ptr");
const ptr_field_type = ty.slicePtrFieldType(zcu);
@ -2605,7 +2605,7 @@ fn updateType(
try wip_nav.strp("len");
const len_field_type = Type.usize;
try wip_nav.refType(len_field_type);
try uleb128(diw, len_field_type.abiAlignment(pt).forward(ptr_field_type.abiSize(pt)));
try uleb128(diw, len_field_type.abiAlignment(zcu).forward(ptr_field_type.abiSize(zcu)));
try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
},
@ -2623,8 +2623,8 @@ fn updateType(
const opt_child_type = Type.fromInterned(opt_child_type_index);
try wip_nav.abbrevCode(.union_type);
try wip_nav.strp(name);
try uleb128(diw, ty.abiSize(pt));
try uleb128(diw, ty.abiAlignment(pt).toByteUnits().?);
try uleb128(diw, ty.abiSize(zcu));
try uleb128(diw, ty.abiAlignment(zcu).toByteUnits().?);
if (opt_child_type.isNoReturn(zcu)) {
try wip_nav.abbrevCode(.generated_field);
try wip_nav.strp("null");
@ -2652,8 +2652,8 @@ fn updateType(
switch (repr) {
.unpacked => {
try wip_nav.refType(Type.bool);
try uleb128(diw, if (opt_child_type.hasRuntimeBits(pt))
opt_child_type.abiSize(pt)
try uleb128(diw, if (opt_child_type.hasRuntimeBits(zcu))
opt_child_type.abiSize(zcu)
else
0);
},
@ -2700,8 +2700,8 @@ fn updateType(
const error_union_error_set_offset, const error_union_payload_offset = switch (error_union_type.payload_type) {
.generic_poison_type => .{ 0, 0 },
else => .{
codegen.errUnionErrorOffset(error_union_payload_type, pt),
codegen.errUnionPayloadOffset(error_union_payload_type, pt),
codegen.errUnionErrorOffset(error_union_payload_type, zcu),
codegen.errUnionPayloadOffset(error_union_payload_type, zcu),
},
};

@ -2710,8 +2710,8 @@ fn updateType(
if (error_union_type.error_set_type != .generic_poison_type and
error_union_type.payload_type != .generic_poison_type)
{
try uleb128(diw, ty.abiSize(pt));
try uleb128(diw, ty.abiAlignment(pt).toByteUnits().?);
try uleb128(diw, ty.abiSize(zcu));
try uleb128(diw, ty.abiAlignment(zcu).toByteUnits().?);
} else {
try uleb128(diw, 0);
try uleb128(diw, 1);
@ -2788,9 +2788,9 @@ fn updateType(
DW.ATE.unsigned
else
unreachable);
try uleb128(diw, ty.bitSize(pt));
try uleb128(diw, ty.abiSize(pt));
try uleb128(diw, ty.abiAlignment(pt).toByteUnits().?);
try uleb128(diw, ty.bitSize(zcu));
try uleb128(diw, ty.abiSize(zcu));
try uleb128(diw, ty.abiAlignment(zcu).toByteUnits().?);
},
.anyopaque,
.void,
@ -2820,8 +2820,8 @@ fn updateType(
} else {
try wip_nav.abbrevCode(.struct_type);
try wip_nav.strp(name);
try uleb128(diw, ty.abiSize(pt));
try uleb128(diw, ty.abiAlignment(pt).toByteUnits().?);
try uleb128(diw, ty.abiSize(zcu));
try uleb128(diw, ty.abiAlignment(zcu).toByteUnits().?);
var field_byte_offset: u64 = 0;
for (0..anon_struct_type.types.len) |field_index| {
const comptime_value = anon_struct_type.values.get(ip)[field_index];
@ -2834,11 +2834,11 @@ fn updateType(
const field_type = Type.fromInterned(anon_struct_type.types.get(ip)[field_index]);
try wip_nav.refType(field_type);
if (comptime_value == .none) {
const field_align = field_type.abiAlignment(pt);
const field_align = field_type.abiAlignment(zcu);
field_byte_offset = field_align.forward(field_byte_offset);
try uleb128(diw, field_byte_offset);
try uleb128(diw, field_type.abiAlignment(pt).toByteUnits().?);
field_byte_offset += field_type.abiSize(pt);
try uleb128(diw, field_type.abiAlignment(zcu).toByteUnits().?);
field_byte_offset += field_type.abiSize(zcu);
}
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
@ -2976,8 +2976,8 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
try uleb128(diw, file_gop.index);
try wip_nav.strp(loaded_struct.name.toSlice(ip));
if (loaded_struct.field_types.len > 0) {
try uleb128(diw, ty.abiSize(pt));
try uleb128(diw, ty.abiAlignment(pt).toByteUnits().?);
try uleb128(diw, ty.abiSize(zcu));
try uleb128(diw, ty.abiAlignment(zcu).toByteUnits().?);
for (0..loaded_struct.field_types.len) |field_index| {
const is_comptime = loaded_struct.fieldIsComptime(ip, field_index);
try wip_nav.abbrevCode(if (is_comptime) .struct_field_comptime else .struct_field);
@ -2991,7 +2991,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
if (!is_comptime) {
try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]);
try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse
field_type.abiAlignment(pt).toByteUnits().?);
field_type.abiAlignment(zcu).toByteUnits().?);
}
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
@ -3042,8 +3042,8 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
try wip_nav.abbrevCode(if (loaded_struct.field_types.len == 0) .namespace_struct_type else .struct_type);
try wip_nav.strp(name);
if (loaded_struct.field_types.len == 0) try diw.writeByte(@intFromBool(false)) else {
try uleb128(diw, ty.abiSize(pt));
try uleb128(diw, ty.abiAlignment(pt).toByteUnits().?);
try uleb128(diw, ty.abiSize(zcu));
try uleb128(diw, ty.abiAlignment(zcu).toByteUnits().?);
for (0..loaded_struct.field_types.len) |field_index| {
const is_comptime = loaded_struct.fieldIsComptime(ip, field_index);
try wip_nav.abbrevCode(if (is_comptime) .struct_field_comptime else .struct_field);
@ -3057,7 +3057,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
if (!is_comptime) {
try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]);
try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse
field_type.abiAlignment(pt).toByteUnits().?);
field_type.abiAlignment(zcu).toByteUnits().?);
}
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
@ -3074,7 +3074,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
try wip_nav.refType(field_type);
try uleb128(diw, field_bit_offset);
field_bit_offset += @intCast(field_type.bitSize(pt));
field_bit_offset += @intCast(field_type.bitSize(zcu));
}
if (loaded_struct.field_types.len > 0) try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
@ -3099,7 +3099,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
const loaded_union = ip.loadUnionType(type_index);
try wip_nav.abbrevCode(if (loaded_union.field_types.len > 0) .union_type else .empty_union_type);
try wip_nav.strp(name);
const union_layout = pt.getUnionLayout(loaded_union);
const union_layout = Type.getUnionLayout(loaded_union, zcu);
try uleb128(diw, union_layout.abi_size);
try uleb128(diw, union_layout.abi_align.toByteUnits().?);
const loaded_tag = loaded_union.loadTagType(ip);
@ -3130,7 +3130,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
try wip_nav.refType(field_type);
try uleb128(diw, union_layout.payloadOffset());
try uleb128(diw, loaded_union.fieldAlign(ip, field_index).toByteUnits() orelse
if (field_type.isNoReturn(zcu)) 1 else field_type.abiAlignment(pt).toByteUnits().?);
if (field_type.isNoReturn(zcu)) 1 else field_type.abiAlignment(zcu).toByteUnits().?);
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
}
@ -3145,7 +3145,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
const field_type = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
try wip_nav.refType(field_type);
try uleb128(diw, loaded_union.fieldAlign(ip, field_index).toByteUnits() orelse
field_type.abiAlignment(pt).toByteUnits().?);
field_type.abiAlignment(zcu).toByteUnits().?);
}
if (loaded_union.field_types.len > 0) try uleb128(diw, @intFromEnum(AbbrevCode.null));
},

@ -212,7 +212,7 @@ pub fn createEmpty(

const use_lld = build_options.have_llvm and comp.config.use_lld;
const use_llvm = comp.config.use_llvm;
const opt_zcu = comp.module;
const opt_zcu = comp.zcu;
const output_mode = comp.config.output_mode;
const link_mode = comp.config.link_mode;
const optimize_mode = comp.root_mod.optimize_mode;
@ -2084,7 +2084,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s

// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (comp.module != null) blk: {
const module_obj_path: ?[]const u8 = if (comp.zcu != null) blk: {
try self.flushModule(arena, tid, prog_node);

if (fs.path.dirname(full_out_path)) |dirname| {

@ -128,7 +128,7 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void {
pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void {
// Handle any lazy symbols that were emitted by incremental compilation.
if (self.lazy_syms.getPtr(.anyerror_type)) |metadata| {
const pt: Zcu.PerThread = .{ .zcu = elf_file.base.comp.module.?, .tid = tid };
const pt: Zcu.PerThread = .{ .zcu = elf_file.base.comp.zcu.?, .tid = tid };

// Most lazy symbols can be updated on first use, but
// anyerror needs to wait for everything to be flushed.
@ -157,7 +157,7 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
}

if (build_options.enable_logging) {
const pt: Zcu.PerThread = .{ .zcu = elf_file.base.comp.module.?, .tid = tid };
const pt: Zcu.PerThread = .{ .zcu = elf_file.base.comp.zcu.?, .tid = tid };
for (self.navs.keys(), self.navs.values()) |nav_index, meta| {
checkNavAllocated(pt, nav_index, meta);
}
@ -167,7 +167,7 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
}

if (self.dwarf) |*dwarf| {
const pt: Zcu.PerThread = .{ .zcu = elf_file.base.comp.module.?, .tid = tid };
const pt: Zcu.PerThread = .{ .zcu = elf_file.base.comp.zcu.?, .tid = tid };
try dwarf.flushModule(pt);

const gpa = elf_file.base.comp.gpa;
@ -849,7 +849,7 @@ pub fn lowerUav(
const gpa = zcu.gpa;
const val = Value.fromInterned(uav);
const uav_alignment = switch (explicit_alignment) {
.none => val.typeOf(zcu).abiAlignment(pt),
.none => val.typeOf(zcu).abiAlignment(zcu),
else => explicit_alignment,
};
if (self.uavs.get(uav)) |metadata| {
@ -949,7 +949,7 @@ pub fn getOrCreateMetadataForNav(
if (!gop.found_existing) {
const any_non_single_threaded = elf_file.base.comp.config.any_non_single_threaded;
const symbol_index = try self.newSymbolWithAtom(gpa, 0);
const zcu = elf_file.base.comp.module.?;
const zcu = elf_file.base.comp.zcu.?;
const nav_val = Value.fromInterned(zcu.intern_pool.getNav(nav_index).status.resolved.val);
const sym = self.symbol(symbol_index);
if (nav_val.getVariable(zcu)) |variable| {
@ -1306,7 +1306,7 @@ pub fn updateNav(
else => nav.status.resolved.val,
};

if (nav_init != .none and Value.fromInterned(nav_init).typeOf(zcu).hasRuntimeBits(pt)) {
if (nav_init != .none and Value.fromInterned(nav_init).typeOf(zcu).hasRuntimeBits(zcu)) {
const sym_index = try self.getOrCreateMetadataForNav(elf_file, nav_index);
self.symbol(sym_index).atom(elf_file).?.freeRelocs(self);

@ -1382,8 +1382,8 @@ fn updateLazySymbol(
sym: link.File.LazySymbol,
symbol_index: Symbol.Index,
) !void {
const mod = pt.zcu;
const gpa = mod.gpa;
const zcu = pt.zcu;
const gpa = zcu.gpa;

var required_alignment: InternPool.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
@ -1398,7 +1398,7 @@ fn updateLazySymbol(
break :blk try self.strtab.insert(gpa, name);
};

const src = Type.fromInterned(sym.ty).srcLocOrNull(mod) orelse Zcu.LazySrcLoc.unneeded;
const src = Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded;
const res = try codegen.generateLazySymbol(
&elf_file.base,
pt,
@ -1513,7 +1513,7 @@ pub fn updateExports(
const tracy = trace(@src());
defer tracy.end();

const mod = pt.zcu;
const zcu = pt.zcu;
const gpa = elf_file.base.comp.gpa;
const metadata = switch (exported) {
.nav => |nav| blk: {
@ -1521,15 +1521,15 @@ pub fn updateExports(
break :blk self.navs.getPtr(nav).?;
},
.uav => |uav| self.uavs.getPtr(uav) orelse blk: {
const first_exp = mod.all_exports.items[export_indices[0]];
const first_exp = zcu.all_exports.items[export_indices[0]];
const res = try self.lowerUav(elf_file, pt, uav, .none, first_exp.src);
switch (res) {
.mcv => {},
.fail => |em| {
// TODO maybe it's enough to return an error here and let Zcu.processExportsInner
// handle the error?
try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1);
mod.failed_exports.putAssumeCapacityNoClobber(export_indices[0], em);
try zcu.failed_exports.ensureUnusedCapacity(zcu.gpa, 1);
zcu.failed_exports.putAssumeCapacityNoClobber(export_indices[0], em);
return;
},
}
@ -1542,11 +1542,11 @@ pub fn updateExports(
const esym_shndx = self.symtab.items(.shndx)[esym_index];

for (export_indices) |export_idx| {
const exp = mod.all_exports.items[export_idx];
const exp = zcu.all_exports.items[export_idx];
if (exp.opts.section.unwrap()) |section_name| {
if (!section_name.eqlSlice(".text", &mod.intern_pool)) {
try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1);
mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Zcu.ErrorMsg.create(
if (!section_name.eqlSlice(".text", &zcu.intern_pool)) {
try zcu.failed_exports.ensureUnusedCapacity(zcu.gpa, 1);
zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, try Zcu.ErrorMsg.create(
gpa,
exp.src,
"Unimplemented: ExportOptions.section",
@ -1560,8 +1560,8 @@ pub fn updateExports(
.strong => elf.STB_GLOBAL,
.weak => elf.STB_WEAK,
.link_once => {
try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1);
mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Zcu.ErrorMsg.create(
try zcu.failed_exports.ensureUnusedCapacity(zcu.gpa, 1);
zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, try Zcu.ErrorMsg.create(
gpa,
exp.src,
"Unimplemented: GlobalLinkage.LinkOnce",
@ -1571,7 +1571,7 @@ pub fn updateExports(
},
};
const stt_bits: u8 = @as(u4, @truncate(esym.st_info));
const exp_name = exp.opts.name.toSlice(&mod.intern_pool);
const exp_name = exp.opts.name.toSlice(&zcu.intern_pool);
const name_off = try self.strtab.insert(gpa, exp_name);
const global_sym_index = if (metadata.@"export"(self, exp_name)) |exp_index|
exp_index.*
@ -1626,8 +1626,8 @@ pub fn deleteExport(
.nav => |nav| self.navs.getPtr(nav),
.uav => |uav| self.uavs.getPtr(uav),
} orelse return;
const mod = elf_file.base.comp.module.?;
const exp_name = name.toSlice(&mod.intern_pool);
const zcu = elf_file.base.comp.zcu.?;
const exp_name = name.toSlice(&zcu.intern_pool);
const esym_index = metadata.@"export"(self, exp_name) orelse return;
log.debug("deleting export '{s}'", .{exp_name});
const esym = &self.symtab.items(.elf_sym)[esym_index.*];

@ -164,7 +164,7 @@ pub fn createEmpty(

const gpa = comp.gpa;
const use_llvm = comp.config.use_llvm;
const opt_zcu = comp.module;
const opt_zcu = comp.zcu;
const optimize_mode = comp.root_mod.optimize_mode;
const output_mode = comp.config.output_mode;
const link_mode = comp.config.link_mode;
@ -3026,7 +3026,7 @@ pub fn updateNavLineNumber(self: *MachO, pt: Zcu.PerThread, nav: InternPool.NavI
pub fn updateExports(
self: *MachO,
pt: Zcu.PerThread,
exported: Module.Exported,
exported: Zcu.Exported,
export_indices: []const u32,
) link.File.UpdateExportsError!void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@ -3060,7 +3060,7 @@ pub fn lowerUav(
pt: Zcu.PerThread,
uav: InternPool.Index,
explicit_alignment: InternPool.Alignment,
src_loc: Module.LazySrcLoc,
src_loc: Zcu.LazySrcLoc,
) !codegen.GenResult {
return self.getZigObject().?.lowerUav(self, pt, uav, explicit_alignment, src_loc);
}
@ -4634,8 +4634,6 @@ const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const Md5 = std.crypto.hash.Md5;
const Zcu = @import("../Zcu.zig");
/// Deprecated.
const Module = Zcu;
const InternPool = @import("../InternPool.zig");
const Rebase = @import("MachO/dyld_info/Rebase.zig");
pub const Relocation = @import("MachO/Relocation.zig");

@ -566,7 +566,7 @@ pub fn getInputSection(self: ZigObject, atom: Atom, macho_file: *MachO) macho.se
pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) !void {
// Handle any lazy symbols that were emitted by incremental compilation.
if (self.lazy_syms.getPtr(.anyerror_type)) |metadata| {
const pt: Zcu.PerThread = .{ .zcu = macho_file.base.comp.module.?, .tid = tid };
const pt: Zcu.PerThread = .{ .zcu = macho_file.base.comp.zcu.?, .tid = tid };

// Most lazy symbols can be updated on first use, but
// anyerror needs to wait for everything to be flushed.
@ -595,7 +595,7 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id)
}

if (self.dwarf) |*dwarf| {
const pt: Zcu.PerThread = .{ .zcu = macho_file.base.comp.module.?, .tid = tid };
const pt: Zcu.PerThread = .{ .zcu = macho_file.base.comp.zcu.?, .tid = tid };
try dwarf.flushModule(pt);

self.debug_abbrev_dirty = false;
@ -688,7 +688,7 @@ pub fn lowerUav(
const gpa = zcu.gpa;
const val = Value.fromInterned(uav);
const uav_alignment = switch (explicit_alignment) {
.none => val.typeOf(zcu).abiAlignment(pt),
.none => val.typeOf(zcu).abiAlignment(zcu),
else => explicit_alignment,
};
if (self.uavs.get(uav)) |metadata| {
@ -887,7 +887,7 @@ pub fn updateNav(
else => nav.status.resolved.val,
};

if (nav_init != .none and Value.fromInterned(nav_init).typeOf(zcu).hasRuntimeBits(pt)) {
if (nav_init != .none and Value.fromInterned(nav_init).typeOf(zcu).hasRuntimeBits(zcu)) {
const sym_index = try self.getOrCreateMetadataForNav(macho_file, nav_index);
self.symbols.items[sym_index].getAtom(macho_file).?.freeRelocs(macho_file);

@ -1256,7 +1256,7 @@ pub fn updateExports(
const tracy = trace(@src());
defer tracy.end();

const mod = pt.zcu;
const zcu = pt.zcu;
const gpa = macho_file.base.comp.gpa;
const metadata = switch (exported) {
.nav => |nav| blk: {
@ -1264,15 +1264,15 @@ pub fn updateExports(
break :blk self.navs.getPtr(nav).?;
},
.uav => |uav| self.uavs.getPtr(uav) orelse blk: {
const first_exp = mod.all_exports.items[export_indices[0]];
const first_exp = zcu.all_exports.items[export_indices[0]];
const res = try self.lowerUav(macho_file, pt, uav, .none, first_exp.src);
switch (res) {
.mcv => {},
.fail => |em| {
// TODO maybe it's enough to return an error here and let Zcu.processExportsInner
// handle the error?
try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1);
mod.failed_exports.putAssumeCapacityNoClobber(export_indices[0], em);
try zcu.failed_exports.ensureUnusedCapacity(zcu.gpa, 1);
zcu.failed_exports.putAssumeCapacityNoClobber(export_indices[0], em);
return;
},
}
@ -1284,11 +1284,11 @@ pub fn updateExports(
const nlist = self.symtab.items(.nlist)[nlist_idx];

for (export_indices) |export_idx| {
const exp = mod.all_exports.items[export_idx];
const exp = zcu.all_exports.items[export_idx];
if (exp.opts.section.unwrap()) |section_name| {
if (!section_name.eqlSlice("__text", &mod.intern_pool)) {
try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1);
mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Zcu.ErrorMsg.create(
if (!section_name.eqlSlice("__text", &zcu.intern_pool)) {
try zcu.failed_exports.ensureUnusedCapacity(zcu.gpa, 1);
zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, try Zcu.ErrorMsg.create(
gpa,
exp.src,
"Unimplemented: ExportOptions.section",
@ -1298,7 +1298,7 @@ pub fn updateExports(
}
}
if (exp.opts.linkage == .link_once) {
try mod.failed_exports.putNoClobber(mod.gpa, export_idx, try Zcu.ErrorMsg.create(
try zcu.failed_exports.putNoClobber(zcu.gpa, export_idx, try Zcu.ErrorMsg.create(
gpa,
exp.src,
"Unimplemented: GlobalLinkage.link_once",
@ -1307,7 +1307,7 @@ pub fn updateExports(
continue;
}

const exp_name = exp.opts.name.toSlice(&mod.intern_pool);
const exp_name = exp.opts.name.toSlice(&zcu.intern_pool);
const global_nlist_index = if (metadata.@"export"(self, exp_name)) |exp_index|
exp_index.*
else blk: {
@ -1437,15 +1437,15 @@ pub fn deleteExport(
exported: Zcu.Exported,
name: InternPool.NullTerminatedString,
) void {
const mod = macho_file.base.comp.module.?;
const zcu = macho_file.base.comp.zcu.?;

const metadata = switch (exported) {
.nav => |nav| self.navs.getPtr(nav),
.uav => |uav| self.uavs.getPtr(uav),
} orelse return;
const nlist_index = metadata.@"export"(self, name.toSlice(&mod.intern_pool)) orelse return;
const nlist_index = metadata.@"export"(self, name.toSlice(&zcu.intern_pool)) orelse return;

log.debug("deleting export '{}'", .{name.fmt(&mod.intern_pool)});
log.debug("deleting export '{}'", .{name.fmt(&zcu.intern_pool)});

const nlist = &self.symtab.items(.nlist)[nlist_index.*];
self.symtab.items(.size)[nlist_index.*] = 0;
@ -1545,7 +1545,7 @@ pub fn getOrCreateMetadataForLazySymbol(
fn isThreadlocal(macho_file: *MachO, nav_index: InternPool.Nav.Index) bool {
if (!macho_file.base.comp.config.any_non_single_threaded)
return false;
const ip = &macho_file.base.comp.module.?.intern_pool;
const ip = &macho_file.base.comp.zcu.?.intern_pool;
return switch (ip.indexToKey(ip.getNav(nav_index).status.resolved.val)) {
.variable => |variable| variable.is_threadlocal,
.@"extern" => |@"extern"| @"extern".is_threadlocal,

@ -152,7 +152,7 @@ pub const Atom = struct {
return .{ .code_ptr = slice.ptr, .other = .{ .code_len = slice.len } };
}
fn getCode(self: CodePtr, plan9: *const Plan9) []u8 {
const zcu = plan9.base.comp.module.?;
const zcu = plan9.base.comp.zcu.?;
const ip = &zcu.intern_pool;
return if (self.code_ptr) |p| p[0..self.other.code_len] else blk: {
const nav_index = self.other.nav_index;
@ -317,8 +317,8 @@ pub fn createEmpty(

fn putFn(self: *Plan9, nav_index: InternPool.Nav.Index, out: FnNavOutput) !void {
const gpa = self.base.comp.gpa;
const mod = self.base.comp.module.?;
const file_scope = mod.navFileScopeIndex(nav_index);
const zcu = self.base.comp.zcu.?;
const file_scope = zcu.navFileScopeIndex(nav_index);
const fn_map_res = try self.fn_nav_table.getOrPut(gpa, file_scope);
if (fn_map_res.found_existing) {
if (try fn_map_res.value_ptr.functions.fetchPut(gpa, nav_index, out)) |old_entry| {
@ -326,7 +326,7 @@ fn putFn(self: *Plan9, nav_index: InternPool.Nav.Index, out: FnNavOutput) !void
gpa.free(old_entry.value.lineinfo);
}
} else {
const file = mod.fileByIndex(file_scope);
const file = zcu.fileByIndex(file_scope);
const arena = self.path_arena.allocator();
// each file gets a symbol
fn_map_res.value_ptr.* = .{
@ -391,10 +391,10 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index,
@panic("Attempted to compile for object format that was disabled by build configuration");
}

const mod = pt.zcu;
const gpa = mod.gpa;
const zcu = pt.zcu;
const gpa = zcu.gpa;
const target = self.base.comp.root_mod.resolved_target.result;
const func = mod.funcInfo(func_index);
const func = zcu.funcInfo(func_index);

const atom_idx = try self.seeNav(pt, func.owner_nav);

@ -413,7 +413,7 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index,
const res = try codegen.generateFunction(
&self.base,
pt,
mod.navSrcLoc(func.owner_nav),
zcu.navSrcLoc(func.owner_nav),
func_index,
air,
liveness,
@ -423,7 +423,7 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index,
const code = switch (res) {
.ok => try code_buffer.toOwnedSlice(),
.fail => |em| {
try mod.failed_codegen.put(gpa, func.owner_nav, em);
try zcu.failed_codegen.put(gpa, func.owner_nav, em);
return;
},
};
@ -457,7 +457,7 @@ pub fn updateNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Inde
else => nav_val,
};

if (nav_init.typeOf(zcu).hasRuntimeBits(pt)) {
if (nav_init.typeOf(zcu).hasRuntimeBits(zcu)) {
const atom_idx = try self.seeNav(pt, nav_index);

var code_buffer = std.ArrayList(u8).init(gpa);
@ -607,7 +607,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
defer assert(self.hdr.entry != 0x0);

const pt: Zcu.PerThread = .{
.zcu = self.base.comp.module orelse return error.LinkingWithoutZigSourceUnimplemented,
.zcu = self.base.comp.zcu orelse return error.LinkingWithoutZigSourceUnimplemented,
.tid = tid,
};

@ -952,11 +952,11 @@ pub fn freeDecl(self: *Plan9, decl_index: InternPool.DeclIndex) void {
const gpa = self.base.comp.gpa;
// TODO audit the lifetimes of decls table entries. It's possible to get
// freeDecl without any updateDecl in between.
const mod = self.base.comp.module.?;
const decl = mod.declPtr(decl_index);
const is_fn = decl.val.isFuncBody(mod);
const zcu = self.base.comp.zcu.?;
const decl = zcu.declPtr(decl_index);
const is_fn = decl.val.isFuncBody(zcu);
if (is_fn) {
const symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(mod)).?;
const symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(zcu)).?;
var submap = symidx_and_submap.functions;
if (submap.fetchSwapRemove(decl_index)) |removed_entry| {
gpa.free(removed_entry.value.code);
@ -1256,8 +1256,8 @@ pub fn writeSym(self: *Plan9, w: anytype, sym: aout.Sym) !void {
}

pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
const mod = self.base.comp.module.?;
const ip = &mod.intern_pool;
const zcu = self.base.comp.zcu.?;
const ip = &zcu.intern_pool;
const writer = buf.writer();
// write __GOT
try self.writeSym(writer, self.syms.items[0]);
@ -1284,7 +1284,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
try self.writeSym(writer, sym);
if (self.nav_exports.get(nav_index)) |export_indices| {
for (export_indices) |export_idx| {
const exp = mod.all_exports.items[export_idx];
const exp = zcu.all_exports.items[export_idx];
if (nav_metadata.getExport(self, exp.opts.name.toSlice(ip))) |exp_i| {
try self.writeSym(writer, self.syms.items[exp_i]);
}
@ -1323,7 +1323,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
try self.writeSym(writer, sym);
if (self.nav_exports.get(nav_index)) |export_indices| {
for (export_indices) |export_idx| {
const exp = mod.all_exports.items[export_idx];
const exp = zcu.all_exports.items[export_idx];
if (nav_metadata.getExport(self, exp.opts.name.toSlice(ip))) |exp_i| {
const s = self.syms.items[exp_i];
if (mem.eql(u8, s.name, "_start"))

@ -229,7 +229,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
defer error_info.deinit();

try error_info.appendSlice("zig_errors:");
const ip = &self.base.comp.module.?.intern_pool;
const ip = &self.base.comp.zcu.?.intern_pool;
for (ip.global_error_set.getNamesFromMainThread()) |name| {
// Errors can contain pretty much any character - to encode them in a string we must escape
// them somehow. Easiest here is to use some established scheme, one which also preseves the

@ -556,7 +556,7 @@ pub fn createEmpty(
}
}

if (comp.module) |zcu| {
if (comp.zcu) |zcu| {
if (!use_llvm) {
const index: File.Index = @enumFromInt(wasm.files.len);
var zig_object: ZigObject = .{
@ -3352,7 +3352,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:

// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (comp.module != null) blk: {
const module_obj_path: ?[]const u8 = if (comp.zcu != null) blk: {
try wasm.flushModule(arena, tid, prog_node);

if (fs.path.dirname(full_out_path)) |dirname| {

@ -259,7 +259,7 @@ pub fn updateNav(
else => .{ false, .none, nav_val },
};

if (nav_init.typeOf(zcu).hasRuntimeBits(pt)) {
if (nav_init.typeOf(zcu).hasRuntimeBits(zcu)) {
const gpa = wasm_file.base.comp.gpa;
const atom_index = try zig_object.getOrCreateAtomForNav(wasm_file, pt, nav_index);
const atom = wasm_file.getAtomPtr(atom_index);
@ -487,9 +487,9 @@ fn lowerConst(
src_loc: Zcu.LazySrcLoc,
) !LowerConstResult {
const gpa = wasm_file.base.comp.gpa;
const mod = wasm_file.base.comp.module.?;
const zcu = wasm_file.base.comp.zcu.?;

const ty = val.typeOf(mod);
const ty = val.typeOf(zcu);

// Create and initialize a new local symbol and atom
const sym_index = try zig_object.allocateSymbol(gpa);
@ -499,7 +499,7 @@ fn lowerConst(

const code = code: {
const atom = wasm_file.getAtomPtr(atom_index);
atom.alignment = ty.abiAlignment(pt);
atom.alignment = ty.abiAlignment(zcu);
const segment_name = try std.mem.concat(gpa, u8, &.{ ".rodata.", name });
errdefer gpa.free(segment_name);
zig_object.symbol(sym_index).* = .{
@ -509,7 +509,7 @@ fn lowerConst(
.index = try zig_object.createDataSegment(
gpa,
segment_name,
ty.abiAlignment(pt),
ty.abiAlignment(zcu),
),
.virtual_address = undefined,
};
@ -555,7 +555,7 @@ pub fn getErrorTableSymbol(zig_object: *ZigObject, wasm_file: *Wasm, pt: Zcu.Per
const atom_index = try wasm_file.createAtom(sym_index, zig_object.index);
const atom = wasm_file.getAtomPtr(atom_index);
const slice_ty = Type.slice_const_u8_sentinel_0;
atom.alignment = slice_ty.abiAlignment(pt);
atom.alignment = slice_ty.abiAlignment(pt.zcu);

const sym_name = try zig_object.string_table.insert(gpa, "__zig_err_name_table");
const segment_name = try gpa.dupe(u8, ".rodata.__zig_err_name_table");
@ -604,14 +604,14 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm, tid: Zcu.Per

// Addend for each relocation to the table
var addend: u32 = 0;
const pt: Zcu.PerThread = .{ .zcu = wasm_file.base.comp.module.?, .tid = tid };
const pt: Zcu.PerThread = .{ .zcu = wasm_file.base.comp.zcu.?, .tid = tid };
const slice_ty = Type.slice_const_u8_sentinel_0;
const atom = wasm_file.getAtomPtr(atom_index);
{
// TODO: remove this unreachable entry
try atom.code.appendNTimes(gpa, 0, 4);
try atom.code.writer(gpa).writeInt(u32, 0, .little);
atom.size += @intCast(slice_ty.abiSize(pt));
atom.size += @intCast(slice_ty.abiSize(pt.zcu));
addend += 1;

try names_atom.code.append(gpa, 0);
@ -632,7 +632,7 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm, tid: Zcu.Per
.offset = offset,
.addend = @intCast(addend),
});
atom.size += @intCast(slice_ty.abiSize(pt));
atom.size += @intCast(slice_ty.abiSize(pt.zcu));
addend += len;

// as we updated the error name table, we now store the actual name within the names atom
@ -803,9 +803,9 @@ pub fn getUavVAddr(
const parent_atom_index = wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = @enumFromInt(reloc_info.parent_atom_index) }).?;
const parent_atom = wasm_file.getAtomPtr(parent_atom_index);
const is_wasm32 = target.cpu.arch == .wasm32;
const mod = wasm_file.base.comp.module.?;
const ty = Type.fromInterned(mod.intern_pool.typeOf(uav));
if (ty.zigTypeTag(mod) == .Fn) {
const zcu = wasm_file.base.comp.zcu.?;
const ty = Type.fromInterned(zcu.intern_pool.typeOf(uav));
if (ty.zigTypeTag(zcu) == .Fn) {
std.debug.assert(reloc_info.addend == 0); // addend not allowed for function relocations
try parent_atom.relocs.append(gpa, .{
.index = target_symbol_index,
@ -834,13 +834,13 @@ pub fn deleteExport(
exported: Zcu.Exported,
name: InternPool.NullTerminatedString,
) void {
const mod = wasm_file.base.comp.module.?;
const zcu = wasm_file.base.comp.zcu.?;
const nav_index = switch (exported) {
.nav => |nav_index| nav_index,
.uav => @panic("TODO: implement Wasm linker code for exporting a constant value"),
};
const nav_info = zig_object.navs.getPtr(nav_index) orelse return;
if (nav_info.@"export"(zig_object, name.toSlice(&mod.intern_pool))) |sym_index| {
if (nav_info.@"export"(zig_object, name.toSlice(&zcu.intern_pool))) |sym_index| {
const sym = zig_object.symbol(sym_index);
nav_info.deleteExport(sym_index);
std.debug.assert(zig_object.global_syms.remove(sym.name));
@ -930,8 +930,8 @@ pub fn updateExports(

pub fn freeNav(zig_object: *ZigObject, wasm_file: *Wasm, nav_index: InternPool.Nav.Index) void {
const gpa = wasm_file.base.comp.gpa;
const mod = wasm_file.base.comp.module.?;
const ip = &mod.intern_pool;
const zcu = wasm_file.base.comp.zcu.?;
const ip = &zcu.intern_pool;
const nav_info = zig_object.navs.getPtr(nav_index).?;
const atom_index = nav_info.atom;
const atom = wasm_file.getAtomPtr(atom_index);
@ -956,7 +956,7 @@ pub fn freeNav(zig_object: *ZigObject, wasm_file: *Wasm, nav_index: InternPool.N
segment.name = &.{}; // Ensure no accidental double free
}

const nav_val = mod.navValue(nav_index).toIntern();
const nav_val = zcu.navValue(nav_index).toIntern();
if (ip.indexToKey(nav_val) == .@"extern") {
std.debug.assert(zig_object.imports.remove(atom.sym_index));
}
@ -1016,7 +1016,7 @@ fn setupErrorsLen(zig_object: *ZigObject, wasm_file: *Wasm) !void {
const gpa = wasm_file.base.comp.gpa;
const sym_index = zig_object.findGlobalSymbol("__zig_errors_len") orelse return;

const errors_len = 1 + wasm_file.base.comp.module.?.intern_pool.global_error_set.getNamesFromMainThread().len;
const errors_len = 1 + wasm_file.base.comp.zcu.?.intern_pool.global_error_set.getNamesFromMainThread().len;
// overwrite existing atom if it already exists (maybe the error set has increased)
// if not, allocate a new atom.
const atom_index = if (wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = sym_index })) |index| blk: {

@ -223,7 +223,7 @@ pub const MutableValue = union(enum) {
@memset(elems[0..@intCast(len_no_sent)], .{ .interned = undef_elem });
},
.Struct => for (elems[0..@intCast(len_no_sent)], 0..) |*mut_elem, i| {
const field_ty = ty.structFieldType(i, zcu).toIntern();
const field_ty = ty.fieldType(i, zcu).toIntern();
mut_elem.* = .{ .interned = try pt.intern(.{ .undef = field_ty }) };
},
else => unreachable,
@ -369,7 +369,7 @@ pub const MutableValue = union(enum) {
.bytes => |b| {
assert(is_trivial_int);
assert(field_val.typeOf(zcu).toIntern() == .u8_type);
b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(pt));
b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
},
.repeated => |r| {
if (field_val.eqlTrivial(r.child.*)) return;
@ -382,9 +382,9 @@ pub const MutableValue = union(enum) {
{
// We can use the `bytes` representation.
const bytes = try arena.alloc(u8, @intCast(len_inc_sent));
const repeated_byte = Value.fromInterned(r.child.interned).toUnsignedInt(pt);
const repeated_byte = Value.fromInterned(r.child.interned).toUnsignedInt(zcu);
@memset(bytes, @intCast(repeated_byte));
bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(pt));
bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
mv.* = .{ .bytes = .{
.ty = r.ty,
.data = bytes,
@ -431,7 +431,7 @@ pub const MutableValue = union(enum) {
} else {
const bytes = try arena.alloc(u8, a.elems.len);
for (a.elems, bytes) |elem_val, *b| {
b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(pt));
b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(zcu));
}
mv.* = .{ .bytes = .{
.ty = a.ty,

@ -428,10 +428,10 @@ const Writer = struct {
}

fn writeAggregateInit(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const mod = w.pt.zcu;
const zcu = w.pt.zcu;
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const vector_ty = ty_pl.ty.toType();
const len = @as(usize, @intCast(vector_ty.arrayLen(mod)));
const len = @as(usize, @intCast(vector_ty.arrayLen(zcu)));
const elements = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[ty_pl.payload..][0..len]));

try w.writeType(s, vector_ty);
@ -508,11 +508,11 @@ const Writer = struct {
}

fn writeSelect(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const mod = w.pt.zcu;
const zcu = w.pt.zcu;
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = w.air.extraData(Air.Bin, pl_op.payload).data;

const elem_ty = w.typeOfIndex(inst).childType(mod);
const elem_ty = w.typeOfIndex(inst).childType(zcu);
try w.writeType(s, elem_ty);
try s.writeAll(", ");
try w.writeOperand(s, inst, 0, pl_op.operand);
@ -974,7 +974,7 @@ const Writer = struct {
}

fn typeOfIndex(w: *Writer, inst: Air.Inst.Index) Type {
const mod = w.pt.zcu;
return w.air.typeOfIndex(inst, &mod.intern_pool);
const zcu = w.pt.zcu;
return w.air.typeOfIndex(inst, &zcu.intern_pool);
}
};

@ -62,8 +62,8 @@ pub fn print(
comptime have_sema: bool,
sema: if (have_sema) *Sema else void,
) (@TypeOf(writer).Error || Zcu.CompileError)!void {
const mod = pt.zcu;
const ip = &mod.intern_pool;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (ip.indexToKey(val.toIntern())) {
.int_type,
.ptr_type,
@ -95,11 +95,11 @@ pub fn print(
.int => |int| switch (int.storage) {
inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}),
.lazy_align => |ty| if (have_sema) {
const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(pt, .sema)).scalar;
const a = try Type.fromInterned(ty).abiAlignmentSema(pt);
try writer.print("{}", .{a.toByteUnits() orelse 0});
} else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(pt)}),
.lazy_size => |ty| if (have_sema) {
const s = (try Type.fromInterned(ty).abiSizeAdvanced(pt, .sema)).scalar;
const s = try Type.fromInterned(ty).abiSizeSema(pt);
try writer.print("{}", .{s});
} else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(pt)}),
},
@ -116,7 +116,7 @@ pub fn print(
enum_literal.fmt(ip),
}),
.enum_tag => |enum_tag| {
const enum_type = ip.loadEnumType(val.typeOf(mod).toIntern());
const enum_type = ip.loadEnumType(val.typeOf(zcu).toIntern());
if (enum_type.tagValueIndex(ip, val.toIntern())) |tag_index| {
return writer.print(".{i}", .{enum_type.names.get(ip)[tag_index].fmt(ip)});
}
@ -173,7 +173,7 @@ pub fn print(
return;
}
if (un.tag == .none) {
const backing_ty = try val.typeOf(mod).unionBackingType(pt);
const backing_ty = try val.typeOf(zcu).unionBackingType(pt);
try writer.print("@bitCast(@as({}, ", .{backing_ty.fmt(pt)});
try print(Value.fromInterned(un.val), writer, level - 1, pt, have_sema, sema);
try writer.writeAll("))");
@ -245,7 +245,7 @@ fn printAggregate(
if (ty.childType(zcu).toIntern() != .u8_type) break :one_byte_str;
const elem_val = Value.fromInterned(aggregate.storage.values()[0]);
if (elem_val.isUndef(zcu)) break :one_byte_str;
const byte = elem_val.toUnsignedInt(pt);
const byte = elem_val.toUnsignedInt(zcu);
try writer.print("\"{}\"", .{std.zig.fmtEscapes(&.{@intCast(byte)})});
if (!is_ref) try writer.writeAll(".*");
return;

@ -526,7 +526,11 @@ pub fn zigBackend(target: std.Target, use_llvm: bool) std.builtin.CompilerBacken
pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, comptime feature: Feature) bool {
return switch (feature) {
.panic_fn => switch (backend) {
.stage2_c, .stage2_llvm, .stage2_x86_64, .stage2_riscv64 => true,
.stage2_c,
.stage2_llvm,
.stage2_x86_64,
.stage2_riscv64,
=> true,
else => false,
},
.panic_unwrap_error => switch (backend) {