Merge pull request #20593 from jacobly0/more-races

InternPool: fix more races

commit d404d8a363
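The hunks below follow a single pattern: code that used to read or mutate InternPool state through a raw pointer (flagsPtr(ip).*, funcAnalysis(...).state = ..., tagTypePtr(ip).*) now goes through paired accessors (flagsUnordered, setStatus, funcAnalysisUnordered, funcSetAnalysisState, and so on), so that a thread can never observe a torn value or hold a pointer while another thread mutates the underlying storage. The following Zig sketch shows the shape of such an accessor pair; it is illustrative only, assuming a standalone flag word rather than the real InternPool storage:

    const std = @import("std");

    const Flags = packed struct(u32) {
        fully_resolved: bool = false,
        _reserved: u31 = 0,
    };

    // Shared, bit-packed flag word plus the mutex that serializes writers.
    var flags_raw: u32 = 0;
    var flags_mutex: std.Thread.Mutex = .{};

    /// Reader side: one atomic load; no pointer into shared memory escapes.
    fn flagsUnordered() Flags {
        return @bitCast(@atomicLoad(u32, &flags_raw, .unordered));
    }

    /// Writer side: read-modify-write under the mutex, publish atomically.
    fn setFullyResolved() void {
        flags_mutex.lock();
        defer flags_mutex.unlock();
        var f: Flags = @bitCast(flags_raw);
        f.fully_resolved = true;
        @atomicStore(u32, &flags_raw, @bitCast(f), .release);
    }

The "Unordered" suffix warns callers that the load makes no ordering promises about other memory, so it is only used where a slightly stale value is acceptable.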
@@ -501,8 +501,8 @@ fn checkType(ty: Type, zcu: *Zcu) bool {
         .struct_type => {
             const struct_obj = zcu.typeToStruct(ty).?;
             return switch (struct_obj.layout) {
-                .@"packed" => struct_obj.backingIntType(ip).* != .none,
-                .auto, .@"extern" => struct_obj.flagsPtr(ip).fully_resolved,
+                .@"packed" => struct_obj.backingIntTypeUnordered(ip) != .none,
+                .auto, .@"extern" => struct_obj.flagsUnordered(ip).fully_resolved,
             };
         },
         .anon_struct_type => |tuple| {
@@ -516,6 +516,6 @@ fn checkType(ty: Type, zcu: *Zcu) bool {
             },
             else => unreachable,
         },
-        .Union => return zcu.typeToUnion(ty).?.flagsPtr(ip).status == .fully_resolved,
+        .Union => return zcu.typeToUnion(ty).?.flagsUnordered(ip).status == .fully_resolved,
     };
 }
@@ -101,7 +101,15 @@ link_error_flags: link.File.ErrorFlags = .{},
 link_errors: std.ArrayListUnmanaged(link.File.ErrorMsg) = .{},
 lld_errors: std.ArrayListUnmanaged(LldError) = .{},
 
-work_queue: std.fifo.LinearFifo(Job, .Dynamic),
+work_queues: [
+    len: {
+        var len: usize = 0;
+        for (std.enums.values(Job.Tag)) |tag| {
+            len = @max(Job.stage(tag) + 1, len);
+        }
+        break :len len;
+    }
+]std.fifo.LinearFifo(Job, .Dynamic),
 
 codegen_work: if (InternPool.single_threaded) void else struct {
     mutex: std.Thread.Mutex,
@@ -370,6 +378,20 @@ const Job = union(enum) {
 
     /// The value is the index into `system_libs`.
     windows_import_lib: usize,
+
+    const Tag = @typeInfo(Job).Union.tag_type.?;
+    fn stage(tag: Tag) usize {
+        return switch (tag) {
+            // Prioritize functions so that codegen can get to work on them on a
+            // separate thread, while Sema goes back to its own work.
+            .resolve_type_fully, .analyze_func, .codegen_func => 0,
+            else => 1,
+        };
+    }
+    comptime {
+        // Job dependencies
+        assert(stage(.resolve_type_fully) <= stage(.codegen_func));
+    }
 };
 
 const CodegenJob = union(enum) {
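The single work_queue has become an array of queues indexed by Job.stage, so that stage-0 jobs (type resolution, function analysis, codegen) are always drained first. A reduced sketch of the comptime sizing with a toy Job type (the real field type is std.fifo.LinearFifo(Job, .Dynamic)):

    const std = @import("std");

    const Job = union(enum) {
        analyze_func: u32,
        codegen_func: u32,
        windows_import_lib: usize,

        const Tag = @typeInfo(Job).Union.tag_type.?;

        fn stage(tag: Tag) usize {
            return switch (tag) {
                .analyze_func, .codegen_func => 0,
                else => 1,
            };
        }
    };

    // One queue per stage: the array length is the highest stage index
    // plus one, computed at comptime by scanning every tag.
    const queue_count = blk: {
        var len: usize = 0;
        for (std.enums.values(Job.Tag)) |tag| len = @max(Job.stage(tag) + 1, len);
        break :blk len;
    };

    comptime {
        std.debug.assert(queue_count == 2);
    }

queueJob, added further down in this diff, then indexes work_queues with Job.stage(job), relying on Zig's implicit coercion from a union value to its tag enum.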
@@ -1452,7 +1474,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
         .emit_asm = options.emit_asm,
         .emit_llvm_ir = options.emit_llvm_ir,
         .emit_llvm_bc = options.emit_llvm_bc,
-        .work_queue = std.fifo.LinearFifo(Job, .Dynamic).init(gpa),
+        .work_queues = .{std.fifo.LinearFifo(Job, .Dynamic).init(gpa)} ** @typeInfo(std.meta.FieldType(Compilation, .work_queues)).Array.len,
         .codegen_work = if (InternPool.single_threaded) {} else .{
             .mutex = .{},
             .cond = .{},
@@ -1760,12 +1782,12 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
             if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
 
             if (glibc.needsCrtiCrtn(target)) {
-                try comp.work_queue.write(&[_]Job{
+                try comp.queueJobs(&[_]Job{
                     .{ .glibc_crt_file = .crti_o },
                     .{ .glibc_crt_file = .crtn_o },
                 });
             }
-            try comp.work_queue.write(&[_]Job{
+            try comp.queueJobs(&[_]Job{
                 .{ .glibc_crt_file = .scrt1_o },
                 .{ .glibc_crt_file = .libc_nonshared_a },
                 .{ .glibc_shared_objects = {} },
@@ -1774,14 +1796,13 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
         if (comp.wantBuildMuslFromSource()) {
             if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
 
-            try comp.work_queue.ensureUnusedCapacity(6);
             if (musl.needsCrtiCrtn(target)) {
-                comp.work_queue.writeAssumeCapacity(&[_]Job{
+                try comp.queueJobs(&[_]Job{
                     .{ .musl_crt_file = .crti_o },
                     .{ .musl_crt_file = .crtn_o },
                 });
             }
-            comp.work_queue.writeAssumeCapacity(&[_]Job{
+            try comp.queueJobs(&[_]Job{
                 .{ .musl_crt_file = .crt1_o },
                 .{ .musl_crt_file = .scrt1_o },
                 .{ .musl_crt_file = .rcrt1_o },
@@ -1795,15 +1816,12 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
         if (comp.wantBuildWasiLibcFromSource()) {
             if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
 
-            // worst-case we need all components
-            try comp.work_queue.ensureUnusedCapacity(comp.wasi_emulated_libs.len + 2);
-
             for (comp.wasi_emulated_libs) |crt_file| {
-                comp.work_queue.writeItemAssumeCapacity(.{
+                try comp.queueJob(.{
                     .wasi_libc_crt_file = crt_file,
                 });
             }
-            comp.work_queue.writeAssumeCapacity(&[_]Job{
+            try comp.queueJobs(&[_]Job{
                 .{ .wasi_libc_crt_file = wasi_libc.execModelCrtFile(comp.config.wasi_exec_model) },
                 .{ .wasi_libc_crt_file = .libc_a },
             });
@@ -1813,9 +1831,10 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
             if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
 
             const crt_job: Job = .{ .mingw_crt_file = if (is_dyn_lib) .dllcrt2_o else .crt2_o };
-            try comp.work_queue.ensureUnusedCapacity(2);
-            comp.work_queue.writeItemAssumeCapacity(.{ .mingw_crt_file = .mingw32_lib });
-            comp.work_queue.writeItemAssumeCapacity(crt_job);
+            try comp.queueJobs(&.{
+                .{ .mingw_crt_file = .mingw32_lib },
+                crt_job,
+            });
 
             // When linking mingw-w64 there are some import libs we always need.
             for (mingw.always_link_libs) |name| {
@@ -1829,20 +1848,19 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
         // Generate Windows import libs.
         if (target.os.tag == .windows) {
             const count = comp.system_libs.count();
-            try comp.work_queue.ensureUnusedCapacity(count);
             for (0..count) |i| {
-                comp.work_queue.writeItemAssumeCapacity(.{ .windows_import_lib = i });
+                try comp.queueJob(.{ .windows_import_lib = i });
             }
         }
         if (comp.wantBuildLibUnwindFromSource()) {
-            try comp.work_queue.writeItem(.{ .libunwind = {} });
+            try comp.queueJob(.{ .libunwind = {} });
         }
         if (build_options.have_llvm and is_exe_or_dyn_lib and comp.config.link_libcpp) {
-            try comp.work_queue.writeItem(.libcxx);
-            try comp.work_queue.writeItem(.libcxxabi);
+            try comp.queueJob(.libcxx);
+            try comp.queueJob(.libcxxabi);
         }
         if (build_options.have_llvm and comp.config.any_sanitize_thread) {
-            try comp.work_queue.writeItem(.libtsan);
+            try comp.queueJob(.libtsan);
         }
 
         if (target.isMinGW() and comp.config.any_non_single_threaded) {
@@ -1872,7 +1890,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
         if (!comp.skip_linker_dependencies and is_exe_or_dyn_lib and
             !comp.config.link_libc and capable_of_building_zig_libc)
         {
-            try comp.work_queue.writeItem(.{ .zig_libc = {} });
+            try comp.queueJob(.{ .zig_libc = {} });
         }
     }
 
@@ -1883,7 +1901,7 @@ pub fn destroy(comp: *Compilation) void {
     if (comp.bin_file) |lf| lf.destroy();
     if (comp.module) |zcu| zcu.deinit();
     comp.cache_use.deinit();
-    comp.work_queue.deinit();
+    for (comp.work_queues) |work_queue| work_queue.deinit();
     if (!InternPool.single_threaded) comp.codegen_work.queue.deinit();
     comp.c_object_work_queue.deinit();
     if (!build_options.only_core_functionality) {
@@ -2199,13 +2217,13 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
             }
         }
 
-        try comp.work_queue.writeItem(.{ .analyze_mod = std_mod });
+        try comp.queueJob(.{ .analyze_mod = std_mod });
         if (comp.config.is_test) {
-            try comp.work_queue.writeItem(.{ .analyze_mod = zcu.main_mod });
+            try comp.queueJob(.{ .analyze_mod = zcu.main_mod });
         }
 
         if (zcu.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| {
-            try comp.work_queue.writeItem(.{ .analyze_mod = compiler_rt_mod });
+            try comp.queueJob(.{ .analyze_mod = compiler_rt_mod });
         }
     }
 
@@ -2852,11 +2870,7 @@ pub fn makeBinFileWritable(comp: *Compilation) !void {
 
 const Header = extern struct {
     intern_pool: extern struct {
-        //items_len: u32,
-        //extra_len: u32,
-        //limbs_len: u32,
-        //string_bytes_len: u32,
-        //tracked_insts_len: u32,
+        thread_count: u32,
         src_hash_deps_len: u32,
         decl_val_deps_len: u32,
         namespace_deps_len: u32,
@@ -2864,28 +2878,39 @@ const Header = extern struct {
         first_dependency_len: u32,
         dep_entries_len: u32,
         free_dep_entries_len: u32,
-        //files_len: u32,
     },
+
+    const PerThread = extern struct {
+        intern_pool: extern struct {
+            items_len: u32,
+            extra_len: u32,
+            limbs_len: u32,
+            string_bytes_len: u32,
+            tracked_insts_len: u32,
+            files_len: u32,
+        },
+    };
 };
 
 /// Note that all state that is included in the cache hash namespace is *not*
 /// saved, such as the target and most CLI flags. A cache hit will only occur
 /// when subsequent compiler invocations use the same set of flags.
 pub fn saveState(comp: *Compilation) !void {
-    var bufs_list: [21]std.posix.iovec_const = undefined;
-    var bufs_len: usize = 0;
-
     const lf = comp.bin_file orelse return;
 
+    const gpa = comp.gpa;
+
+    var bufs = std.ArrayList(std.posix.iovec_const).init(gpa);
+    defer bufs.deinit();
+
+    var pt_headers = std.ArrayList(Header.PerThread).init(gpa);
+    defer pt_headers.deinit();
+
     if (comp.module) |zcu| {
         const ip = &zcu.intern_pool;
         const header: Header = .{
             .intern_pool = .{
-                //.items_len = @intCast(ip.items.len),
-                //.extra_len = @intCast(ip.extra.items.len),
-                //.limbs_len = @intCast(ip.limbs.items.len),
-                //.string_bytes_len = @intCast(ip.string_bytes.items.len),
-                //.tracked_insts_len = @intCast(ip.tracked_insts.count()),
+                .thread_count = @intCast(ip.locals.len),
                 .src_hash_deps_len = @intCast(ip.src_hash_deps.count()),
                 .decl_val_deps_len = @intCast(ip.decl_val_deps.count()),
                 .namespace_deps_len = @intCast(ip.namespace_deps.count()),
@@ -2893,38 +2918,54 @@ pub fn saveState(comp: *Compilation) !void {
                 .first_dependency_len = @intCast(ip.first_dependency.count()),
                 .dep_entries_len = @intCast(ip.dep_entries.items.len),
                 .free_dep_entries_len = @intCast(ip.free_dep_entries.items.len),
-                //.files_len = @intCast(ip.files.entries.len),
             },
         };
-        addBuf(&bufs_list, &bufs_len, mem.asBytes(&header));
-        //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.limbs.items));
-        //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.extra.items));
-        //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.data)));
-        //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.tag)));
-        //addBuf(&bufs_list, &bufs_len, ip.string_bytes.items);
-        //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.tracked_insts.keys()));
-
-        addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.keys()));
-        addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.values()));
-        addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.decl_val_deps.keys()));
-        addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.decl_val_deps.values()));
-        addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_deps.keys()));
-        addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_deps.values()));
-        addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_name_deps.keys()));
-        addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_name_deps.values()));
-
-        addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.first_dependency.keys()));
-        addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.first_dependency.values()));
-        addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.dep_entries.items));
-        addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.free_dep_entries.items));
-
-        //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.files.keys()));
-        //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.files.values()));
-
-        // TODO: compilation errors
-        // TODO: namespaces
-        // TODO: decls
-        // TODO: linker state
+        try pt_headers.ensureTotalCapacityPrecise(header.intern_pool.thread_count);
+        for (ip.locals) |*local| pt_headers.appendAssumeCapacity(.{
+            .intern_pool = .{
+                .items_len = @intCast(local.mutate.items.len),
+                .extra_len = @intCast(local.mutate.extra.len),
+                .limbs_len = @intCast(local.mutate.limbs.len),
+                .string_bytes_len = @intCast(local.mutate.strings.len),
+                .tracked_insts_len = @intCast(local.mutate.tracked_insts.len),
+                .files_len = @intCast(local.mutate.files.len),
+            },
+        });
+
+        try bufs.ensureTotalCapacityPrecise(14 + 8 * pt_headers.items.len);
+        addBuf(&bufs, mem.asBytes(&header));
+        addBuf(&bufs, mem.sliceAsBytes(pt_headers.items));
+
+        addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.keys()));
+        addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.values()));
+        addBuf(&bufs, mem.sliceAsBytes(ip.decl_val_deps.keys()));
+        addBuf(&bufs, mem.sliceAsBytes(ip.decl_val_deps.values()));
+        addBuf(&bufs, mem.sliceAsBytes(ip.namespace_deps.keys()));
+        addBuf(&bufs, mem.sliceAsBytes(ip.namespace_deps.values()));
+        addBuf(&bufs, mem.sliceAsBytes(ip.namespace_name_deps.keys()));
+        addBuf(&bufs, mem.sliceAsBytes(ip.namespace_name_deps.values()));
+
+        addBuf(&bufs, mem.sliceAsBytes(ip.first_dependency.keys()));
+        addBuf(&bufs, mem.sliceAsBytes(ip.first_dependency.values()));
+        addBuf(&bufs, mem.sliceAsBytes(ip.dep_entries.items));
+        addBuf(&bufs, mem.sliceAsBytes(ip.free_dep_entries.items));
+
+        for (ip.locals, pt_headers.items) |*local, pt_header| {
+            addBuf(&bufs, mem.sliceAsBytes(local.shared.limbs.view().items(.@"0")[0..pt_header.intern_pool.limbs_len]));
+            addBuf(&bufs, mem.sliceAsBytes(local.shared.extra.view().items(.@"0")[0..pt_header.intern_pool.extra_len]));
+            addBuf(&bufs, mem.sliceAsBytes(local.shared.items.view().items(.data)[0..pt_header.intern_pool.items_len]));
+            addBuf(&bufs, mem.sliceAsBytes(local.shared.items.view().items(.tag)[0..pt_header.intern_pool.items_len]));
+            addBuf(&bufs, local.shared.strings.view().items(.@"0")[0..pt_header.intern_pool.string_bytes_len]);
+            addBuf(&bufs, mem.sliceAsBytes(local.shared.tracked_insts.view().items(.@"0")[0..pt_header.intern_pool.tracked_insts_len]));
+            addBuf(&bufs, mem.sliceAsBytes(local.shared.files.view().items(.bin_digest)[0..pt_header.intern_pool.files_len]));
+            addBuf(&bufs, mem.sliceAsBytes(local.shared.files.view().items(.root_decl)[0..pt_header.intern_pool.files_len]));
+        }
+
+        //// TODO: compilation errors
+        //// TODO: namespaces
+        //// TODO: decls
+        //// TODO: linker state
     }
     var basename_buf: [255]u8 = undefined;
     const basename = std.fmt.bufPrint(&basename_buf, "{s}.zcs", .{
@@ -2938,20 +2979,14 @@ pub fn saveState(comp: *Compilation) !void {
     // the previous incremental compilation state.
     var af = try lf.emit.directory.handle.atomicFile(basename, .{});
     defer af.deinit();
-    try af.file.pwritevAll(bufs_list[0..bufs_len], 0);
+    try af.file.pwritevAll(bufs.items, 0);
     try af.finish();
 }
 
-fn addBuf(bufs_list: []std.posix.iovec_const, bufs_len: *usize, buf: []const u8) void {
+fn addBuf(list: *std.ArrayList(std.posix.iovec_const), buf: []const u8) void {
     // Even when len=0, the undefined pointer might cause EFAULT.
     if (buf.len == 0) return;
-
-    const i = bufs_len.*;
-    bufs_len.* = i + 1;
-    bufs_list[i] = .{
-        .base = buf.ptr,
-        .len = buf.len,
-    };
+    list.appendAssumeCapacity(.{ .base = buf.ptr, .len = buf.len });
 }
 
 /// This function is temporally single-threaded.
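The save path now gathers every slice into an ArrayList of iovecs and hands them to one positional vectored write. A runnable sketch of that gather-write pattern under the same addBuf contract (capacity is reserved up front, so appendAssumeCapacity cannot fail); the file name here is made up:

    const std = @import("std");

    fn addBuf(list: *std.ArrayList(std.posix.iovec_const), buf: []const u8) void {
        // Even when len=0, the undefined pointer might cause EFAULT.
        if (buf.len == 0) return;
        list.appendAssumeCapacity(.{ .base = buf.ptr, .len = buf.len });
    }

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();

        var bufs = std.ArrayList(std.posix.iovec_const).init(gpa);
        defer bufs.deinit();
        try bufs.ensureTotalCapacityPrecise(2);

        addBuf(&bufs, "header");
        addBuf(&bufs, "payload");

        const file = try std.fs.cwd().createFile("example.zcs", .{});
        defer file.close();
        // One gathered write of all collected slices at offset 0.
        try file.pwritevAll(bufs.items, 0);
    }

Replacing the fixed [21]std.posix.iovec_const array also removes a hard-coded upper bound: the number of buffers now depends on the thread count, which is only known at runtime.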
@@ -3011,7 +3046,7 @@ pub fn totalErrorCount(comp: *Compilation) u32 {
             }
         }
 
-        if (zcu.intern_pool.global_error_set.mutate.list.len > zcu.error_limit) {
+        if (zcu.intern_pool.global_error_set.getNamesFromMainThread().len > zcu.error_limit) {
             total += 1;
         }
     }
@@ -3095,6 +3130,39 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
         for (zcu.failed_embed_files.values()) |error_msg| {
             try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references);
         }
+        {
+            const SortOrder = struct {
+                zcu: *Zcu,
+                err: *?Error,
+
+                const Error = @typeInfo(
+                    @typeInfo(@TypeOf(Zcu.SrcLoc.span)).Fn.return_type.?,
+                ).ErrorUnion.error_set;
+
+                pub fn lessThan(ctx: @This(), lhs_index: usize, rhs_index: usize) bool {
+                    if (ctx.err.*) |_| return lhs_index < rhs_index;
+                    const errors = ctx.zcu.failed_analysis.values();
+                    const lhs_src_loc = errors[lhs_index].src_loc.upgrade(ctx.zcu);
+                    const rhs_src_loc = errors[rhs_index].src_loc.upgrade(ctx.zcu);
+                    return if (lhs_src_loc.file_scope != rhs_src_loc.file_scope) std.mem.order(
+                        u8,
+                        lhs_src_loc.file_scope.sub_file_path,
+                        rhs_src_loc.file_scope.sub_file_path,
+                    ).compare(.lt) else (lhs_src_loc.span(ctx.zcu.gpa) catch |e| {
+                        ctx.err.* = e;
+                        return lhs_index < rhs_index;
+                    }).main < (rhs_src_loc.span(ctx.zcu.gpa) catch |e| {
+                        ctx.err.* = e;
+                        return lhs_index < rhs_index;
+                    }).main;
+                }
+            };
+            var err: ?SortOrder.Error = null;
+            // This leaves `zcu.failed_analysis` an invalid state, but we do not
+            // need lookups anymore anyway.
+            zcu.failed_analysis.entries.sort(SortOrder{ .zcu = zcu, .err = &err });
+            if (err) |e| return e;
+        }
         for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| {
             const decl_index = switch (anal_unit.unwrap()) {
                 .decl => |d| d,
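The SortOrder context above threads an error pointer through the comparator because Zig sort callbacks cannot return errors: the first failure is parked in ctx.err, the comparator degrades to stable index order, and the caller re-raises afterwards. A self-contained sketch of the same trick with hypothetical names:

    const std = @import("std");

    const ParseError = std.fmt.ParseIntError;

    const Ctx = struct {
        err: *?ParseError,

        fn lessThan(self: @This(), lhs: []const u8, rhs: []const u8) bool {
            // After the first failure, stop comparing and keep the order stable.
            if (self.err.*) |_| return false;
            const a = std.fmt.parseInt(u32, lhs, 10) catch |e| {
                self.err.* = e;
                return false;
            };
            const b = std.fmt.parseInt(u32, rhs, 10) catch |e| {
                self.err.* = e;
                return false;
            };
            return a < b;
        }
    };

    pub fn main() !void {
        var names = [_][]const u8{ "30", "4", "200" };
        var err: ?ParseError = null;
        std.mem.sort([]const u8, &names, Ctx{ .err = &err }, Ctx.lessThan);
        if (err) |e| return e; // surface the first comparator failure
        for (names) |n| std.debug.print("{s}\n", .{n});
    }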
@@ -3140,7 +3208,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
             try addModuleErrorMsg(zcu, &bundle, value.*, &all_references);
         }
 
-        const actual_error_count = zcu.intern_pool.global_error_set.mutate.list.len;
+        const actual_error_count = zcu.intern_pool.global_error_set.getNamesFromMainThread().len;
         if (actual_error_count > zcu.error_limit) {
             try bundle.addRootErrorMessage(.{
                 .msg = try bundle.printString("ZCU used more errors than possible: used {d}, max {d}", .{
@@ -3543,18 +3611,18 @@ fn performAllTheWorkInner(
         comp.codegen_work.cond.signal();
     };
 
-    while (true) {
-        if (comp.work_queue.readItem()) |work_item| {
-            try processOneJob(@intFromEnum(Zcu.PerThread.Id.main), comp, work_item, main_progress_node);
-            continue;
-        }
+    work: while (true) {
+        for (&comp.work_queues) |*work_queue| if (work_queue.readItem()) |job| {
+            try processOneJob(@intFromEnum(Zcu.PerThread.Id.main), comp, job, main_progress_node);
+            continue :work;
+        };
         if (comp.module) |zcu| {
             // If there's no work queued, check if there's anything outdated
             // which we need to work on, and queue it if so.
             if (try zcu.findOutdatedToAnalyze()) |outdated| {
                 switch (outdated.unwrap()) {
-                    .decl => |decl| try comp.work_queue.writeItem(.{ .analyze_decl = decl }),
-                    .func => |func| try comp.work_queue.writeItem(.{ .analyze_func = func }),
+                    .decl => |decl| try comp.queueJob(.{ .analyze_decl = decl }),
+                    .func => |func| try comp.queueJob(.{ .analyze_func = func }),
                 }
                 continue;
             }
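The main loop now scans the queue array in priority order and restarts the scan after every processed job, so stage-0 work is always preferred; the labeled continue is what jumps back to the outer loop. A toy version of the same drain:

    const std = @import("std");

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();

        var queues: [2]std.fifo.LinearFifo(u32, .Dynamic) = .{
            std.fifo.LinearFifo(u32, .Dynamic).init(gpa),
            std.fifo.LinearFifo(u32, .Dynamic).init(gpa),
        };
        defer for (&queues) |*q| q.deinit();

        try queues[1].writeItem(100); // low priority
        try queues[0].writeItem(1); // high priority

        work: while (true) {
            for (&queues) |*queue| if (queue.readItem()) |job| {
                std.debug.print("processing job {d}\n", .{job});
                continue :work; // restart from the highest-priority queue
            };
            break; // all queues empty
        }
    }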
@@ -3575,6 +3643,14 @@ fn performAllTheWorkInner(
 
 const JobError = Allocator.Error;
 
+pub fn queueJob(comp: *Compilation, job: Job) !void {
+    try comp.work_queues[Job.stage(job)].writeItem(job);
+}
+
+pub fn queueJobs(comp: *Compilation, jobs: []const Job) !void {
+    for (jobs) |job| try comp.queueJob(job);
+}
+
 fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progress.Node) JobError!void {
     switch (job) {
         .codegen_decl => |decl_index| {
@@ -6478,7 +6554,7 @@ pub fn addLinkLib(comp: *Compilation, lib_name: []const u8) !void {
     };
     const target = comp.root_mod.resolved_target.result;
     if (target.os.tag == .windows and target.ofmt != .c) {
-        try comp.work_queue.writeItem(.{
+        try comp.queueJob(.{
            .windows_import_lib = comp.system_libs.count() - 1,
         });
     }
 
(File diff suppressed because it is too large.)

src/Sema.zig (221 changed lines)
@@ -2530,13 +2530,13 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error
     }
 
     if (sema.owner_func_index != .none) {
-        ip.funcAnalysis(sema.owner_func_index).state = .sema_failure;
+        ip.funcSetAnalysisState(sema.owner_func_index, .sema_failure);
     } else {
         sema.owner_decl.analysis = .sema_failure;
     }
 
     if (sema.func_index != .none) {
-        ip.funcAnalysis(sema.func_index).state = .sema_failure;
+        ip.funcSetAnalysisState(sema.func_index, .sema_failure);
     }
 
     return error.AnalysisFail;
@@ -2848,7 +2848,7 @@ fn zirStructDecl(
     }
 
     try pt.finalizeAnonDecl(new_decl_index);
-    try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
+    try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
     try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index }));
     return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index));
 }
@@ -3353,7 +3353,7 @@ fn zirUnionDecl(
     }
 
     try pt.finalizeAnonDecl(new_decl_index);
-    try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
+    try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
     try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index }));
     return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index));
 }
@@ -6550,14 +6550,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
     }
     sema.prev_stack_alignment_src = src;
 
-    const ip = &mod.intern_pool;
-    const a = ip.funcAnalysis(sema.func_index);
-    if (a.stack_alignment != .none) {
-        a.stack_alignment = @enumFromInt(@max(
-            @intFromEnum(alignment),
-            @intFromEnum(a.stack_alignment),
-        ));
-    }
+    mod.intern_pool.funcMaxStackAlignment(sema.func_index, alignment);
 }
 
 fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
@@ -6570,7 +6563,7 @@ fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData)
         .needed_comptime_reason = "operand to @setCold must be comptime-known",
     });
     if (sema.func_index == .none) return; // does nothing outside a function
-    ip.funcAnalysis(sema.func_index).is_cold = is_cold;
+    ip.funcSetCold(sema.func_index, is_cold);
 }
 
 fn zirSetFloatMode(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
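funcMaxStackAlignment folds the old check-then-write sequence into a single InternPool call, so two threads raising the alignment at the same time can no longer lose one of the updates. One race-free way to implement such a monotonic maximum, sketched here as a compare-and-swap loop on a bare integer (whether the real implementation uses a lock or CAS is an InternPool detail):

    // Shared alignment word; 0 plays the role of `.none`.
    var stack_alignment: u32 = 0;

    fn maxStackAlignment(new: u32) void {
        var old = @atomicLoad(u32, &stack_alignment, .unordered);
        while (old < new) {
            // cmpxchgWeak returns null on success, or the observed value
            // on failure, in which case we retry against that value.
            old = @cmpxchgWeak(u32, &stack_alignment, old, new, .monotonic, .monotonic) orelse return;
        }
    }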
@@ -7085,7 +7078,7 @@ fn zirCall(
     const call_inst = try sema.analyzeCall(block, func, func_ty, callee_src, call_src, modifier, ensure_result_used, args_info, call_dbg_node, .call);
 
     if (sema.owner_func_index == .none or
-        !mod.intern_pool.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn)
+        !mod.intern_pool.funcAnalysisUnordered(sema.owner_func_index).calls_or_awaits_errorable_fn)
     {
         // No errorable fn actually called; we have no error return trace
         input_is_error = false;
@@ -7793,7 +7786,7 @@ fn analyzeCall(
             _ = ics.callee();
 
             if (!inlining.has_comptime_args) {
-                if (module_fn.analysis(ip).state == .sema_failure)
+                if (module_fn.analysisUnordered(ip).state == .sema_failure)
                     return error.AnalysisFail;
 
                 var block_it = block;
@@ -7816,7 +7809,7 @@ fn analyzeCall(
                 try sema.resolveInst(fn_info.ret_ty_ref);
             const ret_ty_src: LazySrcLoc = .{ .base_node_inst = module_fn.zir_body_inst, .offset = .{ .node_offset_fn_type_ret_ty = 0 } };
             sema.fn_ret_ty = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst);
-            if (module_fn.analysis(ip).inferred_error_set) {
+            if (module_fn.analysisUnordered(ip).inferred_error_set) {
                 // Create a fresh inferred error set type for inline/comptime calls.
                 const ies = try sema.arena.create(InferredErrorSet);
                 ies.* = .{ .func = .none };
@@ -7942,7 +7935,7 @@ fn analyzeCall(
         if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
 
         if (sema.owner_func_index != .none and Type.fromInterned(func_ty_info.return_type).isError(mod)) {
-            ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
+            ip.funcSetCallsOrAwaitsErrorableFn(sema.owner_func_index);
         }
 
         if (try sema.resolveValue(func)) |func_val| {
@@ -8386,7 +8379,7 @@ fn instantiateGenericCall(
     const callee_index = (child_sema.resolveConstDefinedValue(&child_block, LazySrcLoc.unneeded, new_func_inst, undefined) catch unreachable).toIntern();
 
     const callee = zcu.funcInfo(callee_index);
-    callee.branchQuota(ip).* = @max(callee.branchQuota(ip).*, sema.branch_quota);
+    callee.maxBranchQuota(ip, sema.branch_quota);
 
     // Make a runtime call to the new function, making sure to omit the comptime args.
     const func_ty = Type.fromInterned(callee.ty);
@@ -8408,7 +8401,7 @@ fn instantiateGenericCall(
     if (sema.owner_func_index != .none and
         Type.fromInterned(func_ty_info.return_type).isError(zcu))
     {
-        ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
+        ip.funcSetCallsOrAwaitsErrorableFn(sema.owner_func_index);
     }
 
     try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = callee_index }));
@@ -8769,9 +8762,9 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
     const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntSema(pt));
     if (int > len: {
         const mutate = &ip.global_error_set.mutate;
-        mutate.mutex.lock();
-        defer mutate.mutex.unlock();
-        break :len mutate.list.len;
+        mutate.map.mutex.lock();
+        defer mutate.map.mutex.unlock();
+        break :len mutate.names.len;
     } or int == 0)
         return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int});
     return Air.internedToRef((try pt.intern(.{ .err = .{
@@ -18395,7 +18388,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             try ty.resolveLayout(pt); // Getting alignment requires type layout
             const union_obj = mod.typeToUnion(ty).?;
             const tag_type = union_obj.loadTagType(ip);
-            const layout = union_obj.getLayout(ip);
+            const layout = union_obj.flagsUnordered(ip).layout;
 
             const union_field_vals = try gpa.alloc(InternPool.Index, tag_type.names.len);
             defer gpa.free(union_field_vals);
@@ -18713,8 +18706,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const backing_integer_val = try pt.intern(.{ .opt = .{
                 .ty = (try pt.optionalType(.type_type)).toIntern(),
                 .val = if (mod.typeToPackedStruct(ty)) |packed_struct| val: {
-                    assert(Type.fromInterned(packed_struct.backingIntType(ip).*).isInt(mod));
-                    break :val packed_struct.backingIntType(ip).*;
+                    assert(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)).isInt(mod));
+                    break :val packed_struct.backingIntTypeUnordered(ip);
                 } else .none,
             } });
 
@@ -19795,7 +19788,7 @@ fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_
         return;
     }
 
-    if (!mod.intern_pool.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn) return;
+    if (!mod.intern_pool.funcAnalysisUnordered(sema.owner_func_index).calls_or_awaits_errorable_fn) return;
     if (!start_block.ownerModule().error_tracing) return;
 
     assert(saved_index != .none); // The .error_return_trace_index field was dropped somewhere
@@ -21053,7 +21046,7 @@ fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
     const opt_ptr_stack_trace_ty = try pt.optionalType(ptr_stack_trace_ty.toIntern());
 
     if (sema.owner_func_index != .none and
-        ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn and
+        ip.funcAnalysisUnordered(sema.owner_func_index).calls_or_awaits_errorable_fn and
         block.ownerModule().error_tracing)
     {
         return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty);
@@ -22201,11 +22194,11 @@ fn reifyUnion(
     if (any_aligns) {
         loaded_union.setFieldAligns(ip, field_aligns);
     }
-    loaded_union.tagTypePtr(ip).* = enum_tag_ty;
-    loaded_union.flagsPtr(ip).status = .have_field_types;
+    loaded_union.setTagType(ip, enum_tag_ty);
+    loaded_union.setStatus(ip, .have_field_types);
 
     try pt.finalizeAnonDecl(new_decl_index);
-    try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
+    try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
     try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index }));
     return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none));
 }
@@ -22464,15 +22457,15 @@ fn reifyStruct(
         if (opt_backing_int_val.optionalValue(mod)) |backing_int_val| {
             const backing_int_ty = backing_int_val.toType();
             try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
-            struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
+            struct_type.setBackingIntType(ip, backing_int_ty.toIntern());
         } else {
             const backing_int_ty = try pt.intType(.unsigned, @intCast(fields_bit_sum));
-            struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
+            struct_type.setBackingIntType(ip, backing_int_ty.toIntern());
         }
     }
 
     try pt.finalizeAnonDecl(new_decl_index);
-    try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
+    try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
     try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index }));
     return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none));
 }
@@ -28347,7 +28340,7 @@ fn unionFieldPtr(
         .is_const = union_ptr_info.flags.is_const,
         .is_volatile = union_ptr_info.flags.is_volatile,
         .address_space = union_ptr_info.flags.address_space,
-        .alignment = if (union_obj.getLayout(ip) == .auto) blk: {
+        .alignment = if (union_obj.flagsUnordered(ip).layout == .auto) blk: {
            const union_align = if (union_ptr_info.flags.alignment != .none)
                union_ptr_info.flags.alignment
            else
@@ -28375,7 +28368,7 @@ fn unionFieldPtr(
     }
 
     if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| ct: {
-        switch (union_obj.getLayout(ip)) {
+        switch (union_obj.flagsUnordered(ip).layout) {
             .auto => if (initializing) {
                 // Store to the union to initialize the tag.
                 const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
@@ -28413,7 +28406,7 @@ fn unionFieldPtr(
     }
 
     try sema.requireRuntimeBlock(block, src, null);
-    if (!initializing and union_obj.getLayout(ip) == .auto and block.wantSafety() and
+    if (!initializing and union_obj.flagsUnordered(ip).layout == .auto and block.wantSafety() and
         union_ty.unionTagTypeSafety(mod) != null and union_obj.field_types.len > 1)
     {
         const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
@@ -28456,7 +28449,7 @@ fn unionFieldVal(
         const un = ip.indexToKey(union_val.toIntern()).un;
         const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
         const tag_matches = un.tag == field_tag.toIntern();
-        switch (union_obj.getLayout(ip)) {
+        switch (union_obj.flagsUnordered(ip).layout) {
             .auto => {
                 if (tag_matches) {
                     return Air.internedToRef(un.val);
@@ -28490,7 +28483,7 @@ fn unionFieldVal(
     }
 
     try sema.requireRuntimeBlock(block, src, null);
-    if (union_obj.getLayout(ip) == .auto and block.wantSafety() and
+    if (union_obj.flagsUnordered(ip).layout == .auto and block.wantSafety() and
         union_ty.unionTagTypeSafety(zcu) != null and union_obj.field_types.len > 1)
     {
         const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
@@ -32037,7 +32030,7 @@ pub fn ensureDeclAnalyzed(sema: *Sema, decl_index: InternPool.DeclIndex) Compile
 
     pt.ensureDeclAnalyzed(decl_index) catch |err| {
         if (sema.owner_func_index != .none) {
-            ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
+            ip.funcSetAnalysisState(sema.owner_func_index, .dependency_failure);
         } else {
             sema.owner_decl.analysis = .dependency_failure;
         }
@@ -32051,7 +32044,7 @@ fn ensureFuncBodyAnalyzed(sema: *Sema, func: InternPool.Index) CompileError!void
     const ip = &mod.intern_pool;
     pt.ensureFuncBodyAnalyzed(func) catch |err| {
         if (sema.owner_func_index != .none) {
-            ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
+            ip.funcSetAnalysisState(sema.owner_func_index, .dependency_failure);
         } else {
             sema.owner_decl.analysis = .dependency_failure;
         }
@@ -32397,7 +32390,7 @@ fn analyzeIsNonErrComptimeOnly(
             // If the error set is empty, we must return a comptime true or false.
             // However we want to avoid unnecessarily resolving an inferred error set
            // in case it is already non-empty.
-            switch (ip.funcIesResolved(func_index).*) {
+            switch (ip.funcIesResolvedUnordered(func_index)) {
                 .anyerror_type => break :blk,
                 .none => {},
                 else => |i| if (ip.indexToKey(i).error_set_type.names.len != 0) break :blk,
@@ -33466,7 +33459,7 @@ fn wrapErrorUnionSet(
                 .inferred_error_set_type => |func_index| ok: {
                     // We carefully do this in an order that avoids unnecessarily
                     // resolving the destination error set type.
-                    switch (ip.funcIesResolved(func_index).*) {
+                    switch (ip.funcIesResolvedUnordered(func_index)) {
                         .anyerror_type => break :ok,
                         .none => if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) {
                             break :ok;
@@ -35071,33 +35064,25 @@ pub fn resolveStructAlignment(
 
     assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?);
 
-    assert(struct_type.flagsPtr(ip).alignment == .none);
     assert(struct_type.layout != .@"packed");
+    assert(struct_type.flagsUnordered(ip).alignment == .none);
 
-    if (struct_type.flagsPtr(ip).field_types_wip) {
-        // We'll guess "pointer-aligned", if the struct has an
-        // underaligned pointer field then some allocations
-        // might require explicit alignment.
-        struct_type.flagsPtr(ip).assumed_pointer_aligned = true;
-        const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
-        struct_type.flagsPtr(ip).alignment = result;
-        return;
-    }
+    const ptr_align = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
+
+    // We'll guess "pointer-aligned", if the struct has an
+    // underaligned pointer field then some allocations
+    // might require explicit alignment.
+    if (struct_type.assumePointerAlignedIfFieldTypesWip(ip, ptr_align)) return;
 
     try sema.resolveTypeFieldsStruct(ty, struct_type);
 
-    if (struct_type.setAlignmentWip(ip)) {
-        // We'll guess "pointer-aligned", if the struct has an
-        // underaligned pointer field then some allocations
-        // might require explicit alignment.
-        struct_type.flagsPtr(ip).assumed_pointer_aligned = true;
-        const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
-        struct_type.flagsPtr(ip).alignment = result;
-        return;
-    }
+    // We'll guess "pointer-aligned", if the struct has an
+    // underaligned pointer field then some allocations
+    // might require explicit alignment.
+    if (struct_type.assumePointerAlignedIfWip(ip, ptr_align)) return;
     defer struct_type.clearAlignmentWip(ip);
 
-    var result: Alignment = .@"1";
+    var alignment: Alignment = .@"1";
 
     for (0..struct_type.field_types.len) |i| {
         const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
@@ -35109,10 +35094,10 @@ pub fn resolveStructAlignment(
             struct_type.layout,
             .sema,
         );
-        result = result.maxStrict(field_align);
+        alignment = alignment.maxStrict(field_align);
     }
 
-    struct_type.flagsPtr(ip).alignment = result;
+    struct_type.setAlignment(ip, alignment);
 }
 
 pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
@@ -35177,7 +35162,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
         big_align = big_align.maxStrict(field_align.*);
     }
 
-    if (struct_type.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
+    if (struct_type.flagsUnordered(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
         const msg = try sema.errMsg(
             ty.srcLoc(zcu),
             "struct layout depends on it having runtime bits",
@@ -35186,7 +35171,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
         return sema.failWithOwnedErrorMsg(null, msg);
     }
 
-    if (struct_type.flagsPtr(ip).assumed_pointer_aligned and
+    if (struct_type.flagsUnordered(ip).assumed_pointer_aligned and
         big_align.compareStrict(.neq, Alignment.fromByteUnits(@divExact(zcu.getTarget().ptrBitWidth(), 8))))
     {
         const msg = try sema.errMsg(
@@ -35254,10 +35239,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
         offsets[i] = @intCast(aligns[i].forward(offset));
         offset = offsets[i] + sizes[i];
     }
-    struct_type.size(ip).* = @intCast(big_align.forward(offset));
-    const flags = struct_type.flagsPtr(ip);
-    flags.alignment = big_align;
-    flags.layout_resolved = true;
+    struct_type.setLayoutResolved(ip, @intCast(big_align.forward(offset)), big_align);
     _ = try sema.typeRequiresComptime(ty);
 }
 
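Helpers such as assumePointerAlignedIfFieldTypesWip exist because the old code performed a flag check followed by two writes through flagsPtr, three separate touches of shared memory that another thread could interleave with. Folding the check and the update into one helper that holds the lock for the whole sequence turns it into a single atomic step. A minimal sketch with hypothetical types:

    const std = @import("std");

    const Flags = struct {
        field_types_wip: bool = false,
        assumed_pointer_aligned: bool = false,
        alignment: u32 = 0,
    };

    var mutex: std.Thread.Mutex = .{};
    var flags: Flags = .{};

    /// Returns true (and records the pointer-aligned guess) only while the
    /// field types are still being resolved; callers return early then.
    fn assumePointerAlignedIfFieldTypesWip(ptr_align: u32) bool {
        mutex.lock();
        defer mutex.unlock();
        if (!flags.field_types_wip) return false;
        flags.assumed_pointer_aligned = true;
        flags.alignment = ptr_align;
        return true;
    }

    test assumePointerAlignedIfFieldTypesWip {
        try std.testing.expect(!assumePointerAlignedIfFieldTypesWip(8));
        flags.field_types_wip = true;
        try std.testing.expect(assumePointerAlignedIfFieldTypesWip(8));
    }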
@@ -35350,13 +35332,13 @@ fn semaBackingIntType(pt: Zcu.PerThread, struct_type: InternPool.LoadedStructTyp
         };
 
         try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum);
-        struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
+        struct_type.setBackingIntType(ip, backing_int_ty.toIntern());
     } else {
         if (fields_bit_sum > std.math.maxInt(u16)) {
             return sema.fail(&block, block.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum});
         }
         const backing_int_ty = try pt.intType(.unsigned, @intCast(fields_bit_sum));
-        struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
+        struct_type.setBackingIntType(ip, backing_int_ty.toIntern());
     }
 
     try sema.flushExports();
@@ -35430,15 +35412,12 @@ pub fn resolveUnionAlignment(
 
     assert(!union_type.haveLayout(ip));
 
-    if (union_type.flagsPtr(ip).status == .field_types_wip) {
-        // We'll guess "pointer-aligned", if the union has an
-        // underaligned pointer field then some allocations
-        // might require explicit alignment.
-        union_type.flagsPtr(ip).assumed_pointer_aligned = true;
-        const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
-        union_type.flagsPtr(ip).alignment = result;
-        return;
-    }
+    const ptr_align = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
+
+    // We'll guess "pointer-aligned", if the union has an
+    // underaligned pointer field then some allocations
+    // might require explicit alignment.
+    if (union_type.assumePointerAlignedIfFieldTypesWip(ip, ptr_align)) return;
 
     try sema.resolveTypeFieldsUnion(ty, union_type);
 
@@ -35456,7 +35435,7 @@ pub fn resolveUnionAlignment(
         max_align = max_align.max(field_align);
     }
 
-    union_type.flagsPtr(ip).alignment = max_align;
+    union_type.setAlignment(ip, max_align);
 }
 
 /// This logic must be kept in sync with `Module.getUnionLayout`.
@@ -35471,7 +35450,8 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
 
     assert(sema.ownerUnit().unwrap().decl == union_type.decl);
 
-    switch (union_type.flagsPtr(ip).status) {
+    const old_flags = union_type.flagsUnordered(ip);
+    switch (old_flags.status) {
         .none, .have_field_types => {},
         .field_types_wip, .layout_wip => {
             const msg = try sema.errMsg(
@@ -35484,12 +35464,9 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
         .have_layout, .fully_resolved_wip, .fully_resolved => return,
     }
 
-    const prev_status = union_type.flagsPtr(ip).status;
-    errdefer if (union_type.flagsPtr(ip).status == .layout_wip) {
-        union_type.flagsPtr(ip).status = prev_status;
-    };
+    errdefer union_type.setStatusIfLayoutWip(ip, old_flags.status);
 
-    union_type.flagsPtr(ip).status = .layout_wip;
+    union_type.setStatus(ip, .layout_wip);
 
     var max_size: u64 = 0;
     var max_align: Alignment = .@"1";
@@ -35516,8 +35493,8 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
         max_align = max_align.max(field_align);
     }
 
-    const flags = union_type.flagsPtr(ip);
-    const has_runtime_tag = flags.runtime_tag.hasTag() and try sema.typeHasRuntimeBits(Type.fromInterned(union_type.enum_tag_ty));
+    const has_runtime_tag = union_type.flagsUnordered(ip).runtime_tag.hasTag() and
+        try sema.typeHasRuntimeBits(Type.fromInterned(union_type.enum_tag_ty));
     const size, const alignment, const padding = if (has_runtime_tag) layout: {
         const enum_tag_type = Type.fromInterned(union_type.enum_tag_ty);
         const tag_align = try sema.typeAbiAlignment(enum_tag_type);
@@ -35551,12 +35528,9 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
         break :layout .{ size, max_align.max(tag_align), padding };
     } else .{ max_align.forward(max_size), max_align, 0 };
 
-    union_type.size(ip).* = @intCast(size);
-    union_type.padding(ip).* = padding;
-    flags.alignment = alignment;
-    flags.status = .have_layout;
+    union_type.setHaveLayout(ip, @intCast(size), padding, alignment);
 
-    if (union_type.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
+    if (union_type.flagsUnordered(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
         const msg = try sema.errMsg(
             ty.srcLoc(pt.zcu),
             "union layout depends on it having runtime bits",
@@ -35565,7 +35539,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
         return sema.failWithOwnedErrorMsg(null, msg);
     }
 
-    if (union_type.flagsPtr(ip).assumed_pointer_aligned and
+    if (union_type.flagsUnordered(ip).assumed_pointer_aligned and
         alignment.compareStrict(.neq, Alignment.fromByteUnits(@divExact(pt.zcu.getTarget().ptrBitWidth(), 8))))
     {
         const msg = try sema.errMsg(
@@ -35612,7 +35586,7 @@ pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void {
 
     assert(sema.ownerUnit().unwrap().decl == union_obj.decl);
 
-    switch (union_obj.flagsPtr(ip).status) {
+    switch (union_obj.flagsUnordered(ip).status) {
         .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
         .fully_resolved_wip, .fully_resolved => return,
     }
@@ -35621,15 +35595,15 @@ pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void {
         // After we have resolve union layout we have to go over the fields again to
         // make sure pointer fields get their child types resolved as well.
         // See also similar code for structs.
-        const prev_status = union_obj.flagsPtr(ip).status;
-        errdefer union_obj.flagsPtr(ip).status = prev_status;
+        const prev_status = union_obj.flagsUnordered(ip).status;
+        errdefer union_obj.setStatus(ip, prev_status);
 
-        union_obj.flagsPtr(ip).status = .fully_resolved_wip;
+        union_obj.setStatus(ip, .fully_resolved_wip);
         for (0..union_obj.field_types.len) |field_index| {
             const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
             try field_ty.resolveFully(pt);
         }
-        union_obj.flagsPtr(ip).status = .fully_resolved;
+        union_obj.setStatus(ip, .fully_resolved);
     }
 
     // And let's not forget comptime-only status.
@@ -35662,7 +35636,7 @@ pub fn resolveTypeFieldsStruct(
 
     if (struct_type.haveFieldTypes(ip)) return;
 
-    if (struct_type.setTypesWip(ip)) {
+    if (struct_type.setFieldTypesWip(ip)) {
         const msg = try sema.errMsg(
             Type.fromInterned(ty).srcLoc(zcu),
             "struct '{}' depends on itself",
@@ -35670,7 +35644,7 @@ pub fn resolveTypeFieldsStruct(
         );
         return sema.failWithOwnedErrorMsg(null, msg);
     }
-    defer struct_type.clearTypesWip(ip);
+    defer struct_type.clearFieldTypesWip(ip);
 
     semaStructFields(pt, sema.arena, struct_type) catch |err| switch (err) {
         error.AnalysisFail => {
@@ -35739,7 +35713,7 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load
         },
         else => {},
     }
-    switch (union_type.flagsPtr(ip).status) {
+    switch (union_type.flagsUnordered(ip).status) {
         .none => {},
         .field_types_wip => {
             const msg = try sema.errMsg(
@@ -35757,8 +35731,8 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load
         => return,
     }
 
-    union_type.flagsPtr(ip).status = .field_types_wip;
-    errdefer union_type.flagsPtr(ip).status = .none;
+    union_type.setStatus(ip, .field_types_wip);
+    errdefer union_type.setStatus(ip, .none);
     semaUnionFields(pt, sema.arena, union_type) catch |err| switch (err) {
         error.AnalysisFail => {
             if (owner_decl.analysis == .complete) {
@@ -35769,7 +35743,7 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load
         error.OutOfMemory => return error.OutOfMemory,
         error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable,
     };
-    union_type.flagsPtr(ip).status = .have_field_types;
+    union_type.setStatus(ip, .have_field_types);
 }
 
 /// Returns a normal error set corresponding to the fully populated inferred
@@ -35790,10 +35764,10 @@ fn resolveInferredErrorSet(
 
     // TODO: during an incremental update this might not be `.none`, but the
     // function might be out-of-date!
-    const resolved_ty = func.resolvedErrorSet(ip).*;
+    const resolved_ty = func.resolvedErrorSetUnordered(ip);
     if (resolved_ty != .none) return resolved_ty;
 
-    if (func.analysis(ip).state == .in_progress)
+    if (func.analysisUnordered(ip).state == .in_progress)
         return sema.fail(block, src, "unable to resolve inferred error set", .{});
 
     // In order to ensure that all dependencies are properly added to the set,
@@ -35830,7 +35804,7 @@ fn resolveInferredErrorSet(
 
     // This will now have been resolved by the logic at the end of `Module.analyzeFnBody`
     // which calls `resolveInferredErrorSetPtr`.
-    const final_resolved_ty = func.resolvedErrorSet(ip).*;
+    const final_resolved_ty = func.resolvedErrorSetUnordered(ip);
     assert(final_resolved_ty != .none);
     return final_resolved_ty;
 }
@@ -35996,8 +35970,7 @@ fn semaStructFields(
             return;
         },
         .auto, .@"extern" => {
-            struct_type.size(ip).* = 0;
-            struct_type.flagsPtr(ip).layout_resolved = true;
+            struct_type.setLayoutResolved(ip, 0, .none);
             return;
         },
     };
@@ -36191,7 +36164,7 @@ fn semaStructFields(
         extra_index += zir_field.init_body_len;
     }
 
-    struct_type.clearTypesWip(ip);
+    struct_type.clearFieldTypesWip(ip);
     if (!any_inits) struct_type.setHaveFieldInits(ip);
 
     try sema.flushExports();
@@ -36467,7 +36440,7 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L
         }
     } else {
         // The provided type is the enum tag type.
-        union_type.tagTypePtr(ip).* = provided_ty.toIntern();
+        union_type.setTagType(ip, provided_ty.toIntern());
         const enum_type = switch (ip.indexToKey(provided_ty.toIntern())) {
             .enum_type => ip.loadEnumType(provided_ty.toIntern()),
             else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{provided_ty.fmt(pt)}),
@@ -36605,10 +36578,11 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L
         }
 
         if (explicit_tags_seen.len > 0) {
-            const tag_info = ip.loadEnumType(union_type.tagTypePtr(ip).*);
+            const tag_ty = union_type.tagTypeUnordered(ip);
+            const tag_info = ip.loadEnumType(tag_ty);
             const enum_index = tag_info.nameIndex(ip, field_name) orelse {
                 return sema.fail(&block_scope, name_src, "no field named '{}' in enum '{}'", .{
-                    field_name.fmt(ip), Type.fromInterned(union_type.tagTypePtr(ip).*).fmt(pt),
+                    field_name.fmt(ip), Type.fromInterned(tag_ty).fmt(pt),
                 });
             };
 
@@ -36645,7 +36619,7 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L
             };
             return sema.failWithOwnedErrorMsg(&block_scope, msg);
         }
-        const layout = union_type.getLayout(ip);
+        const layout = union_type.flagsUnordered(ip).layout;
         if (layout == .@"extern" and
             !try sema.validateExternType(field_ty, .union_field))
         {
@@ -36688,7 +36662,8 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L
     union_type.setFieldAligns(ip, field_aligns.items);
 
     if (explicit_tags_seen.len > 0) {
-        const tag_info = ip.loadEnumType(union_type.tagTypePtr(ip).*);
+        const tag_ty = union_type.tagTypeUnordered(ip);
+        const tag_info = ip.loadEnumType(tag_ty);
         if (tag_info.names.len > fields_len) {
             const msg = msg: {
                 const msg = try sema.errMsg(src, "enum field(s) missing in union", .{});
@@ -36696,21 +36671,21 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L
 
                 for (tag_info.names.get(ip), 0..) |field_name, field_index| {
                     if (explicit_tags_seen[field_index]) continue;
-                    try sema.addFieldErrNote(Type.fromInterned(union_type.tagTypePtr(ip).*), field_index, msg, "field '{}' missing, declared here", .{
+                    try sema.addFieldErrNote(Type.fromInterned(tag_ty), field_index, msg, "field '{}' missing, declared here", .{
                         field_name.fmt(ip),
                     });
                 }
-                try sema.addDeclaredHereNote(msg, Type.fromInterned(union_type.tagTypePtr(ip).*));
+                try sema.addDeclaredHereNote(msg, Type.fromInterned(tag_ty));
                 break :msg msg;
             };
             return sema.failWithOwnedErrorMsg(&block_scope, msg);
         }
     } else if (enum_field_vals.count() > 0) {
         const enum_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), zcu.declPtr(union_type.decl));
-        union_type.tagTypePtr(ip).* = enum_ty;
+        union_type.setTagType(ip, enum_ty);
     } else {
         const enum_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, zcu.declPtr(union_type.decl));
-        union_type.tagTypePtr(ip).* = enum_ty;
+        union_type.setTagType(ip, enum_ty);
    }
 
     try sema.flushExports();
@@ -37086,7 +37061,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
             try ty.resolveLayout(pt);
 
             const union_obj = ip.loadUnionType(ty.toIntern());
-            const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypePtr(ip).*))) orelse
+            const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypeUnordered(ip)))) orelse
                 return null;
             if (union_obj.field_types.len == 0) {
                 const only = try pt.intern(.{ .empty_enum_value = ty.toIntern() });
src/Type.zig (95 changed lines)
@ -605,17 +605,15 @@ pub fn hasRuntimeBitsAdvanced(
|
||||
|
||||
.union_type => {
|
||||
const union_type = ip.loadUnionType(ty.toIntern());
|
||||
switch (union_type.flagsPtr(ip).runtime_tag) {
|
||||
const union_flags = union_type.flagsUnordered(ip);
|
||||
switch (union_flags.runtime_tag) {
|
||||
.none => {
|
||||
if (union_type.flagsPtr(ip).status == .field_types_wip) {
|
||||
// In this case, we guess that hasRuntimeBits() for this type is true,
|
||||
// and then later if our guess was incorrect, we emit a compile error.
|
||||
union_type.flagsPtr(ip).assumed_runtime_bits = true;
|
||||
return true;
|
||||
}
|
||||
// In this case, we guess that hasRuntimeBits() for this type is true,
|
||||
// and then later if our guess was incorrect, we emit a compile error.
|
||||
if (union_type.assumeRuntimeBitsIfFieldTypesWip(ip)) return true;
|
||||
},
|
||||
.safety, .tagged => {
|
||||
const tag_ty = union_type.tagTypePtr(ip).*;
|
||||
const tag_ty = union_type.tagTypeUnordered(ip);
|
||||
// tag_ty will be `none` if this union's tag type is not resolved yet,
|
||||
// in which case we want control flow to continue down below.
|
||||
if (tag_ty != .none and
|
||||
@ -627,8 +625,8 @@ pub fn hasRuntimeBitsAdvanced(
|
||||
}
|
||||
switch (strat) {
|
||||
.sema => try ty.resolveFields(pt),
|
||||
.eager => assert(union_type.flagsPtr(ip).status.haveFieldTypes()),
|
||||
.lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes())
|
||||
.eager => assert(union_flags.status.haveFieldTypes()),
|
||||
.lazy => if (!union_flags.status.haveFieldTypes())
|
||||
return error.NeedLazy,
|
||||
}
|
||||
for (0..union_type.field_types.len) |field_index| {
|
||||
@ -745,8 +743,8 @@ pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool {
},
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
return switch (union_type.flagsPtr(ip).runtime_tag) {
.none, .safety => union_type.flagsPtr(ip).layout != .auto,
return switch (union_type.flagsUnordered(ip).runtime_tag) {
.none, .safety => union_type.flagsUnordered(ip).layout != .auto,
.tagged => false,
};
},
@ -1045,7 +1043,7 @@ pub fn abiAlignmentAdvanced(
if (struct_type.layout == .@"packed") {
switch (strat) {
.sema => try ty.resolveLayout(pt),
.lazy => if (struct_type.backingIntType(ip).* == .none) return .{
.lazy => if (struct_type.backingIntTypeUnordered(ip) == .none) return .{
.val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
@ -1053,10 +1051,10 @@ pub fn abiAlignmentAdvanced(
},
.eager => {},
}
return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(pt) };
return .{ .scalar = Type.fromInterned(struct_type.backingIntTypeUnordered(ip)).abiAlignment(pt) };
}

if (struct_type.flagsPtr(ip).alignment == .none) switch (strat) {
if (struct_type.flagsUnordered(ip).alignment == .none) switch (strat) {
.eager => unreachable, // struct alignment not resolved
.sema => try ty.resolveStructAlignment(pt),
.lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
@ -1065,7 +1063,7 @@ pub fn abiAlignmentAdvanced(
} })) },
};

return .{ .scalar = struct_type.flagsPtr(ip).alignment };
return .{ .scalar = struct_type.flagsUnordered(ip).alignment };
},
.anon_struct_type => |tuple| {
var big_align: Alignment = .@"1";
@ -1088,7 +1086,7 @@ pub fn abiAlignmentAdvanced(
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());

if (union_type.flagsPtr(ip).alignment == .none) switch (strat) {
if (union_type.flagsUnordered(ip).alignment == .none) switch (strat) {
.eager => unreachable, // union layout not resolved
.sema => try ty.resolveUnionAlignment(pt),
.lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
@ -1097,7 +1095,7 @@ pub fn abiAlignmentAdvanced(
} })) },
};

return .{ .scalar = union_type.flagsPtr(ip).alignment };
return .{ .scalar = union_type.flagsUnordered(ip).alignment };
},
.opaque_type => return .{ .scalar = .@"1" },
.enum_type => return .{
@ -1420,7 +1418,7 @@ pub fn abiSizeAdvanced(
.sema => try ty.resolveLayout(pt),
.lazy => switch (struct_type.layout) {
.@"packed" => {
if (struct_type.backingIntType(ip).* == .none) return .{
if (struct_type.backingIntTypeUnordered(ip) == .none) return .{
.val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
@ -1440,11 +1438,11 @@ pub fn abiSizeAdvanced(
}
switch (struct_type.layout) {
.@"packed" => return .{
.scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(pt),
.scalar = Type.fromInterned(struct_type.backingIntTypeUnordered(ip)).abiSize(pt),
},
.auto, .@"extern" => {
assert(struct_type.haveLayout(ip));
return .{ .scalar = struct_type.size(ip).* };
return .{ .scalar = struct_type.sizeUnordered(ip) };
},
}
},
@ -1464,7 +1462,7 @@ pub fn abiSizeAdvanced(
const union_type = ip.loadUnionType(ty.toIntern());
switch (strat) {
.sema => try ty.resolveLayout(pt),
.lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{
.lazy => if (!union_type.flagsUnordered(ip).status.haveLayout()) return .{
.val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
@ -1474,7 +1472,7 @@ pub fn abiSizeAdvanced(
}

assert(union_type.haveLayout(ip));
return .{ .scalar = union_type.size(ip).* };
return .{ .scalar = union_type.sizeUnordered(ip) };
},
.opaque_type => unreachable, // no size available
.enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(pt) },
@ -1788,7 +1786,7 @@ pub fn bitSizeAdvanced(
if (is_packed) try ty.resolveLayout(pt);
}
if (is_packed) {
return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(pt, strat);
return try Type.fromInterned(struct_type.backingIntTypeUnordered(ip)).bitSizeAdvanced(pt, strat);
}
return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
},
@ -1808,7 +1806,7 @@ pub fn bitSizeAdvanced(
if (!is_packed) {
return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
}
assert(union_type.flagsPtr(ip).status.haveFieldTypes());
assert(union_type.flagsUnordered(ip).status.haveFieldTypes());

var size: u64 = 0;
for (0..union_type.field_types.len) |field_index| {
@ -2056,9 +2054,10 @@ pub fn unionTagType(ty: Type, mod: *Module) ?Type {
else => return null,
}
const union_type = ip.loadUnionType(ty.toIntern());
switch (union_type.flagsPtr(ip).runtime_tag) {
const union_flags = union_type.flagsUnordered(ip);
switch (union_flags.runtime_tag) {
.tagged => {
assert(union_type.flagsPtr(ip).status.haveFieldTypes());
assert(union_flags.status.haveFieldTypes());
return Type.fromInterned(union_type.enum_tag_ty);
},
else => return null,
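Editor's note: the new `union_flags` local above is the other recurring shape in this diff: load the flags once, then make every decision against that one snapshot. Re-reading through the accessor at each use could see two different values if another thread stores in between; here, the tag check and the status assertion would no longer be guaranteed to agree. A self-contained sketch (types invented):

const RuntimeTag = enum(u2) { none, safety, tagged };
const Status = enum(u2) { none, field_types_wip, have_field_types, fully_resolved };

const Flags = packed struct(u32) {
    runtime_tag: RuntimeTag = .none,
    status: Status = .none,
    _padding: u28 = 0,
};

var flags_word: u32 = 0;

fn flagsUnordered() Flags {
    return @bitCast(@atomicLoad(u32, &flags_word, .unordered));
}

// Both checks see the same value because there is exactly one load.
fn hasResolvedTag() bool {
    const flags = flagsUnordered();
    return flags.runtime_tag == .tagged and flags.status != .field_types_wip;
}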
@ -2135,7 +2134,7 @@ pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).layout,
.anon_struct_type => .auto,
.union_type => ip.loadUnionType(ty.toIntern()).flagsPtr(ip).layout,
.union_type => ip.loadUnionType(ty.toIntern()).flagsUnordered(ip).layout,
else => unreachable,
};
}
@ -2157,7 +2156,7 @@ pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool {
.anyerror_type, .adhoc_inferred_error_set_type => false,
else => switch (ip.indexToKey(ty.toIntern())) {
.error_set_type => |error_set_type| error_set_type.names.len == 0,
.inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
.inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) {
.none, .anyerror_type => false,
else => |t| ip.indexToKey(t).error_set_type.names.len == 0,
},
@ -2175,7 +2174,7 @@ pub fn isAnyError(ty: Type, mod: *Module) bool {
.anyerror_type => true,
.adhoc_inferred_error_set_type => false,
else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.inferred_error_set_type => |i| ip.funcIesResolved(i).* == .anyerror_type,
.inferred_error_set_type => |i| ip.funcIesResolvedUnordered(i) == .anyerror_type,
else => false,
},
};
@ -2200,7 +2199,7 @@ pub fn errorSetHasFieldIp(
.anyerror_type => true,
else => switch (ip.indexToKey(ty)) {
.error_set_type => |error_set_type| error_set_type.nameIndex(ip, name) != null,
.inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
.inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) {
.anyerror_type => true,
.none => false,
else => |t| ip.indexToKey(t).error_set_type.nameIndex(ip, name) != null,
@ -2336,7 +2335,7 @@ pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType {
.c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) },
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| return int_type,
.struct_type => ty = Type.fromInterned(ip.loadStructType(ty.toIntern()).backingIntType(ip).*),
.struct_type => ty = Type.fromInterned(ip.loadStructType(ty.toIntern()).backingIntTypeUnordered(ip)),
.enum_type => ty = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty),
.vector_type => |vector_type| ty = Type.fromInterned(vector_type.child),

@ -2826,17 +2825,18 @@ pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) Se
return false;

// A struct with no fields is not comptime-only.
return switch (struct_type.flagsPtr(ip).requires_comptime) {
return switch (struct_type.setRequiresComptimeWip(ip)) {
.no, .wip => false,
.yes => true,
.unknown => {
assert(strat == .sema);

if (struct_type.flagsPtr(ip).field_types_wip)
if (struct_type.flagsUnordered(ip).field_types_wip) {
struct_type.setRequiresComptime(ip, .unknown);
return false;
}

struct_type.flagsPtr(ip).requires_comptime = .wip;
errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown;
errdefer struct_type.setRequiresComptime(ip, .unknown);

try ty.resolveFields(pt);

@ -2849,12 +2849,12 @@ pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) Se
// be considered resolved. Comptime-only types
// still maintain a layout of their
// runtime-known fields.
struct_type.flagsPtr(ip).requires_comptime = .yes;
struct_type.setRequiresComptime(ip, .yes);
return true;
}
}

struct_type.flagsPtr(ip).requires_comptime = .no;
struct_type.setRequiresComptime(ip, .no);
return false;
},
};
@ -2870,29 +2870,30 @@ pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) Se

.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
switch (union_type.flagsPtr(ip).requires_comptime) {
switch (union_type.setRequiresComptimeWip(ip)) {
.no, .wip => return false,
.yes => return true,
.unknown => {
assert(strat == .sema);

if (union_type.flagsPtr(ip).status == .field_types_wip)
if (union_type.flagsUnordered(ip).status == .field_types_wip) {
union_type.setRequiresComptime(ip, .unknown);
return false;
}

union_type.flagsPtr(ip).requires_comptime = .wip;
errdefer union_type.flagsPtr(ip).requires_comptime = .unknown;
errdefer union_type.setRequiresComptime(ip, .unknown);

try ty.resolveFields(pt);

for (0..union_type.field_types.len) |field_idx| {
const field_ty = union_type.field_types.get(ip)[field_idx];
if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(pt, strat)) {
union_type.flagsPtr(ip).requires_comptime = .yes;
union_type.setRequiresComptime(ip, .yes);
return true;
}
}

union_type.flagsPtr(ip).requires_comptime = .no;
union_type.setRequiresComptime(ip, .no);
return false;
},
}
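Editor's note: `setRequiresComptimeWip` condenses a read followed by a later `.unknown` -> `.wip` store into one operation: report the value that was there, and claim the `.unknown` slot if it was free, so exactly one thread wins the right to resolve the answer. A sketch under the assumption that the field can be updated with a compare-and-swap (stored here in its own byte for simplicity; the real field lives inside the packed flags word):

const RequiresComptime = enum(u8) { unknown, no, yes, wip };

var requires_comptime: u8 = @intFromEnum(RequiresComptime.unknown);

// Returns the previously visible value; on `.unknown` it atomically
// transitions to `.wip`, so only the winning caller observes `.unknown`.
fn setRequiresComptimeWip() RequiresComptime {
    var old = @atomicLoad(u8, &requires_comptime, .unordered);
    while (true) {
        const seen: RequiresComptime = @enumFromInt(old);
        if (seen != .unknown) return seen;
        old = @cmpxchgWeak(u8, &requires_comptime, old, @intFromEnum(RequiresComptime.wip), .monotonic, .monotonic) orelse return .unknown;
    }
}

That matches the switch above: `.no`, `.wip`, and `.yes` are answered immediately, and the `.unknown` arm is taken by the one thread that now owns resolution, with `errdefer` resetting the value to `.unknown` on failure.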
@ -3117,7 +3118,7 @@ pub fn errorSetNames(ty: Type, mod: *Module) InternPool.NullTerminatedString.Sli
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.error_set_type => |x| x.names,
.inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
.inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) {
.none => unreachable, // unresolved inferred error set
.anyerror_type => unreachable,
else => |t| ip.indexToKey(t).error_set_type.names,
@ -3374,7 +3375,7 @@ pub fn isTuple(ty: Type, mod: *Module) bool {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .@"packed") return false;
if (struct_type.decl == .none) return false;
return struct_type.flagsPtr(ip).is_tuple;
return struct_type.flagsUnordered(ip).is_tuple;
},
.anon_struct_type => |anon_struct| anon_struct.names.len == 0,
else => false,
@ -3396,7 +3397,7 @@ pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .@"packed") return false;
if (struct_type.decl == .none) return false;
return struct_type.flagsPtr(ip).is_tuple;
return struct_type.flagsUnordered(ip).is_tuple;
},
.anon_struct_type => true,
else => false,

@ -558,7 +558,7 @@ pub fn writeToPackedMemory(
},
.Union => {
const union_obj = mod.typeToUnion(ty).?;
switch (union_obj.getLayout(ip)) {
switch (union_obj.flagsUnordered(ip).layout) {
.auto, .@"extern" => unreachable, // Handled in non-packed writeToMemory
.@"packed" => {
if (val.unionTag(mod)) |union_tag| {

@ -2968,7 +2968,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index)
const is_outdated = mod.outdated.contains(func_as_depender) or
mod.potentially_outdated.contains(func_as_depender);

switch (func.analysis(ip).state) {
switch (func.analysisUnordered(ip).state) {
.none => {},
.queued => return,
// As above, we don't need to forward errors here.
@ -2983,13 +2983,13 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index)

// Decl itself is safely analyzed, and body analysis is not yet queued

try mod.comp.work_queue.writeItem(.{ .analyze_func = func_index });
try mod.comp.queueJob(.{ .analyze_func = func_index });
if (mod.emit_h != null) {
// TODO: we ideally only want to do this if the function's type changed
// since the last update
try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index });
try mod.comp.queueJob(.{ .emit_h_decl = decl_index });
}
func.analysis(ip).state = .queued;
func.setAnalysisState(ip, .queued);
}

pub const SemaDeclResult = packed struct {

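Editor's note: every `work_queue.writeItem`/`writeItemAssumeCapacity` call in this diff becomes `queueJob` (and the capacity pre-reservation in `scanNamespace` is dropped below, which is why those call sites gain `try`). The definition of `queueJob` is not shown in this diff; given the `work_queues` array and `Job.stage` added in Compilation.zig earlier, a plausible reconstruction is a dispatch on the job's stage, so stage-0 jobs (type resolution, function analysis, codegen) drain ahead of everything else:

const std = @import("std");

const Job = union(enum) {
    analyze_func: u32,
    codegen_func: u32,
    update_line_number: u32,

    const Tag = @typeInfo(Job).Union.tag_type.?;
    fn stage(tag: Tag) usize {
        return switch (tag) {
            .analyze_func, .codegen_func => 0,
            else => 1,
        };
    }
};

const Compilation = struct {
    work_queues: [2]std.fifo.LinearFifo(Job, .Dynamic),

    // Hypothetical shape of queueJob; the real routing lives in
    // Compilation.zig and may differ in detail.
    fn queueJob(comp: *Compilation, job: Job) !void {
        try comp.work_queues[Job.stage(std.meta.activeTag(job))].writeItem(job);
    }
};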
@ -641,8 +641,8 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter

// We'll want to remember what the IES used to be before the update for
// dependency invalidation purposes.
const old_resolved_ies = if (func.analysis(ip).inferred_error_set)
func.resolvedErrorSet(ip).*
const old_resolved_ies = if (func.analysisUnordered(ip).inferred_error_set)
func.resolvedErrorSetUnordered(ip)
else
.none;

@ -671,7 +671,7 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
zcu.deleteUnitReferences(func_as_depender);
}

switch (func.analysis(ip).state) {
switch (func.analysisUnordered(ip).state) {
.success => if (!was_outdated) return,
.sema_failure,
.dependency_failure,
@ -693,11 +693,11 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter

var air = pt.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) {
error.AnalysisFail => {
if (func.analysis(ip).state == .in_progress) {
if (func.analysisUnordered(ip).state == .in_progress) {
// If this decl caused the compile error, the analysis field would
// be changed to indicate it was this Decl's fault. Because this
// did not happen, we infer here that it was a dependency failure.
func.analysis(ip).state = .dependency_failure;
func.setAnalysisState(ip, .dependency_failure);
}
return error.AnalysisFail;
},
@ -707,8 +707,8 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter

const invalidate_ies_deps = i: {
if (!was_outdated) break :i false;
if (!func.analysis(ip).inferred_error_set) break :i true;
const new_resolved_ies = func.resolvedErrorSet(ip).*;
if (!func.analysisUnordered(ip).inferred_error_set) break :i true;
const new_resolved_ies = func.resolvedErrorSetUnordered(ip);
break :i new_resolved_ies != old_resolved_ies;
};
if (invalidate_ies_deps) {
@ -729,7 +729,7 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
return;
}

try comp.work_queue.writeItem(.{ .codegen_func = .{
try comp.queueJob(.{ .codegen_func = .{
.func = func_index,
.air = air,
} });
@ -783,7 +783,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
.{@errorName(err)},
),
);
func.analysis(ip).state = .codegen_failure;
func.setAnalysisState(ip, .codegen_failure);
return;
},
};
@ -797,12 +797,12 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
// Correcting this failure will involve changing a type this function
// depends on, hence triggering re-analysis of this function, so this
// interacts correctly with incremental compilation.
func.analysis(ip).state = .codegen_failure;
func.setAnalysisState(ip, .codegen_failure);
} else if (comp.bin_file) |lf| {
lf.updateFunc(pt, func_index, air, liveness) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
func.analysis(ip).state = .codegen_failure;
func.setAnalysisState(ip, .codegen_failure);
},
else => {
try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
@ -812,7 +812,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
"unable to codegen: {s}",
.{@errorName(err)},
));
func.analysis(ip).state = .codegen_failure;
func.setAnalysisState(ip, .codegen_failure);
try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .func = func_index }));
},
};
@ -903,7 +903,7 @@ fn getFileRootStruct(
decl.analysis = .complete;

try pt.scanNamespace(namespace_index, decls, decl);
try zcu.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
return wip_ty.finish(ip, decl_index, namespace_index.toOptional());
}

@ -1080,7 +1080,7 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult {
const old_linksection = decl.@"linksection";
const old_addrspace = decl.@"addrspace";
const old_is_inline = if (decl.getOwnedFunction(zcu)) |prev_func|
prev_func.analysis(ip).state == .inline_only
prev_func.analysisUnordered(ip).state == .inline_only
else
false;

@ -1311,10 +1311,10 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult {
// codegen backend wants full access to the Decl Type.
try decl_ty.resolveFully(pt);

try zcu.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });
try zcu.comp.queueJob(.{ .codegen_decl = decl_index });

if (result.invalidate_decl_ref and zcu.emit_h != null) {
try zcu.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index });
try zcu.comp.queueJob(.{ .emit_h_decl = decl_index });
}
}

@ -1740,8 +1740,6 @@ pub fn scanNamespace(
var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
defer seen_decls.deinit(gpa);

try zcu.comp.work_queue.ensureUnusedCapacity(decls.len);

namespace.decls.clearRetainingCapacity();
try namespace.decls.ensureTotalCapacity(gpa, decls.len);

@ -1967,7 +1965,7 @@ const ScanDeclIter = struct {
log.debug("scanDecl queue analyze_decl file='{s}' decl_name='{}' decl_index={d}", .{
namespace.fileScope(zcu).sub_file_path, decl_name.fmt(ip), decl_index,
});
comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = decl_index });
try comp.queueJob(.{ .analyze_decl = decl_index });
}
}

@ -1976,7 +1974,7 @@ const ScanDeclIter = struct {
// updated line numbers. Look into this!
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
try comp.queueJob(.{ .update_line_number = decl_index });
}
}
};
@ -1991,7 +1989,7 @@ pub fn abortAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) void {
/// Finalize the creation of an anon decl.
pub fn finalizeAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Allocator.Error!void {
if (pt.zcu.declPtr(decl_index).typeOf(pt.zcu).isFnOrHasRuntimeBits(pt)) {
try pt.zcu.comp.queueJob(.{ .codegen_decl = decl_index });
}
}

@ -2037,7 +2035,7 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
.fn_ret_ty = Type.fromInterned(fn_ty_info.return_type),
.fn_ret_ty_ies = null,
.owner_func_index = func_index,
.branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota),
.branch_quota = @max(func.branchQuotaUnordered(ip), Sema.default_branch_quota),
.comptime_err_ret_trace = &comptime_err_ret_trace,
};
defer sema.deinit();
@ -2047,14 +2045,14 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
try sema.declareDependency(.{ .src_hash = decl.zir_decl_index.unwrap().? });
try sema.declareDependency(.{ .decl_val = decl_index });

if (func.analysis(ip).inferred_error_set) {
if (func.analysisUnordered(ip).inferred_error_set) {
const ies = try arena.create(Sema.InferredErrorSet);
ies.* = .{ .func = func_index };
sema.fn_ret_ty_ies = ies;
}

// reset in case calls to errorable functions are removed.
func.analysis(ip).calls_or_awaits_errorable_fn = false;
func.setCallsOrAwaitsErrorableFn(ip, false);

// First few indexes of extra are reserved and set at the end.
const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len;
@ -2080,7 +2078,7 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
};
defer inner_block.instructions.deinit(gpa);

const fn_info = sema.code.getFnInfo(func.zirBodyInst(ip).resolve(ip));
const fn_info = sema.code.getFnInfo(func.zirBodyInstUnordered(ip).resolve(ip));

// Here we are performing "runtime semantic analysis" for a function body, which means
// we must map the parameter ZIR instructions to `arg` AIR instructions.
@ -2149,7 +2147,7 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
});
}

func.analysis(ip).state = .in_progress;
func.setAnalysisState(ip, .in_progress);

const last_arg_index = inner_block.instructions.items.len;

@ -2176,7 +2174,7 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
}

// If we don't get an error return trace from a caller, create our own.
if (func.analysis(ip).calls_or_awaits_errorable_fn and
if (func.analysisUnordered(ip).calls_or_awaits_errorable_fn and
mod.comp.config.any_error_tracing and
!sema.fn_ret_ty.isError(mod))
{
@ -2218,10 +2216,10 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
else => |e| return e,
};
assert(ies.resolved != .none);
ip.funcIesResolved(func_index).* = ies.resolved;
ip.funcSetIesResolved(func_index, ies.resolved);
}

func.analysis(ip).state = .success;
func.setAnalysisState(ip, .success);

// Finally we must resolve the return type and parameter types so that backends
// have full access to type information.
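Editor's note: observe the ordering at the end of `analyzeFnBody`: the resolved inferred error set is published via `funcSetIesResolved` before `setAnalysisState(ip, .success)`. If another thread gates its reads on seeing `.success`, the flag store wants release semantics paired with an acquire load on the reader side. A sketch of that protocol (the orderings here are an assumption; the real choice lives inside the InternPool helpers):

var resolved_ies: u32 = 0; // payload written only by the analysis thread
var state: u8 = 0; // 0 = in progress, 1 = success

fn publish(ies: u32) void {
    @atomicStore(u32, &resolved_ies, ies, .unordered); // payload first
    @atomicStore(u8, &state, 1, .release); // flag last, with release
}

fn tryRead() ?u32 {
    // The acquire pairs with the release above: a reader that sees 1 is
    // guaranteed to also see the payload store.
    if (@atomicLoad(u8, &state, .acquire) != 1) return null;
    return @atomicLoad(u32, &resolved_ies, .unordered);
}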
@ -2415,6 +2413,7 @@ fn processExportsInner(
) error{OutOfMemory}!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;

for (export_indices) |export_idx| {
const new_export = &zcu.all_exports.items[export_idx];
@ -2423,7 +2422,7 @@ fn processExportsInner(
new_export.status = .failed_retryable;
try zcu.failed_exports.ensureUnusedCapacity(gpa, 1);
const msg = try Zcu.ErrorMsg.create(gpa, new_export.src, "exported symbol collision: {}", .{
new_export.opts.name.fmt(&zcu.intern_pool),
new_export.opts.name.fmt(ip),
});
errdefer msg.destroy(gpa);
const other_export = zcu.all_exports.items[gop.value_ptr.*];
@ -2443,8 +2442,7 @@ fn processExportsInner(
if (!decl.owns_tv) break :failed false;
if (decl.typeOf(zcu).zigTypeTag(zcu) != .Fn) break :failed false;
// Check if owned function failed
const a = zcu.funcInfo(decl.val.toIntern()).analysis(&zcu.intern_pool);
break :failed a.state != .success;
break :failed zcu.funcInfo(decl.val.toIntern()).analysisUnordered(ip).state != .success;
}) {
// This `Decl` is failed, so was never sent to codegen.
// TODO: we should probably tell the backend to delete any old exports of this `Decl`?
@ -3072,7 +3070,7 @@ pub fn getUnionLayout(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionTyp
most_aligned_field_size = field_size;
}
}
const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag();
const have_tag = loaded_union.flagsUnordered(ip).runtime_tag.hasTag();
if (!have_tag or !Type.fromInterned(loaded_union.enum_tag_ty).hasRuntimeBits(pt)) {
return .{
.abi_size = payload_align.forward(payload_size),
@ -3091,7 +3089,7 @@ pub fn getUnionLayout(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionTyp
const tag_size = Type.fromInterned(loaded_union.enum_tag_ty).abiSize(pt);
const tag_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(pt).max(.@"1");
return .{
.abi_size = loaded_union.size(ip).*,
.abi_size = loaded_union.sizeUnordered(ip),
.abi_align = tag_align.max(payload_align),
.most_aligned_field = most_aligned_field,
.most_aligned_field_size = most_aligned_field_size,
@ -3100,7 +3098,7 @@ pub fn getUnionLayout(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionTyp
.payload_align = payload_align,
.tag_align = tag_align,
.tag_size = tag_size,
.padding = loaded_union.padding(ip).*,
.padding = loaded_union.paddingUnordered(ip),
};
}

@ -3142,7 +3140,7 @@ pub fn unionFieldNormalAlignmentAdvanced(
strat: Type.ResolveStrat,
) Zcu.SemaError!InternPool.Alignment {
const ip = &pt.zcu.intern_pool;
assert(loaded_union.flagsPtr(ip).layout != .@"packed");
assert(loaded_union.flagsUnordered(ip).layout != .@"packed");
const field_align = loaded_union.fieldAlign(ip, field_index);
if (field_align != .none) return field_align;
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);

@ -56,7 +56,7 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread, ctx: Context) Class {
.Union => {
const bit_size = ty.bitSize(pt);
const union_obj = pt.zcu.typeToUnion(ty).?;
if (union_obj.getLayout(ip) == .@"packed") {
if (union_obj.flagsUnordered(ip).layout == .@"packed") {
if (bit_size > 64) return .memory;
return .byval;
}

@ -768,7 +768,7 @@ pub fn generate(
@intFromEnum(FrameIndex.stack_frame),
FrameAlloc.init(.{
.size = 0,
.alignment = func.analysis(ip).stack_alignment.max(.@"1"),
.alignment = func.analysisUnordered(ip).stack_alignment.max(.@"1"),
}),
);
function.frame_allocs.set(

@ -1011,7 +1011,7 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread) wasm.Valtype {
},
.Struct => {
if (pt.zcu.typeToPackedStruct(ty)) |packed_struct| {
return typeToValtype(Type.fromInterned(packed_struct.backingIntType(ip).*), pt);
return typeToValtype(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt);
} else {
return wasm.Valtype.i32;
}
@ -1746,7 +1746,7 @@ fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
=> return ty.hasRuntimeBitsIgnoreComptime(pt),
.Union => {
if (mod.typeToUnion(ty)) |union_obj| {
if (union_obj.getLayout(ip) == .@"packed") {
if (union_obj.flagsUnordered(ip).layout == .@"packed") {
return ty.abiSize(pt) > 8;
}
}
@ -1754,7 +1754,7 @@ fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
},
.Struct => {
if (mod.typeToPackedStruct(ty)) |packed_struct| {
return isByRef(Type.fromInterned(packed_struct.backingIntType(ip).*), pt);
return isByRef(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt);
}
return ty.hasRuntimeBitsIgnoreComptime(pt);
},
@ -3377,7 +3377,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
assert(struct_type.layout == .@"packed");
var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
val.writeToPackedMemory(ty, pt, &buf, 0) catch unreachable;
const backing_int_ty = Type.fromInterned(struct_type.backingIntType(ip).*);
const backing_int_ty = Type.fromInterned(struct_type.backingIntTypeUnordered(ip));
const int_val = try pt.intValue(
backing_int_ty,
mem.readInt(u64, &buf, .little),
@ -3443,7 +3443,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
},
.Struct => {
const packed_struct = mod.typeToPackedStruct(ty).?;
return func.emitUndefined(Type.fromInterned(packed_struct.backingIntType(ip).*));
return func.emitUndefined(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)));
},
else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}),
}
@ -3974,7 +3974,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.Struct => result: {
const packed_struct = mod.typeToPackedStruct(struct_ty).?;
const offset = pt.structPackedFieldBitOffset(packed_struct, field_index);
const backing_ty = Type.fromInterned(packed_struct.backingIntType(ip).*);
const backing_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse {
return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
};
@ -5377,7 +5377,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
const packed_struct = mod.typeToPackedStruct(result_ty).?;
const field_types = packed_struct.field_types;
const backing_type = Type.fromInterned(packed_struct.backingIntType(ip).*);
const backing_type = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));

// ensure the result is zero'd
const result = try func.allocLocal(backing_type);

@ -71,7 +71,7 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
},
.Union => {
const union_obj = pt.zcu.typeToUnion(ty).?;
if (union_obj.getLayout(ip) == .@"packed") {
if (union_obj.flagsUnordered(ip).layout == .@"packed") {
if (ty.bitSize(pt) <= 64) return direct;
return .{ .direct, .direct };
}
@ -107,7 +107,7 @@ pub fn scalarType(ty: Type, pt: Zcu.PerThread) Type {
switch (ty.zigTypeTag(mod)) {
.Struct => {
if (mod.typeToPackedStruct(ty)) |packed_struct| {
return scalarType(Type.fromInterned(packed_struct.backingIntType(ip).*), pt);
return scalarType(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt);
} else {
assert(ty.structFieldCount(mod) == 1);
return scalarType(ty.structFieldType(0, mod), pt);
@ -115,7 +115,7 @@ pub fn scalarType(ty: Type, pt: Zcu.PerThread) Type {
},
.Union => {
const union_obj = mod.typeToUnion(ty).?;
if (union_obj.getLayout(ip) != .@"packed") {
if (union_obj.flagsUnordered(ip).layout != .@"packed") {
const layout = pt.getUnionLayout(union_obj);
if (layout.payload_size == 0 and layout.tag_size != 0) {
return scalarType(ty.unionTagTypeSafety(mod).?, pt);

@ -856,7 +856,7 @@ pub fn generate(
@intFromEnum(FrameIndex.stack_frame),
FrameAlloc.init(.{
.size = 0,
.alignment = func.analysis(ip).stack_alignment.max(.@"1"),
.alignment = func.analysisUnordered(ip).stack_alignment.max(.@"1"),
}),
);
function.frame_allocs.set(

@ -349,7 +349,7 @@ fn classifySystemVStruct(
.@"packed" => {},
}
} else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| {
switch (field_loaded_union.getLayout(ip)) {
switch (field_loaded_union.flagsUnordered(ip).layout) {
.auto, .@"extern" => {
byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, pt, target);
continue;
@ -362,11 +362,11 @@ fn classifySystemVStruct(
result_class.* = result_class.combineSystemV(field_class);
byte_offset += field_ty.abiSize(pt);
}
const final_byte_offset = starting_byte_offset + loaded_struct.size(ip).*;
const final_byte_offset = starting_byte_offset + loaded_struct.sizeUnordered(ip);
std.debug.assert(final_byte_offset == std.mem.alignForward(
u64,
byte_offset,
loaded_struct.flagsPtr(ip).alignment.toByteUnits().?,
loaded_struct.flagsUnordered(ip).alignment.toByteUnits().?,
));
return final_byte_offset;
}
@ -390,7 +390,7 @@ fn classifySystemVUnion(
.@"packed" => {},
}
} else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| {
switch (field_loaded_union.getLayout(ip)) {
switch (field_loaded_union.flagsUnordered(ip).layout) {
.auto, .@"extern" => {
_ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, pt, target);
continue;
@ -402,7 +402,7 @@ fn classifySystemVUnion(
for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
result_class.* = result_class.combineSystemV(field_class);
}
return starting_byte_offset + loaded_union.size(ip).*;
return starting_byte_offset + loaded_union.sizeUnordered(ip);
}

pub const SysV = struct {

@ -548,8 +548,8 @@ pub fn generateSymbol(
}
}

const size = struct_type.size(ip).*;
const alignment = struct_type.flagsPtr(ip).alignment.toByteUnits().?;
const size = struct_type.sizeUnordered(ip);
const alignment = struct_type.flagsUnordered(ip).alignment.toByteUnits().?;

const padding = math.cast(
usize,

@ -1366,7 +1366,7 @@ pub const DeclGen = struct {
const loaded_union = ip.loadUnionType(ty.toIntern());
if (un.tag == .none) {
const backing_ty = try ty.unionBackingType(pt);
switch (loaded_union.getLayout(ip)) {
switch (loaded_union.flagsUnordered(ip).layout) {
.@"packed" => {
if (!location.isInitializer()) {
try writer.writeByte('(');
@ -1401,7 +1401,7 @@ pub const DeclGen = struct {
const field_index = zcu.unionTagFieldIndex(loaded_union, Value.fromInterned(un.tag)).?;
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index];
if (loaded_union.getLayout(ip) == .@"packed") {
if (loaded_union.flagsUnordered(ip).layout == .@"packed") {
if (field_ty.hasRuntimeBits(pt)) {
if (field_ty.isPtrAtRuntime(zcu)) {
try writer.writeByte('(');
@ -1629,7 +1629,7 @@ pub const DeclGen = struct {
},
.union_type => {
const loaded_union = ip.loadUnionType(ty.toIntern());
switch (loaded_union.getLayout(ip)) {
switch (loaded_union.flagsUnordered(ip).layout) {
.auto, .@"extern" => {
if (!location.isInitializer()) {
try writer.writeByte('(');
@ -1792,7 +1792,7 @@ pub const DeclGen = struct {
else => unreachable,
}
}
if (fn_val.getFunction(zcu)) |func| if (func.analysis(ip).is_cold)
if (fn_val.getFunction(zcu)) |func| if (func.analysisUnordered(ip).is_cold)
try w.writeAll("zig_cold ");
if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn ");

@ -5527,7 +5527,7 @@ fn fieldLocation(
.{ .field = field_index } },
.union_type => {
const loaded_union = ip.loadUnionType(container_ty.toIntern());
switch (loaded_union.getLayout(ip)) {
switch (loaded_union.flagsUnordered(ip).layout) {
.auto, .@"extern" => {
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt))
@ -5763,7 +5763,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
.{ .field = extra.field_index },
.union_type => field_name: {
const loaded_union = ip.loadUnionType(struct_ty.toIntern());
switch (loaded_union.getLayout(ip)) {
switch (loaded_union.flagsUnordered(ip).layout) {
.auto, .@"extern" => {
const name = loaded_union.loadTagType(ip).names.get(ip)[extra.field_index];
break :field_name if (loaded_union.hasTag(ip))
@ -7267,7 +7267,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {

const writer = f.object.writer();
const local = try f.allocLocal(inst, union_ty);
if (loaded_union.getLayout(ip) == .@"packed") return f.moveCValue(inst, union_ty, payload);
if (loaded_union.flagsUnordered(ip).layout == .@"packed") return f.moveCValue(inst, union_ty, payload);

const field: CValue = if (union_ty.unionTagTypeSafety(zcu)) |tag_ty| field: {
const layout = union_ty.unionGetLayout(pt);

@ -1744,7 +1744,7 @@ pub const Pool = struct {
.@"packed" => return pool.fromType(
allocator,
scratch,
Type.fromInterned(loaded_struct.backingIntType(ip).*),
Type.fromInterned(loaded_struct.backingIntTypeUnordered(ip)),
pt,
mod,
kind,
@ -1817,7 +1817,7 @@ pub const Pool = struct {
},
.union_type => {
const loaded_union = ip.loadUnionType(ip_index);
switch (loaded_union.getLayout(ip)) {
switch (loaded_union.flagsUnordered(ip).layout) {
.auto, .@"extern" => {
const has_tag = loaded_union.hasTag(ip);
const fwd_decl = try pool.getFwdDecl(allocator, .{

@ -1086,7 +1086,7 @@ pub const Object = struct {
// If there is no such function in the module, it means the source code does not need it.
const name = o.builder.strtabStringIfExists(lt_errors_fn_name) orelse return;
const llvm_fn = o.builder.getGlobal(name) orelse return;
const errors_len = o.pt.zcu.intern_pool.global_error_set.mutate.list.len;
const errors_len = o.pt.zcu.intern_pool.global_error_set.getNamesFromMainThread().len;

var wip = try Builder.WipFunction.init(&o.builder, .{
.function = llvm_fn.ptrConst(&o.builder).kind.function,
@ -1385,13 +1385,14 @@ pub const Object = struct {
var attributes = try function_index.ptrConst(&o.builder).attributes.toWip(&o.builder);
defer attributes.deinit(&o.builder);

if (func.analysis(ip).is_noinline) {
const func_analysis = func.analysisUnordered(ip);
if (func_analysis.is_noinline) {
try attributes.addFnAttr(.@"noinline", &o.builder);
} else {
_ = try attributes.removeFnAttr(.@"noinline");
}

const stack_alignment = func.analysis(ip).stack_alignment;
const stack_alignment = func.analysisUnordered(ip).stack_alignment;
if (stack_alignment != .none) {
try attributes.addFnAttr(.{ .alignstack = stack_alignment.toLlvm() }, &o.builder);
try attributes.addFnAttr(.@"noinline", &o.builder);
@ -1399,7 +1400,7 @@ pub const Object = struct {
_ = try attributes.removeFnAttr(.alignstack);
}

if (func.analysis(ip).is_cold) {
if (func_analysis.is_cold) {
try attributes.addFnAttr(.cold, &o.builder);
} else {
_ = try attributes.removeFnAttr(.cold);
@ -1624,7 +1625,7 @@ pub const Object = struct {
llvm_arg_i += 1;

const alignment = param_ty.abiAlignment(pt).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
const arg_ptr = try buildAllocaInner(&wip, param.typeOfWip(&wip), alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);

args.appendAssumeCapacity(if (isByRef(param_ty, pt))
@ -2403,7 +2404,7 @@ pub const Object = struct {
defer gpa.free(name);

if (zcu.typeToPackedStruct(ty)) |struct_type| {
const backing_int_ty = struct_type.backingIntType(ip).*;
const backing_int_ty = struct_type.backingIntTypeUnordered(ip);
if (backing_int_ty != .none) {
const info = Type.fromInterned(backing_int_ty).intInfo(zcu);
const builder_name = try o.builder.metadataString(name);
@ -2615,7 +2616,7 @@ pub const Object = struct {
if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;

const field_size = Type.fromInterned(field_ty).abiSize(pt);
const field_align: InternPool.Alignment = switch (union_type.flagsPtr(ip).layout) {
const field_align: InternPool.Alignment = switch (union_type.flagsUnordered(ip).layout) {
.@"packed" => .none,
.auto, .@"extern" => pt.unionFieldNormalAlignment(union_type, @intCast(field_index)),
};
@ -3303,7 +3304,7 @@ pub const Object = struct {
const struct_type = ip.loadStructType(t.toIntern());

if (struct_type.layout == .@"packed") {
const int_ty = try o.lowerType(Type.fromInterned(struct_type.backingIntType(ip).*));
const int_ty = try o.lowerType(Type.fromInterned(struct_type.backingIntTypeUnordered(ip)));
try o.type_map.put(o.gpa, t.toIntern(), int_ty);
return int_ty;
}
@ -3346,7 +3347,7 @@ pub const Object = struct {
// This is a zero-bit field. If there are runtime bits after this field,
// map to the next LLVM field (which we know exists): otherwise, don't
// map the field, indicating it's at the end of the struct.
if (offset != struct_type.size(ip).*) {
if (offset != struct_type.sizeUnordered(ip)) {
try o.struct_field_map.put(o.gpa, .{
.struct_ty = t.toIntern(),
.field_index = field_index,
@ -3450,7 +3451,7 @@ pub const Object = struct {
const union_obj = ip.loadUnionType(t.toIntern());
const layout = pt.getUnionLayout(union_obj);

if (union_obj.flagsPtr(ip).layout == .@"packed") {
if (union_obj.flagsUnordered(ip).layout == .@"packed") {
const int_ty = try o.builder.intType(@intCast(t.bitSize(pt)));
try o.type_map.put(o.gpa, t.toIntern(), int_ty);
return int_ty;
@ -3697,7 +3698,7 @@ pub const Object = struct {
if (layout.payload_size == 0) return o.lowerValue(un.tag);

const union_obj = mod.typeToUnion(ty).?;
const container_layout = union_obj.getLayout(ip);
const container_layout = union_obj.flagsUnordered(ip).layout;

assert(container_layout == .@"packed");

@ -4205,7 +4206,7 @@ pub const Object = struct {
if (layout.payload_size == 0) return o.lowerValue(un.tag);

const union_obj = mod.typeToUnion(ty).?;
const container_layout = union_obj.getLayout(ip);
const container_layout = union_obj.flagsUnordered(ip).layout;

var need_unnamed = false;
const payload = if (un.tag != .none) p: {
@ -10045,7 +10046,7 @@ pub const FuncGen = struct {
},
.Struct => {
if (mod.typeToPackedStruct(result_ty)) |struct_type| {
const backing_int_ty = struct_type.backingIntType(ip).*;
const backing_int_ty = struct_type.backingIntTypeUnordered(ip);
assert(backing_int_ty != .none);
const big_bits = Type.fromInterned(backing_int_ty).bitSize(pt);
const int_ty = try o.builder.intType(@intCast(big_bits));
@ -10155,7 +10156,7 @@ pub const FuncGen = struct {
const layout = union_ty.unionGetLayout(pt);
const union_obj = mod.typeToUnion(union_ty).?;

if (union_obj.getLayout(ip) == .@"packed") {
if (union_obj.flagsUnordered(ip).layout == .@"packed") {
const big_bits = union_ty.bitSize(pt);
const int_llvm_ty = try o.builder.intType(@intCast(big_bits));
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
@ -11281,7 +11282,7 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.E
.struct_type => {
const struct_type = ip.loadStructType(return_type.toIntern());
assert(struct_type.haveLayout(ip));
const size: u64 = struct_type.size(ip).*;
const size: u64 = struct_type.sizeUnordered(ip);
assert((std.math.divCeil(u64, size, 8) catch unreachable) == types_index);
if (size % 8 > 0) {
types_buffer[types_index - 1] = try o.builder.intType(@intCast(size % 8 * 8));
@ -11587,7 +11588,7 @@ const ParamTypeIterator = struct {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.haveLayout(ip));
const size: u64 = struct_type.size(ip).*;
const size: u64 = struct_type.sizeUnordered(ip);
assert((std.math.divCeil(u64, size, 8) catch unreachable) == types_index);
if (size % 8 > 0) {
types_buffer[types_index - 1] =

@ -1463,7 +1463,7 @@ const DeclGen = struct {
const ip = &mod.intern_pool;
const union_obj = mod.typeToUnion(ty).?;

if (union_obj.getLayout(ip) == .@"packed") {
if (union_obj.flagsUnordered(ip).layout == .@"packed") {
return self.todo("packed union types", .{});
}

@ -1735,7 +1735,7 @@ const DeclGen = struct {
};

if (struct_type.layout == .@"packed") {
return try self.resolveType(Type.fromInterned(struct_type.backingIntType(ip).*), .direct);
return try self.resolveType(Type.fromInterned(struct_type.backingIntTypeUnordered(ip)), .direct);
}

var member_types = std.ArrayList(IdRef).init(self.gpa);
@ -5081,7 +5081,7 @@ const DeclGen = struct {
const union_ty = mod.typeToUnion(ty).?;
const tag_ty = Type.fromInterned(union_ty.enum_tag_ty);

if (union_ty.getLayout(ip) == .@"packed") {
if (union_ty.flagsUnordered(ip).layout == .@"packed") {
unreachable; // TODO
}

@ -1156,7 +1156,7 @@ pub fn updateFunc(self: *Coff, pt: Zcu.PerThread, func_index: InternPool.Index,
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
func.analysis(&mod.intern_pool).state = .codegen_failure;
func.setAnalysisState(&mod.intern_pool, .codegen_failure);
try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
return;
},

@ -1093,7 +1093,7 @@ pub fn updateFunc(
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
func.analysis(&mod.intern_pool).state = .codegen_failure;
func.setAnalysisState(&mod.intern_pool, .codegen_failure);
try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
return;
},

@ -699,7 +699,7 @@ pub fn updateFunc(
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
func.analysis(&mod.intern_pool).state = .codegen_failure;
func.setAnalysisState(&mod.intern_pool, .codegen_failure);
try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
return;
},

@ -449,7 +449,7 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index,
const code = switch (res) {
.ok => try code_buffer.toOwnedSlice(),
.fail => |em| {
func.analysis(&mod.intern_pool).state = .codegen_failure;
func.setAnalysisState(&mod.intern_pool, .codegen_failure);
try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
return;
},

@ -1051,7 +1051,7 @@ fn setupErrorsLen(zig_object: *ZigObject, wasm_file: *Wasm) !void {
const gpa = wasm_file.base.comp.gpa;
const sym_index = zig_object.findGlobalSymbol("__zig_errors_len") orelse return;

const errors_len = 1 + wasm_file.base.comp.module.?.intern_pool.global_error_set.mutate.list.len;
const errors_len = 1 + wasm_file.base.comp.module.?.intern_pool.global_error_set.getNamesFromMainThread().len;
// overwrite existing atom if it already exists (maybe the error set has increased)
// if not, allocate a new atom.
const atom_index = if (wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = sym_index })) |index| blk: {

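Editor's note: both call sites that computed an error count from `global_error_set.mutate.list.len` (here and in the LLVM backend above) now go through `getNamesFromMainThread()`. Reaching into `mutate` reads internal state that worker threads may be appending to; the accessor name suggests the replacement is a read that is only valid on the main thread once workers are quiescent. A hypothetical shape (the actual structure of `global_error_set` is not shown in this diff):

const std = @import("std");

const GlobalErrorSet = struct {
    mutex: std.Thread.Mutex = .{},
    names: std.ArrayListUnmanaged(u32) = .{},

    // Worker threads append under the lock.
    fn append(set: *GlobalErrorSet, gpa: std.mem.Allocator, name: u32) !void {
        set.mutex.lock();
        defer set.mutex.unlock();
        try set.names.append(gpa, name);
    }

    // Caller asserts it is the main thread and no worker is mutating,
    // so no lock is needed for the read.
    fn getNamesFromMainThread(set: *const GlobalErrorSet) []const u32 {
        return set.names.items;
    }
};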
BIN
stage1/zig1.wasm
Binary file not shown.
@ -16,6 +16,6 @@ pub export fn entry2() void {
// backend=stage2
// target=native
//
// :3:6: error: no field or member function named 'copy' in '[]const u8'
// :9:8: error: no field or member function named 'bar' in '@TypeOf(.{})'
// :12:18: error: no field or member function named 'bar' in 'struct{comptime foo: comptime_int = 1}'
// :3:6: error: no field or member function named 'copy' in '[]const u8'

@ -17,14 +17,14 @@ export fn baz() void {
// target=native
//
// :6:5: error: found compile log statement
// :12:5: note: also here
// :6:5: note: also here
// :12:5: note: also here
//
// Compile Log Output:
// @as(*const [5:0]u8, "begin")
// @as(*const [1:0]u8, "a"), @as(i32, 12), @as(*const [1:0]u8, "b"), @as([]const u8, "hi"[0..2])
// @as(*const [3:0]u8, "end")
// @as(comptime_int, 4)
// @as(*const [5:0]u8, "begin")
// @as(*const [1:0]u8, "a"), @as(i32, [runtime value]), @as(*const [1:0]u8, "b"), @as([]const u8, [runtime value])
// @as(*const [3:0]u8, "end")
// @as(comptime_int, 4)

@ -18,6 +18,6 @@ comptime {
// backend=stage2
// target=native
//
// :1:15: error: comptime parameters not allowed in function with calling convention 'C'
// :5:30: error: comptime parameters not allowed in function with calling convention 'C'
// :6:30: error: generic parameters not allowed in function with calling convention 'C'
// :1:15: error: comptime parameters not allowed in function with calling convention 'C'

@ -82,6 +82,6 @@ pub export fn entry8() void {
// :36:29: note: default value set here
// :46:12: error: value stored in comptime field does not match the default value of the field
// :55:25: error: value stored in comptime field does not match the default value of the field
// :68:36: error: value stored in comptime field does not match the default value of the field
// :61:30: error: value stored in comptime field does not match the default value of the field
// :59:29: note: default value set here
// :68:36: error: value stored in comptime field does not match the default value of the field

@ -18,6 +18,6 @@ comptime {
//
// :1:1: error: variadic function does not support '.Unspecified' calling convention
// :1:1: note: supported calling conventions: '.C'
// :2:1: error: generic function cannot be variadic
// :1:1: error: variadic function does not support '.Inline' calling convention
// :1:1: note: supported calling conventions: '.C'
// :2:1: error: generic function cannot be variadic

@ -26,6 +26,6 @@ pub export fn entry2() void {
// backend=llvm
// target=native
//
// :17:12: error: C pointers cannot point to opaque types
// :6:20: error: cannot @bitCast to '[]i32'
// :6:20: note: use @ptrCast to cast from '[]u32'
// :17:12: error: C pointers cannot point to opaque types