compiler: rework type resolution, fully resolve all types

I'm so sorry.

This commit was just meant to make all types fully resolve by queueing
resolution at the moment of their creation. Unfortunately, a lot of
dominoes ended up falling. Here's what happened:

* I added a work queue job to fully resolve a type.
* I realised that from here we could eliminate `Sema.types_to_resolve`
  if we made function codegen a separate job. This is desirable for
  simplicity of both spec and implementation.
* This led to a new AIR traversal to detect whether any required type is
  unresolved. If a type in the AIR failed to resolve, then we can't run
  codegen.
* Because full type resolution now occurs via the work queue job, a bug
  was exposed whereby error messages for type resolution were associated
  with the wrong `Decl`, resulting in duplicate error messages when the
  type was also resolved "by" its owner `Decl` (which is where *all*
  resolution really ought to happen).
* A correct fix for this requires using a different `Sema` when
  performing type resolution: we need a `Sema` owned by the type. Also
  note that this fix is necessary for incremental compilation.
* This means a whole bunch of functions no longer need to take `Sema`s.
  * First-order effects: `resolveTypeFields`, `resolveTypeLayout`, etc
  * Second-order effects: `Type.abiAlignmentAdvanced`, `Value.orderAgainstZeroAdvanced`, etc

The end result of this is, in short, a more correct compiler and a
simpler language specification. This regressed a few error notes in the
test cases, but nothing that seems worth blocking this change over.
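
[Editor's aside: the queueing model described above can be pictured with a
small toy program. This is a sketch only -- the job names mirror the real
`Job` union in the Compilation diff below, but the payloads and control flow
are heavily simplified, and `u32` stands in for `InternPool` indices.]

const std = @import("std");

const Job = union(enum) {
    // Queued when a struct/union type is created: fully resolve it.
    resolve_type_fully: u32,
    // Queued for a runtime function: semantic analysis, producing AIR.
    analyze_func: u32,
    // Queued by `analyze_func` on success; owns the (here: stubbed) AIR.
    codegen_func: struct { func: u32, air_resolved: bool },
};

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    var queue = std.fifo.LinearFifo(Job, .Dynamic).init(gpa.allocator());
    defer queue.deinit();

    try queue.writeItem(.{ .resolve_type_fully = 1 });
    try queue.writeItem(.{ .analyze_func = 7 });

    while (queue.readItem()) |job| switch (job) {
        .resolve_type_fully => |ty| std.debug.print("resolve type {d}\n", .{ty}),
        .analyze_func => |f| {
            // Analysis queues codegen as a *separate* job, so any
            // type-resolution jobs queued during analysis run before it.
            try queue.writeItem(.{ .codegen_func = .{ .func = f, .air_resolved = true } });
        },
        // Codegen only proceeds if every type in the AIR resolved
        // (the real check is `Air.typesFullyResolved`).
        .codegen_func => |cg| if (cg.air_resolved)
            std.debug.print("codegen func {d}\n", .{cg.func}),
    };
}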

Oh, also, I ripped out the old code in `test/src/Cases.zig` which
introduced a dependency on `Compilation`. This dependency was
problematic at best, and this code has been unused for a while. When we
re-enable incremental test cases, we must rewrite their executor to use
the compiler server protocol.
mlugg 2024-07-04 05:00:32 +01:00
parent 2f0f1efa6f
commit 0e5335aaf5
20 changed files with 1854 additions and 2030 deletions


@@ -82,15 +82,6 @@ pub fn build(b: *std.Build) !void {
     docs_step.dependOn(langref_step);
     docs_step.dependOn(std_docs_step);
 
-    const check_case_exe = b.addExecutable(.{
-        .name = "check-case",
-        .root_source_file = b.path("test/src/Cases.zig"),
-        .target = b.graph.host,
-        .optimize = optimize,
-        .single_threaded = single_threaded,
-    });
-    check_case_exe.stack_size = stack_size;
-
     const skip_debug = b.option(bool, "skip-debug", "Main test suite skips debug builds") orelse false;
     const skip_release = b.option(bool, "skip-release", "Main test suite skips release builds") orelse false;
     const skip_release_small = b.option(bool, "skip-release-small", "Main test suite skips release-small builds") orelse skip_release;

@@ -222,7 +213,6 @@
     if (target.result.os.tag == .windows and target.result.abi == .gnu) {
         // LTO is currently broken on mingw, this can be removed when it's fixed.
         exe.want_lto = false;
-        check_case_exe.want_lto = false;
     }
 
     const use_llvm = b.option(bool, "use-llvm", "Use the llvm backend");

@@ -245,7 +235,6 @@
     if (link_libc) {
         exe.linkLibC();
-        check_case_exe.linkLibC();
     }
 
     const is_debug = optimize == .Debug;

@@ -339,21 +328,17 @@
         }
         try addCmakeCfgOptionsToExe(b, cfg, exe, use_zig_libcxx);
-        try addCmakeCfgOptionsToExe(b, cfg, check_case_exe, use_zig_libcxx);
     } else {
         // Here we are -Denable-llvm but no cmake integration.
         try addStaticLlvmOptionsToExe(exe);
-        try addStaticLlvmOptionsToExe(check_case_exe);
     }
     if (target.result.os.tag == .windows) {
-        inline for (.{ exe, check_case_exe }) |artifact| {
-            // LLVM depends on networking as of version 18.
-            artifact.linkSystemLibrary("ws2_32");
-            artifact.linkSystemLibrary("version");
-            artifact.linkSystemLibrary("uuid");
-            artifact.linkSystemLibrary("ole32");
-        }
+        // LLVM depends on networking as of version 18.
+        exe.linkSystemLibrary("ws2_32");
+        exe.linkSystemLibrary("version");
+        exe.linkSystemLibrary("uuid");
+        exe.linkSystemLibrary("ole32");
     }
 }

@@ -394,7 +379,6 @@
     const test_filters = b.option([]const []const u8, "test-filter", "Skip tests that do not match any filter") orelse &[0][]const u8{};
 
     const test_cases_options = b.addOptions();
-    check_case_exe.root_module.addOptions("build_options", test_cases_options);
     test_cases_options.addOption(bool, "enable_tracy", false);
     test_cases_options.addOption(bool, "enable_debug_extensions", enable_debug_extensions);

@@ -458,7 +442,7 @@
     test_step.dependOn(check_fmt);
 
     const test_cases_step = b.step("test-cases", "Run the main compiler test cases");
-    try tests.addCases(b, test_cases_step, test_filters, check_case_exe, target, .{
+    try tests.addCases(b, test_cases_step, test_filters, target, .{
         .skip_translate_c = skip_translate_c,
         .skip_run_translated_c = skip_run_translated_c,
     }, .{


@@ -1801,3 +1801,5 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
         .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip),
     };
 }
+
+pub const typesFullyResolved = @import("Air/types_resolved.zig").typesFullyResolved;

src/Air/types_resolved.zig (new file, 521 lines)

@@ -0,0 +1,521 @@
const Air = @import("../Air.zig");
const Zcu = @import("../Zcu.zig");
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const InternPool = @import("../InternPool.zig");

/// Given a body of AIR instructions, returns whether all type resolution necessary for codegen is complete.
/// If `false`, then type resolution must have failed, so codegen cannot proceed.
pub fn typesFullyResolved(air: Air, zcu: *Zcu) bool {
    return checkBody(air, air.getMainBody(), zcu);
}

fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
    const tags = air.instructions.items(.tag);
    const datas = air.instructions.items(.data);

    for (body) |inst| {
        const data = datas[@intFromEnum(inst)];
        switch (tags[@intFromEnum(inst)]) {
            .inferred_alloc, .inferred_alloc_comptime => unreachable,

            .arg => {
                if (!checkType(data.arg.ty.toType(), zcu)) return false;
            },

            .add,
            .add_safe,
            .add_optimized,
            .add_wrap,
            .add_sat,
            .sub,
            .sub_safe,
            .sub_optimized,
            .sub_wrap,
            .sub_sat,
            .mul,
            .mul_safe,
            .mul_optimized,
            .mul_wrap,
            .mul_sat,
            .div_float,
            .div_float_optimized,
            .div_trunc,
            .div_trunc_optimized,
            .div_floor,
            .div_floor_optimized,
            .div_exact,
            .div_exact_optimized,
            .rem,
            .rem_optimized,
            .mod,
            .mod_optimized,
            .max,
            .min,
            .bit_and,
            .bit_or,
            .shr,
            .shr_exact,
            .shl,
            .shl_exact,
            .shl_sat,
            .xor,
            .cmp_lt,
            .cmp_lt_optimized,
            .cmp_lte,
            .cmp_lte_optimized,
            .cmp_eq,
            .cmp_eq_optimized,
            .cmp_gte,
            .cmp_gte_optimized,
            .cmp_gt,
            .cmp_gt_optimized,
            .cmp_neq,
            .cmp_neq_optimized,
            .bool_and,
            .bool_or,
            .store,
            .store_safe,
            .set_union_tag,
            .array_elem_val,
            .slice_elem_val,
            .ptr_elem_val,
            .memset,
            .memset_safe,
            .memcpy,
            .atomic_store_unordered,
            .atomic_store_monotonic,
            .atomic_store_release,
            .atomic_store_seq_cst,
            => {
                if (!checkRef(data.bin_op.lhs, zcu)) return false;
                if (!checkRef(data.bin_op.rhs, zcu)) return false;
            },

            .not,
            .bitcast,
            .clz,
            .ctz,
            .popcount,
            .byte_swap,
            .bit_reverse,
            .abs,
            .load,
            .fptrunc,
            .fpext,
            .intcast,
            .trunc,
            .optional_payload,
            .optional_payload_ptr,
            .optional_payload_ptr_set,
            .wrap_optional,
            .unwrap_errunion_payload,
            .unwrap_errunion_err,
            .unwrap_errunion_payload_ptr,
            .unwrap_errunion_err_ptr,
            .errunion_payload_ptr_set,
            .wrap_errunion_payload,
            .wrap_errunion_err,
            .struct_field_ptr_index_0,
            .struct_field_ptr_index_1,
            .struct_field_ptr_index_2,
            .struct_field_ptr_index_3,
            .get_union_tag,
            .slice_len,
            .slice_ptr,
            .ptr_slice_len_ptr,
            .ptr_slice_ptr_ptr,
            .array_to_slice,
            .int_from_float,
            .int_from_float_optimized,
            .float_from_int,
            .splat,
            .error_set_has_value,
            .addrspace_cast,
            .c_va_arg,
            .c_va_copy,
            => {
                if (!checkType(data.ty_op.ty.toType(), zcu)) return false;
                if (!checkRef(data.ty_op.operand, zcu)) return false;
            },

            .alloc,
            .ret_ptr,
            .c_va_start,
            => {
                if (!checkType(data.ty, zcu)) return false;
            },

            .ptr_add,
            .ptr_sub,
            .add_with_overflow,
            .sub_with_overflow,
            .mul_with_overflow,
            .shl_with_overflow,
            .slice,
            .slice_elem_ptr,
            .ptr_elem_ptr,
            => {
                const bin = air.extraData(Air.Bin, data.ty_pl.payload).data;
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                if (!checkRef(bin.lhs, zcu)) return false;
                if (!checkRef(bin.rhs, zcu)) return false;
            },

            .block,
            .loop,
            => {
                const extra = air.extraData(Air.Block, data.ty_pl.payload);
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                if (!checkBody(
                    air,
                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
                    zcu,
                )) return false;
            },

            .dbg_inline_block => {
                const extra = air.extraData(Air.DbgInlineBlock, data.ty_pl.payload);
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                if (!checkBody(
                    air,
                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
                    zcu,
                )) return false;
            },

            .sqrt,
            .sin,
            .cos,
            .tan,
            .exp,
            .exp2,
            .log,
            .log2,
            .log10,
            .floor,
            .ceil,
            .round,
            .trunc_float,
            .neg,
            .neg_optimized,
            .is_null,
            .is_non_null,
            .is_null_ptr,
            .is_non_null_ptr,
            .is_err,
            .is_non_err,
            .is_err_ptr,
            .is_non_err_ptr,
            .int_from_ptr,
            .int_from_bool,
            .ret,
            .ret_safe,
            .ret_load,
            .is_named_enum_value,
            .tag_name,
            .error_name,
            .cmp_lt_errors_len,
            .c_va_end,
            .set_err_return_trace,
            => {
                if (!checkRef(data.un_op, zcu)) return false;
            },

            .br => {
                if (!checkRef(data.br.operand, zcu)) return false;
            },

            .cmp_vector,
            .cmp_vector_optimized,
            => {
                const extra = air.extraData(Air.VectorCmp, data.ty_pl.payload).data;
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                if (!checkRef(extra.lhs, zcu)) return false;
                if (!checkRef(extra.rhs, zcu)) return false;
            },

            .reduce,
            .reduce_optimized,
            => {
                if (!checkRef(data.reduce.operand, zcu)) return false;
            },

            .struct_field_ptr,
            .struct_field_val,
            => {
                const extra = air.extraData(Air.StructField, data.ty_pl.payload).data;
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                if (!checkRef(extra.struct_operand, zcu)) return false;
            },

            .shuffle => {
                const extra = air.extraData(Air.Shuffle, data.ty_pl.payload).data;
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                if (!checkRef(extra.a, zcu)) return false;
                if (!checkRef(extra.b, zcu)) return false;
                if (!checkVal(Value.fromInterned(extra.mask), zcu)) return false;
            },

            .cmpxchg_weak,
            .cmpxchg_strong,
            => {
                const extra = air.extraData(Air.Cmpxchg, data.ty_pl.payload).data;
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                if (!checkRef(extra.ptr, zcu)) return false;
                if (!checkRef(extra.expected_value, zcu)) return false;
                if (!checkRef(extra.new_value, zcu)) return false;
            },

            .aggregate_init => {
                const ty = data.ty_pl.ty.toType();
                const elems_len: usize = @intCast(ty.arrayLen(zcu));
                const elems: []const Air.Inst.Ref = @ptrCast(air.extra[data.ty_pl.payload..][0..elems_len]);
                if (!checkType(ty, zcu)) return false;
                if (ty.zigTypeTag(zcu) == .Struct) {
                    for (elems, 0..) |elem, elem_idx| {
                        if (ty.structFieldIsComptime(elem_idx, zcu)) continue;
                        if (!checkRef(elem, zcu)) return false;
                    }
                } else {
                    for (elems) |elem| {
                        if (!checkRef(elem, zcu)) return false;
                    }
                }
            },

            .union_init => {
                const extra = air.extraData(Air.UnionInit, data.ty_pl.payload).data;
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                if (!checkRef(extra.init, zcu)) return false;
            },

            .field_parent_ptr => {
                const extra = air.extraData(Air.FieldParentPtr, data.ty_pl.payload).data;
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                if (!checkRef(extra.field_ptr, zcu)) return false;
            },

            .atomic_load => {
                if (!checkRef(data.atomic_load.ptr, zcu)) return false;
            },

            .prefetch => {
                if (!checkRef(data.prefetch.ptr, zcu)) return false;
            },

            .vector_store_elem => {
                const bin = air.extraData(Air.Bin, data.vector_store_elem.payload).data;
                if (!checkRef(data.vector_store_elem.vector_ptr, zcu)) return false;
                if (!checkRef(bin.lhs, zcu)) return false;
                if (!checkRef(bin.rhs, zcu)) return false;
            },

            .select,
            .mul_add,
            => {
                const bin = air.extraData(Air.Bin, data.pl_op.payload).data;
                if (!checkRef(data.pl_op.operand, zcu)) return false;
                if (!checkRef(bin.lhs, zcu)) return false;
                if (!checkRef(bin.rhs, zcu)) return false;
            },

            .atomic_rmw => {
                const extra = air.extraData(Air.AtomicRmw, data.pl_op.payload).data;
                if (!checkRef(data.pl_op.operand, zcu)) return false;
                if (!checkRef(extra.operand, zcu)) return false;
            },

            .call,
            .call_always_tail,
            .call_never_tail,
            .call_never_inline,
            => {
                const extra = air.extraData(Air.Call, data.pl_op.payload);
                const args: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end..][0..extra.data.args_len]);
                if (!checkRef(data.pl_op.operand, zcu)) return false;
                for (args) |arg| if (!checkRef(arg, zcu)) return false;
            },

            .dbg_var_ptr,
            .dbg_var_val,
            => {
                if (!checkRef(data.pl_op.operand, zcu)) return false;
            },

            .@"try" => {
                const extra = air.extraData(Air.Try, data.pl_op.payload);
                if (!checkRef(data.pl_op.operand, zcu)) return false;
                if (!checkBody(
                    air,
                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
                    zcu,
                )) return false;
            },

            .try_ptr => {
                const extra = air.extraData(Air.TryPtr, data.ty_pl.payload);
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                if (!checkRef(extra.data.ptr, zcu)) return false;
                if (!checkBody(
                    air,
                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
                    zcu,
                )) return false;
            },

            .cond_br => {
                const extra = air.extraData(Air.CondBr, data.pl_op.payload);
                if (!checkRef(data.pl_op.operand, zcu)) return false;
                if (!checkBody(
                    air,
                    @ptrCast(air.extra[extra.end..][0..extra.data.then_body_len]),
                    zcu,
                )) return false;
                if (!checkBody(
                    air,
                    @ptrCast(air.extra[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len]),
                    zcu,
                )) return false;
            },

            .switch_br => {
                const extra = air.extraData(Air.SwitchBr, data.pl_op.payload);
                if (!checkRef(data.pl_op.operand, zcu)) return false;
                var extra_index = extra.end;
                for (0..extra.data.cases_len) |_| {
                    const case = air.extraData(Air.SwitchBr.Case, extra_index);
                    extra_index = case.end;
                    const items: []const Air.Inst.Ref = @ptrCast(air.extra[extra_index..][0..case.data.items_len]);
                    extra_index += case.data.items_len;
                    for (items) |item| if (!checkRef(item, zcu)) return false;
                    if (!checkBody(
                        air,
                        @ptrCast(air.extra[extra_index..][0..case.data.body_len]),
                        zcu,
                    )) return false;
                    extra_index += case.data.body_len;
                }
                if (!checkBody(
                    air,
                    @ptrCast(air.extra[extra_index..][0..extra.data.else_body_len]),
                    zcu,
                )) return false;
            },

            .assembly => {
                const extra = air.extraData(Air.Asm, data.ty_pl.payload);
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                // Luckily, we only care about the inputs and outputs, so we don't have to do
                // the whole null-terminated string dance.
                const outputs: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end..][0..extra.data.outputs_len]);
                const inputs: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end + extra.data.outputs_len ..][0..extra.data.inputs_len]);
                for (outputs) |output| if (output != .none and !checkRef(output, zcu)) return false;
                for (inputs) |input| if (input != .none and !checkRef(input, zcu)) return false;
            },

            .trap,
            .breakpoint,
            .ret_addr,
            .frame_addr,
            .unreach,
            .wasm_memory_size,
            .wasm_memory_grow,
            .work_item_id,
            .work_group_size,
            .work_group_id,
            .fence,
            .dbg_stmt,
            .err_return_trace,
            .save_err_return_trace_index,
            => {},
        }
    }
    return true;
}

fn checkRef(ref: Air.Inst.Ref, zcu: *Zcu) bool {
    const ip_index = ref.toInterned() orelse {
        // This operand refers back to a previous instruction.
        // We have already checked that instruction's type.
        // So, there's no need to check this operand's type.
        return true;
    };
    return checkVal(Value.fromInterned(ip_index), zcu);
}

fn checkVal(val: Value, zcu: *Zcu) bool {
    if (!checkType(val.typeOf(zcu), zcu)) return false;

    // Check for lazy values
    switch (zcu.intern_pool.indexToKey(val.toIntern())) {
        .int => |int| switch (int.storage) {
            .u64, .i64, .big_int => return true,
            .lazy_align, .lazy_size => |ty_index| {
                return checkType(Type.fromInterned(ty_index), zcu);
            },
        },
        else => return true,
    }
}

fn checkType(ty: Type, zcu: *Zcu) bool {
    const ip = &zcu.intern_pool;
    return switch (ty.zigTypeTag(zcu)) {
        .Type,
        .Void,
        .Bool,
        .NoReturn,
        .Int,
        .Float,
        .ErrorSet,
        .Enum,
        .Opaque,
        .Vector,
        // These types can appear due to some dummy instructions Sema introduces and expects to be omitted by Liveness.
        // It's a little silly -- but fine, we'll return `true`.
        .ComptimeFloat,
        .ComptimeInt,
        .Undefined,
        .Null,
        .EnumLiteral,
        => true,

        .Frame,
        .AnyFrame,
        => @panic("TODO Air.types_resolved.checkType async frames"),

        .Optional => checkType(ty.childType(zcu), zcu),
        .ErrorUnion => checkType(ty.errorUnionPayload(zcu), zcu),
        .Pointer => checkType(ty.childType(zcu), zcu),
        .Array => checkType(ty.childType(zcu), zcu),

        .Fn => {
            const info = zcu.typeToFunc(ty).?;
            for (0..info.param_types.len) |i| {
                const param_ty = info.param_types.get(ip)[i];
                if (!checkType(Type.fromInterned(param_ty), zcu)) return false;
            }
            return checkType(Type.fromInterned(info.return_type), zcu);
        },
        .Struct => switch (ip.indexToKey(ty.toIntern())) {
            .struct_type => {
                const struct_obj = zcu.typeToStruct(ty).?;
                return switch (struct_obj.layout) {
                    .@"packed" => struct_obj.backingIntType(ip).* != .none,
                    .auto, .@"extern" => struct_obj.flagsPtr(ip).fully_resolved,
                };
            },
            .anon_struct_type => |tuple| {
                for (0..tuple.types.len) |i| {
                    const field_is_comptime = tuple.values.get(ip)[i] != .none;
                    if (field_is_comptime) continue;
                    const field_ty = tuple.types.get(ip)[i];
                    if (!checkType(Type.fromInterned(field_ty), zcu)) return false;
                }
                return true;
            },
            else => unreachable,
        },
        .Union => return zcu.typeToUnion(ty).?.flagsPtr(ip).status == .fully_resolved,
    };
}
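
[Editor's aside on the shape of this check, since the file is dense: it is a
recursive walk in which "resolved" is a flag consulted on nominal container
types, while structural types just recurse into their children. Below is a
miniature, self-contained analogue; `Ty` is invented for the sketch and is not
a compiler type.]

const std = @import("std");

const Ty = union(enum) {
    int,
    optional: *const Ty,
    // Like `struct_type` above: resolution state is a flag on the type.
    @"struct": struct { fully_resolved: bool },
    // Like `anon_struct_type` above: no flag; check each field type.
    tuple: []const Ty,
};

fn checkTy(ty: Ty) bool {
    return switch (ty) {
        .int => true,
        .optional => |child| checkTy(child.*),
        .@"struct" => |s| s.fully_resolved,
        .tuple => |fields| {
            for (fields) |f| {
                if (!checkTy(f)) return false;
            }
            return true;
        },
    };
}

pub fn main() void {
    const unresolved: Ty = .{ .@"struct" = .{ .fully_resolved = false } };
    const wrapped: Ty = .{ .optional = &unresolved };
    std.debug.print("fully resolved: {}\n", .{checkTy(wrapped)});
}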


@@ -37,6 +37,7 @@ const Cache = std.Build.Cache;
 const c_codegen = @import("codegen/c.zig");
 const libtsan = @import("libtsan.zig");
 const Zir = std.zig.Zir;
+const Air = @import("Air.zig");
 const Builtin = @import("Builtin.zig");
 const LlvmObject = @import("codegen/llvm.zig").Object;

@@ -316,18 +317,29 @@ const Job = union(enum) {
     codegen_decl: InternPool.DeclIndex,
     /// Write the machine code for a function to the output file.
     /// This will either be a non-generic `func_decl` or a `func_instance`.
-    codegen_func: InternPool.Index,
+    codegen_func: struct {
+        func: InternPool.Index,
+        /// This `Air` is owned by the `Job` and allocated with `gpa`.
+        /// It must be deinited when the job is processed.
+        air: Air,
+    },
     /// Render the .h file snippet for the Decl.
     emit_h_decl: InternPool.DeclIndex,
     /// The Decl needs to be analyzed and possibly export itself.
     /// It may have already be analyzed, or it may have been determined
     /// to be outdated; in this case perform semantic analysis again.
     analyze_decl: InternPool.DeclIndex,
+    /// Analyze the body of a runtime function.
+    /// After analysis, a `codegen_func` job will be queued.
+    /// These must be separate jobs to ensure any needed type resolution occurs *before* codegen.
+    analyze_func: InternPool.Index,
     /// The source file containing the Decl has been updated, and so the
     /// Decl may need its line number information updated in the debug info.
     update_line_number: InternPool.DeclIndex,
     /// The main source file for the module needs to be analyzed.
     analyze_mod: *Package.Module,
+    /// Fully resolve the given `struct` or `union` type.
+    resolve_type_fully: InternPool.Index,
 
     /// one of the glibc static objects
     glibc_crt_file: glibc.CRTFile,

@@ -3389,7 +3401,7 @@ pub fn performAllTheWork(
         if (try zcu.findOutdatedToAnalyze()) |outdated| {
             switch (outdated.unwrap()) {
                 .decl => |decl| try comp.work_queue.writeItem(.{ .analyze_decl = decl }),
-                .func => |func| try comp.work_queue.writeItem(.{ .codegen_func = func }),
+                .func => |func| try comp.work_queue.writeItem(.{ .analyze_func = func }),
             }
             continue;
         }

@@ -3439,6 +3451,14 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void {
             const named_frame = tracy.namedFrame("codegen_func");
             defer named_frame.end();
 
+            const module = comp.module.?;
+            // This call takes ownership of `func.air`.
+            try module.linkerUpdateFunc(func.func, func.air);
+        },
+        .analyze_func => |func| {
+            const named_frame = tracy.namedFrame("analyze_func");
+            defer named_frame.end();
+
             const module = comp.module.?;
             module.ensureFuncBodyAnalyzed(func) catch |err| switch (err) {
                 error.OutOfMemory => return error.OutOfMemory,

@@ -3518,6 +3538,16 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void {
                     try module.ensureFuncBodyAnalysisQueued(decl.val.toIntern());
                 }
             },
+        .resolve_type_fully => |ty| {
+            const named_frame = tracy.namedFrame("resolve_type_fully");
+            defer named_frame.end();
+
+            const zcu = comp.module.?;
+            Type.fromInterned(ty).resolveFully(zcu) catch |err| switch (err) {
+                error.OutOfMemory => return error.OutOfMemory,
+                error.AnalysisFail => return,
+            };
+        },
         .update_line_number => |decl_index| {
             const named_frame = tracy.namedFrame("update_line_number");
             defer named_frame.end();

(File diff suppressed because it is too large.)


@@ -78,8 +78,8 @@ fn bitCastInner(
     const val_ty = val.typeOf(zcu);
 
-    try sema.resolveTypeLayout(val_ty);
-    try sema.resolveTypeLayout(dest_ty);
+    try val_ty.resolveLayout(zcu);
+    try dest_ty.resolveLayout(zcu);
 
     assert(val_ty.hasWellDefinedLayout(zcu));

@@ -136,8 +136,8 @@ fn bitCastSpliceInner(
     const val_ty = val.typeOf(zcu);
     const splice_val_ty = splice_val.typeOf(zcu);
 
-    try sema.resolveTypeLayout(val_ty);
-    try sema.resolveTypeLayout(splice_val_ty);
+    try val_ty.resolveLayout(zcu);
+    try splice_val_ty.resolveLayout(zcu);
 
     const splice_bits = splice_val_ty.bitSize(zcu);


@@ -5,6 +5,7 @@
 const std = @import("std");
 const builtin = @import("builtin");
+const Allocator = std.mem.Allocator;
 const Value = @import("Value.zig");
 const assert = std.debug.assert;
 const Target = std.Target;

@@ -18,6 +19,7 @@ const InternPool = @import("InternPool.zig");
 const Alignment = InternPool.Alignment;
 const Zir = std.zig.Zir;
 const Type = @This();
+const SemaError = Zcu.SemaError;
 
 ip_index: InternPool.Index,

@@ -458,7 +460,7 @@ pub fn toValue(self: Type) Value {
     return Value.fromInterned(self.toIntern());
 }
 
-const RuntimeBitsError = Module.CompileError || error{NeedLazy};
+const RuntimeBitsError = SemaError || error{NeedLazy};
 
 /// true if and only if the type takes up space in memory at runtime.
 /// There are two reasons a type will return false:

@@ -475,7 +477,7 @@ pub fn hasRuntimeBitsAdvanced(
     ty: Type,
     mod: *Module,
     ignore_comptime_only: bool,
-    strat: AbiAlignmentAdvancedStrat,
+    strat: ResolveStratLazy,
 ) RuntimeBitsError!bool {
     const ip = &mod.intern_pool;
     return switch (ty.toIntern()) {

@@ -488,8 +490,8 @@ pub fn hasRuntimeBitsAdvanced(
             // to comptime-only types do not, with the exception of function pointers.
             if (ignore_comptime_only) return true;
             return switch (strat) {
-                .sema => |sema| !(try sema.typeRequiresComptime(ty)),
-                .eager => !comptimeOnly(ty, mod),
+                .sema => !try ty.comptimeOnlyAdvanced(mod, .sema),
+                .eager => !ty.comptimeOnly(mod),
                 .lazy => error.NeedLazy,
             };
         },

@@ -506,8 +508,8 @@ pub fn hasRuntimeBitsAdvanced(
             }
             if (ignore_comptime_only) return true;
             return switch (strat) {
-                .sema => |sema| !(try sema.typeRequiresComptime(child_ty)),
-                .eager => !comptimeOnly(child_ty, mod),
+                .sema => !try child_ty.comptimeOnlyAdvanced(mod, .sema),
+                .eager => !child_ty.comptimeOnly(mod),
                 .lazy => error.NeedLazy,
             };
         },

@@ -578,7 +580,7 @@ pub fn hasRuntimeBitsAdvanced(
                 return true;
             }
             switch (strat) {
-                .sema => |sema| _ = try sema.resolveTypeFields(ty),
+                .sema => try ty.resolveFields(mod),
                 .eager => assert(struct_type.haveFieldTypes(ip)),
                 .lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy,
             }

@@ -622,7 +624,7 @@ pub fn hasRuntimeBitsAdvanced(
                 },
             }
             switch (strat) {
-                .sema => |sema| _ = try sema.resolveTypeFields(ty),
+                .sema => try ty.resolveFields(mod),
                 .eager => assert(union_type.flagsPtr(ip).status.haveFieldTypes()),
                 .lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes())
                     return error.NeedLazy,

@@ -784,19 +786,18 @@ pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool {
 }
 
 pub fn fnHasRuntimeBits(ty: Type, mod: *Module) bool {
-    return ty.fnHasRuntimeBitsAdvanced(mod, null) catch unreachable;
+    return ty.fnHasRuntimeBitsAdvanced(mod, .normal) catch unreachable;
 }
 
 /// Determines whether a function type has runtime bits, i.e. whether a
 /// function with this type can exist at runtime.
 /// Asserts that `ty` is a function type.
-/// If `opt_sema` is not provided, asserts that the return type is sufficiently resolved.
-pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool {
+pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaError!bool {
     const fn_info = mod.typeToFunc(ty).?;
     if (fn_info.is_generic) return false;
     if (fn_info.is_var_args) return true;
     if (fn_info.cc == .Inline) return false;
-    return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, opt_sema);
+    return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, strat);
 }
 
 pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool {

@@ -820,23 +821,23 @@ pub fn isNoReturn(ty: Type, mod: *Module) bool {
 /// Returns `none` if the pointer is naturally aligned and the element type is 0-bit.
 pub fn ptrAlignment(ty: Type, mod: *Module) Alignment {
-    return ptrAlignmentAdvanced(ty, mod, null) catch unreachable;
+    return ptrAlignmentAdvanced(ty, mod, .normal) catch unreachable;
 }
 
-pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !Alignment {
+pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) !Alignment {
     return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_type| {
             if (ptr_type.flags.alignment != .none)
                 return ptr_type.flags.alignment;
 
-            if (opt_sema) |sema| {
-                const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .{ .sema = sema });
+            if (strat == .sema) {
+                const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .sema);
                 return res.scalar;
             }
 
             return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
         },
-        .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, opt_sema),
+        .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, strat),
         else => unreachable,
     };
 }

@@ -868,10 +869,34 @@ pub const AbiAlignmentAdvanced = union(enum) {
     val: Value,
 };
 
-pub const AbiAlignmentAdvancedStrat = union(enum) {
-    eager,
-    lazy,
-    sema: *Sema,
+pub const ResolveStratLazy = enum {
+    /// Return a `lazy_size` or `lazy_align` value if necessary.
+    /// This value can be resolved later using `Value.resolveLazy`.
+    lazy,
+    /// Return a scalar result, expecting all necessary type resolution to be completed.
+    /// Backends should typically use this, since they must not perform type resolution.
+    eager,
+    /// Return a scalar result, performing type resolution as necessary.
+    /// This should typically be used from semantic analysis.
+    sema,
+};
+
+/// The chosen strategy can be easily optimized away in release builds.
+/// However, in debug builds, it helps to avoid accidentally resolving types in backends.
+pub const ResolveStrat = enum {
+    /// Assert that all necessary resolution is completed.
+    /// Backends should typically use this, since they must not perform type resolution.
+    normal,
+    /// Perform type resolution as necessary using `Zcu`.
+    /// This should typically be used from semantic analysis.
+    sema,
+
+    pub fn toLazy(strat: ResolveStrat) ResolveStratLazy {
+        return switch (strat) {
+            .normal => .eager,
+            .sema => .sema,
+        };
+    }
 };
 
 /// If you pass `eager` you will get back `scalar` and assert the type is resolved.
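
[Editor's aside on how these two enums interact: `ResolveStrat` is for callers
that are either in Sema or in a backend, and `toLazy` maps it into the
three-state strategy used where a lazy result is also possible. The snippet
below restates the definitions from this hunk (doc comments elided) with a
tiny runnable demo of the mapping.]

const std = @import("std");

const ResolveStratLazy = enum { lazy, eager, sema };

const ResolveStrat = enum {
    normal,
    sema,

    pub fn toLazy(strat: ResolveStrat) ResolveStratLazy {
        return switch (strat) {
            .normal => .eager, // backend caller: assert already resolved
            .sema => .sema, // Sema caller: may resolve on the fly
        };
    }
};

pub fn main() void {
    // `toLazy` never yields `.lazy`; that strategy is only requested explicitly.
    std.debug.print(".normal -> .{s}\n", .{@tagName(ResolveStrat.normal.toLazy())});
    std.debug.print(".sema -> .{s}\n", .{@tagName(ResolveStrat.sema.toLazy())});
}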
@ -883,17 +908,12 @@ pub const AbiAlignmentAdvancedStrat = union(enum) {
pub fn abiAlignmentAdvanced( pub fn abiAlignmentAdvanced(
ty: Type, ty: Type,
mod: *Module, mod: *Module,
strat: AbiAlignmentAdvancedStrat, strat: ResolveStratLazy,
) Module.CompileError!AbiAlignmentAdvanced { ) SemaError!AbiAlignmentAdvanced {
const target = mod.getTarget(); const target = mod.getTarget();
const use_llvm = mod.comp.config.use_llvm; const use_llvm = mod.comp.config.use_llvm;
const ip = &mod.intern_pool; const ip = &mod.intern_pool;
const opt_sema = switch (strat) {
.sema => |sema| sema,
else => null,
};
switch (ty.toIntern()) { switch (ty.toIntern()) {
.empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .@"1" }, .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .@"1" },
else => switch (ip.indexToKey(ty.toIntern())) { else => switch (ip.indexToKey(ty.toIntern())) {
@ -911,7 +931,7 @@ pub fn abiAlignmentAdvanced(
if (vector_type.len == 0) return .{ .scalar = .@"1" }; if (vector_type.len == 0) return .{ .scalar = .@"1" };
switch (mod.comp.getZigBackend()) { switch (mod.comp.getZigBackend()) {
else => { else => {
const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema)); const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, .sema));
if (elem_bits == 0) return .{ .scalar = .@"1" }; if (elem_bits == 0) return .{ .scalar = .@"1" };
const bytes = ((elem_bits * vector_type.len) + 7) / 8; const bytes = ((elem_bits * vector_type.len) + 7) / 8;
const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
@ -1024,7 +1044,7 @@ pub fn abiAlignmentAdvanced(
const struct_type = ip.loadStructType(ty.toIntern()); const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .@"packed") { if (struct_type.layout == .@"packed") {
switch (strat) { switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty), .sema => try ty.resolveLayout(mod),
.lazy => if (struct_type.backingIntType(ip).* == .none) return .{ .lazy => if (struct_type.backingIntType(ip).* == .none) return .{
.val = Value.fromInterned((try mod.intern(.{ .int = .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type, .ty = .comptime_int_type,
@ -1036,19 +1056,16 @@ pub fn abiAlignmentAdvanced(
return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(mod) }; return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(mod) };
} }
const flags = struct_type.flagsPtr(ip).*; if (struct_type.flagsPtr(ip).alignment == .none) switch (strat) {
if (flags.alignment != .none) return .{ .scalar = flags.alignment };
return switch (strat) {
.eager => unreachable, // struct alignment not resolved .eager => unreachable, // struct alignment not resolved
.sema => |sema| .{ .sema => try ty.resolveStructAlignment(mod),
.scalar = try sema.resolveStructAlignment(ty.toIntern(), struct_type), .lazy => return .{ .val = Value.fromInterned(try mod.intern(.{ .int = .{
},
.lazy => .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type, .ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() }, .storage = .{ .lazy_align = ty.toIntern() },
} }))) }, } })) },
}; };
return .{ .scalar = struct_type.flagsPtr(ip).alignment };
}, },
.anon_struct_type => |tuple| { .anon_struct_type => |tuple| {
var big_align: Alignment = .@"1"; var big_align: Alignment = .@"1";
@ -1070,12 +1087,10 @@ pub fn abiAlignmentAdvanced(
}, },
.union_type => { .union_type => {
const union_type = ip.loadUnionType(ty.toIntern()); const union_type = ip.loadUnionType(ty.toIntern());
const flags = union_type.flagsPtr(ip).*;
if (flags.alignment != .none) return .{ .scalar = flags.alignment };
if (!union_type.haveLayout(ip)) switch (strat) { if (union_type.flagsPtr(ip).alignment == .none) switch (strat) {
.eager => unreachable, // union layout not resolved .eager => unreachable, // union layout not resolved
.sema => |sema| return .{ .scalar = try sema.resolveUnionAlignment(ty, union_type) }, .sema => try ty.resolveUnionAlignment(mod),
.lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type, .ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() }, .storage = .{ .lazy_align = ty.toIntern() },
@ -1117,9 +1132,9 @@ pub fn abiAlignmentAdvanced(
fn abiAlignmentAdvancedErrorUnion( fn abiAlignmentAdvancedErrorUnion(
ty: Type, ty: Type,
mod: *Module, mod: *Module,
strat: AbiAlignmentAdvancedStrat, strat: ResolveStratLazy,
payload_ty: Type, payload_ty: Type,
) Module.CompileError!AbiAlignmentAdvanced { ) SemaError!AbiAlignmentAdvanced {
// This code needs to be kept in sync with the equivalent switch prong // This code needs to be kept in sync with the equivalent switch prong
// in abiSizeAdvanced. // in abiSizeAdvanced.
const code_align = abiAlignment(Type.anyerror, mod); const code_align = abiAlignment(Type.anyerror, mod);
@ -1154,8 +1169,8 @@ fn abiAlignmentAdvancedErrorUnion(
fn abiAlignmentAdvancedOptional( fn abiAlignmentAdvancedOptional(
ty: Type, ty: Type,
mod: *Module, mod: *Module,
strat: AbiAlignmentAdvancedStrat, strat: ResolveStratLazy,
) Module.CompileError!AbiAlignmentAdvanced { ) SemaError!AbiAlignmentAdvanced {
const target = mod.getTarget(); const target = mod.getTarget();
const child_type = ty.optionalChild(mod); const child_type = ty.optionalChild(mod);
@ -1217,8 +1232,8 @@ const AbiSizeAdvanced = union(enum) {
pub fn abiSizeAdvanced( pub fn abiSizeAdvanced(
ty: Type, ty: Type,
mod: *Module, mod: *Module,
strat: AbiAlignmentAdvancedStrat, strat: ResolveStratLazy,
) Module.CompileError!AbiSizeAdvanced { ) SemaError!AbiSizeAdvanced {
const target = mod.getTarget(); const target = mod.getTarget();
const use_llvm = mod.comp.config.use_llvm; const use_llvm = mod.comp.config.use_llvm;
const ip = &mod.intern_pool; const ip = &mod.intern_pool;
@ -1252,9 +1267,9 @@ pub fn abiSizeAdvanced(
} }
}, },
.vector_type => |vector_type| { .vector_type => |vector_type| {
const opt_sema = switch (strat) { const sub_strat: ResolveStrat = switch (strat) {
.sema => |sema| sema, .sema => .sema,
.eager => null, .eager => .normal,
.lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type, .ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() }, .storage = .{ .lazy_size = ty.toIntern() },
@ -1269,7 +1284,7 @@ pub fn abiSizeAdvanced(
}; };
const total_bytes = switch (mod.comp.getZigBackend()) { const total_bytes = switch (mod.comp.getZigBackend()) {
else => total_bytes: { else => total_bytes: {
const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema); const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, sub_strat);
const total_bits = elem_bits * vector_type.len; const total_bits = elem_bits * vector_type.len;
break :total_bytes (total_bits + 7) / 8; break :total_bytes (total_bits + 7) / 8;
}, },
@ -1403,7 +1418,7 @@ pub fn abiSizeAdvanced(
.struct_type => { .struct_type => {
const struct_type = ip.loadStructType(ty.toIntern()); const struct_type = ip.loadStructType(ty.toIntern());
switch (strat) { switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty), .sema => try ty.resolveLayout(mod),
.lazy => switch (struct_type.layout) { .lazy => switch (struct_type.layout) {
.@"packed" => { .@"packed" => {
if (struct_type.backingIntType(ip).* == .none) return .{ if (struct_type.backingIntType(ip).* == .none) return .{
@ -1436,7 +1451,7 @@ pub fn abiSizeAdvanced(
}, },
.anon_struct_type => |tuple| { .anon_struct_type => |tuple| {
switch (strat) { switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty), .sema => try ty.resolveLayout(mod),
.lazy, .eager => {}, .lazy, .eager => {},
} }
const field_count = tuple.types.len; const field_count = tuple.types.len;
@ -1449,7 +1464,7 @@ pub fn abiSizeAdvanced(
.union_type => { .union_type => {
const union_type = ip.loadUnionType(ty.toIntern()); const union_type = ip.loadUnionType(ty.toIntern());
switch (strat) { switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty), .sema => try ty.resolveLayout(mod),
.lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{ .lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{
.val = Value.fromInterned((try mod.intern(.{ .int = .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type, .ty = .comptime_int_type,
@ -1493,8 +1508,8 @@ pub fn abiSizeAdvanced(
fn abiSizeAdvancedOptional( fn abiSizeAdvancedOptional(
ty: Type, ty: Type,
mod: *Module, mod: *Module,
strat: AbiAlignmentAdvancedStrat, strat: ResolveStratLazy,
) Module.CompileError!AbiSizeAdvanced { ) SemaError!AbiSizeAdvanced {
const child_ty = ty.optionalChild(mod); const child_ty = ty.optionalChild(mod);
if (child_ty.isNoReturn(mod)) { if (child_ty.isNoReturn(mod)) {
@ -1661,21 +1676,18 @@ pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 {
} }
pub fn bitSize(ty: Type, mod: *Module) u64 { pub fn bitSize(ty: Type, mod: *Module) u64 {
return bitSizeAdvanced(ty, mod, null) catch unreachable; return bitSizeAdvanced(ty, mod, .normal) catch unreachable;
} }
/// If you pass `opt_sema`, any recursive type resolutions will happen if
/// necessary, possibly returning a CompileError. Passing `null` instead asserts
/// the type is fully resolved, and there will be no error, guaranteed.
pub fn bitSizeAdvanced( pub fn bitSizeAdvanced(
ty: Type, ty: Type,
mod: *Module, mod: *Module,
opt_sema: ?*Sema, strat: ResolveStrat,
) Module.CompileError!u64 { ) SemaError!u64 {
const target = mod.getTarget(); const target = mod.getTarget();
const ip = &mod.intern_pool; const ip = &mod.intern_pool;
const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; const strat_lazy: ResolveStratLazy = strat.toLazy();
switch (ip.indexToKey(ty.toIntern())) { switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| return int_type.bits, .int_type => |int_type| return int_type.bits,
@ -1690,22 +1702,22 @@ pub fn bitSizeAdvanced(
if (len == 0) return 0; if (len == 0) return 0;
const elem_ty = Type.fromInterned(array_type.child); const elem_ty = Type.fromInterned(array_type.child);
const elem_size = @max( const elem_size = @max(
(try elem_ty.abiAlignmentAdvanced(mod, strat)).scalar.toByteUnits() orelse 0, (try elem_ty.abiAlignmentAdvanced(mod, strat_lazy)).scalar.toByteUnits() orelse 0,
(try elem_ty.abiSizeAdvanced(mod, strat)).scalar, (try elem_ty.abiSizeAdvanced(mod, strat_lazy)).scalar,
); );
if (elem_size == 0) return 0; if (elem_size == 0) return 0;
const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, strat);
return (len - 1) * 8 * elem_size + elem_bit_size; return (len - 1) * 8 * elem_size + elem_bit_size;
}, },
.vector_type => |vector_type| { .vector_type => |vector_type| {
const child_ty = Type.fromInterned(vector_type.child); const child_ty = Type.fromInterned(vector_type.child);
const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); const elem_bit_size = try bitSizeAdvanced(child_ty, mod, strat);
return elem_bit_size * vector_type.len; return elem_bit_size * vector_type.len;
}, },
.opt_type => { .opt_type => {
// Optionals and error unions are not packed so their bitsize // Optionals and error unions are not packed so their bitsize
// includes padding bits. // includes padding bits.
return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; return (try abiSizeAdvanced(ty, mod, strat_lazy)).scalar * 8;
}, },
.error_set_type, .inferred_error_set_type => return mod.errorSetBits(), .error_set_type, .inferred_error_set_type => return mod.errorSetBits(),
@ -1713,7 +1725,7 @@ pub fn bitSizeAdvanced(
.error_union_type => { .error_union_type => {
// Optionals and error unions are not packed so their bitsize // Optionals and error unions are not packed so their bitsize
// includes padding bits. // includes padding bits.
return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; return (try abiSizeAdvanced(ty, mod, strat_lazy)).scalar * 8;
}, },
.func_type => unreachable, // represents machine code; not a pointer .func_type => unreachable, // represents machine code; not a pointer
.simple_type => |t| switch (t) { .simple_type => |t| switch (t) {
@ -1770,43 +1782,43 @@ pub fn bitSizeAdvanced(
.struct_type => { .struct_type => {
const struct_type = ip.loadStructType(ty.toIntern()); const struct_type = ip.loadStructType(ty.toIntern());
const is_packed = struct_type.layout == .@"packed"; const is_packed = struct_type.layout == .@"packed";
if (opt_sema) |sema| { if (strat == .sema) {
try sema.resolveTypeFields(ty); try ty.resolveFields(mod);
if (is_packed) try sema.resolveTypeLayout(ty); if (is_packed) try ty.resolveLayout(mod);
} }
if (is_packed) { if (is_packed) {
return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, opt_sema); return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, strat);
} }
return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8;
}, },
.anon_struct_type => { .anon_struct_type => {
if (opt_sema) |sema| try sema.resolveTypeFields(ty); if (strat == .sema) try ty.resolveFields(mod);
return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8;
}, },
.union_type => { .union_type => {
const union_type = ip.loadUnionType(ty.toIntern()); const union_type = ip.loadUnionType(ty.toIntern());
const is_packed = ty.containerLayout(mod) == .@"packed"; const is_packed = ty.containerLayout(mod) == .@"packed";
if (opt_sema) |sema| { if (strat == .sema) {
try sema.resolveTypeFields(ty); try ty.resolveFields(mod);
if (is_packed) try sema.resolveTypeLayout(ty); if (is_packed) try ty.resolveLayout(mod);
} }
if (!is_packed) { if (!is_packed) {
return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8;
} }
assert(union_type.flagsPtr(ip).status.haveFieldTypes()); assert(union_type.flagsPtr(ip).status.haveFieldTypes());
var size: u64 = 0; var size: u64 = 0;
for (0..union_type.field_types.len) |field_index| { for (0..union_type.field_types.len) |field_index| {
const field_ty = union_type.field_types.get(ip)[field_index]; const field_ty = union_type.field_types.get(ip)[field_index];
size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, opt_sema)); size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, strat));
} }
return size; return size;
}, },
.opaque_type => unreachable, .opaque_type => unreachable,
.enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, opt_sema), .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, strat),
// values, not types // values, not types
.undef, .undef,
@ -2722,13 +2734,12 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
/// During semantic analysis, instead call `Sema.typeRequiresComptime` which /// During semantic analysis, instead call `Sema.typeRequiresComptime` which
/// resolves field types rather than asserting they are already resolved. /// resolves field types rather than asserting they are already resolved.
pub fn comptimeOnly(ty: Type, mod: *Module) bool { pub fn comptimeOnly(ty: Type, mod: *Module) bool {
return ty.comptimeOnlyAdvanced(mod, null) catch unreachable; return ty.comptimeOnlyAdvanced(mod, .normal) catch unreachable;
} }
/// `generic_poison` will return false. /// `generic_poison` will return false.
/// May return false negatives when structs and unions are having their field types resolved. /// May return false negatives when structs and unions are having their field types resolved.
/// If `opt_sema` is not provided, asserts that the type is sufficiently resolved. pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaError!bool {
pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool {
const ip = &mod.intern_pool; const ip = &mod.intern_pool;
return switch (ty.toIntern()) { return switch (ty.toIntern()) {
.empty_struct_type => false, .empty_struct_type => false,
@ -2738,19 +2749,19 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com
.ptr_type => |ptr_type| { .ptr_type => |ptr_type| {
const child_ty = Type.fromInterned(ptr_type.child); const child_ty = Type.fromInterned(ptr_type.child);
switch (child_ty.zigTypeTag(mod)) { switch (child_ty.zigTypeTag(mod)) {
.Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, opt_sema), .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, strat),
.Opaque => return false, .Opaque => return false,
else => return child_ty.comptimeOnlyAdvanced(mod, opt_sema), else => return child_ty.comptimeOnlyAdvanced(mod, strat),
} }
}, },
.anyframe_type => |child| { .anyframe_type => |child| {
if (child == .none) return false; if (child == .none) return false;
return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema); return Type.fromInterned(child).comptimeOnlyAdvanced(mod, strat);
}, },
.array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, opt_sema), .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, strat),
.vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, opt_sema), .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, strat),
.opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema), .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, strat),
.error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, opt_sema), .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, strat),
.error_set_type, .error_set_type,
.inferred_error_set_type, .inferred_error_set_type,
@ -2817,8 +2828,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com
.no, .wip => false, .no, .wip => false,
.yes => true, .yes => true,
.unknown => { .unknown => {
// The type is not resolved; assert that we have a Sema. assert(strat == .sema);
const sema = opt_sema.?;
if (struct_type.flagsPtr(ip).field_types_wip) if (struct_type.flagsPtr(ip).field_types_wip)
return false; return false;
@ -2826,13 +2836,13 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com
struct_type.flagsPtr(ip).requires_comptime = .wip; struct_type.flagsPtr(ip).requires_comptime = .wip;
errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown; errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown;
try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type); try ty.resolveFields(mod);
for (0..struct_type.field_types.len) |i_usize| { for (0..struct_type.field_types.len) |i_usize| {
const i: u32 = @intCast(i_usize); const i: u32 = @intCast(i_usize);
if (struct_type.fieldIsComptime(ip, i)) continue; if (struct_type.fieldIsComptime(ip, i)) continue;
const field_ty = struct_type.field_types.get(ip)[i]; const field_ty = struct_type.field_types.get(ip)[i];
if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) { if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) {
// Note that this does not cause the layout to // Note that this does not cause the layout to
// be considered resolved. Comptime-only types // be considered resolved. Comptime-only types
// still maintain a layout of their // still maintain a layout of their
@ -2851,7 +2861,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com
.anon_struct_type => |tuple| { .anon_struct_type => |tuple| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
const have_comptime_val = val != .none; const have_comptime_val = val != .none;
if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) return true; if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) return true;
} }
return false; return false;
}, },
@ -2862,8 +2872,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com
.no, .wip => return false, .no, .wip => return false,
.yes => return true, .yes => return true,
.unknown => { .unknown => {
// The type is not resolved; assert that we have a Sema. assert(strat == .sema);
const sema = opt_sema.?;
if (union_type.flagsPtr(ip).status == .field_types_wip) if (union_type.flagsPtr(ip).status == .field_types_wip)
return false; return false;
@ -2871,11 +2880,11 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com
union_type.flagsPtr(ip).requires_comptime = .wip; union_type.flagsPtr(ip).requires_comptime = .wip;
errdefer union_type.flagsPtr(ip).requires_comptime = .unknown; errdefer union_type.flagsPtr(ip).requires_comptime = .unknown;
try sema.resolveTypeFieldsUnion(ty, union_type); try ty.resolveFields(mod);
for (0..union_type.field_types.len) |field_idx| { for (0..union_type.field_types.len) |field_idx| {
const field_ty = union_type.field_types.get(ip)[field_idx]; const field_ty = union_type.field_types.get(ip)[field_idx];
if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) { if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) {
union_type.flagsPtr(ip).requires_comptime = .yes; union_type.flagsPtr(ip).requires_comptime = .yes;
return true; return true;
} }
@ -2889,7 +2898,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com
.opaque_type => false, .opaque_type => false,
.enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, opt_sema), .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, strat),
// values, not types // values, not types
.undef, .undef,
@ -3180,10 +3189,10 @@ pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
} }
pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment { pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment {
return ty.structFieldAlignAdvanced(index, zcu, null) catch unreachable; return ty.structFieldAlignAdvanced(index, zcu, .normal) catch unreachable;
} }
pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, opt_sema: ?*Sema) !Alignment { pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, strat: ResolveStrat) !Alignment {
const ip = &zcu.intern_pool; const ip = &zcu.intern_pool;
switch (ip.indexToKey(ty.toIntern())) { switch (ip.indexToKey(ty.toIntern())) {
.struct_type => { .struct_type => {
@ -3191,22 +3200,14 @@ pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, opt_sema: ?*S
assert(struct_type.layout != .@"packed"); assert(struct_type.layout != .@"packed");
const explicit_align = struct_type.fieldAlign(ip, index); const explicit_align = struct_type.fieldAlign(ip, index);
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]); const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
if (opt_sema) |sema| { return zcu.structFieldAlignmentAdvanced(explicit_align, field_ty, struct_type.layout, strat);
return sema.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
} else {
return zcu.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
}
}, },
.anon_struct_type => |anon_struct| { .anon_struct_type => |anon_struct| {
return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, if (opt_sema) |sema| .{ .sema = sema } else .eager)).scalar; return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, strat.toLazy())).scalar;
}, },
.union_type => { .union_type => {
const union_obj = ip.loadUnionType(ty.toIntern()); const union_obj = ip.loadUnionType(ty.toIntern());
if (opt_sema) |sema| { return zcu.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(index), strat);
return sema.unionFieldAlignment(union_obj, @intCast(index));
} else {
return zcu.unionFieldNormalAlignment(union_obj, @intCast(index));
}
}, },
else => unreachable, else => unreachable,
} }
@ -3546,6 +3547,397 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx:
} }; } };
} }
pub fn resolveLayout(ty: Type, zcu: *Zcu) SemaError!void {
const ip = &zcu.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.simple_type => |simple_type| return resolveSimpleType(simple_type, zcu),
else => {},
}
switch (ty.zigTypeTag(zcu)) {
.Struct => switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| {
const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]);
try field_ty.resolveLayout(zcu);
},
.struct_type => return ty.resolveStructInner(zcu, .layout),
else => unreachable,
},
.Union => return ty.resolveUnionInner(zcu, .layout),
.Array => {
if (ty.arrayLenIncludingSentinel(zcu) == 0) return;
const elem_ty = ty.childType(zcu);
return elem_ty.resolveLayout(zcu);
},
.Optional => {
const payload_ty = ty.optionalChild(zcu);
return payload_ty.resolveLayout(zcu);
},
.ErrorUnion => {
const payload_ty = ty.errorUnionPayload(zcu);
return payload_ty.resolveLayout(zcu);
},
.Fn => {
const info = zcu.typeToFunc(ty).?;
if (info.is_generic) {
// Resolution of generic function types is deferred until
// the function is instantiated.
return;
}
for (0..info.param_types.len) |i| {
const param_ty = info.param_types.get(ip)[i];
try Type.fromInterned(param_ty).resolveLayout(zcu);
}
try Type.fromInterned(info.return_type).resolveLayout(zcu);
},
else => {},
}
}
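To illustrate the new flow, here is a minimal sketch (not part of this commit; `exampleQuerySize` and its parameters are hypothetical) of a caller that no longer needs a `Sema` in scope:
fn exampleQuerySize(buf_ty: Type, zcu: *Zcu) SemaError!u64 {
    // Resolution now happens through the type itself, backed by the work queue.
    try buf_ty.resolveLayout(zcu);
    // With layout resolved, the eager ABI-size query cannot fail.
    return buf_ty.abiSize(zcu);
}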
pub fn resolveFields(ty: Type, zcu: *Zcu) SemaError!void {
const ip = &zcu.intern_pool;
const ty_ip = ty.toIntern();
switch (ty_ip) {
.none => unreachable,
.u0_type,
.i0_type,
.u1_type,
.u8_type,
.i8_type,
.u16_type,
.i16_type,
.u29_type,
.u32_type,
.i32_type,
.u64_type,
.i64_type,
.u80_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_char_type,
.c_short_type,
.c_ushort_type,
.c_int_type,
.c_uint_type,
.c_long_type,
.c_ulong_type,
.c_longlong_type,
.c_ulonglong_type,
.c_longdouble_type,
.f16_type,
.f32_type,
.f64_type,
.f80_type,
.f128_type,
.anyopaque_type,
.bool_type,
.void_type,
.type_type,
.anyerror_type,
.adhoc_inferred_error_set_type,
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
.anyframe_type,
.null_type,
.undefined_type,
.enum_literal_type,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
.single_const_pointer_to_comptime_int_type,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
.optional_noreturn_type,
.anyerror_void_error_union_type,
.generic_poison_type,
.empty_struct_type,
=> {},
.undef => unreachable,
.zero => unreachable,
.zero_usize => unreachable,
.zero_u8 => unreachable,
.one => unreachable,
.one_usize => unreachable,
.one_u8 => unreachable,
.four_u8 => unreachable,
.negative_one => unreachable,
.calling_convention_c => unreachable,
.calling_convention_inline => unreachable,
.void_value => unreachable,
.unreachable_value => unreachable,
.null_value => unreachable,
.bool_true => unreachable,
.bool_false => unreachable,
.empty_struct => unreachable,
.generic_poison => unreachable,
else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) {
.type_struct,
.type_struct_packed,
.type_struct_packed_inits,
=> return ty.resolveStructInner(zcu, .fields),
.type_union => return ty.resolveUnionInner(zcu, .fields),
.simple_type => return resolveSimpleType(ip.indexToKey(ty_ip).simple_type, zcu),
else => {},
},
}
}
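A hedged usage sketch (`union_ty` and `zcu` are hypothetical names): field information is only valid after this call, which is why callers such as `comptimeOnlyAdvanced` above now invoke it before walking fields.
try union_ty.resolveFields(zcu);
const union_obj = zcu.typeToUnion(union_ty).?; // safe: field types are now populated
const first_field_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[0]);
_ = first_field_ty;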
pub fn resolveFully(ty: Type, zcu: *Zcu) SemaError!void {
const ip = &zcu.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.simple_type => |simple_type| return resolveSimpleType(simple_type, zcu),
else => {},
}
switch (ty.zigTypeTag(zcu)) {
.Type,
.Void,
.Bool,
.NoReturn,
.Int,
.Float,
.ComptimeFloat,
.ComptimeInt,
.Undefined,
.Null,
.ErrorSet,
.Enum,
.Opaque,
.Frame,
.AnyFrame,
.Vector,
.EnumLiteral,
=> {},
.Pointer => return ty.childType(zcu).resolveFully(zcu),
.Array => return ty.childType(zcu).resolveFully(zcu),
.Optional => return ty.optionalChild(zcu).resolveFully(zcu),
.ErrorUnion => return ty.errorUnionPayload(zcu).resolveFully(zcu),
.Fn => {
const info = zcu.typeToFunc(ty).?;
if (info.is_generic) return;
for (0..info.param_types.len) |i| {
const param_ty = info.param_types.get(ip)[i];
try Type.fromInterned(param_ty).resolveFully(zcu);
}
try Type.fromInterned(info.return_type).resolveFully(zcu);
},
.Struct => switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| {
const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]);
try field_ty.resolveFully(zcu);
},
.struct_type => return ty.resolveStructInner(zcu, .full),
else => unreachable,
},
.Union => return ty.resolveUnionInner(zcu, .full),
}
}
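The new `resolve_type_fully` work-queue job (queued in `getFileRootStruct` below) presumably bottoms out in a call shaped like this sketch; the wrapper name is hypothetical:
fn processResolveTypeFully(zcu: *Zcu, ty_index: InternPool.Index) SemaError!void {
    // Recurses through child, payload, field, and function types.
    try Type.fromInterned(ty_index).resolveFully(zcu);
}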
pub fn resolveStructFieldInits(ty: Type, zcu: *Zcu) SemaError!void {
// TODO: stop calling this for tuples!
_ = zcu.typeToStruct(ty) orelse return;
return ty.resolveStructInner(zcu, .inits);
}
pub fn resolveStructAlignment(ty: Type, zcu: *Zcu) SemaError!void {
return ty.resolveStructInner(zcu, .alignment);
}
pub fn resolveUnionAlignment(ty: Type, zcu: *Zcu) SemaError!void {
return ty.resolveUnionInner(zcu, .alignment);
}
/// `ty` must be a struct.
fn resolveStructInner(
ty: Type,
zcu: *Zcu,
resolution: enum { fields, inits, alignment, layout, full },
) SemaError!void {
const gpa = zcu.gpa;
const struct_obj = zcu.typeToStruct(ty).?;
const owner_decl_index = struct_obj.decl.unwrap() orelse return;
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa);
defer comptime_err_ret_trace.deinit();
var sema: Sema = .{
.mod = zcu,
.gpa = gpa,
.arena = analysis_arena.allocator(),
.code = undefined, // This ZIR will not be used.
.owner_decl = zcu.declPtr(owner_decl_index),
.owner_decl_index = owner_decl_index,
.func_index = .none,
.func_is_naked = false,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_err_ret_trace = &comptime_err_ret_trace,
};
defer sema.deinit();
switch (resolution) {
.fields => return sema.resolveTypeFieldsStruct(ty.toIntern(), struct_obj),
.inits => return sema.resolveStructFieldInits(ty),
.alignment => return sema.resolveStructAlignment(ty.toIntern(), struct_obj),
.layout => return sema.resolveStructLayout(ty),
.full => return sema.resolveStructFully(ty),
}
}
/// `ty` must be a union.
fn resolveUnionInner(
ty: Type,
zcu: *Zcu,
resolution: enum { fields, alignment, layout, full },
) SemaError!void {
const gpa = zcu.gpa;
const union_obj = zcu.typeToUnion(ty).?;
const owner_decl_index = union_obj.decl;
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa);
defer comptime_err_ret_trace.deinit();
var sema: Sema = .{
.mod = zcu,
.gpa = gpa,
.arena = analysis_arena.allocator(),
.code = undefined, // This ZIR will not be used.
.owner_decl = zcu.declPtr(owner_decl_index),
.owner_decl_index = owner_decl_index,
.func_index = .none,
.func_is_naked = false,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_err_ret_trace = &comptime_err_ret_trace,
};
defer sema.deinit();
switch (resolution) {
.fields => return sema.resolveTypeFieldsUnion(ty, union_obj),
.alignment => return sema.resolveUnionAlignment(ty, union_obj),
.layout => return sema.resolveUnionLayout(ty),
.full => return sema.resolveUnionFully(ty),
}
}
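// Note on the two helpers above: each constructs a short-lived `Sema` whose
// `owner_decl` is the type's own owner Decl, so any errors emitted during
// resolution are attributed to the type itself rather than to whichever
// Decl happened to trigger the resolution.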
/// Fully resolves a simple type. This is usually a nop, but for builtin types with
/// special InternPool indices (such as std.builtin.Type) it will analyze and fully
/// resolve the type.
fn resolveSimpleType(simple_type: InternPool.SimpleType, zcu: *Zcu) Allocator.Error!void {
const builtin_type_name: []const u8 = switch (simple_type) {
.atomic_order => "AtomicOrder",
.atomic_rmw_op => "AtomicRmwOp",
.calling_convention => "CallingConvention",
.address_space => "AddressSpace",
.float_mode => "FloatMode",
.reduce_op => "ReduceOp",
.call_modifier => "CallModifer",
.prefetch_options => "PrefetchOptions",
.export_options => "ExportOptions",
.extern_options => "ExternOptions",
.type_info => "Type",
else => return,
};
// This will fully resolve the type.
_ = try zcu.getBuiltinType(builtin_type_name);
}
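A hedged sketch of the effect (assumes the `.type_info_type` InternPool index, following the constant style used at the end of this file):
const type_info_ty: Type = .{ .ip_index = .type_info_type };
// Dispatches to resolveSimpleType, which analyzes std.builtin.Type.
try type_info_ty.resolveFully(zcu);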
/// Returns the type of a pointer to an element.
/// Asserts that the type is a pointer, and that the element type is indexable.
/// If the element index is comptime-known, it must be passed in `offset`.
/// For *@Vector(n, T), returns *align(a:b:h:v) T
/// For *[N]T, returns *T
/// For [*]T, returns *T
/// For []T, returns *T
/// Handles const-ness and address spaces in particular.
/// This code is duplicated in `Sema.analyzePtrArithmetic`.
/// May perform type resolution and return a transitive `error.AnalysisFail`.
pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type {
const ptr_info = ptr_ty.ptrInfo(zcu);
const elem_ty = ptr_ty.elemType2(zcu);
const is_allowzero = ptr_info.flags.is_allowzero and (offset orelse 0) == 0;
const parent_ty = ptr_ty.childType(zcu);
const VI = InternPool.Key.PtrType.VectorIndex;
const vector_info: struct {
host_size: u16 = 0,
alignment: Alignment = .none,
vector_index: VI = .none,
} = if (parent_ty.isVector(zcu) and ptr_info.flags.size == .One) blk: {
const elem_bits = elem_ty.bitSize(zcu);
if (elem_bits == 0) break :blk .{};
const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
if (!is_packed) break :blk .{};
break :blk .{
.host_size = @intCast(parent_ty.arrayLen(zcu)),
.alignment = parent_ty.abiAlignment(zcu),
.vector_index = if (offset) |some| @enumFromInt(some) else .runtime,
};
} else .{};
const alignment: Alignment = a: {
// Calculate the new pointer alignment.
if (ptr_info.flags.alignment == .none) {
// In case of an ABI-aligned pointer, any pointer arithmetic
// maintains the same ABI-alignedness.
break :a vector_info.alignment;
}
// If the addend is not a comptime-known value, we can still count on
// it being a multiple of the type size.
const elem_size = (try elem_ty.abiSizeAdvanced(zcu, .sema)).scalar;
const addend = if (offset) |off| elem_size * off else elem_size;
// The resulting pointer is aligned to the largest power of two that
// divides both the offset (an arbitrary number) and the alignment
// factor (always a nonzero power of two).
const new_align: Alignment = @enumFromInt(@min(
@ctz(addend),
ptr_info.flags.alignment.toLog2Units(),
));
assert(new_align != .none);
break :a new_align;
};
return zcu.ptrTypeSema(.{
.child = elem_ty.toIntern(),
.flags = .{
.alignment = alignment,
.is_const = ptr_info.flags.is_const,
.is_volatile = ptr_info.flags.is_volatile,
.is_allowzero = is_allowzero,
.address_space = ptr_info.flags.address_space,
.vector_index = vector_info.vector_index,
},
.packed_offset = .{
.host_size = vector_info.host_size,
.bit_offset = 0,
},
});
}
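// Worked example of the alignment math above (hypothetical values): for an
// align(8) pointer to 12-byte elements, a comptime-known offset of 2 gives
// addend = 24, so the result is align(2^min(@ctz(24), 3)) = align(8); with a
// runtime offset, addend = elem_size = 12, giving align(2^min(2, 3)) = align(4).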
pub const @"u1": Type = .{ .ip_index = .u1_type }; pub const @"u1": Type = .{ .ip_index = .u1_type };
pub const @"u8": Type = .{ .ip_index = .u8_type }; pub const @"u8": Type = .{ .ip_index = .u8_type };
pub const @"u16": Type = .{ .ip_index = .u16_type }; pub const @"u16": Type = .{ .ip_index = .u16_type };


@ -161,9 +161,11 @@ pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value {
}; };
} }
pub const ResolveStrat = Type.ResolveStrat;
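A usage sketch mirroring the call sites below (`val` and `mod` are hypothetical): callers outside semantic analysis pass `.normal` and treat resolution errors as unreachable, while Sema-driven callers pass `.sema` and propagate them.
const eager_order = val.orderAgainstZeroAdvanced(mod, .normal) catch unreachable;
const sema_order = try val.orderAgainstZeroAdvanced(mod, .sema);
_ = eager_order;
_ = sema_order;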
/// Asserts the value is an integer. /// Asserts the value is an integer.
pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *Module) BigIntConst { pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *Module) BigIntConst {
return val.toBigIntAdvanced(space, mod, null) catch unreachable; return val.toBigIntAdvanced(space, mod, .normal) catch unreachable;
} }
/// Asserts the value is an integer. /// Asserts the value is an integer.
@ -171,7 +173,7 @@ pub fn toBigIntAdvanced(
val: Value, val: Value,
space: *BigIntSpace, space: *BigIntSpace,
mod: *Module, mod: *Module,
opt_sema: ?*Sema, strat: ResolveStrat,
) Module.CompileError!BigIntConst { ) Module.CompileError!BigIntConst {
return switch (val.toIntern()) { return switch (val.toIntern()) {
.bool_false => BigIntMutable.init(&space.limbs, 0).toConst(), .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(),
@ -181,7 +183,7 @@ pub fn toBigIntAdvanced(
.int => |int| switch (int.storage) { .int => |int| switch (int.storage) {
.u64, .i64, .big_int => int.storage.toBigInt(space), .u64, .i64, .big_int => int.storage.toBigInt(space),
.lazy_align, .lazy_size => |ty| { .lazy_align, .lazy_size => |ty| {
if (opt_sema) |sema| try sema.resolveTypeLayout(Type.fromInterned(ty)); if (strat == .sema) try Type.fromInterned(ty).resolveLayout(mod);
const x = switch (int.storage) { const x = switch (int.storage) {
else => unreachable, else => unreachable,
.lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0, .lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0,
@ -190,10 +192,10 @@ pub fn toBigIntAdvanced(
return BigIntMutable.init(&space.limbs, x).toConst(); return BigIntMutable.init(&space.limbs, x).toConst();
}, },
}, },
.enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, mod, opt_sema), .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, mod, strat),
.opt, .ptr => BigIntMutable.init( .opt, .ptr => BigIntMutable.init(
&space.limbs, &space.limbs,
(try val.getUnsignedIntAdvanced(mod, opt_sema)).?, (try val.getUnsignedIntAdvanced(mod, strat)).?,
).toConst(), ).toConst(),
else => unreachable, else => unreachable,
}, },
@ -228,12 +230,12 @@ pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable {
/// If the value fits in a u64, return it, otherwise null. /// If the value fits in a u64, return it, otherwise null.
/// Asserts not undefined. /// Asserts not undefined.
pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 { pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 {
return getUnsignedIntAdvanced(val, mod, null) catch unreachable; return getUnsignedIntAdvanced(val, mod, .normal) catch unreachable;
} }
/// If the value fits in a u64, return it, otherwise null. /// If the value fits in a u64, return it, otherwise null.
/// Asserts not undefined. /// Asserts not undefined.
pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, strat: ResolveStrat) !?u64 {
return switch (val.toIntern()) { return switch (val.toIntern()) {
.undef => unreachable, .undef => unreachable,
.bool_false => 0, .bool_false => 0,
@ -244,28 +246,22 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64
.big_int => |big_int| big_int.to(u64) catch null, .big_int => |big_int| big_int.to(u64) catch null,
.u64 => |x| x, .u64 => |x| x,
.i64 => |x| std.math.cast(u64, x), .i64 => |x| std.math.cast(u64, x),
.lazy_align => |ty| if (opt_sema) |sema| .lazy_align => |ty| (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, strat.toLazy())).scalar.toByteUnits() orelse 0,
(try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits() orelse 0 .lazy_size => |ty| (try Type.fromInterned(ty).abiSizeAdvanced(mod, strat.toLazy())).scalar,
else
Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0,
.lazy_size => |ty| if (opt_sema) |sema|
(try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar
else
Type.fromInterned(ty).abiSize(mod),
}, },
.ptr => |ptr| switch (ptr.base_addr) { .ptr => |ptr| switch (ptr.base_addr) {
.int => ptr.byte_offset, .int => ptr.byte_offset,
.field => |field| { .field => |field| {
const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, strat)) orelse return null;
const struct_ty = Value.fromInterned(field.base).typeOf(mod).childType(mod); const struct_ty = Value.fromInterned(field.base).typeOf(mod).childType(mod);
if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty); if (strat == .sema) try struct_ty.resolveLayout(mod);
return base_addr + struct_ty.structFieldOffset(@intCast(field.index), mod) + ptr.byte_offset; return base_addr + struct_ty.structFieldOffset(@intCast(field.index), mod) + ptr.byte_offset;
}, },
else => null, else => null,
}, },
.opt => |opt| switch (opt.val) { .opt => |opt| switch (opt.val) {
.none => 0, .none => 0,
else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(mod, opt_sema), else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(mod, strat),
}, },
else => null, else => null,
}, },
@ -273,13 +269,13 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64
} }
/// Asserts the value is an integer and it fits in a u64 /// Asserts the value is an integer and it fits in a u64
pub fn toUnsignedInt(val: Value, mod: *Module) u64 { pub fn toUnsignedInt(val: Value, zcu: *Zcu) u64 {
return getUnsignedInt(val, mod).?; return getUnsignedInt(val, zcu).?;
} }
/// Asserts the value is an integer and it fits in a u64 /// Asserts the value is an integer and it fits in a u64
pub fn toUnsignedIntAdvanced(val: Value, sema: *Sema) !u64 { pub fn toUnsignedIntSema(val: Value, zcu: *Zcu) !u64 {
return (try getUnsignedIntAdvanced(val, sema.mod, sema)).?; return (try getUnsignedIntAdvanced(val, zcu, .sema)).?;
} }
/// Asserts the value is an integer and it fits in a i64 /// Asserts the value is an integer and it fits in a i64
@ -1028,13 +1024,13 @@ pub fn floatHasFraction(self: Value, mod: *const Module) bool {
} }
pub fn orderAgainstZero(lhs: Value, mod: *Module) std.math.Order { pub fn orderAgainstZero(lhs: Value, mod: *Module) std.math.Order {
return orderAgainstZeroAdvanced(lhs, mod, null) catch unreachable; return orderAgainstZeroAdvanced(lhs, mod, .normal) catch unreachable;
} }
pub fn orderAgainstZeroAdvanced( pub fn orderAgainstZeroAdvanced(
lhs: Value, lhs: Value,
mod: *Module, mod: *Module,
opt_sema: ?*Sema, strat: ResolveStrat,
) Module.CompileError!std.math.Order { ) Module.CompileError!std.math.Order {
return switch (lhs.toIntern()) { return switch (lhs.toIntern()) {
.bool_false => .eq, .bool_false => .eq,
@ -1052,13 +1048,13 @@ pub fn orderAgainstZeroAdvanced(
.lazy_size => |ty| return if (Type.fromInterned(ty).hasRuntimeBitsAdvanced( .lazy_size => |ty| return if (Type.fromInterned(ty).hasRuntimeBitsAdvanced(
mod, mod,
false, false,
if (opt_sema) |sema| .{ .sema = sema } else .eager, strat.toLazy(),
) catch |err| switch (err) { ) catch |err| switch (err) {
error.NeedLazy => unreachable, error.NeedLazy => unreachable,
else => |e| return e, else => |e| return e,
}) .gt else .eq, }) .gt else .eq,
}, },
.enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(mod, opt_sema), .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(mod, strat),
.float => |float| switch (float.storage) { .float => |float| switch (float.storage) {
inline else => |x| std.math.order(x, 0), inline else => |x| std.math.order(x, 0),
}, },
@ -1069,14 +1065,13 @@ pub fn orderAgainstZeroAdvanced(
/// Asserts the value is comparable. /// Asserts the value is comparable.
pub fn order(lhs: Value, rhs: Value, mod: *Module) std.math.Order { pub fn order(lhs: Value, rhs: Value, mod: *Module) std.math.Order {
return orderAdvanced(lhs, rhs, mod, null) catch unreachable; return orderAdvanced(lhs, rhs, mod, .normal) catch unreachable;
} }
/// Asserts the value is comparable. /// Asserts the value is comparable.
/// If opt_sema is null then this function asserts things are resolved and cannot fail. pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, strat: ResolveStrat) !std.math.Order {
pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, opt_sema: ?*Sema) !std.math.Order { const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, strat);
const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, opt_sema); const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, strat);
const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, opt_sema);
switch (lhs_against_zero) { switch (lhs_against_zero) {
.lt => if (rhs_against_zero != .lt) return .lt, .lt => if (rhs_against_zero != .lt) return .lt,
.eq => return rhs_against_zero.invert(), .eq => return rhs_against_zero.invert(),
@ -1096,15 +1091,15 @@ pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, opt_sema: ?*Sema) !st
var lhs_bigint_space: BigIntSpace = undefined; var lhs_bigint_space: BigIntSpace = undefined;
var rhs_bigint_space: BigIntSpace = undefined; var rhs_bigint_space: BigIntSpace = undefined;
const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, opt_sema); const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, strat);
const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, opt_sema); const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, strat);
return lhs_bigint.order(rhs_bigint); return lhs_bigint.order(rhs_bigint);
} }
/// Asserts the value is comparable. Does not take a type parameter because it supports /// Asserts the value is comparable. Does not take a type parameter because it supports
/// comparisons between heterogeneous types. /// comparisons between heterogeneous types.
pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *Module) bool { pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *Module) bool {
return compareHeteroAdvanced(lhs, op, rhs, mod, null) catch unreachable; return compareHeteroAdvanced(lhs, op, rhs, mod, .normal) catch unreachable;
} }
pub fn compareHeteroAdvanced( pub fn compareHeteroAdvanced(
@ -1112,7 +1107,7 @@ pub fn compareHeteroAdvanced(
op: std.math.CompareOperator, op: std.math.CompareOperator,
rhs: Value, rhs: Value,
mod: *Module, mod: *Module,
opt_sema: ?*Sema, strat: ResolveStrat,
) !bool { ) !bool {
if (lhs.pointerDecl(mod)) |lhs_decl| { if (lhs.pointerDecl(mod)) |lhs_decl| {
if (rhs.pointerDecl(mod)) |rhs_decl| { if (rhs.pointerDecl(mod)) |rhs_decl| {
@ -1135,7 +1130,7 @@ pub fn compareHeteroAdvanced(
else => {}, else => {},
} }
} }
return (try orderAdvanced(lhs, rhs, mod, opt_sema)).compare(op); return (try orderAdvanced(lhs, rhs, mod, strat)).compare(op);
} }
/// Asserts the values are comparable. Both operands have type `ty`. /// Asserts the values are comparable. Both operands have type `ty`.
@ -1176,22 +1171,22 @@ pub fn compareScalar(
/// ///
/// Note that `!compareAllWithZero(.eq, ...) != compareAllWithZero(.neq, ...)` /// Note that `!compareAllWithZero(.eq, ...) != compareAllWithZero(.neq, ...)`
pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, mod: *Module) bool { pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, mod: *Module) bool {
return compareAllWithZeroAdvancedExtra(lhs, op, mod, null) catch unreachable; return compareAllWithZeroAdvancedExtra(lhs, op, mod, .normal) catch unreachable;
} }
pub fn compareAllWithZeroAdvanced( pub fn compareAllWithZeroSema(
lhs: Value, lhs: Value,
op: std.math.CompareOperator, op: std.math.CompareOperator,
sema: *Sema, zcu: *Zcu,
) Module.CompileError!bool { ) Module.CompileError!bool {
return compareAllWithZeroAdvancedExtra(lhs, op, sema.mod, sema); return compareAllWithZeroAdvancedExtra(lhs, op, zcu, .sema);
} }
pub fn compareAllWithZeroAdvancedExtra( pub fn compareAllWithZeroAdvancedExtra(
lhs: Value, lhs: Value,
op: std.math.CompareOperator, op: std.math.CompareOperator,
mod: *Module, mod: *Module,
opt_sema: ?*Sema, strat: ResolveStrat,
) Module.CompileError!bool { ) Module.CompileError!bool {
if (lhs.isInf(mod)) { if (lhs.isInf(mod)) {
switch (op) { switch (op) {
@ -1211,14 +1206,14 @@ pub fn compareAllWithZeroAdvancedExtra(
if (!std.math.order(byte, 0).compare(op)) break false; if (!std.math.order(byte, 0).compare(op)) break false;
} else true, } else true,
.elems => |elems| for (elems) |elem| { .elems => |elems| for (elems) |elem| {
if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false; if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, strat)) break false;
} else true, } else true,
.repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema), .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, strat),
}, },
.undef => return false, .undef => return false,
else => {}, else => {},
} }
return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op); return (try orderAgainstZeroAdvanced(lhs, mod, strat)).compare(op);
} }
pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool { pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool {
@ -1279,9 +1274,9 @@ pub fn slicePtr(val: Value, mod: *Module) Value {
} }
/// Gets the `len` field of a slice value as a `u64`. /// Gets the `len` field of a slice value as a `u64`.
/// Resolves the length using the provided `Sema` if necessary. /// Resolves the length using `Sema` if necessary.
pub fn sliceLen(val: Value, sema: *Sema) !u64 { pub fn sliceLen(val: Value, zcu: *Zcu) !u64 {
return Value.fromInterned(sema.mod.intern_pool.sliceLen(val.toIntern())).toUnsignedIntAdvanced(sema); return Value.fromInterned(zcu.intern_pool.sliceLen(val.toIntern())).toUnsignedIntSema(zcu);
} }
/// Asserts the value is an aggregate, and returns the element value at the given index. /// Asserts the value is an aggregate, and returns the element value at the given index.
@ -1482,29 +1477,29 @@ pub fn isFloat(self: Value, mod: *const Module) bool {
} }
pub fn floatFromInt(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module) !Value { pub fn floatFromInt(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module) !Value {
return floatFromIntAdvanced(val, arena, int_ty, float_ty, mod, null) catch |err| switch (err) { return floatFromIntAdvanced(val, arena, int_ty, float_ty, mod, .normal) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory, error.OutOfMemory => return error.OutOfMemory,
else => unreachable, else => unreachable,
}; };
} }
pub fn floatFromIntAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { pub fn floatFromIntAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, strat: ResolveStrat) !Value {
if (int_ty.zigTypeTag(mod) == .Vector) { if (int_ty.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(mod)); const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(mod));
const scalar_ty = float_ty.scalarType(mod); const scalar_ty = float_ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| { for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i); const elem_val = try val.elemValue(mod, i);
scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, mod, opt_sema)).toIntern(); scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, mod, strat)).toIntern();
} }
return Value.fromInterned((try mod.intern(.{ .aggregate = .{ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_ty.toIntern(), .ty = float_ty.toIntern(),
.storage = .{ .elems = result_data }, .storage = .{ .elems = result_data },
} }))); } })));
} }
return floatFromIntScalar(val, float_ty, mod, opt_sema); return floatFromIntScalar(val, float_ty, mod, strat);
} }
pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, strat: ResolveStrat) !Value {
return switch (mod.intern_pool.indexToKey(val.toIntern())) { return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.undef => try mod.undefValue(float_ty), .undef => try mod.undefValue(float_ty),
.int => |int| switch (int.storage) { .int => |int| switch (int.storage) {
@ -1513,16 +1508,8 @@ pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*
return mod.floatValue(float_ty, float); return mod.floatValue(float_ty, float);
}, },
inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod), inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod),
.lazy_align => |ty| if (opt_sema) |sema| { .lazy_align => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, strat.toLazy())).scalar.toByteUnits() orelse 0, float_ty, mod),
return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits() orelse 0, float_ty, mod); .lazy_size => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, strat.toLazy())).scalar, float_ty, mod),
} else {
return floatFromIntInner(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0, float_ty, mod);
},
.lazy_size => |ty| if (opt_sema) |sema| {
return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
} else {
return floatFromIntInner(Type.fromInterned(ty).abiSize(mod), float_ty, mod);
},
}, },
else => unreachable, else => unreachable,
}; };
@ -3616,17 +3603,15 @@ pub const RuntimeIndex = InternPool.RuntimeIndex;
/// `parent_ptr` must be a single-pointer to some optional. /// `parent_ptr` must be a single-pointer to some optional.
/// Returns a pointer to the payload of the optional. /// Returns a pointer to the payload of the optional.
/// This takes a `Sema` because it may need to perform type resolution. /// May perform type resolution.
pub fn ptrOptPayload(parent_ptr: Value, sema: *Sema) !Value { pub fn ptrOptPayload(parent_ptr: Value, zcu: *Zcu) !Value {
const zcu = sema.mod;
const parent_ptr_ty = parent_ptr.typeOf(zcu); const parent_ptr_ty = parent_ptr.typeOf(zcu);
const opt_ty = parent_ptr_ty.childType(zcu); const opt_ty = parent_ptr_ty.childType(zcu);
assert(parent_ptr_ty.ptrSize(zcu) == .One); assert(parent_ptr_ty.ptrSize(zcu) == .One);
assert(opt_ty.zigTypeTag(zcu) == .Optional); assert(opt_ty.zigTypeTag(zcu) == .Optional);
const result_ty = try sema.ptrType(info: { const result_ty = try zcu.ptrTypeSema(info: {
var new = parent_ptr_ty.ptrInfo(zcu); var new = parent_ptr_ty.ptrInfo(zcu);
// We can correctly preserve alignment `.none`, since an optional has the same // We can correctly preserve alignment `.none`, since an optional has the same
// natural alignment as its child type. // natural alignment as its child type.
@ -3651,17 +3636,15 @@ pub fn ptrOptPayload(parent_ptr: Value, sema: *Sema) !Value {
/// `parent_ptr` must be a single-pointer to some error union. /// `parent_ptr` must be a single-pointer to some error union.
/// Returns a pointer to the payload of the error union. /// Returns a pointer to the payload of the error union.
/// This takes a `Sema` because it may need to perform type resolution. /// May perform type resolution.
pub fn ptrEuPayload(parent_ptr: Value, sema: *Sema) !Value { pub fn ptrEuPayload(parent_ptr: Value, zcu: *Zcu) !Value {
const zcu = sema.mod;
const parent_ptr_ty = parent_ptr.typeOf(zcu); const parent_ptr_ty = parent_ptr.typeOf(zcu);
const eu_ty = parent_ptr_ty.childType(zcu); const eu_ty = parent_ptr_ty.childType(zcu);
assert(parent_ptr_ty.ptrSize(zcu) == .One); assert(parent_ptr_ty.ptrSize(zcu) == .One);
assert(eu_ty.zigTypeTag(zcu) == .ErrorUnion); assert(eu_ty.zigTypeTag(zcu) == .ErrorUnion);
const result_ty = try sema.ptrType(info: { const result_ty = try zcu.ptrTypeSema(info: {
var new = parent_ptr_ty.ptrInfo(zcu); var new = parent_ptr_ty.ptrInfo(zcu);
// We can correctly preserve alignment `.none`, since an error union has a // We can correctly preserve alignment `.none`, since an error union has a
// natural alignment greater than or equal to that of its payload type. // natural alignment greater than or equal to that of its payload type.
@ -3682,10 +3665,8 @@ pub fn ptrEuPayload(parent_ptr: Value, sema: *Sema) !Value {
/// `parent_ptr` must be a single-pointer to a struct, union, or slice. /// `parent_ptr` must be a single-pointer to a struct, union, or slice.
/// Returns a pointer to the aggregate field at the specified index. /// Returns a pointer to the aggregate field at the specified index.
/// For slices, uses `slice_ptr_index` and `slice_len_index`. /// For slices, uses `slice_ptr_index` and `slice_len_index`.
/// This takes a `Sema` because it may need to perform type resolution. /// May perform type resolution.
pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value {
const zcu = sema.mod;
const parent_ptr_ty = parent_ptr.typeOf(zcu); const parent_ptr_ty = parent_ptr.typeOf(zcu);
const aggregate_ty = parent_ptr_ty.childType(zcu); const aggregate_ty = parent_ptr_ty.childType(zcu);
@ -3698,17 +3679,17 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value {
.Struct => field: { .Struct => field: {
const field_ty = aggregate_ty.structFieldType(field_idx, zcu); const field_ty = aggregate_ty.structFieldType(field_idx, zcu);
switch (aggregate_ty.containerLayout(zcu)) { switch (aggregate_ty.containerLayout(zcu)) {
.auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, sema) }, .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, .sema) },
.@"extern" => { .@"extern" => {
// Well-defined layout, so just offset the pointer appropriately. // Well-defined layout, so just offset the pointer appropriately.
const byte_off = aggregate_ty.structFieldOffset(field_idx, zcu); const byte_off = aggregate_ty.structFieldOffset(field_idx, zcu);
const field_align = a: { const field_align = a: {
const parent_align = if (parent_ptr_info.flags.alignment == .none) pa: { const parent_align = if (parent_ptr_info.flags.alignment == .none) pa: {
break :pa try sema.typeAbiAlignment(aggregate_ty); break :pa (try aggregate_ty.abiAlignmentAdvanced(zcu, .sema)).scalar;
} else parent_ptr_info.flags.alignment; } else parent_ptr_info.flags.alignment;
break :a InternPool.Alignment.fromLog2Units(@min(parent_align.toLog2Units(), @ctz(byte_off))); break :a InternPool.Alignment.fromLog2Units(@min(parent_align.toLog2Units(), @ctz(byte_off)));
}; };
const result_ty = try sema.ptrType(info: { const result_ty = try zcu.ptrTypeSema(info: {
var new = parent_ptr_info; var new = parent_ptr_info;
new.child = field_ty.toIntern(); new.child = field_ty.toIntern();
new.flags.alignment = field_align; new.flags.alignment = field_align;
@ -3723,14 +3704,14 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value {
new.packed_offset = packed_offset; new.packed_offset = packed_offset;
new.child = field_ty.toIntern(); new.child = field_ty.toIntern();
if (new.flags.alignment == .none) { if (new.flags.alignment == .none) {
new.flags.alignment = try sema.typeAbiAlignment(aggregate_ty); new.flags.alignment = (try aggregate_ty.abiAlignmentAdvanced(zcu, .sema)).scalar;
} }
break :info new; break :info new;
}); });
return zcu.getCoerced(parent_ptr, result_ty); return zcu.getCoerced(parent_ptr, result_ty);
}, },
.byte_ptr => |ptr_info| { .byte_ptr => |ptr_info| {
const result_ty = try sema.ptrType(info: { const result_ty = try zcu.ptrTypeSema(info: {
var new = parent_ptr_info; var new = parent_ptr_info;
new.child = field_ty.toIntern(); new.child = field_ty.toIntern();
new.packed_offset = .{ new.packed_offset = .{
@ -3749,10 +3730,10 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value {
const union_obj = zcu.typeToUnion(aggregate_ty).?; const union_obj = zcu.typeToUnion(aggregate_ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[field_idx]); const field_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[field_idx]);
switch (aggregate_ty.containerLayout(zcu)) { switch (aggregate_ty.containerLayout(zcu)) {
.auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, sema) }, .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, .sema) },
.@"extern" => { .@"extern" => {
// Point to the same address. // Point to the same address.
const result_ty = try sema.ptrType(info: { const result_ty = try zcu.ptrTypeSema(info: {
var new = parent_ptr_info; var new = parent_ptr_info;
new.child = field_ty.toIntern(); new.child = field_ty.toIntern();
break :info new; break :info new;
@ -3762,28 +3743,28 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value {
.@"packed" => { .@"packed" => {
// If the field has an ABI size matching its bit size, then we can continue to use a // If the field has an ABI size matching its bit size, then we can continue to use a
// non-bit pointer if the parent pointer is also a non-bit pointer. // non-bit pointer if the parent pointer is also a non-bit pointer.
if (parent_ptr_info.packed_offset.host_size == 0 and try sema.typeAbiSize(field_ty) * 8 == try field_ty.bitSizeAdvanced(zcu, sema)) { if (parent_ptr_info.packed_offset.host_size == 0 and (try field_ty.abiSizeAdvanced(zcu, .sema)).scalar * 8 == try field_ty.bitSizeAdvanced(zcu, .sema)) {
// We must offset the pointer on big-endian targets, since the bits of packed memory don't align nicely. // We must offset the pointer on big-endian targets, since the bits of packed memory don't align nicely.
const byte_offset = switch (zcu.getTarget().cpu.arch.endian()) { const byte_offset = switch (zcu.getTarget().cpu.arch.endian()) {
.little => 0, .little => 0,
.big => try sema.typeAbiSize(aggregate_ty) - try sema.typeAbiSize(field_ty), .big => (try aggregate_ty.abiSizeAdvanced(zcu, .sema)).scalar - (try field_ty.abiSizeAdvanced(zcu, .sema)).scalar,
}; };
const result_ty = try sema.ptrType(info: { const result_ty = try zcu.ptrTypeSema(info: {
var new = parent_ptr_info; var new = parent_ptr_info;
new.child = field_ty.toIntern(); new.child = field_ty.toIntern();
new.flags.alignment = InternPool.Alignment.fromLog2Units( new.flags.alignment = InternPool.Alignment.fromLog2Units(
@ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentAdvanced(zcu, sema)).toByteUnits().?), @ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentAdvanced(zcu, .sema)).toByteUnits().?),
); );
break :info new; break :info new;
}); });
return parent_ptr.getOffsetPtr(byte_offset, result_ty, zcu); return parent_ptr.getOffsetPtr(byte_offset, result_ty, zcu);
} else { } else {
// The result must be a bit-pointer if it is not already. // The result must be a bit-pointer if it is not already.
const result_ty = try sema.ptrType(info: { const result_ty = try zcu.ptrTypeSema(info: {
var new = parent_ptr_info; var new = parent_ptr_info;
new.child = field_ty.toIntern(); new.child = field_ty.toIntern();
if (new.packed_offset.host_size == 0) { if (new.packed_offset.host_size == 0) {
new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeAdvanced(zcu, sema)) + 7) / 8); new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeAdvanced(zcu, .sema)) + 7) / 8);
assert(new.packed_offset.bit_offset == 0); assert(new.packed_offset.bit_offset == 0);
} }
break :info new; break :info new;
@ -3805,14 +3786,14 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value {
}; };
const new_align: InternPool.Alignment = if (parent_ptr_info.flags.alignment != .none) a: { const new_align: InternPool.Alignment = if (parent_ptr_info.flags.alignment != .none) a: {
const ty_align = try sema.typeAbiAlignment(field_ty); const ty_align = (try field_ty.abiAlignmentAdvanced(zcu, .sema)).scalar;
const true_field_align = if (field_align == .none) ty_align else field_align; const true_field_align = if (field_align == .none) ty_align else field_align;
const new_align = true_field_align.min(parent_ptr_info.flags.alignment); const new_align = true_field_align.min(parent_ptr_info.flags.alignment);
if (new_align == ty_align) break :a .none; if (new_align == ty_align) break :a .none;
break :a new_align; break :a new_align;
} else field_align; } else field_align;
const result_ty = try sema.ptrType(info: { const result_ty = try zcu.ptrTypeSema(info: {
var new = parent_ptr_info; var new = parent_ptr_info;
new.child = field_ty.toIntern(); new.child = field_ty.toIntern();
new.flags.alignment = new_align; new.flags.alignment = new_align;
@ -3834,10 +3815,8 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value {
/// `orig_parent_ptr` must be either a single-pointer to an array or vector, or a many-pointer or C-pointer or slice. /// `orig_parent_ptr` must be either a single-pointer to an array or vector, or a many-pointer or C-pointer or slice.
/// Returns a pointer to the element at the specified index. /// Returns a pointer to the element at the specified index.
/// This takes a `Sema` because it may need to perform type resolution. /// May perform type resolution.
pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, sema: *Sema) !Value { pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value {
const zcu = sema.mod;
const parent_ptr = switch (orig_parent_ptr.typeOf(zcu).ptrSize(zcu)) { const parent_ptr = switch (orig_parent_ptr.typeOf(zcu).ptrSize(zcu)) {
.One, .Many, .C => orig_parent_ptr, .One, .Many, .C => orig_parent_ptr,
.Slice => orig_parent_ptr.slicePtr(zcu), .Slice => orig_parent_ptr.slicePtr(zcu),
@ -3845,7 +3824,7 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, sema: *Sema) !Value {
const parent_ptr_ty = parent_ptr.typeOf(zcu); const parent_ptr_ty = parent_ptr.typeOf(zcu);
const elem_ty = parent_ptr_ty.childType(zcu); const elem_ty = parent_ptr_ty.childType(zcu);
const result_ty = try sema.elemPtrType(parent_ptr_ty, @intCast(field_idx)); const result_ty = try parent_ptr_ty.elemPtrType(@intCast(field_idx), zcu);
if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty); if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty);
@ -3862,21 +3841,21 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, sema: *Sema) !Value {
const strat: PtrStrat = switch (parent_ptr_ty.ptrSize(zcu)) { const strat: PtrStrat = switch (parent_ptr_ty.ptrSize(zcu)) {
.One => switch (elem_ty.zigTypeTag(zcu)) { .One => switch (elem_ty.zigTypeTag(zcu)) {
.Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeAdvanced(zcu, sema), 8) }, .Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeAdvanced(zcu, .sema), 8) },
.Array => strat: { .Array => strat: {
const arr_elem_ty = elem_ty.childType(zcu); const arr_elem_ty = elem_ty.childType(zcu);
if (try sema.typeRequiresComptime(arr_elem_ty)) { if (try arr_elem_ty.comptimeOnlyAdvanced(zcu, .sema)) {
break :strat .{ .elem_ptr = arr_elem_ty }; break :strat .{ .elem_ptr = arr_elem_ty };
} }
break :strat .{ .offset = field_idx * try sema.typeAbiSize(arr_elem_ty) }; break :strat .{ .offset = field_idx * (try arr_elem_ty.abiSizeAdvanced(zcu, .sema)).scalar };
}, },
else => unreachable, else => unreachable,
}, },
.Many, .C => if (try sema.typeRequiresComptime(elem_ty)) .Many, .C => if (try elem_ty.comptimeOnlyAdvanced(zcu, .sema))
.{ .elem_ptr = elem_ty } .{ .elem_ptr = elem_ty }
else else
.{ .offset = field_idx * try sema.typeAbiSize(elem_ty) }, .{ .offset = field_idx * (try elem_ty.abiSizeAdvanced(zcu, .sema)).scalar },
.Slice => unreachable, .Slice => unreachable,
}; };
@ -4014,11 +3993,7 @@ pub const PointerDeriveStep = union(enum) {
pub fn pointerDerivation(ptr_val: Value, arena: Allocator, zcu: *Zcu) Allocator.Error!PointerDeriveStep { pub fn pointerDerivation(ptr_val: Value, arena: Allocator, zcu: *Zcu) Allocator.Error!PointerDeriveStep {
return ptr_val.pointerDerivationAdvanced(arena, zcu, null) catch |err| switch (err) { return ptr_val.pointerDerivationAdvanced(arena, zcu, null) catch |err| switch (err) {
error.OutOfMemory => |e| return e, error.OutOfMemory => |e| return e,
error.AnalysisFail, error.AnalysisFail => unreachable,
error.GenericPoison,
error.ComptimeReturn,
error.ComptimeBreak,
=> unreachable,
}; };
} }
@ -4087,8 +4062,8 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
const base_ptr_ty = base_ptr.typeOf(zcu); const base_ptr_ty = base_ptr.typeOf(zcu);
const agg_ty = base_ptr_ty.childType(zcu); const agg_ty = base_ptr_ty.childType(zcu);
const field_ty, const field_align = switch (agg_ty.zigTypeTag(zcu)) { const field_ty, const field_align = switch (agg_ty.zigTypeTag(zcu)) {
.Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, opt_sema) }, .Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, .sema) },
.Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, opt_sema) }, .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, .sema) },
.Pointer => .{ switch (field.index) { .Pointer => .{ switch (field.index) {
Value.slice_ptr_index => agg_ty.slicePtrFieldType(zcu), Value.slice_ptr_index => agg_ty.slicePtrFieldType(zcu),
Value.slice_len_index => Type.usize, Value.slice_len_index => Type.usize,
@ -4269,3 +4244,118 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
.new_ptr_ty = Type.fromInterned(ptr.ty), .new_ptr_ty = Type.fromInterned(ptr.ty),
} }; } };
} }
pub fn resolveLazy(val: Value, arena: Allocator, zcu: *Zcu) Zcu.SemaError!Value {
switch (zcu.intern_pool.indexToKey(val.toIntern())) {
.int => |int| switch (int.storage) {
.u64, .i64, .big_int => return val,
.lazy_align, .lazy_size => return zcu.intValue(
Type.fromInterned(int.ty),
(try val.getUnsignedIntAdvanced(zcu, .sema)).?,
),
},
.slice => |slice| {
const ptr = try Value.fromInterned(slice.ptr).resolveLazy(arena, zcu);
const len = try Value.fromInterned(slice.len).resolveLazy(arena, zcu);
if (ptr.toIntern() == slice.ptr and len.toIntern() == slice.len) return val;
return Value.fromInterned(try zcu.intern(.{ .slice = .{
.ty = slice.ty,
.ptr = ptr.toIntern(),
.len = len.toIntern(),
} }));
},
.ptr => |ptr| {
switch (ptr.base_addr) {
.decl, .comptime_alloc, .anon_decl, .int => return val,
.comptime_field => |field_val| {
const resolved_field_val = (try Value.fromInterned(field_val).resolveLazy(arena, zcu)).toIntern();
return if (resolved_field_val == field_val)
val
else
Value.fromInterned((try zcu.intern(.{ .ptr = .{
.ty = ptr.ty,
.base_addr = .{ .comptime_field = resolved_field_val },
.byte_offset = ptr.byte_offset,
} })));
},
.eu_payload, .opt_payload => |base| {
const resolved_base = (try Value.fromInterned(base).resolveLazy(arena, zcu)).toIntern();
return if (resolved_base == base)
val
else
Value.fromInterned((try zcu.intern(.{ .ptr = .{
.ty = ptr.ty,
.base_addr = switch (ptr.base_addr) {
.eu_payload => .{ .eu_payload = resolved_base },
.opt_payload => .{ .opt_payload = resolved_base },
else => unreachable,
},
.byte_offset = ptr.byte_offset,
} })));
},
.arr_elem, .field => |base_index| {
const resolved_base = (try Value.fromInterned(base_index.base).resolveLazy(arena, zcu)).toIntern();
return if (resolved_base == base_index.base)
val
else
Value.fromInterned((try zcu.intern(.{ .ptr = .{
.ty = ptr.ty,
.base_addr = switch (ptr.base_addr) {
.arr_elem => .{ .arr_elem = .{
.base = resolved_base,
.index = base_index.index,
} },
.field => .{ .field = .{
.base = resolved_base,
.index = base_index.index,
} },
else => unreachable,
},
.byte_offset = ptr.byte_offset,
} })));
},
}
},
.aggregate => |aggregate| switch (aggregate.storage) {
.bytes => return val,
.elems => |elems| {
var resolved_elems: []InternPool.Index = &.{};
for (elems, 0..) |elem, i| {
const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, zcu)).toIntern();
if (resolved_elems.len == 0 and resolved_elem != elem) {
resolved_elems = try arena.alloc(InternPool.Index, elems.len);
@memcpy(resolved_elems[0..i], elems[0..i]);
}
if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem;
}
return if (resolved_elems.len == 0) val else Value.fromInterned((try zcu.intern(.{ .aggregate = .{
.ty = aggregate.ty,
.storage = .{ .elems = resolved_elems },
} })));
},
.repeated_elem => |elem| {
const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, zcu)).toIntern();
return if (resolved_elem == elem) val else Value.fromInterned((try zcu.intern(.{ .aggregate = .{
.ty = aggregate.ty,
.storage = .{ .repeated_elem = resolved_elem },
} })));
},
},
.un => |un| {
const resolved_tag = if (un.tag == .none)
.none
else
(try Value.fromInterned(un.tag).resolveLazy(arena, zcu)).toIntern();
const resolved_val = (try Value.fromInterned(un.val).resolveLazy(arena, zcu)).toIntern();
return if (resolved_tag == un.tag and resolved_val == un.val)
val
else
Value.fromInterned((try zcu.intern(.{ .un = .{
.ty = un.ty,
.tag = resolved_tag,
.val = resolved_val,
} })));
},
else => return val,
}
}
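// Note on the aggregate case above: `resolved_elems` starts as an empty slice
// and is only allocated once the first element actually changes, so values
// that were already fully resolved are returned as-is without touching the
// arena (a copy-on-write pattern).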


@ -3593,7 +3593,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
}, },
error.OutOfMemory => return error.OutOfMemory, error.OutOfMemory => return error.OutOfMemory,
}; };
defer air.deinit(gpa); errdefer air.deinit(gpa);
const invalidate_ies_deps = i: { const invalidate_ies_deps = i: {
if (!was_outdated) break :i false; if (!was_outdated) break :i false;
@ -3615,13 +3615,36 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
const dump_llvm_ir = build_options.enable_debug_extensions and (comp.verbose_llvm_ir != null or comp.verbose_llvm_bc != null); const dump_llvm_ir = build_options.enable_debug_extensions and (comp.verbose_llvm_ir != null or comp.verbose_llvm_bc != null);
if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) { if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) {
air.deinit(gpa);
return; return;
} }
try comp.work_queue.writeItem(.{ .codegen_func = .{
.func = func_index,
.air = air,
} });
}
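The matching work-queue consumer lives in `Compilation.zig` and is not shown in this diff; presumably it forwards the job to `linkerUpdateFunc` below, roughly like this hypothetical wrapper:
fn processCodegenFuncJob(zcu: *Zcu, func: InternPool.Index, air: Air) Allocator.Error!void {
    // `linkerUpdateFunc` takes ownership of `air`, even on error.
    return zcu.linkerUpdateFunc(func, air);
}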
/// Takes ownership of `air`, even on error.
/// If any types referenced by `air` are unresolved, marks the codegen as failed.
pub fn linkerUpdateFunc(zcu: *Zcu, func_index: InternPool.Index, air: Air) Allocator.Error!void {
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const comp = zcu.comp;
defer {
var air_mut = air;
air_mut.deinit(gpa);
}
const func = zcu.funcInfo(func_index);
const decl_index = func.owner_decl;
const decl = zcu.declPtr(decl_index);
var liveness = try Liveness.analyze(gpa, air, ip); var liveness = try Liveness.analyze(gpa, air, ip);
defer liveness.deinit(gpa); defer liveness.deinit(gpa);
if (dump_air) { if (build_options.enable_debug_extensions and comp.verbose_air) {
const fqn = try decl.fullyQualifiedName(zcu); const fqn = try decl.fullyQualifiedName(zcu);
std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)}); std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)});
@import("print_air.zig").dump(zcu, air, liveness); @import("print_air.zig").dump(zcu, air, liveness);
@ -3629,7 +3652,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
} }
if (std.debug.runtime_safety) { if (std.debug.runtime_safety) {
var verify = Liveness.Verify{ var verify: Liveness.Verify = .{
.gpa = gpa, .gpa = gpa,
.air = air, .air = air,
.liveness = liveness, .liveness = liveness,
@ -3642,7 +3665,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
else => { else => {
try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
zcu.failed_analysis.putAssumeCapacityNoClobber( zcu.failed_analysis.putAssumeCapacityNoClobber(
AnalUnit.wrap(.{ .decl = decl_index }), AnalUnit.wrap(.{ .func = func_index }),
try Module.ErrorMsg.create( try Module.ErrorMsg.create(
gpa, gpa,
decl.navSrcLoc(zcu), decl.navSrcLoc(zcu),
@ -3659,7 +3682,13 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(ip), 0); const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(ip), 0);
defer codegen_prog_node.end(); defer codegen_prog_node.end();
if (comp.bin_file) |lf| { if (!air.typesFullyResolved(zcu)) {
// A type we depend on failed to resolve. This is a transitive failure.
// Correcting this failure will involve changing a type this function
// depends on, hence triggering re-analysis of this function, so this
// interacts correctly with incremental compilation.
func.analysis(ip).state = .codegen_failure;
} else if (comp.bin_file) |lf| {
lf.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) { lf.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory, error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => { error.AnalysisFail => {
@ -3667,7 +3696,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
}, },
else => { else => {
try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try Module.ErrorMsg.create( zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .func = func_index }), try Module.ErrorMsg.create(
gpa, gpa,
decl.navSrcLoc(zcu), decl.navSrcLoc(zcu),
"unable to codegen: {s}", "unable to codegen: {s}",
@ -3735,7 +3764,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index)
// Decl itself is safely analyzed, and body analysis is not yet queued // Decl itself is safely analyzed, and body analysis is not yet queued
try mod.comp.work_queue.writeItem(.{ .codegen_func = func_index }); try mod.comp.work_queue.writeItem(.{ .analyze_func = func_index });
if (mod.emit_h != null) { if (mod.emit_h != null) {
// TODO: we ideally only want to do this if the function's type changed // TODO: we ideally only want to do this if the function's type changed
// since the last update // since the last update
@ -3812,7 +3841,7 @@ fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespa
decl.analysis = .complete; decl.analysis = .complete;
try zcu.scanNamespace(namespace_index, decls, decl); try zcu.scanNamespace(namespace_index, decls, decl);
try zcu.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
return wip_ty.finish(ip, decl_index, namespace_index.toOptional()); return wip_ty.finish(ip, decl_index, namespace_index.toOptional());
} }
@ -4103,7 +4132,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
// Note this resolves the type of the Decl, not the value; if this Decl // Note this resolves the type of the Decl, not the value; if this Decl
// is a struct, for example, this resolves `type` (which needs no resolution), // is a struct, for example, this resolves `type` (which needs no resolution),
// not the struct itself. // not the struct itself.
try sema.resolveTypeLayout(decl_ty); try decl_ty.resolveLayout(mod);
if (decl.kind == .@"usingnamespace") { if (decl.kind == .@"usingnamespace") {
if (!decl_ty.eql(Type.type, mod)) { if (!decl_ty.eql(Type.type, mod)) {
@ -4220,7 +4249,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
if (has_runtime_bits) { if (has_runtime_bits) {
// Needed for codegen_decl which will call updateDecl and then the // Needed for codegen_decl which will call updateDecl and then the
// codegen backend wants full access to the Decl Type. // codegen backend wants full access to the Decl Type.
try sema.resolveTypeFully(decl_ty); try decl_ty.resolveFully(mod);
try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });
@ -5212,23 +5241,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
else => |e| return e, else => |e| return e,
}; };
// Similarly, resolve any queued up types that were requested to be resolved for
// the backends.
for (sema.types_to_resolve.keys()) |ty| {
sema.resolveTypeFully(Type.fromInterned(ty)) catch |err| switch (err) {
error.GenericPoison => unreachable,
error.ComptimeReturn => unreachable,
error.ComptimeBreak => unreachable,
error.AnalysisFail => {
// In this case our function depends on a type that had a compile error.
// We should not try to lower this function.
decl.analysis = .dependency_failure;
return error.AnalysisFail;
},
else => |e| return e,
};
}
try sema.flushExports();
return .{
@ -5793,6 +5805,16 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type
return Type.fromInterned((try intern(mod, .{ .ptr_type = canon_info })));
}
/// Like `ptrType`, but if `info` specifies an `alignment`, first ensures the pointer
/// child type's alignment is resolved so that an invalid alignment is not used.
/// In general, prefer this function during semantic analysis.
pub fn ptrTypeSema(zcu: *Zcu, info: InternPool.Key.PtrType) SemaError!Type {
if (info.flags.alignment != .none) {
_ = try Type.fromInterned(info.child).abiAlignmentAdvanced(zcu, .sema);
}
return zcu.ptrType(info);
}
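A hedged usage sketch for the new helper (the callsite and `elem_ty` are hypothetical; only `ptrTypeSema` itself comes from this diff):

    // Building `*align(8) T` during Sema: the child type's ABI alignment is
    // resolved first, so an invalid explicit alignment can be diagnosed.
    const ptr_ty = try zcu.ptrTypeSema(.{
        .child = elem_ty.toIntern(),
        .flags = .{ .alignment = .@"8" },
    });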
pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
return ptrType(mod, .{ .child = child_type.toIntern() });
}
@ -6368,15 +6390,21 @@ pub fn unionAbiAlignment(mod: *Module, loaded_union: InternPool.LoadedUnionType)
return max_align;
}
/// Returns the field alignment, assuming the union is not packed.
/// Keep implementation in sync with `Sema.unionFieldAlignment`.
/// Prefer to call that function instead of this one during Sema.
pub fn unionFieldNormalAlignment(mod: *Module, loaded_union: InternPool.LoadedUnionType, field_index: u32) Alignment {
const ip = &mod.intern_pool;
const field_align = loaded_union.fieldAlign(ip, field_index);
if (field_align != .none) return field_align;
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
return field_ty.abiAlignment(mod);
}
/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
pub fn unionFieldNormalAlignment(zcu: *Zcu, loaded_union: InternPool.LoadedUnionType, field_index: u32) Alignment {
return zcu.unionFieldNormalAlignmentAdvanced(loaded_union, field_index, .normal) catch unreachable;
}
/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
/// If `strat` is `.sema`, may perform type resolution.
pub fn unionFieldNormalAlignmentAdvanced(zcu: *Zcu, loaded_union: InternPool.LoadedUnionType, field_index: u32, strat: Type.ResolveStrat) SemaError!Alignment {
const ip = &zcu.intern_pool;
assert(loaded_union.flagsPtr(ip).layout != .@"packed");
const field_align = loaded_union.fieldAlign(ip, field_index);
if (field_align != .none) return field_align;
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
if (field_ty.isNoReturn(zcu)) return .none;
return (try field_ty.abiAlignmentAdvanced(zcu, strat.toLazy())).scalar;
}
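To illustrate the split (a sketch; `loaded_union` and `field_i` are placeholders):

    // Outside Sema, when layouts are already resolved, the thin wrapper suffices:
    const resolved_align = zcu.unionFieldNormalAlignment(loaded_union, field_i);
    // During Sema, pass `.sema` so an unresolved field type is resolved on demand:
    const sema_align = try zcu.unionFieldNormalAlignmentAdvanced(loaded_union, field_i, .sema);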
/// Returns the index of the active field, given the current tag value
@ -6387,41 +6415,37 @@ pub fn unionTagFieldIndex(mod: *Module, loaded_union: InternPool.LoadedUnionType
return loaded_union.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
}
/// Returns the field alignment of a non-packed struct in byte units.
/// Keep implementation in sync with `Sema.structFieldAlignment`.
/// asserts the layout is not packed.
pub fn structFieldAlignment(
mod: *Module,
explicit_alignment: InternPool.Alignment,
field_ty: Type,
layout: std.builtin.Type.ContainerLayout,
) Alignment {
assert(layout != .@"packed");
if (explicit_alignment != .none) return explicit_alignment;
switch (layout) {
.@"packed" => unreachable,
.auto => {
if (mod.getTarget().ofmt == .c) {
return structFieldAlignmentExtern(mod, field_ty);
} else {
return field_ty.abiAlignment(mod);
}
},
.@"extern" => return structFieldAlignmentExtern(mod, field_ty),
}
}
/// Returns the field alignment of an extern struct in byte units.
/// This logic is duplicated in Type.abiAlignmentAdvanced.
pub fn structFieldAlignmentExtern(mod: *Module, field_ty: Type) Alignment {
const ty_abi_align = field_ty.abiAlignment(mod);
if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) {
// The C ABI requires 128 bit integer fields of structs
// to be 16-bytes aligned.
return ty_abi_align.max(.@"16");
}
return ty_abi_align;
}
/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
pub fn structFieldAlignment(
zcu: *Zcu,
explicit_alignment: InternPool.Alignment,
field_ty: Type,
layout: std.builtin.Type.ContainerLayout,
) Alignment {
return zcu.structFieldAlignmentAdvanced(explicit_alignment, field_ty, layout, .normal) catch unreachable;
}
/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
/// If `strat` is `.sema`, may perform type resolution.
pub fn structFieldAlignmentAdvanced(
zcu: *Zcu,
explicit_alignment: InternPool.Alignment,
field_ty: Type,
layout: std.builtin.Type.ContainerLayout,
strat: Type.ResolveStrat,
) SemaError!Alignment {
assert(layout != .@"packed");
if (explicit_alignment != .none) return explicit_alignment;
const ty_abi_align = (try field_ty.abiAlignmentAdvanced(zcu, strat.toLazy())).scalar;
switch (layout) {
.@"packed" => unreachable,
.auto => if (zcu.getTarget().ofmt != .c) return ty_abi_align,
.@"extern" => {},
}
// extern
if (field_ty.isAbiInt(zcu) and field_ty.intInfo(zcu).bits >= 128) {
return ty_abi_align.maxStrict(.@"16");
}
return ty_abi_align;
}
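The explicit-alignment precedence is observable from user code; a minimal, runnable illustration (not part of this commit):

    const std = @import("std");

    test "explicit field alignment overrides ABI alignment" {
        const S = extern struct {
            a: u8,
            b: u128 align(16),
        };
        // The explicit `align(16)` wins, so `b` lands on a 16-byte boundary.
        try std.testing.expectEqual(@as(usize, 16), @offsetOf(S, "b"));
    }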
@ -6480,3 +6504,29 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, Resolved
return result;
}
pub fn getBuiltin(zcu: *Zcu, name: []const u8) Allocator.Error!Air.Inst.Ref {
const decl_index = try zcu.getBuiltinDecl(name);
zcu.ensureDeclAnalyzed(decl_index) catch @panic("std.builtin is corrupt");
return Air.internedToRef(zcu.declPtr(decl_index).val.toIntern());
}
pub fn getBuiltinDecl(zcu: *Zcu, name: []const u8) Allocator.Error!InternPool.DeclIndex {
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const std_file = (zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig")).file;
const std_namespace = zcu.declPtr(std_file.root_decl.unwrap().?).getOwnedInnerNamespace(zcu).?;
const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls);
const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std.zig is corrupt and missing 'builtin'");
zcu.ensureDeclAnalyzed(builtin_decl) catch @panic("std.builtin is corrupt");
const builtin_namespace = zcu.declPtr(builtin_decl).getInnerNamespace(zcu) orelse @panic("std.builtin is corrupt");
const name_str = try ip.getOrPutString(gpa, name, .no_embedded_nulls);
return builtin_namespace.decls.getKeyAdapted(name_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std/builtin.zig is corrupt");
}
pub fn getBuiltinType(zcu: *Zcu, name: []const u8) Allocator.Error!Type {
const ty_inst = try zcu.getBuiltin(name);
const ty = Type.fromInterned(ty_inst.toInterned() orelse @panic("std.builtin is corrupt"));
ty.resolveFully(zcu) catch @panic("std.builtin is corrupt");
return ty;
}
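A hedged sketch of a typical consumer (the name "StackTrace" is just an example; any single-identifier declaration of `std.builtin` works the same way):

    // Fetch `std.builtin.StackTrace` as a fully resolved Type; any failure
    // inside std.builtin is treated as a corrupt standard library and panics.
    const stack_trace_ty = try zcu.getBuiltinType("StackTrace");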

View File

@ -2603,7 +2603,10 @@ pub const Object = struct {
if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_size = Type.fromInterned(field_ty).abiSize(mod);
const field_align = mod.unionFieldNormalAlignment(union_type, @intCast(field_index));
const field_align: InternPool.Alignment = switch (union_type.flagsPtr(ip).layout) {
.@"packed" => .none,
.auto, .@"extern" => mod.unionFieldNormalAlignment(union_type, @intCast(field_index)),
};
const field_name = tag_type.names.get(ip)[field_index];
fields.appendAssumeCapacity(try o.builder.debugMemberType(
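For context on the `.none` case: a packed union's fields share one bag of bits rather than sitting at byte offsets, so per-field byte alignment is meaningless. A small user-level illustration (not from this commit):

    const std = @import("std");

    test "packed union fields share one bit layout" {
        const U = packed union {
            x: u3,
            y: u5,
        };
        // The union is only as wide as its widest field, in bits.
        try std.testing.expect(@bitSizeOf(U) == 5);
    }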

View File

@ -81,12 +81,12 @@ pub fn print(
}),
.int => |int| switch (int.storage) {
inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}),
.lazy_align => |ty| if (opt_sema) |sema| {
.lazy_align => |ty| if (opt_sema != null) {
const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar;
const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .sema)).scalar;
try writer.print("{}", .{a.toByteUnits() orelse 0});
} else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(mod)}),
.lazy_size => |ty| if (opt_sema) |sema| {
.lazy_size => |ty| if (opt_sema != null) {
const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar;
const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .sema)).scalar;
try writer.print("{}", .{s});
} else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(mod)}),
},
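For intuition, the lazy values stand for exactly the quantities the builtins produce once layout is known; a runnable illustration (not from this commit):

    const std = @import("std");

    test "alignment and size become plain integers once resolved" {
        const T = struct { a: u32, b: u8 };
        // Before resolution these would print symbolically as
        // `@alignOf(T)` and `@sizeOf(T)`.
        try std.testing.expect(@alignOf(T) == 4);
        try std.testing.expect(@sizeOf(T) == 8);
    }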

View File

@ -10,4 +10,3 @@ export fn entry() usize {
// target=native
//
// :1:11: error: struct 'tmp.A' depends on itself
// :2:5: note: while checking this field
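These fixtures pin down the dependency-cycle diagnostic; the minimal failing and passing shapes look like this (an illustration, not a test in this commit):

    // Fails: storing an A inside A makes the layout depend on itself.
    const A = struct { a: A };
    // OK: a pointer has a known size, breaking the layout cycle.
    const B = struct { b: *B };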

View File

@ -16,6 +16,3 @@ export fn entry() usize {
// target=native
//
// :1:11: error: struct 'tmp.A' depends on itself
// :8:5: note: while checking this field
// :5:5: note: while checking this field
// :2:5: note: while checking this field

View File

@ -13,4 +13,3 @@ export fn entry() usize {
// target=native
//
// :1:13: error: struct 'tmp.Foo' depends on itself
// :2:5: note: while checking this field

View File

@ -13,4 +13,3 @@ export fn entry() usize {
// target=native
//
// :1:13: error: union 'tmp.Foo' depends on itself
// :2:5: note: while checking this field

View File

@ -16,4 +16,3 @@ comptime {
// target=native
//
// :6:21: error: struct layout depends on it having runtime bits
// :4:13: note: while checking this field

View File

@ -15,5 +15,3 @@ export fn entry() void {
// target=native
//
// :1:17: error: struct 'tmp.LhsExpr' depends on itself
// :5:5: note: while checking this field
// :2:5: note: while checking this field

View File

@ -1,5 +1,5 @@
pub export fn entry(param: usize) usize {
return struct { param };
return struct { @TypeOf(param) };
}
// error
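The fixture's intent: `@TypeOf(param)` is the comptime-known type `usize`, so the anonymous tuple type itself is well-formed, whereas a runtime value like `param` is not usable as a field type. Illustration (not part of the fixture):

    // `struct { @TypeOf(param) }` names the same type as this tuple:
    const Tup = struct { usize };
    const t: Tup = .{42};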

View File

@ -395,10 +395,7 @@ fn addFromDirInner(
if (entry.kind != .file) continue;
// Ignore stuff such as .swp files
switch (Compilation.classifyFileExt(entry.basename)) {
.unknown => continue,
else => {},
}
if (!knownFileExtension(entry.basename)) continue;
try filenames.append(try ctx.arena.dupe(u8, entry.path));
}
@ -623,8 +620,6 @@ pub fn lowerToBuildSteps(
b: *std.Build,
parent_step: *std.Build.Step,
test_filters: []const []const u8,
cases_dir_path: []const u8,
incremental_exe: *std.Build.Step.Compile,
) void {
const host = std.zig.system.resolveTargetQuery(.{}) catch |err|
std.debug.panic("unable to detect native host: {s}\n", .{@errorName(err)});
@ -637,20 +632,11 @@ pub fn lowerToBuildSteps(
// compilation is in a happier state.
continue;
}
for (test_filters) |test_filter| {
if (std.mem.indexOf(u8, incr_case.base_path, test_filter)) |_| break;
} else if (test_filters.len > 0) continue;
const case_base_path_with_dir = std.fs.path.join(b.allocator, &.{
cases_dir_path, incr_case.base_path,
}) catch @panic("OOM");
const run = b.addRunArtifact(incremental_exe);
run.setName(incr_case.base_path);
run.addArgs(&.{
case_base_path_with_dir,
b.graph.zig_exe,
});
run.expectStdOutEqual("");
parent_step.dependOn(&run.step);
// TODO: the logic for running these was bad, so I've ripped it out. Rewrite this
// in a way that actually spawns the compiler, communicating with it over the
// compiler server protocol.
_ = incr_case;
@panic("TODO implement incremental test case executor");
}
for (self.cases.items) |case| {
@ -1236,192 +1222,6 @@ const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const getExternalExecutor = std.zig.system.getExternalExecutor;
const Compilation = @import("../../src/Compilation.zig");
const zig_h = @import("../../src/link.zig").File.C.zig_h;
const introspect = @import("../../src/introspect.zig");
const ThreadPool = std.Thread.Pool;
const WaitGroup = std.Thread.WaitGroup;
const build_options = @import("build_options");
const Package = @import("../../src/Package.zig");
pub const std_options = .{
.log_level = .err,
};
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{
.stack_trace_frames = build_options.mem_leak_frames,
}){};
// TODO: instead of embedding the compiler in this process, spawn the compiler
// as a sub-process and communicate the updates using the compiler protocol.
pub fn main() !void {
const use_gpa = build_options.force_gpa or !builtin.link_libc;
const gpa = gpa: {
if (use_gpa) {
break :gpa general_purpose_allocator.allocator();
}
// We would prefer to use raw libc allocator here, but cannot
// use it if it won't support the alignment we need.
if (@alignOf(std.c.max_align_t) < @alignOf(i128)) {
break :gpa std.heap.c_allocator;
}
break :gpa std.heap.raw_c_allocator;
};
var single_threaded_arena = std.heap.ArenaAllocator.init(gpa);
defer single_threaded_arena.deinit();
var thread_safe_arena: std.heap.ThreadSafeAllocator = .{
.child_allocator = single_threaded_arena.allocator(),
};
const arena = thread_safe_arena.allocator();
const args = try std.process.argsAlloc(arena);
const case_file_path = args[1];
const zig_exe_path = args[2];
var filenames = std.ArrayList([]const u8).init(arena);
const case_dirname = std.fs.path.dirname(case_file_path).?;
var iterable_dir = try std.fs.cwd().openDir(case_dirname, .{ .iterate = true });
defer iterable_dir.close();
if (std.mem.endsWith(u8, case_file_path, ".0.zig")) {
const stem = case_file_path[case_dirname.len + 1 .. case_file_path.len - "0.zig".len];
var it = iterable_dir.iterate();
while (try it.next()) |entry| {
if (entry.kind != .file) continue;
if (!std.mem.startsWith(u8, entry.name, stem)) continue;
try filenames.append(try std.fs.path.join(arena, &.{ case_dirname, entry.name }));
}
} else {
try filenames.append(case_file_path);
}
if (filenames.items.len == 0) {
std.debug.print("failed to find the input source file(s) from '{s}'\n", .{
case_file_path,
});
std.process.exit(1);
}
// Sort filenames, so that incremental tests are contiguous and in-order
sortTestFilenames(filenames.items);
var ctx = Cases.init(gpa, arena);
var test_it = TestIterator{ .filenames = filenames.items };
while (try test_it.next()) |batch| {
const strategy: TestStrategy = if (batch.len > 1) .incremental else .independent;
var cases = std.ArrayList(usize).init(arena);
for (batch) |filename| {
const max_file_size = 10 * 1024 * 1024;
const src = try iterable_dir.readFileAllocOptions(arena, filename, max_file_size, null, 1, 0);
// Parse the manifest
var manifest = try TestManifest.parse(arena, src);
if (cases.items.len == 0) {
const backends = try manifest.getConfigForKeyAlloc(arena, "backend", Backend);
const targets = try manifest.getConfigForKeyAlloc(arena, "target", std.Target.Query);
const c_frontends = try manifest.getConfigForKeyAlloc(ctx.arena, "c_frontend", CFrontend);
const is_test = try manifest.getConfigForKeyAssertSingle("is_test", bool);
const link_libc = try manifest.getConfigForKeyAssertSingle("link_libc", bool);
const output_mode = try manifest.getConfigForKeyAssertSingle("output_mode", std.builtin.OutputMode);
if (manifest.type == .translate_c) {
for (c_frontends) |c_frontend| {
for (targets) |target_query| {
const output = try manifest.trailingLinesSplit(ctx.arena);
try ctx.translate.append(.{
.name = std.fs.path.stem(filename),
.c_frontend = c_frontend,
.target = resolveTargetQuery(target_query),
.is_test = is_test,
.link_libc = link_libc,
.input = src,
.kind = .{ .translate = output },
});
}
}
continue;
}
if (manifest.type == .run_translated_c) {
for (c_frontends) |c_frontend| {
for (targets) |target_query| {
const output = try manifest.trailingSplit(ctx.arena);
try ctx.translate.append(.{
.name = std.fs.path.stem(filename),
.c_frontend = c_frontend,
.target = resolveTargetQuery(target_query),
.is_test = is_test,
.link_libc = link_libc,
.output = output,
.input = src,
.kind = .{ .run = output },
});
}
}
continue;
}
// Cross-product to get all possible test combinations
for (backends) |backend| {
for (targets) |target| {
const next = ctx.cases.items.len;
try ctx.cases.append(.{
.name = std.fs.path.stem(filename),
.target = target,
.backend = backend,
.updates = std.ArrayList(Cases.Update).init(ctx.cases.allocator),
.is_test = is_test,
.output_mode = output_mode,
.link_libc = backend == .llvm,
.deps = std.ArrayList(DepModule).init(ctx.cases.allocator),
});
try cases.append(next);
}
}
}
for (cases.items) |case_index| {
const case = &ctx.cases.items[case_index];
if (strategy == .incremental and case.backend == .stage2 and case.target.getCpuArch() == .x86_64 and !case.link_libc and case.target.getOsTag() != .plan9) {
// https://github.com/ziglang/zig/issues/15174
continue;
}
switch (manifest.type) {
.compile => {
case.addCompile(src);
},
.@"error" => {
const errors = try manifest.trailingLines(arena);
switch (strategy) {
.independent => {
case.addError(src, errors);
},
.incremental => {
case.addErrorNamed("update", src, errors);
},
}
},
.run => {
const output = try manifest.trailingSplit(ctx.arena);
case.addCompareOutput(src, output);
},
.translate_c => @panic("c_frontend specified for compile case"),
.run_translated_c => @panic("c_frontend specified for compile case"),
.cli => @panic("TODO cli tests"),
}
}
}
}
return runCases(&ctx, zig_exe_path);
}
fn resolveTargetQuery(query: std.Target.Query) std.Build.ResolvedTarget {
return .{
.query = query,
@ -1430,470 +1230,33 @@ fn resolveTargetQuery(query: std.Target.Query) std.Build.ResolvedTarget {
};
}
fn knownFileExtension(filename: []const u8) bool {
// List taken from `Compilation.classifyFileExt` in the compiler.
for ([_][]const u8{
".c", ".C", ".cc", ".cpp",
".cxx", ".stub", ".m", ".mm",
".ll", ".bc", ".s", ".S",
".h", ".zig", ".so", ".dll",
".dylib", ".tbd", ".a", ".lib",
".o", ".obj", ".cu", ".def",
".rc", ".res", ".manifest",
}) |ext| {
if (std.mem.endsWith(u8, filename, ext)) return true;
}
// Final check for .so.X, .so.X.Y, .so.X.Y.Z.
// From `Compilation.hasSharedLibraryExt`.
var it = std.mem.splitScalar(u8, filename, '.');
_ = it.first();
var so_txt = it.next() orelse return false;
while (!std.mem.eql(u8, so_txt, "so")) {
so_txt = it.next() orelse return false;
}
const n1 = it.next() orelse return false;
const n2 = it.next();
const n3 = it.next();
_ = std.fmt.parseInt(u32, n1, 10) catch return false;
if (n2) |x| _ = std.fmt.parseInt(u32, x, 10) catch return false;
if (n3) |x| _ = std.fmt.parseInt(u32, x, 10) catch return false;
if (it.next() != null) return false;
return true;
}
fn runCases(self: *Cases, zig_exe_path: []const u8) !void {
const host = try std.zig.system.resolveTargetQuery(.{});
var progress = std.Progress{};
const root_node = progress.start("compiler", self.cases.items.len);
progress.terminal = null;
defer root_node.end();
var zig_lib_directory = try introspect.findZigLibDirFromSelfExe(self.gpa, zig_exe_path);
defer zig_lib_directory.handle.close();
defer self.gpa.free(zig_lib_directory.path.?);
var aux_thread_pool: ThreadPool = undefined;
try aux_thread_pool.init(.{ .allocator = self.gpa });
defer aux_thread_pool.deinit();
// Use the same global cache dir for all the tests, such that we for example don't have to
// rebuild musl libc for every case (when LLVM backend is enabled).
var global_tmp = std.testing.tmpDir(.{});
defer global_tmp.cleanup();
var cache_dir = try global_tmp.dir.makeOpenPath(".zig-cache", .{});
defer cache_dir.close();
const tmp_dir_path = try std.fs.path.join(self.gpa, &[_][]const u8{ ".", ".zig-cache", "tmp", &global_tmp.sub_path });
defer self.gpa.free(tmp_dir_path);
const global_cache_directory: Compilation.Directory = .{
.handle = cache_dir,
.path = try std.fs.path.join(self.gpa, &[_][]const u8{ tmp_dir_path, ".zig-cache" }),
};
defer self.gpa.free(global_cache_directory.path.?);
{
for (self.cases.items) |*case| {
if (build_options.skip_non_native) {
if (case.target.getCpuArch() != builtin.cpu.arch)
continue;
if (case.target.getObjectFormat() != builtin.object_format)
continue;
}
// Skip tests that require LLVM backend when it is not available
if (!build_options.have_llvm and case.backend == .llvm)
continue;
assert(case.backend != .stage1);
for (build_options.test_filters) |test_filter| {
if (std.mem.indexOf(u8, case.name, test_filter)) |_| break;
} else if (build_options.test_filters.len > 0) continue;
var prg_node = root_node.start(case.name, case.updates.items.len);
prg_node.activate();
defer prg_node.end();
try runOneCase(
self.gpa,
&prg_node,
case.*,
zig_lib_directory,
zig_exe_path,
&aux_thread_pool,
global_cache_directory,
host,
);
}
for (self.translate.items) |*case| {
_ = case;
@panic("TODO is this even used?");
}
}
}
fn runOneCase(
allocator: Allocator,
root_node: *std.Progress.Node,
case: Case,
zig_lib_directory: Compilation.Directory,
zig_exe_path: []const u8,
thread_pool: *ThreadPool,
global_cache_directory: Compilation.Directory,
host: std.Target,
) !void {
const tmp_src_path = "tmp.zig";
const enable_rosetta = build_options.enable_rosetta;
const enable_qemu = build_options.enable_qemu;
const enable_wine = build_options.enable_wine;
const enable_wasmtime = build_options.enable_wasmtime;
const enable_darling = build_options.enable_darling;
const glibc_runtimes_dir: ?[]const u8 = build_options.glibc_runtimes_dir;
const target = try std.zig.system.resolveTargetQuery(case.target);
var arena_allocator = std.heap.ArenaAllocator.init(allocator);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
var cache_dir = try tmp.dir.makeOpenPath(".zig-cache", .{});
defer cache_dir.close();
const tmp_dir_path = try std.fs.path.join(
arena,
&[_][]const u8{ ".", ".zig-cache", "tmp", &tmp.sub_path },
);
const local_cache_path = try std.fs.path.join(
arena,
&[_][]const u8{ tmp_dir_path, ".zig-cache" },
);
const zig_cache_directory: Compilation.Directory = .{
.handle = cache_dir,
.path = local_cache_path,
};
var main_pkg: Package = .{
.root_src_directory = .{ .path = tmp_dir_path, .handle = tmp.dir },
.root_src_path = tmp_src_path,
};
defer {
var it = main_pkg.table.iterator();
while (it.next()) |kv| {
allocator.free(kv.key_ptr.*);
kv.value_ptr.*.destroy(allocator);
}
main_pkg.table.deinit(allocator);
}
for (case.deps.items) |dep| {
var pkg = try Package.create(
allocator,
tmp_dir_path,
dep.path,
);
errdefer pkg.destroy(allocator);
try main_pkg.add(allocator, dep.name, pkg);
}
const bin_name = try std.zig.binNameAlloc(arena, .{
.root_name = "test_case",
.target = target,
.output_mode = case.output_mode,
});
const emit_directory: Compilation.Directory = .{
.path = tmp_dir_path,
.handle = tmp.dir,
};
const emit_bin: Compilation.EmitLoc = .{
.directory = emit_directory,
.basename = bin_name,
};
const emit_h: ?Compilation.EmitLoc = if (case.emit_h) .{
.directory = emit_directory,
.basename = "test_case.h",
} else null;
const use_llvm: bool = switch (case.backend) {
.llvm => true,
else => false,
};
const comp = try Compilation.create(allocator, .{
.local_cache_directory = zig_cache_directory,
.global_cache_directory = global_cache_directory,
.zig_lib_directory = zig_lib_directory,
.thread_pool = thread_pool,
.root_name = "test_case",
.target = target,
// TODO: support tests for object file building, and library builds
// and linking. This will require a rework to support multi-file
// tests.
.output_mode = case.output_mode,
.is_test = case.is_test,
.optimize_mode = case.optimize_mode,
.emit_bin = emit_bin,
.emit_h = emit_h,
.main_pkg = &main_pkg,
.keep_source_files_loaded = true,
.is_native_os = case.target.isNativeOs(),
.is_native_abi = case.target.isNativeAbi(),
.dynamic_linker = target.dynamic_linker.get(),
.link_libc = case.link_libc,
.use_llvm = use_llvm,
.self_exe_path = zig_exe_path,
// TODO instead of turning off color, pass in a std.Progress.Node
.color = .off,
.reference_trace = 0,
// TODO: force self-hosted linkers with stage2 backend to avoid LLD creeping in
// until the auto-select mechanism deems them worthy
.use_lld = switch (case.backend) {
.stage2 => false,
else => null,
},
});
defer comp.destroy();
update: for (case.updates.items, 0..) |update, update_index| {
var update_node = root_node.start(update.name, 3);
update_node.activate();
defer update_node.end();
var sync_node = update_node.start("write", 0);
sync_node.activate();
for (update.files.items) |file| {
try tmp.dir.writeFile(.{ .sub_path = file.path, .data = file.src });
}
sync_node.end();
var module_node = update_node.start("parse/analysis/codegen", 0);
module_node.activate();
try comp.makeBinFileWritable();
try comp.update(&module_node);
module_node.end();
if (update.case != .Error) {
var all_errors = try comp.getAllErrorsAlloc();
defer all_errors.deinit(allocator);
if (all_errors.errorMessageCount() > 0) {
all_errors.renderToStdErr(.{
.ttyconf = std.io.tty.detectConfig(std.io.getStdErr()),
});
// TODO print generated C code
return error.UnexpectedCompileErrors;
}
}
switch (update.case) {
.Header => |expected_output| {
var file = try tmp.dir.openFile("test_case.h", .{ .mode = .read_only });
defer file.close();
const out = try file.reader().readAllAlloc(arena, 5 * 1024 * 1024);
try std.testing.expectEqualStrings(expected_output, out);
},
.CompareObjectFile => |expected_output| {
var file = try tmp.dir.openFile(bin_name, .{ .mode = .read_only });
defer file.close();
const out = try file.reader().readAllAlloc(arena, 5 * 1024 * 1024);
try std.testing.expectEqualStrings(expected_output, out);
},
.Compile => {},
.Error => |expected_errors| {
var test_node = update_node.start("assert", 0);
test_node.activate();
defer test_node.end();
var error_bundle = try comp.getAllErrorsAlloc();
defer error_bundle.deinit(allocator);
if (error_bundle.errorMessageCount() == 0) {
return error.ExpectedCompilationErrors;
}
var actual_stderr = std.ArrayList(u8).init(arena);
try error_bundle.renderToWriter(.{
.ttyconf = .no_color,
.include_reference_trace = false,
.include_source_line = false,
}, actual_stderr.writer());
// Render the expected lines into a string that we can compare verbatim.
var expected_generated = std.ArrayList(u8).init(arena);
var actual_line_it = std.mem.splitScalar(u8, actual_stderr.items, '\n');
for (expected_errors) |expect_line| {
const actual_line = actual_line_it.next() orelse {
try expected_generated.appendSlice(expect_line);
try expected_generated.append('\n');
continue;
};
if (std.mem.endsWith(u8, actual_line, expect_line)) {
try expected_generated.appendSlice(actual_line);
try expected_generated.append('\n');
continue;
}
if (std.mem.startsWith(u8, expect_line, ":?:?: ")) {
if (std.mem.endsWith(u8, actual_line, expect_line[":?:?: ".len..])) {
try expected_generated.appendSlice(actual_line);
try expected_generated.append('\n');
continue;
}
}
try expected_generated.appendSlice(expect_line);
try expected_generated.append('\n');
}
try std.testing.expectEqualStrings(expected_generated.items, actual_stderr.items);
},
.Execution => |expected_stdout| {
if (!std.process.can_spawn) {
std.debug.print("Unable to spawn child processes on {s}, skipping test.\n", .{@tagName(builtin.os.tag)});
continue :update; // Pass test.
}
update_node.setEstimatedTotalItems(4);
var argv = std.ArrayList([]const u8).init(allocator);
defer argv.deinit();
const exec_result = x: {
var exec_node = update_node.start("execute", 0);
exec_node.activate();
defer exec_node.end();
// We go out of our way here to use the unique temporary directory name in
// the exe_path so that it makes its way into the cache hash, avoiding
// cache collisions from multiple threads doing `zig run` at the same time
// on the same test_case.c input filename.
const ss = std.fs.path.sep_str;
const exe_path = try std.fmt.allocPrint(
arena,
".." ++ ss ++ "{s}" ++ ss ++ "{s}",
.{ &tmp.sub_path, bin_name },
);
if (case.target.ofmt != null and case.target.ofmt.? == .c) {
if (getExternalExecutor(host, &target, .{ .link_libc = true }) != .native) {
// We wouldn't be able to run the compiled C code.
continue :update; // Pass test.
}
try argv.appendSlice(&[_][]const u8{
zig_exe_path,
"run",
"-cflags",
"-std=c99",
"-pedantic",
"-Werror",
"-Wno-incompatible-library-redeclaration", // https://github.com/ziglang/zig/issues/875
"--",
"-lc",
exe_path,
});
if (zig_lib_directory.path) |p| {
try argv.appendSlice(&.{ "-I", p });
}
} else switch (getExternalExecutor(host, &target, .{ .link_libc = case.link_libc })) {
.native => {
if (case.backend == .stage2 and case.target.getCpuArch().isArmOrThumb()) {
// https://github.com/ziglang/zig/issues/13623
continue :update; // Pass test.
}
try argv.append(exe_path);
},
.bad_dl, .bad_os_or_cpu => continue :update, // Pass test.
.rosetta => if (enable_rosetta) {
try argv.append(exe_path);
} else {
continue :update; // Rosetta not available, pass test.
},
.qemu => |qemu_bin_name| if (enable_qemu) {
const need_cross_glibc = target.isGnuLibC() and case.link_libc;
const glibc_dir_arg: ?[]const u8 = if (need_cross_glibc)
glibc_runtimes_dir orelse continue :update // glibc dir not available; pass test
else
null;
try argv.append(qemu_bin_name);
if (glibc_dir_arg) |dir| {
const linux_triple = try target.linuxTriple(arena);
const full_dir = try std.fs.path.join(arena, &[_][]const u8{
dir,
linux_triple,
});
try argv.append("-L");
try argv.append(full_dir);
}
try argv.append(exe_path);
} else {
continue :update; // QEMU not available; pass test.
},
.wine => |wine_bin_name| if (enable_wine) {
try argv.append(wine_bin_name);
try argv.append(exe_path);
} else {
continue :update; // Wine not available; pass test.
},
.wasmtime => |wasmtime_bin_name| if (enable_wasmtime) {
try argv.append(wasmtime_bin_name);
try argv.append("--dir=.");
try argv.append(exe_path);
} else {
continue :update; // wasmtime not available; pass test.
},
.darling => |darling_bin_name| if (enable_darling) {
try argv.append(darling_bin_name);
// Since we use relative to cwd here, we invoke darling with
// "shell" subcommand.
try argv.append("shell");
try argv.append(exe_path);
} else {
continue :update; // Darling not available; pass test.
},
}
try comp.makeBinFileExecutable();
while (true) {
break :x std.process.Child.run(.{
.allocator = allocator,
.argv = argv.items,
.cwd_dir = tmp.dir,
.cwd = tmp_dir_path,
}) catch |err| switch (err) {
error.FileBusy => {
// There is a fundamental design flaw in Unix systems with how
// ETXTBSY interacts with fork+exec.
// https://github.com/golang/go/issues/22315
// https://bugs.openjdk.org/browse/JDK-8068370
// Unfortunately, this could be a real error, but we can't
// tell the difference here.
continue;
},
else => {
std.debug.print("\n{s}.{d} The following command failed with {s}:\n", .{
case.name, update_index, @errorName(err),
});
dumpArgs(argv.items);
return error.ChildProcessExecution;
},
};
}
};
var test_node = update_node.start("test", 0);
test_node.activate();
defer test_node.end();
defer allocator.free(exec_result.stdout);
defer allocator.free(exec_result.stderr);
switch (exec_result.term) {
.Exited => |code| {
if (code != 0) {
std.debug.print("\n{s}\n{s}: execution exited with code {d}:\n", .{
exec_result.stderr, case.name, code,
});
dumpArgs(argv.items);
return error.ChildProcessExecution;
}
},
else => {
std.debug.print("\n{s}\n{s}: execution crashed:\n", .{
exec_result.stderr, case.name,
});
dumpArgs(argv.items);
return error.ChildProcessExecution;
},
}
try std.testing.expectEqualStrings(expected_stdout, exec_result.stdout);
// We allow stderr to have garbage in it because wasmtime prints a
// warning about --invoke even though we don't pass it.
//std.testing.expectEqualStrings("", exec_result.stderr);
},
}
}
}
fn dumpArgs(argv: []const []const u8) void {
for (argv) |arg| {
std.debug.print("{s} ", .{arg});
}
std.debug.print("\n", .{});
}
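A quick sanity check of the versioned shared-library matching in `knownFileExtension` (a sketch assuming the function ends with `return true;` as reconstructed above; this test does not ship with the commit):

    test "knownFileExtension recognizes versioned .so names" {
        try std.testing.expect(knownFileExtension("libfoo.so"));
        try std.testing.expect(knownFileExtension("libfoo.so.1"));
        try std.testing.expect(knownFileExtension("libfoo.so.1.2.3"));
        // More than three version components is rejected.
        try std.testing.expect(!knownFileExtension("libfoo.so.1.2.3.4"));
        try std.testing.expect(!knownFileExtension("notes.txt"));
    }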

View File

@ -1250,7 +1250,6 @@ pub fn addCases(
b: *std.Build,
parent_step: *Step,
test_filters: []const []const u8,
check_case_exe: *std.Build.Step.Compile,
target: std.Build.ResolvedTarget,
translate_c_options: @import("src/Cases.zig").TranslateCOptions,
build_options: @import("cases.zig").BuildOptions,
@ -1268,12 +1267,9 @@ pub fn addCases(
cases.lowerToTranslateCSteps(b, parent_step, test_filters, target, translate_c_options);
const cases_dir_path = try b.build_root.join(b.allocator, &.{ "test", "cases" });
cases.lowerToBuildSteps(
b,
parent_step,
test_filters,
cases_dir_path,
check_case_exe,
);
}