Type,Value: mark ResolveStrat parameter of type queries as comptime

This eliminates the statically-reachable recursion loop between code
generation backends and Sema. This is beneficial for optimizers
(although I did not measure any performance improvement from this change),
and for profilers.
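
As a quick illustration (a hypothetical sketch, not the compiler's actual code): once the strategy parameter is comptime, each call site gets its own instantiation and only the selected switch prong is analyzed and emitted, so a backend-style caller passing `.normal` has no static call edge into the Sema path. The names below (Strat, queryAdvanced, resolveViaSema) are stand-ins for ResolveStrat and the type queries touched by this commit:

const std = @import("std");

// Hypothetical stand-ins for ResolveStrat and a query such as abiSizeAdvanced.
const Strat = enum { normal, sema };

fn queryAdvanced(x: u32, comptime strat: Strat) u32 {
    // `strat` is comptime-known, so only the matching prong is analyzed and
    // code-generated per instantiation; the `.normal` instantiation contains
    // no reference to resolveViaSema.
    return switch (strat) {
        .normal => x +% 1,
        .sema => resolveViaSema(x),
    };
}

// Stand-in for a call back into semantic analysis.
fn resolveViaSema(x: u32) u32 {
    return x *% 2;
}

test "a backend-style caller only instantiates the .normal path" {
    try std.testing.expectEqual(@as(u32, 6), queryAdvanced(5, .normal));
}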
mlugg 2024-07-15 16:18:41 +01:00
parent 9356cb1475
commit b1d3d48f68
No known key found for this signature in database
GPG Key ID: 3F5B7DCCBF4AF02E
3 changed files with 22 additions and 22 deletions

View File

@@ -478,7 +478,7 @@ pub fn hasRuntimeBitsAdvanced(
     ty: Type,
     pt: Zcu.PerThread,
     ignore_comptime_only: bool,
-    strat: ResolveStratLazy,
+    comptime strat: ResolveStratLazy,
 ) RuntimeBitsError!bool {
     const mod = pt.zcu;
     const ip = &mod.intern_pool;
@@ -792,7 +792,7 @@ pub fn fnHasRuntimeBits(ty: Type, pt: Zcu.PerThread) bool {
 /// Determines whether a function type has runtime bits, i.e. whether a
 /// function with this type can exist at runtime.
 /// Asserts that `ty` is a function type.
-pub fn fnHasRuntimeBitsAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) SemaError!bool {
+pub fn fnHasRuntimeBitsAdvanced(ty: Type, pt: Zcu.PerThread, comptime strat: ResolveStrat) SemaError!bool {
     const fn_info = pt.zcu.typeToFunc(ty).?;
     if (fn_info.is_generic) return false;
     if (fn_info.is_var_args) return true;
@@ -824,7 +824,7 @@ pub fn ptrAlignment(ty: Type, pt: Zcu.PerThread) Alignment {
     return ptrAlignmentAdvanced(ty, pt, .normal) catch unreachable;
 }
 
-pub fn ptrAlignmentAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) !Alignment {
+pub fn ptrAlignmentAdvanced(ty: Type, pt: Zcu.PerThread, comptime strat: ResolveStrat) !Alignment {
     return switch (pt.zcu.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_type| {
             if (ptr_type.flags.alignment != .none)
@@ -891,7 +891,7 @@ pub const ResolveStrat = enum {
     /// This should typically be used from semantic analysis.
     sema,
 
-    pub fn toLazy(strat: ResolveStrat) ResolveStratLazy {
+    pub inline fn toLazy(strat: ResolveStrat) ResolveStratLazy {
         return switch (strat) {
             .normal => .eager,
             .sema => .sema,
@@ -908,7 +908,7 @@ pub const ResolveStrat = enum {
 pub fn abiAlignmentAdvanced(
     ty: Type,
     pt: Zcu.PerThread,
-    strat: ResolveStratLazy,
+    comptime strat: ResolveStratLazy,
 ) SemaError!AbiAlignmentAdvanced {
     const mod = pt.zcu;
     const target = mod.getTarget();
@@ -1130,7 +1130,7 @@ pub fn abiAlignmentAdvanced(
 fn abiAlignmentAdvancedErrorUnion(
     ty: Type,
     pt: Zcu.PerThread,
-    strat: ResolveStratLazy,
+    comptime strat: ResolveStratLazy,
     payload_ty: Type,
 ) SemaError!AbiAlignmentAdvanced {
     // This code needs to be kept in sync with the equivalent switch prong
@@ -1167,7 +1167,7 @@ fn abiAlignmentAdvancedErrorUnion(
 fn abiAlignmentAdvancedOptional(
     ty: Type,
     pt: Zcu.PerThread,
-    strat: ResolveStratLazy,
+    comptime strat: ResolveStratLazy,
 ) SemaError!AbiAlignmentAdvanced {
     const mod = pt.zcu;
     const target = mod.getTarget();
@@ -1231,7 +1231,7 @@ const AbiSizeAdvanced = union(enum) {
 pub fn abiSizeAdvanced(
     ty: Type,
     pt: Zcu.PerThread,
-    strat: ResolveStratLazy,
+    comptime strat: ResolveStratLazy,
 ) SemaError!AbiSizeAdvanced {
     const mod = pt.zcu;
     const target = mod.getTarget();
@@ -1505,7 +1505,7 @@ pub fn abiSizeAdvanced(
 fn abiSizeAdvancedOptional(
     ty: Type,
     pt: Zcu.PerThread,
-    strat: ResolveStratLazy,
+    comptime strat: ResolveStratLazy,
 ) SemaError!AbiSizeAdvanced {
     const mod = pt.zcu;
     const child_ty = ty.optionalChild(mod);
@@ -1680,7 +1680,7 @@ pub fn bitSize(ty: Type, pt: Zcu.PerThread) u64 {
 pub fn bitSizeAdvanced(
     ty: Type,
     pt: Zcu.PerThread,
-    strat: ResolveStrat,
+    comptime strat: ResolveStrat,
 ) SemaError!u64 {
     const mod = pt.zcu;
     const target = mod.getTarget();
@@ -2739,7 +2739,7 @@ pub fn comptimeOnly(ty: Type, pt: Zcu.PerThread) bool {
 
 /// `generic_poison` will return false.
 /// May return false negatives when structs and unions are having their field types resolved.
-pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) SemaError!bool {
+pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, comptime strat: ResolveStrat) SemaError!bool {
     const mod = pt.zcu;
     const ip = &mod.intern_pool;
     return switch (ty.toIntern()) {
@@ -3198,7 +3198,7 @@ pub fn structFieldAlign(ty: Type, index: usize, pt: Zcu.PerThread) Alignment {
     return ty.structFieldAlignAdvanced(index, pt, .normal) catch unreachable;
 }
 
-pub fn structFieldAlignAdvanced(ty: Type, index: usize, pt: Zcu.PerThread, strat: ResolveStrat) !Alignment {
+pub fn structFieldAlignAdvanced(ty: Type, index: usize, pt: Zcu.PerThread, comptime strat: ResolveStrat) !Alignment {
     const ip = &pt.zcu.intern_pool;
     switch (ip.indexToKey(ty.toIntern())) {
         .struct_type => {

View File

@@ -178,7 +178,7 @@ pub fn toBigIntAdvanced(
     val: Value,
     space: *BigIntSpace,
     pt: Zcu.PerThread,
-    strat: ResolveStrat,
+    comptime strat: ResolveStrat,
 ) Module.CompileError!BigIntConst {
     return switch (val.toIntern()) {
         .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(),
@@ -240,7 +240,7 @@ pub fn getUnsignedInt(val: Value, pt: Zcu.PerThread) ?u64 {
 
 /// If the value fits in a u64, return it, otherwise null.
 /// Asserts not undefined.
-pub fn getUnsignedIntAdvanced(val: Value, pt: Zcu.PerThread, strat: ResolveStrat) !?u64 {
+pub fn getUnsignedIntAdvanced(val: Value, pt: Zcu.PerThread, comptime strat: ResolveStrat) !?u64 {
     const mod = pt.zcu;
     return switch (val.toIntern()) {
         .undef => unreachable,
@@ -1042,7 +1042,7 @@ pub fn orderAgainstZero(lhs: Value, pt: Zcu.PerThread) std.math.Order {
 pub fn orderAgainstZeroAdvanced(
     lhs: Value,
     pt: Zcu.PerThread,
-    strat: ResolveStrat,
+    comptime strat: ResolveStrat,
 ) Module.CompileError!std.math.Order {
     return switch (lhs.toIntern()) {
         .bool_false => .eq,
@@ -1081,7 +1081,7 @@ pub fn order(lhs: Value, rhs: Value, pt: Zcu.PerThread) std.math.Order {
 }
 
 /// Asserts the value is comparable.
-pub fn orderAdvanced(lhs: Value, rhs: Value, pt: Zcu.PerThread, strat: ResolveStrat) !std.math.Order {
+pub fn orderAdvanced(lhs: Value, rhs: Value, pt: Zcu.PerThread, comptime strat: ResolveStrat) !std.math.Order {
     const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(pt, strat);
     const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(pt, strat);
     switch (lhs_against_zero) {
@@ -1119,7 +1119,7 @@ pub fn compareHeteroAdvanced(
     op: std.math.CompareOperator,
     rhs: Value,
     pt: Zcu.PerThread,
-    strat: ResolveStrat,
+    comptime strat: ResolveStrat,
 ) !bool {
     if (lhs.pointerDecl(pt.zcu)) |lhs_decl| {
         if (rhs.pointerDecl(pt.zcu)) |rhs_decl| {
@@ -1199,7 +1199,7 @@ pub fn compareAllWithZeroAdvancedExtra(
     lhs: Value,
     op: std.math.CompareOperator,
     pt: Zcu.PerThread,
-    strat: ResolveStrat,
+    comptime strat: ResolveStrat,
 ) Module.CompileError!bool {
     const mod = pt.zcu;
     if (lhs.isInf(mod)) {
@@ -1505,7 +1505,7 @@ pub fn floatFromIntAdvanced(
     int_ty: Type,
     float_ty: Type,
     pt: Zcu.PerThread,
-    strat: ResolveStrat,
+    comptime strat: ResolveStrat,
 ) !Value {
     const mod = pt.zcu;
     if (int_ty.zigTypeTag(mod) == .Vector) {
@@ -1523,7 +1523,7 @@ pub fn floatFromIntAdvanced(
     return floatFromIntScalar(val, float_ty, pt, strat);
 }
 
-pub fn floatFromIntScalar(val: Value, float_ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) !Value {
+pub fn floatFromIntScalar(val: Value, float_ty: Type, pt: Zcu.PerThread, comptime strat: ResolveStrat) !Value {
     const mod = pt.zcu;
     return switch (mod.intern_pool.indexToKey(val.toIntern())) {
         .undef => try pt.undefValue(float_ty),

View File

@@ -3145,7 +3145,7 @@ pub fn unionFieldNormalAlignmentAdvanced(
     pt: Zcu.PerThread,
     loaded_union: InternPool.LoadedUnionType,
     field_index: u32,
-    strat: Type.ResolveStrat,
+    comptime strat: Type.ResolveStrat,
 ) Zcu.SemaError!InternPool.Alignment {
     const ip = &pt.zcu.intern_pool;
     assert(loaded_union.flagsUnordered(ip).layout != .@"packed");
@@ -3173,7 +3173,7 @@ pub fn structFieldAlignmentAdvanced(
     explicit_alignment: InternPool.Alignment,
     field_ty: Type,
     layout: std.builtin.Type.ContainerLayout,
-    strat: Type.ResolveStrat,
+    comptime strat: Type.ResolveStrat,
 ) Zcu.SemaError!InternPool.Alignment {
     assert(layout != .@"packed");
     if (explicit_alignment != .none) return explicit_alignment;