Mirror of https://github.com/ziglang/zig.git, synced 2026-02-13 04:48:20 +00:00
stage2: implement intTagType logic
This commit changes a lot of `*const Module` to `*Module` to make it work, since accessing the integer tag type of an enum might need to mutate the InternPool by adding a new integer type into it. An alternate strategy would be to pre-heat the InternPool with the integer tag type when creating an enum type, which would make it so that intTagType could accept a const Module instead of a mutable one, asserting that the InternPool already had the integer tag type.
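The shape of the change is easiest to see in the new `Type.intTagType` itself. A condensed sketch of the src/type.zig hunk below (indentation and surrounding declarations omitted): the `enum_simple` case has no stored tag type, so it must construct `uN` on the fly via `mod.intType`, which may intern a new integer type and can therefore fail with `error.OutOfMemory` -- hence the mutable `*Module` and the error union in the signature.

    /// Asserts the type is an enum or a union.
    pub fn intTagType(ty: Type, mod: *Module) !Type {
        switch (ty.tag()) {
            .enum_full, .enum_nonexhaustive => return ty.cast(Payload.EnumFull).?.data.tag_ty,
            .enum_numbered => return ty.castTag(.enum_numbered).?.data.tag_ty,
            .enum_simple => {
                // Smallest unsigned integer that can hold the field count.
                const enum_simple = ty.castTag(.enum_simple).?.data;
                const field_count = enum_simple.fields.count();
                const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count);
                // May add a new int type to the InternPool: requires a mutable Module.
                return mod.intType(.unsigned, bits);
            },
            .union_tagged => return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(mod),
            else => unreachable,
        }
    }

Callers accordingly switch from `ty.intTagType()` to `try ty.intTagType(mod)`, or `catch` the error where it cannot be propagated.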
This commit is contained in:
parent a5fb169594
commit 4d88f825bc
@@ -944,7 +944,7 @@ pub const Decl = struct {
};
}

-pub fn getAlignment(decl: Decl, mod: *const Module) u32 {
+pub fn getAlignment(decl: Decl, mod: *Module) u32 {
assert(decl.has_tv);
if (decl.@"align" != 0) {
// Explicit alignment.
@@ -1053,7 +1053,7 @@ pub const Struct = struct {
/// Returns the field alignment. If the struct is packed, returns 0.
pub fn alignment(
field: Field,
-mod: *const Module,
+mod: *Module,
layout: std.builtin.Type.ContainerLayout,
) u32 {
if (field.abi_align != 0) {
@@ -1076,7 +1076,7 @@ pub const Struct = struct {
}
}

-pub fn alignmentExtern(field: Field, mod: *const Module) u32 {
+pub fn alignmentExtern(field: Field, mod: *Module) u32 {
// This logic is duplicated in Type.abiAlignmentAdvanced.
const ty_abi_align = field.ty.abiAlignment(mod);

@@ -1157,7 +1157,7 @@ pub const Struct = struct {
};
}

-pub fn packedFieldBitOffset(s: Struct, mod: *const Module, index: usize) u16 {
+pub fn packedFieldBitOffset(s: Struct, mod: *Module, index: usize) u16 {
assert(s.layout == .Packed);
assert(s.haveLayout());
var bit_sum: u64 = 0;
@@ -1171,7 +1171,7 @@ pub const Struct = struct {
}

pub const RuntimeFieldIterator = struct {
-module: *const Module,
+module: *Module,
struct_obj: *const Struct,
index: u32 = 0,

@@ -1201,7 +1201,7 @@ pub const Struct = struct {
}
};

-pub fn runtimeFieldIterator(s: *const Struct, module: *const Module) RuntimeFieldIterator {
+pub fn runtimeFieldIterator(s: *const Struct, module: *Module) RuntimeFieldIterator {
return .{
.struct_obj = s,
.module = module,
@@ -1353,7 +1353,7 @@ pub const Union = struct {
/// Returns the field alignment, assuming the union is not packed.
/// Keep implementation in sync with `Sema.unionFieldAlignment`.
/// Prefer to call that function instead of this one during Sema.
-pub fn normalAlignment(field: Field, mod: *const Module) u32 {
+pub fn normalAlignment(field: Field, mod: *Module) u32 {
if (field.abi_align == 0) {
return field.ty.abiAlignment(mod);
} else {
@@ -1413,7 +1413,7 @@ pub const Union = struct {
};
}

-pub fn hasAllZeroBitFieldTypes(u: Union, mod: *const Module) bool {
+pub fn hasAllZeroBitFieldTypes(u: Union, mod: *Module) bool {
assert(u.haveFieldTypes());
for (u.fields.values()) |field| {
if (field.ty.hasRuntimeBits(mod)) return false;
@@ -1421,7 +1421,7 @@ pub const Union = struct {
return true;
}

-pub fn mostAlignedField(u: Union, mod: *const Module) u32 {
+pub fn mostAlignedField(u: Union, mod: *Module) u32 {
assert(u.haveFieldTypes());
var most_alignment: u32 = 0;
var most_index: usize = undefined;
@@ -1438,7 +1438,7 @@ pub const Union = struct {
}

/// Returns 0 if the union is represented with 0 bits at runtime.
-pub fn abiAlignment(u: Union, mod: *const Module, have_tag: bool) u32 {
+pub fn abiAlignment(u: Union, mod: *Module, have_tag: bool) u32 {
var max_align: u32 = 0;
if (have_tag) max_align = u.tag_ty.abiAlignment(mod);
for (u.fields.values()) |field| {
@@ -1450,7 +1450,7 @@ pub const Union = struct {
return max_align;
}

-pub fn abiSize(u: Union, mod: *const Module, have_tag: bool) u64 {
+pub fn abiSize(u: Union, mod: *Module, have_tag: bool) u64 {
return u.getLayout(mod, have_tag).abi_size;
}

@@ -1481,7 +1481,7 @@ pub const Union = struct {
};
}

-pub fn getLayout(u: Union, mod: *const Module, have_tag: bool) Layout {
+pub fn getLayout(u: Union, mod: *Module, have_tag: bool) Layout {
assert(u.haveLayout());
var most_aligned_field: u32 = undefined;
var most_aligned_field_size: u64 = undefined;
@@ -6988,6 +6988,7 @@ pub const AtomicPtrAlignmentError = error{
FloatTooBig,
IntTooBig,
BadType,
+OutOfMemory,
};

pub const AtomicPtrAlignmentDiagnostics = struct {
@@ -7001,7 +7002,7 @@ pub const AtomicPtrAlignmentDiagnostics = struct {
// TODO this function does not take into account CPU features, which can affect
// this value. Audit this!
pub fn atomicPtrAlignment(
-mod: *const Module,
+mod: *Module,
ty: Type,
diags: *AtomicPtrAlignmentDiagnostics,
) AtomicPtrAlignmentError!u32 {
@@ -7080,7 +7081,7 @@ pub fn atomicPtrAlignment(

const int_ty = switch (ty.zigTypeTag(mod)) {
.Int => ty,
-.Enum => ty.intTagType(),
+.Enum => try ty.intTagType(mod),
.Float => {
const bit_count = ty.floatBits(target);
if (bit_count > max_atomic_bits) {
src/Sema.zig
@@ -8249,7 +8249,6 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError

fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
-const arena = sema.arena;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -8278,7 +8277,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
};
const enum_tag_ty = sema.typeOf(enum_tag);

-const int_tag_ty = try enum_tag_ty.intTagType().copy(arena);
+const int_tag_ty = try enum_tag_ty.intTagType(mod);

if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| {
return sema.addConstant(int_tag_ty, opv);
@@ -8310,7 +8309,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A

if (try sema.resolveMaybeUndefVal(operand)) |int_val| {
if (dest_ty.isNonexhaustiveEnum()) {
-const int_tag_ty = dest_ty.intTagType();
+const int_tag_ty = try dest_ty.intTagType(mod);
if (try sema.intFitsInType(int_val, int_tag_ty, null)) {
return sema.addConstant(dest_ty, int_val);
}
@@ -16268,7 +16267,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
},
.Enum => {
// TODO: look into memoizing this result.
-const int_tag_ty = try ty.intTagType().copy(sema.arena);
+const int_tag_ty = try ty.intTagType(mod);

const is_exhaustive = Value.makeBool(!ty.isNonexhaustiveEnum());

@@ -20354,7 +20353,7 @@ fn zirBitCount(
block: *Block,
inst: Zir.Inst.Index,
air_tag: Air.Inst.Tag,
-comptime comptimeOp: fn (val: Value, ty: Type, mod: *const Module) u64,
+comptime comptimeOp: fn (val: Value, ty: Type, mod: *Module) u64,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
@@ -20755,6 +20754,7 @@ fn checkAtomicPtrOperand(
const mod = sema.mod;
var diag: Module.AtomicPtrAlignmentDiagnostics = .{};
const alignment = mod.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) {
+error.OutOfMemory => return error.OutOfMemory,
error.FloatTooBig => return sema.fail(
block,
elem_ty_src,
@@ -23462,7 +23462,7 @@ fn validateExternType(
return !Type.fnCallingConventionAllowsZigTypes(target, ty.fnCallingConvention());
},
.Enum => {
-return sema.validateExternType(ty.intTagType(), position);
+return sema.validateExternType(try ty.intTagType(mod), position);
},
.Struct, .Union => switch (ty.containerLayout()) {
.Extern => return true,
@@ -23540,7 +23540,7 @@ fn explainWhyTypeIsNotExtern(
}
},
.Enum => {
-const tag_ty = ty.intTagType();
+const tag_ty = try ty.intTagType(mod);
try mod.errNoteNonLazy(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(sema.mod)});
try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position);
},
@@ -4533,7 +4533,7 @@ fn cmp(
}
},
.Float => return self.fail("TODO ARM cmp floats", .{}),
-.Enum => lhs_ty.intTagType(),
+.Enum => try lhs_ty.intTagType(mod),
.Int => lhs_ty,
.Bool => Type.u1,
.Pointer => Type.usize,
@@ -15,7 +15,7 @@ pub const Class = union(enum) {
};

/// For `float_array` the second element will be the amount of floats.
-pub fn classifyType(ty: Type, mod: *const Module) Class {
+pub fn classifyType(ty: Type, mod: *Module) Class {
std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod));

var maybe_float_bits: ?u16 = null;
@@ -74,7 +74,7 @@ pub fn classifyType(ty: Type, mod: *const Module) Class {
}

const sret_float_count = 4;
-fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u8 {
+fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 {
const target = mod.getTarget();
const invalid = std.math.maxInt(u8);
switch (ty.zigTypeTag(mod)) {
@@ -115,7 +115,7 @@ fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u8 {
}
}

-pub fn getFloatArrayType(ty: Type, mod: *const Module) ?Type {
+pub fn getFloatArrayType(ty: Type, mod: *Module) ?Type {
switch (ty.zigTypeTag(mod)) {
.Union => {
const fields = ty.unionFields();
@@ -4480,7 +4480,7 @@ fn cmp(
}
},
.Float => return self.fail("TODO ARM cmp floats", .{}),
-.Enum => lhs_ty.intTagType(),
+.Enum => try lhs_ty.intTagType(mod),
.Int => lhs_ty,
.Bool => Type.u1,
.Pointer => Type.usize,
@@ -24,7 +24,7 @@ pub const Class = union(enum) {

pub const Context = enum { ret, arg };

-pub fn classifyType(ty: Type, mod: *const Module, ctx: Context) Class {
+pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
assert(ty.hasRuntimeBitsIgnoreComptime(mod));

var maybe_float_bits: ?u16 = null;
@@ -116,7 +116,7 @@ pub fn classifyType(ty: Type, mod: *const Module, ctx: Context) Class {
}

const byval_float_count = 4;
-fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u32 {
+fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 {
const target = mod.getTarget();
const invalid = std.math.maxInt(u32);
switch (ty.zigTypeTag(mod)) {
@@ -7,7 +7,7 @@ const Module = @import("../../Module.zig");

pub const Class = enum { memory, byval, integer, double_integer };

-pub fn classifyType(ty: Type, mod: *const Module) Class {
+pub fn classifyType(ty: Type, mod: *Module) Class {
const target = mod.getTarget();
std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod));

@@ -1436,7 +1436,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {

const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
.Vector => unreachable, // Handled by cmp_vector.
-.Enum => lhs_ty.intTagType(),
+.Enum => try lhs_ty.intTagType(mod),
.Int => lhs_ty,
.Bool => Type.u1,
.Pointer => Type.usize,
@@ -1393,7 +1393,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
return result;
}

-fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *const Module) bool {
+fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *Module) bool {
switch (cc) {
.Unspecified, .Inline => return isByRef(return_type, mod),
.C => {
@@ -1713,7 +1713,7 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch {

/// For a given `Type`, will return true when the type will be passed
/// by reference, rather than by value
-fn isByRef(ty: Type, mod: *const Module) bool {
+fn isByRef(ty: Type, mod: *Module) bool {
const target = mod.getTarget();
switch (ty.zigTypeTag(mod)) {
.Type,
@@ -1787,7 +1787,7 @@ const SimdStoreStrategy = enum {
/// This means when a given type is 128 bits and either the simd128 or relaxed-simd
/// features are enabled, the function will return `.direct`. This would allow to store
/// it using a instruction, rather than an unrolled version.
-fn determineSimdStoreStrategy(ty: Type, mod: *const Module) SimdStoreStrategy {
+fn determineSimdStoreStrategy(ty: Type, mod: *Module) SimdStoreStrategy {
std.debug.assert(ty.zigTypeTag(mod) == .Vector);
if (ty.bitSize(mod) != 128) return .unrolled;
const hasFeature = std.Target.wasm.featureSetHas;
@@ -3121,7 +3121,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
else => return func.fail("TODO: lowerConstant for enum tag: {}", .{ty.tag()}),
}
} else {
-const int_tag_ty = ty.intTagType();
+const int_tag_ty = try ty.intTagType(mod);
return func.lowerConstant(val, int_tag_ty);
}
},
@@ -3235,7 +3235,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
/// Returns a `Value` as a signed 32 bit value.
/// It's illegal to provide a value with a type that cannot be represented
/// as an integer value.
-fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
+fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) !i32 {
const mod = func.bin_file.base.options.module.?;
switch (ty.zigTypeTag(mod)) {
.Enum => {
@@ -3257,7 +3257,7 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
else => unreachable,
}
} else {
-const int_tag_ty = ty.intTagType();
+const int_tag_ty = try ty.intTagType(mod);
return func.valueAsI32(val, int_tag_ty);
}
},
@@ -3793,7 +3793,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {

for (items, 0..) |ref, i| {
const item_val = (try func.air.value(ref, mod)).?;
-const int_val = func.valueAsI32(item_val, target_ty);
+const int_val = try func.valueAsI32(item_val, target_ty);
if (lowest_maybe == null or int_val < lowest_maybe.?) {
lowest_maybe = int_val;
}
@@ -6814,7 +6814,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
return loc.index;
}

-const int_tag_ty = enum_ty.intTagType();
+const int_tag_ty = try enum_ty.intTagType(mod);

if (int_tag_ty.bitSize(mod) > 64) {
return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{});
@@ -21,7 +21,7 @@ const direct: [2]Class = .{ .direct, .none };
/// Classifies a given Zig type to determine how they must be passed
/// or returned as value within a wasm function.
/// When all elements result in `.none`, no value must be passed in or returned.
-pub fn classifyType(ty: Type, mod: *const Module) [2]Class {
+pub fn classifyType(ty: Type, mod: *Module) [2]Class {
const target = mod.getTarget();
if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none;
switch (ty.zigTypeTag(mod)) {
@@ -93,7 +93,7 @@ pub fn classifyType(ty: Type, mod: *const Module) [2]Class {
/// Returns the scalar type a given type can represent.
/// Asserts given type can be represented as scalar, such as
/// a struct with a single scalar field.
-pub fn scalarType(ty: Type, mod: *const Module) Type {
+pub fn scalarType(ty: Type, mod: *Module) Type {
switch (ty.zigTypeTag(mod)) {
.Struct => {
switch (ty.containerLayout()) {
@@ -605,7 +605,7 @@ const FrameAlloc = struct {
.ref_count = 0,
};
}
-fn initType(ty: Type, mod: *const Module) FrameAlloc {
+fn initType(ty: Type, mod: *Module) FrameAlloc {
return init(.{ .size = ty.abiSize(mod), .alignment = ty.abiAlignment(mod) });
}
};
@@ -2309,7 +2309,7 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b
return .{ .load_frame = .{ .index = frame_index } };
}

-fn regClassForType(ty: Type, mod: *const Module) RegisterManager.RegisterBitSet {
+fn regClassForType(ty: Type, mod: *Module) RegisterManager.RegisterBitSet {
return switch (ty.zigTypeTag(mod)) {
.Float, .Vector => sse,
else => gp,
@@ -12,7 +12,7 @@ pub const Class = enum {
float_combine,
};

-pub fn classifyWindows(ty: Type, mod: *const Module) Class {
+pub fn classifyWindows(ty: Type, mod: *Module) Class {
// https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
// "There's a strict one-to-one correspondence between a function call's arguments
// and the registers used for those arguments. Any argument that doesn't fit in 8
@@ -68,7 +68,7 @@ pub const Context = enum { ret, arg, other };

/// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
-pub fn classifySystemV(ty: Type, mod: *const Module, ctx: Context) [8]Class {
+pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
const target = mod.getTarget();
const memory_class = [_]Class{
.memory, .none, .none, .none,
@@ -1241,7 +1241,7 @@ pub fn genTypedValue(
if (enum_values.count() != 0) {
const tag_val = enum_values.keys()[field_index.data];
return genTypedValue(bin_file, src_loc, .{
-.ty = typed_value.ty.intTagType(),
+.ty = try typed_value.ty.intTagType(mod),
.val = tag_val,
}, owner_decl_index);
} else {
@@ -1251,7 +1251,7 @@ pub fn genTypedValue(
else => unreachable,
}
} else {
-const int_tag_ty = typed_value.ty.intTagType();
+const int_tag_ty = try typed_value.ty.intTagType(mod);
return genTypedValue(bin_file, src_loc, .{
.ty = int_tag_ty,
.val = typed_value.val,
@@ -1303,7 +1303,7 @@ pub fn genTypedValue(
return genUnnamedConst(bin_file, src_loc, typed_value, owner_decl_index);
}

-pub fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u64 {
+pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
const payload_align = payload_ty.abiAlignment(mod);
const error_align = Type.anyerror.abiAlignment(mod);
@@ -1314,7 +1314,7 @@ pub fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u64 {
}
}

-pub fn errUnionErrorOffset(payload_ty: Type, mod: *const Module) u64 {
+pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
const payload_align = payload_ty.abiAlignment(mod);
const error_align = Type.anyerror.abiAlignment(mod);
@@ -1300,7 +1300,7 @@ pub const DeclGen = struct {
}
},
else => {
-const int_tag_ty = ty.intTagType();
+const int_tag_ty = try ty.intTagType(mod);
return dg.renderValue(writer, int_tag_ty, val, location);
},
}
@@ -5198,7 +5198,7 @@ fn fieldLocation(
container_ty: Type,
field_ptr_ty: Type,
field_index: u32,
-mod: *const Module,
+mod: *Module,
) union(enum) {
begin: void,
field: CValue,
@@ -7722,7 +7722,7 @@ const LowerFnRetTyBuffer = struct {
values: [1]Value,
payload: Type.Payload.AnonStruct,
};
-fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *const Module) Type {
+fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *Module) Type {
if (ret_ty.zigTypeTag(mod) == .NoReturn) return Type.noreturn;

if (lowersToArray(ret_ty, mod)) {
@@ -7740,7 +7740,7 @@ fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *const Module) T
return if (ret_ty.hasRuntimeBitsIgnoreComptime(mod)) ret_ty else Type.void;
}

-fn lowersToArray(ty: Type, mod: *const Module) bool {
+fn lowersToArray(ty: Type, mod: *Module) bool {
return switch (ty.zigTypeTag(mod)) {
.Array, .Vector => return true,
else => return ty.isAbiInt(mod) and toCIntBits(@intCast(u32, ty.bitSize(mod))) == null,
@@ -292,17 +292,17 @@ pub const CType = extern union {
.abi = std.math.log2_int(u32, abi_alignment),
};
}
-pub fn abiAlign(ty: Type, mod: *const Module) AlignAs {
+pub fn abiAlign(ty: Type, mod: *Module) AlignAs {
const abi_align = ty.abiAlignment(mod);
return init(abi_align, abi_align);
}
-pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *const Module) AlignAs {
+pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *Module) AlignAs {
return init(
struct_ty.structFieldAlign(field_i, mod),
struct_ty.structFieldType(field_i).abiAlignment(mod),
);
}
-pub fn unionPayloadAlign(union_ty: Type, mod: *const Module) AlignAs {
+pub fn unionPayloadAlign(union_ty: Type, mod: *Module) AlignAs {
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
const union_payload_align = union_obj.abiAlignment(mod, false);
return init(union_payload_align, union_payload_align);
@@ -1897,7 +1897,7 @@ pub const CType = extern union {
}
}

-fn createFromType(store: *Store.Promoted, ty: Type, mod: *const Module, kind: Kind) !CType {
+fn createFromType(store: *Store.Promoted, ty: Type, mod: *Module, kind: Kind) !CType {
var convert: Convert = undefined;
try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .mod = mod } });
return createFromConvert(store, ty, mod, kind, &convert);
@@ -1527,7 +1527,7 @@ pub const Object = struct {
};
const field_index_val = Value.initPayload(&buf_field_index.base);

-const int_ty = ty.intTagType();
+const int_ty = try ty.intTagType(mod);
const int_info = ty.intInfo(mod);
assert(int_info.bits != 0);

@@ -2805,7 +2805,7 @@ pub const DeclGen = struct {
return dg.context.intType(info.bits);
},
.Enum => {
-const int_ty = t.intTagType();
+const int_ty = try t.intTagType(mod);
const bit_count = int_ty.intInfo(mod).bits;
assert(bit_count != 0);
return dg.context.intType(bit_count);
@@ -4334,7 +4334,9 @@ pub const DeclGen = struct {
const mod = dg.module;
const int_ty = switch (ty.zigTypeTag(mod)) {
.Int => ty,
-.Enum => ty.intTagType(),
+.Enum => ty.intTagType(mod) catch |err| switch (err) {
+error.OutOfMemory => @panic("OOM"),
+},
.Float => {
if (!is_rmw_xchg) return null;
return dg.context.intType(@intCast(c_uint, ty.abiSize(mod) * 8));
@@ -5286,7 +5288,7 @@ pub const FuncGen = struct {
const mod = self.dg.module;
const scalar_ty = operand_ty.scalarType(mod);
const int_ty = switch (scalar_ty.zigTypeTag(mod)) {
-.Enum => scalar_ty.intTagType(),
+.Enum => try scalar_ty.intTagType(mod),
.Int, .Bool, .Pointer, .ErrorSet => scalar_ty,
.Optional => blk: {
const payload_ty = operand_ty.optionalChild(mod);
@@ -8867,7 +8869,7 @@ pub const FuncGen = struct {
defer self.gpa.free(fqn);
const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{fqn});

-const int_tag_ty = enum_ty.intTagType();
+const int_tag_ty = try enum_ty.intTagType(mod);
const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};

const llvm_ret_ty = try self.dg.lowerType(Type.bool);
@@ -8950,7 +8952,7 @@ pub const FuncGen = struct {
const usize_llvm_ty = try self.dg.lowerType(Type.usize);
const slice_alignment = slice_ty.abiAlignment(mod);

-const int_tag_ty = enum_ty.intTagType();
+const int_tag_ty = try enum_ty.intTagType(mod);
const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};

const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
@@ -10487,7 +10489,7 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ
fn llvmFieldIndex(
ty: Type,
field_index: usize,
-mod: *const Module,
+mod: *Module,
ptr_pl_buf: *Type.Payload.Pointer,
) ?c_uint {
// Detects where we inserted extra padding fields so that we can skip
@@ -10564,7 +10566,7 @@ fn llvmFieldIndex(
}
}

-fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *const Module) bool {
+fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *Module) bool {
if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) return false;

const target = mod.getTarget();
@@ -10593,7 +10595,7 @@ fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *const Module) bool
}
}

-fn firstParamSRetSystemV(ty: Type, mod: *const Module) bool {
+fn firstParamSRetSystemV(ty: Type, mod: *Module) bool {
const class = x86_64_abi.classifySystemV(ty, mod, .ret);
if (class[0] == .memory) return true;
if (class[0] == .x87 and class[2] != .none) return true;
@@ -11041,7 +11043,7 @@ fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTyp

fn ccAbiPromoteInt(
cc: std.builtin.CallingConvention,
-mod: *const Module,
+mod: *Module,
ty: Type,
) ?std.builtin.Signedness {
const target = mod.getTarget();
@@ -11080,7 +11082,7 @@ fn ccAbiPromoteInt(

/// This is the one source of truth for whether a type is passed around as an LLVM pointer,
/// or as an LLVM value.
-fn isByRef(ty: Type, mod: *const Module) bool {
+fn isByRef(ty: Type, mod: *Module) bool {
// For tuples and structs, if there are more than this many non-void
// fields, then we make it byref, otherwise byval.
const max_fields_byval = 0;
@@ -11159,7 +11161,7 @@ fn isByRef(ty: Type, mod: *const Module) bool {
}
}

-fn isScalar(mod: *const Module, ty: Type) bool {
+fn isScalar(mod: *Module, ty: Type) bool {
return switch (ty.zigTypeTag(mod)) {
.Void,
.Bool,
@@ -11344,11 +11346,11 @@ fn buildAllocaInner(
return alloca;
}

-fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u1 {
+fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u1 {
return @boolToInt(Type.anyerror.abiAlignment(mod) > payload_ty.abiAlignment(mod));
}

-fn errUnionErrorOffset(payload_ty: Type, mod: *const Module) u1 {
+fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u1 {
return @boolToInt(Type.anyerror.abiAlignment(mod) <= payload_ty.abiAlignment(mod));
}
@@ -745,7 +745,7 @@ pub const DeclGen = struct {
.Enum => {
const int_val = try val.enumToInt(ty, mod);

-const int_ty = ty.intTagType();
+const int_ty = try ty.intTagType(mod);

try self.lower(int_ty, int_val);
},
@@ -1195,7 +1195,7 @@ pub const DeclGen = struct {
return try self.intType(int_info.signedness, int_info.bits);
},
.Enum => {
-const tag_ty = ty.intTagType();
+const tag_ty = try ty.intTagType(mod);
return self.resolveType(tag_ty, repr);
},
.Float => {
@@ -3090,7 +3090,7 @@ pub const DeclGen = struct {
break :blk if (backing_bits <= 32) @as(u32, 1) else 2;
},
.Enum => blk: {
-const int_ty = cond_ty.intTagType();
+const int_ty = try cond_ty.intTagType(mod);
const int_info = int_ty.intInfo(mod);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
return self.todo("implement composite int switch", .{});
src/type.zig
@@ -1606,7 +1606,7 @@ pub const Type = struct {
/// may return false positives.
pub fn hasRuntimeBitsAdvanced(
ty: Type,
-mod: *const Module,
+mod: *Module,
ignore_comptime_only: bool,
strat: AbiAlignmentAdvancedStrat,
) RuntimeBitsError!bool {
@@ -1785,7 +1785,7 @@ pub const Type = struct {
return enum_simple.fields.count() >= 2;
},
.enum_numbered, .enum_nonexhaustive => {
-const int_tag_ty = ty.intTagType();
+const int_tag_ty = try ty.intTagType(mod);
return int_tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
},

@@ -1850,7 +1850,7 @@ pub const Type = struct {
/// true if and only if the type has a well-defined memory layout
/// readFrom/writeToMemory are supported only for types with a well-
/// defined memory layout
-pub fn hasWellDefinedLayout(ty: Type, mod: *const Module) bool {
+pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool {
if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.int_type => true,
.ptr_type => true,
@@ -1952,15 +1952,15 @@ pub const Type = struct {
};
}

-pub fn hasRuntimeBits(ty: Type, mod: *const Module) bool {
+pub fn hasRuntimeBits(ty: Type, mod: *Module) bool {
return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable;
}

-pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *const Module) bool {
+pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool {
return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable;
}

-pub fn isFnOrHasRuntimeBits(ty: Type, mod: *const Module) bool {
+pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool {
switch (ty.zigTypeTag(mod)) {
.Fn => {
const fn_info = ty.fnInfo();
@@ -1980,7 +1980,7 @@ pub const Type = struct {
}

/// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive.
-pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *const Module) bool {
+pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool {
return switch (ty.zigTypeTag(mod)) {
.Fn => true,
else => return ty.hasRuntimeBitsIgnoreComptime(mod),
@@ -2019,11 +2019,11 @@ pub const Type = struct {
}

/// Returns 0 if the pointer is naturally aligned and the element type is 0-bit.
-pub fn ptrAlignment(ty: Type, mod: *const Module) u32 {
+pub fn ptrAlignment(ty: Type, mod: *Module) u32 {
return ptrAlignmentAdvanced(ty, mod, null) catch unreachable;
}

-pub fn ptrAlignmentAdvanced(ty: Type, mod: *const Module, opt_sema: ?*Sema) !u32 {
+pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !u32 {
switch (ty.ip_index) {
.none => switch (ty.tag()) {
.pointer => {
@@ -2072,7 +2072,7 @@ pub const Type = struct {
}

/// Returns 0 for 0-bit types.
-pub fn abiAlignment(ty: Type, mod: *const Module) u32 {
+pub fn abiAlignment(ty: Type, mod: *Module) u32 {
return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
}

@@ -2103,7 +2103,7 @@ pub const Type = struct {
/// necessary, possibly returning a CompileError.
pub fn abiAlignmentAdvanced(
ty: Type,
-mod: *const Module,
+mod: *Module,
strat: AbiAlignmentAdvancedStrat,
) Module.CompileError!AbiAlignmentAdvanced {
const target = mod.getTarget();
@@ -2320,7 +2320,7 @@ pub const Type = struct {
},

.enum_full, .enum_nonexhaustive, .enum_simple, .enum_numbered => {
-const int_tag_ty = ty.intTagType();
+const int_tag_ty = try ty.intTagType(mod);
return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(mod) };
},
.@"union" => {
@@ -2344,7 +2344,7 @@ pub const Type = struct {

fn abiAlignmentAdvancedErrorUnion(
ty: Type,
-mod: *const Module,
+mod: *Module,
strat: AbiAlignmentAdvancedStrat,
) Module.CompileError!AbiAlignmentAdvanced {
// This code needs to be kept in sync with the equivalent switch prong
@@ -2380,7 +2380,7 @@ pub const Type = struct {

fn abiAlignmentAdvancedOptional(
ty: Type,
-mod: *const Module,
+mod: *Module,
strat: AbiAlignmentAdvancedStrat,
) Module.CompileError!AbiAlignmentAdvanced {
const target = mod.getTarget();
@@ -2412,7 +2412,7 @@ pub const Type = struct {

pub fn abiAlignmentAdvancedUnion(
ty: Type,
-mod: *const Module,
+mod: *Module,
strat: AbiAlignmentAdvancedStrat,
union_obj: *Module.Union,
have_tag: bool,
@@ -2477,7 +2477,7 @@ pub const Type = struct {

/// Asserts the type has the ABI size already resolved.
/// Types that return false for hasRuntimeBits() return 0.
-pub fn abiSize(ty: Type, mod: *const Module) u64 {
+pub fn abiSize(ty: Type, mod: *Module) u64 {
return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar;
}

@@ -2494,7 +2494,7 @@ pub const Type = struct {
/// necessary, possibly returning a CompileError.
pub fn abiSizeAdvanced(
ty: Type,
-mod: *const Module,
+mod: *Module,
strat: AbiAlignmentAdvancedStrat,
) Module.CompileError!AbiSizeAdvanced {
const target = mod.getTarget();
@@ -2661,7 +2661,7 @@ pub const Type = struct {
},

.enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
-const int_tag_ty = ty.intTagType();
+const int_tag_ty = try ty.intTagType(mod);
return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(mod) };
},
.@"union" => {
@@ -2754,7 +2754,7 @@ pub const Type = struct {

pub fn abiSizeAdvancedUnion(
ty: Type,
-mod: *const Module,
+mod: *Module,
strat: AbiAlignmentAdvancedStrat,
union_obj: *Module.Union,
have_tag: bool,
@@ -2773,7 +2773,7 @@ pub const Type = struct {

fn abiSizeAdvancedOptional(
ty: Type,
-mod: *const Module,
+mod: *Module,
strat: AbiAlignmentAdvancedStrat,
) Module.CompileError!AbiSizeAdvanced {
const child_ty = ty.optionalChild(mod);
@@ -2821,7 +2821,7 @@ pub const Type = struct {
);
}

-pub fn bitSize(ty: Type, mod: *const Module) u64 {
+pub fn bitSize(ty: Type, mod: *Module) u64 {
return bitSizeAdvanced(ty, mod, null) catch unreachable;
}

@@ -2830,7 +2830,7 @@ pub const Type = struct {
/// the type is fully resolved, and there will be no error, guaranteed.
pub fn bitSizeAdvanced(
ty: Type,
-mod: *const Module,
+mod: *Module,
opt_sema: ?*Sema,
) Module.CompileError!u64 {
const target = mod.getTarget();
@@ -2950,7 +2950,7 @@ pub const Type = struct {
},

.enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
-const int_tag_ty = ty.intTagType();
+const int_tag_ty = try ty.intTagType(mod);
return try bitSizeAdvanced(int_tag_ty, mod, opt_sema);
},

@@ -3464,11 +3464,11 @@ pub const Type = struct {
return union_obj.fields.getIndex(name);
}

-pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *const Module) bool {
+pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *Module) bool {
return ty.cast(Payload.Union).?.data.hasAllZeroBitFieldTypes(mod);
}

-pub fn unionGetLayout(ty: Type, mod: *const Module) Module.Union.Layout {
+pub fn unionGetLayout(ty: Type, mod: *Module) Module.Union.Layout {
switch (ty.tag()) {
.@"union" => {
const union_obj = ty.castTag(.@"union").?.data;
@@ -4428,24 +4428,18 @@ pub const Type = struct {
}

/// Asserts the type is an enum or a union.
-pub fn intTagType(ty: Type) Type {
+pub fn intTagType(ty: Type, mod: *Module) !Type {
switch (ty.tag()) {
.enum_full, .enum_nonexhaustive => return ty.cast(Payload.EnumFull).?.data.tag_ty,
.enum_numbered => return ty.castTag(.enum_numbered).?.data.tag_ty,
.enum_simple => {
-@panic("TODO move enum_simple to use the intern pool");
-//const enum_simple = ty.castTag(.enum_simple).?.data;
-//const field_count = enum_simple.fields.count();
-//const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count);
-//buffer.* = .{
-// .base = .{ .tag = .int_unsigned },
-// .data = bits,
-//};
-//return Type.initPayload(&buffer.base);
+const enum_simple = ty.castTag(.enum_simple).?.data;
+const field_count = enum_simple.fields.count();
+const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count);
+return mod.intType(.unsigned, bits);
},
.union_tagged => {
-@panic("TODO move union_tagged to use the intern pool");
-//return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(buffer),
+return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(mod);
},
else => unreachable,
}
@@ -4628,7 +4622,7 @@ pub const Type = struct {
}
}

-pub fn structFieldAlign(ty: Type, index: usize, mod: *const Module) u32 {
+pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 {
switch (ty.tag()) {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
@@ -4718,7 +4712,7 @@ pub const Type = struct {
}
}

-pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *const Module) u32 {
+pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 {
const struct_obj = ty.castTag(.@"struct").?.data;
assert(struct_obj.layout == .Packed);
comptime assert(Type.packed_struct_layout_version == 2);
@@ -4750,7 +4744,7 @@ pub const Type = struct {
offset: u64 = 0,
big_align: u32 = 0,
struct_obj: *Module.Struct,
-module: *const Module,
+module: *Module,

pub fn next(it: *StructOffsetIterator) ?FieldOffset {
const mod = it.module;
@@ -4779,7 +4773,7 @@ pub const Type = struct {

/// Get an iterator that iterates over all the struct field, returning the field and
/// offset of that field. Asserts that the type is a non-packed struct.
-pub fn iterateStructOffsets(ty: Type, mod: *const Module) StructOffsetIterator {
+pub fn iterateStructOffsets(ty: Type, mod: *Module) StructOffsetIterator {
const struct_obj = ty.castTag(.@"struct").?.data;
assert(struct_obj.haveLayout());
assert(struct_obj.layout != .Packed);
@@ -4787,7 +4781,7 @@ pub const Type = struct {
}

/// Supports structs and unions.
-pub fn structFieldOffset(ty: Type, index: usize, mod: *const Module) u64 {
+pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
switch (ty.tag()) {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
@@ -5226,7 +5220,7 @@ pub const Type = struct {

pub const VectorIndex = InternPool.Key.PtrType.VectorIndex;

-pub fn alignment(data: Data, mod: *const Module) u32 {
+pub fn alignment(data: Data, mod: *Module) u32 {
if (data.@"align" != 0) return data.@"align";
return abiAlignment(data.pointee_type, mod);
}
@@ -694,7 +694,7 @@ pub const Value = struct {
},
.enum_simple => {
// Field index and integer values are the same.
-const tag_ty = ty.intTagType();
+const tag_ty = try ty.intTagType(mod);
return mod.intValue(tag_ty, field_index);
},
else => unreachable,
@@ -722,7 +722,9 @@ pub const Value = struct {
// auto-numbered enum
break :field_index @intCast(u32, val.toUnsignedInt(mod));
}
-const int_tag_ty = ty.intTagType();
+const int_tag_ty = ty.intTagType(mod) catch |err| switch (err) {
+error.OutOfMemory => @panic("OOM"), // TODO handle this failure
+};
break :field_index @intCast(u32, values.getIndexContext(val, .{ .ty = int_tag_ty, .mod = mod }).?);
},
};
@@ -737,7 +739,7 @@ pub const Value = struct {
}

/// Asserts the value is an integer.
-pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *const Module) BigIntConst {
+pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *Module) BigIntConst {
return val.toBigIntAdvanced(space, mod, null) catch unreachable;
}

@@ -745,7 +747,7 @@ pub const Value = struct {
pub fn toBigIntAdvanced(
val: Value,
space: *BigIntSpace,
-mod: *const Module,
+mod: *Module,
opt_sema: ?*Sema,
) Module.CompileError!BigIntConst {
return switch (val.ip_index) {
@@ -801,13 +803,13 @@ pub const Value = struct {

/// If the value fits in a u64, return it, otherwise null.
/// Asserts not undefined.
-pub fn getUnsignedInt(val: Value, mod: *const Module) ?u64 {
+pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 {
return getUnsignedIntAdvanced(val, mod, null) catch unreachable;
}

/// If the value fits in a u64, return it, otherwise null.
/// Asserts not undefined.
-pub fn getUnsignedIntAdvanced(val: Value, mod: *const Module, opt_sema: ?*Sema) !?u64 {
+pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 {
switch (val.ip_index) {
.bool_false => return 0,
.bool_true => return 1,
@@ -847,12 +849,12 @@ pub const Value = struct {
}

/// Asserts the value is an integer and it fits in a u64
-pub fn toUnsignedInt(val: Value, mod: *const Module) u64 {
+pub fn toUnsignedInt(val: Value, mod: *Module) u64 {
return getUnsignedInt(val, mod).?;
}

/// Asserts the value is an integer and it fits in a i64
-pub fn toSignedInt(val: Value, mod: *const Module) i64 {
+pub fn toSignedInt(val: Value, mod: *Module) i64 {
switch (val.ip_index) {
.bool_false => return 0,
.bool_true => return 1,
@@ -1405,7 +1407,7 @@ pub const Value = struct {
}
}

-pub fn clz(val: Value, ty: Type, mod: *const Module) u64 {
+pub fn clz(val: Value, ty: Type, mod: *Module) u64 {
const ty_bits = ty.intInfo(mod).bits;
return switch (val.ip_index) {
.bool_false => ty_bits,
@@ -1435,7 +1437,7 @@ pub const Value = struct {
};
}

-pub fn ctz(val: Value, ty: Type, mod: *const Module) u64 {
+pub fn ctz(val: Value, ty: Type, mod: *Module) u64 {
const ty_bits = ty.intInfo(mod).bits;
return switch (val.ip_index) {
.bool_false => ty_bits,
@@ -1468,7 +1470,7 @@ pub const Value = struct {
};
}

-pub fn popCount(val: Value, ty: Type, mod: *const Module) u64 {
+pub fn popCount(val: Value, ty: Type, mod: *Module) u64 {
assert(!val.isUndef());
switch (val.ip_index) {
.bool_false => return 0,
@@ -1527,7 +1529,7 @@ pub const Value = struct {

/// Asserts the value is an integer and not undefined.
/// Returns the number of bits the value requires to represent stored in twos complement form.
-pub fn intBitCountTwosComp(self: Value, mod: *const Module) usize {
+pub fn intBitCountTwosComp(self: Value, mod: *Module) usize {
const target = mod.getTarget();
return switch (self.ip_index) {
.bool_false => 0,
@@ -1593,13 +1595,13 @@ pub const Value = struct {
};
}

-pub fn orderAgainstZero(lhs: Value, mod: *const Module) std.math.Order {
+pub fn orderAgainstZero(lhs: Value, mod: *Module) std.math.Order {
return orderAgainstZeroAdvanced(lhs, mod, null) catch unreachable;
}

pub fn orderAgainstZeroAdvanced(
lhs: Value,
-mod: *const Module,
+mod: *Module,
opt_sema: ?*Sema,
) Module.CompileError!std.math.Order {
switch (lhs.ip_index) {
@@ -1683,13 +1685,13 @@ pub const Value = struct {
}

/// Asserts the value is comparable.
-pub fn order(lhs: Value, rhs: Value, mod: *const Module) std.math.Order {
+pub fn order(lhs: Value, rhs: Value, mod: *Module) std.math.Order {
return orderAdvanced(lhs, rhs, mod, null) catch unreachable;
}

/// Asserts the value is comparable.
/// If opt_sema is null then this function asserts things are resolved and cannot fail.
-pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *const Module, opt_sema: ?*Sema) !std.math.Order {
+pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, opt_sema: ?*Sema) !std.math.Order {
const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, opt_sema);
const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, opt_sema);
switch (lhs_against_zero) {
@@ -1734,7 +1736,7 @@ pub const Value = struct {

/// Asserts the value is comparable. Does not take a type parameter because it supports
/// comparisons between heterogeneous types.
-pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *const Module) bool {
+pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *Module) bool {
return compareHeteroAdvanced(lhs, op, rhs, mod, null) catch unreachable;
}

@@ -1742,7 +1744,7 @@ pub const Value = struct {
lhs: Value,
op: std.math.CompareOperator,
rhs: Value,
-mod: *const Module,
+mod: *Module,
opt_sema: ?*Sema,
) !bool {
if (lhs.pointerDecl()) |lhs_decl| {
@@ -2047,7 +2049,7 @@ pub const Value = struct {
.Enum => {
const a_val = try a.enumToInt(ty, mod);
const b_val = try b.enumToInt(ty, mod);
-const int_ty = ty.intTagType();
+const int_ty = try ty.intTagType(mod);
return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema);
},
.Array, .Vector => {
@@ -2462,7 +2464,7 @@ pub const Value = struct {
};
}

-fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, mod: *const Module) void {
+fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, mod: *Module) void {
var buffer: BigIntSpace = undefined;
const big = int_val.toBigInt(&buffer, mod);
std.hash.autoHash(hasher, big.positive);
@@ -2471,7 +2473,7 @@ pub const Value = struct {
}
}

-fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, mod: *const Module) void {
+fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, mod: *Module) void {
switch (ptr_val.tag()) {
.decl_ref,
.decl_ref_mut,