Merge pull request #16105 from jacobly0/intern-pool-opt

InternPool: various optimizations
Andrew Kelley 2023-06-21 00:07:49 -07:00 committed by GitHub
commit 12813d5912
16 changed files with 1431 additions and 1395 deletions


@ -850,6 +850,8 @@ pub const Inst = struct {
pub const Index = u32;
pub const Ref = enum(u32) {
u0_type = @intFromEnum(InternPool.Index.u0_type),
i0_type = @intFromEnum(InternPool.Index.i0_type),
u1_type = @intFromEnum(InternPool.Index.u1_type),
u8_type = @intFromEnum(InternPool.Index.u8_type),
i8_type = @intFromEnum(InternPool.Index.i8_type),
@ -909,6 +911,7 @@ pub const Inst = struct {
single_const_pointer_to_comptime_int_type = @intFromEnum(InternPool.Index.single_const_pointer_to_comptime_int_type),
slice_const_u8_type = @intFromEnum(InternPool.Index.slice_const_u8_type),
slice_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.slice_const_u8_sentinel_0_type),
optional_noreturn_type = @intFromEnum(InternPool.Index.optional_noreturn_type),
anyerror_void_error_union_type = @intFromEnum(InternPool.Index.anyerror_void_error_union_type),
generic_poison_type = @intFromEnum(InternPool.Index.generic_poison_type),
empty_struct_type = @intFromEnum(InternPool.Index.empty_struct_type),
@ -1182,7 +1185,7 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index {
return air.extra[extra.end..][0..extra.data.body_len];
}
pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: *const InternPool) Type {
pub fn typeOf(air: *const Air, inst: Air.Inst.Ref, ip: *const InternPool) Type {
const ref_int = @intFromEnum(inst);
if (ref_int < InternPool.static_keys.len) {
return InternPool.static_keys[ref_int].typeOf().toType();
@ -1190,7 +1193,7 @@ pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: *const InternPool) Type {
return air.typeOfIndex(ref_int - ref_start_index, ip);
}
pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: *const InternPool) Type {
pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool) Type {
const datas = air.instructions.items(.data);
switch (air.instructions.items(.tag)[inst]) {
.add,
@ -1403,7 +1406,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: *const InternPool) Type {
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip);
return callee_ty.fnReturnTypeIp(ip);
return ip.funcReturnType(callee_ty.toIntern()).toType();
},
.slice_elem_val, .ptr_elem_val, .array_elem_val => {


@ -620,6 +620,8 @@ pub const Key = union(enum) {
len: Index = .none,
pub const Addr = union(enum) {
const Tag = @typeInfo(Addr).Union.tag_type.?;
decl: Module.Decl.Index,
mut_decl: MutDecl,
comptime_field: Index,
@ -1241,11 +1243,13 @@ pub const Item = struct {
/// When adding a tag to this enum, consider adding a corresponding entry to
/// `primitives` in AstGen.zig.
pub const Index = enum(u32) {
pub const first_type: Index = .u1_type;
pub const first_type: Index = .u0_type;
pub const last_type: Index = .empty_struct_type;
pub const first_value: Index = .undef;
pub const last_value: Index = .empty_struct;
u0_type,
i0_type,
u1_type,
u8_type,
i8_type,
@ -1305,6 +1309,7 @@ pub const Index = enum(u32) {
single_const_pointer_to_comptime_int_type,
slice_const_u8_type,
slice_const_u8_sentinel_0_type,
optional_noreturn_type,
anyerror_void_error_union_type,
generic_poison_type,
/// `@TypeOf(.{})`
@ -1531,6 +1536,16 @@ pub const Index = enum(u32) {
};
pub const static_keys = [_]Key{
.{ .int_type = .{
.signedness = .unsigned,
.bits = 0,
} },
.{ .int_type = .{
.signedness = .signed,
.bits = 0,
} },
.{ .int_type = .{
.signedness = .unsigned,
.bits = 1,
@ -1637,6 +1652,7 @@ pub const static_keys = [_]Key{
.{ .simple_type = .extern_options },
.{ .simple_type = .type_info },
// [*]u8
.{ .ptr_type = .{
.child = .u8_type,
.flags = .{
@ -1644,7 +1660,7 @@ pub const static_keys = [_]Key{
},
} },
// manyptr_const_u8_type
// [*]const u8
.{ .ptr_type = .{
.child = .u8_type,
.flags = .{
@ -1653,7 +1669,7 @@ pub const static_keys = [_]Key{
},
} },
// manyptr_const_u8_sentinel_0_type
// [*:0]const u8
.{ .ptr_type = .{
.child = .u8_type,
.sentinel = .zero_u8,
@ -1663,6 +1679,7 @@ pub const static_keys = [_]Key{
},
} },
// comptime_int
.{ .ptr_type = .{
.child = .comptime_int_type,
.flags = .{
@ -1671,7 +1688,7 @@ pub const static_keys = [_]Key{
},
} },
// slice_const_u8_type
// []const u8
.{ .ptr_type = .{
.child = .u8_type,
.flags = .{
@ -1680,7 +1697,7 @@ pub const static_keys = [_]Key{
},
} },
// slice_const_u8_sentinel_0_type
// [:0]const u8
.{ .ptr_type = .{
.child = .u8_type,
.sentinel = .zero_u8,
@ -1690,7 +1707,10 @@ pub const static_keys = [_]Key{
},
} },
// anyerror_void_error_union_type
// ?noreturn
.{ .opt_type = .noreturn_type },
// anyerror!void
.{ .error_union_type = .{
.error_set_type = .anyerror_type,
.payload_type = .void_type,
@ -2279,8 +2299,9 @@ pub const Alignment = enum(u6) {
return fromByteUnits(n);
}
pub fn min(a: Alignment, b: Alignment) Alignment {
return @enumFromInt(Alignment, @min(@intFromEnum(a), @intFromEnum(b)));
pub fn order(lhs: Alignment, rhs: Alignment) std.math.Order {
assert(lhs != .none and rhs != .none);
return std.math.order(@intFromEnum(lhs), @intFromEnum(rhs));
}
};
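The new `order` helper relies on `Alignment` being a log2-encoded enum. Below is a minimal standalone sketch of that representation, written from what the diff shows; the concrete `none` tag value, the method bodies other than `order`, and the use of current single-argument builtin syntax (the commit itself still uses the transitional `@enumFromInt(T, x)` form) are assumptions for illustration.

const std = @import("std");

// Sketch only: a log2-encoded alignment with a `none` sentinel meaning
// "use the type's ABI alignment". The real InternPool.Alignment is enum(u6);
// the value chosen for `none` here is an assumption.
const Alignment = enum(u6) {
    none = std.math.maxInt(u6),
    _,

    fn fromByteUnits(n: u64) Alignment {
        if (n == 0) return .none;
        std.debug.assert(std.math.isPowerOfTwo(n));
        return @enumFromInt(std.math.log2_int(u64, n));
    }

    fn toByteUnitsOptional(a: Alignment) ?u64 {
        return if (a == .none) null else @as(u64, 1) << @intFromEnum(a);
    }

    fn order(lhs: Alignment, rhs: Alignment) std.math.Order {
        std.debug.assert(lhs != .none and rhs != .none);
        return std.math.order(@intFromEnum(lhs), @intFromEnum(rhs));
    }
};

test "log2 alignment sketch" {
    const a = Alignment.fromByteUnits(8);
    const b = Alignment.fromByteUnits(16);
    try std.testing.expectEqual(std.math.Order.lt, a.order(b));
    try std.testing.expectEqual(@as(?u64, 8), a.toByteUnitsOptional());
    try std.testing.expectEqual(@as(?u64, null), Alignment.none.toByteUnitsOptional());
}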
@ -5463,6 +5484,8 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
// An alternative would be to topological sort the static keys, but this would
// mean that the range of type indices would not be dense.
return switch (index) {
.u0_type,
.i0_type,
.u1_type,
.u8_type,
.i8_type,
@ -5522,6 +5545,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.single_const_pointer_to_comptime_int_type,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
.optional_noreturn_type,
.anyerror_void_error_union_type,
.generic_poison_type,
.empty_struct_type,
@ -5669,20 +5693,96 @@ pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 {
};
}
pub fn funcReturnType(ip: *const InternPool, ty: Index) Index {
const item = ip.items.get(@intFromEnum(ty));
const child_item = switch (item.tag) {
.type_pointer => ip.items.get(ip.extra.items[
item.data + std.meta.fieldIndex(Tag.TypePointer, "child").?
]),
.type_function => item,
else => unreachable,
};
assert(child_item.tag == .type_function);
return @enumFromInt(Index, ip.extra.items[
child_item.data + std.meta.fieldIndex(TypeFunction, "return_type").?
]);
}
pub fn isNoReturn(ip: *const InternPool, ty: Index) bool {
return switch (ty) {
.noreturn_type => true,
else => switch (ip.indexToKey(ty)) {
.error_set_type => |error_set_type| error_set_type.names.len == 0,
else => switch (ip.items.items(.tag)[@intFromEnum(ty)]) {
.type_error_set => ip.extra.items[ip.items.items(.data)[@intFromEnum(ty)] + std.meta.fieldIndex(ErrorSet, "names_len").?] == 0,
else => false,
},
};
}
pub fn isUndef(ip: *const InternPool, val: Index) bool {
return val == .undef or ip.items.items(.tag)[@intFromEnum(val)] == .undef;
}
pub fn isRuntimeValue(ip: *const InternPool, val: Index) bool {
return ip.items.items(.tag)[@intFromEnum(val)] == .runtime_value;
}
pub fn isVariable(ip: *const InternPool, val: Index) bool {
return ip.items.items(.tag)[@intFromEnum(val)] == .variable;
}
pub fn getBackingDecl(ip: *const InternPool, val: Index) Module.Decl.OptionalIndex {
var base = @intFromEnum(val);
while (true) {
switch (ip.items.items(.tag)[base]) {
inline .ptr_decl,
.ptr_mut_decl,
=> |tag| return @enumFromInt(Module.Decl.OptionalIndex, ip.extra.items[
ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "decl").?
]),
inline .ptr_eu_payload,
.ptr_opt_payload,
.ptr_elem,
.ptr_field,
=> |tag| base = ip.extra.items[
ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "base").?
],
inline .ptr_slice => |tag| base = ip.extra.items[
ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "ptr").?
],
else => return .none,
}
}
}
pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.Addr.Tag {
var base = @intFromEnum(val);
while (true) {
switch (ip.items.items(.tag)[base]) {
.ptr_decl => return .decl,
.ptr_mut_decl => return .mut_decl,
.ptr_comptime_field => return .comptime_field,
.ptr_int => return .int,
inline .ptr_eu_payload,
.ptr_opt_payload,
.ptr_elem,
.ptr_field,
=> |tag| base = ip.extra.items[
ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "base").?
],
inline .ptr_slice => |tag| base = ip.extra.items[
ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "ptr").?
],
else => return null,
}
}
}
/// This is a particularly hot function, so we operate directly on encodings
/// rather than the more straightforward implementation of calling `indexToKey`.
pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPoison}!std.builtin.TypeId {
return switch (index) {
.u0_type,
.i0_type,
.u1_type,
.u8_type,
.i8_type,
@ -5754,6 +5854,7 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.slice_const_u8_sentinel_0_type,
=> .Pointer,
.optional_noreturn_type => .Optional,
.anyerror_void_error_union_type => .ErrorUnion,
.empty_struct_type => .Struct,
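The helpers added above (`funcReturnType`, `isNoReturn`, `getBackingDecl`, `getBackingAddrTag`) and `zigTypeTagOrPoison` all use the same trick: read one field straight out of the encoded `extra` array instead of materializing a full `Key` via `indexToKey`. A hypothetical helper sketching the shape of that access, assuming the surrounding InternPool.zig declarations (`Index`, `items`, `extra`); the function itself is not part of the commit.

// Sketch only: fetch one u32 field from an item's extra data without decoding
// the whole Key. `T` is the extra struct type for the item's tag (for example
// Tag.TypePointer) and `field_name` is a field within it.
fn extraField(
    ip: *const InternPool,
    index: Index,
    comptime T: type,
    comptime field_name: []const u8,
) u32 {
    // Locate the item's extra data, then add the comptime-known field offset.
    const data = ip.items.items(.data)[@intFromEnum(index)];
    return ip.extra.items[data + std.meta.fieldIndex(T, field_name).?];
}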


@ -33,6 +33,7 @@ const Liveness = @import("Liveness.zig");
const isUpDir = @import("introspect.zig").isUpDir;
const clang = @import("clang.zig");
const InternPool = @import("InternPool.zig");
const Alignment = InternPool.Alignment;
comptime {
@setEvalBranchQuota(4000);
@ -241,7 +242,7 @@ pub const MonomorphedFuncsAdaptedContext = struct {
};
pub const SetAlignStack = struct {
alignment: u32,
alignment: Alignment,
/// TODO: This needs to store a non-lazy source location for the case of an inline function
/// which does `@setAlignStack` (applying it to the caller).
src: LazySrcLoc,
@ -432,7 +433,7 @@ pub const Decl = struct {
/// Populated when `has_tv`.
@"linksection": InternPool.OptionalNullTerminatedString,
/// Populated when `has_tv`.
@"align": u32,
alignment: Alignment,
/// Populated when `has_tv`.
@"addrspace": std.builtin.AddressSpace,
/// The direct parent namespace of the Decl.
@ -863,13 +864,7 @@ pub const Decl = struct {
pub fn getAlignment(decl: Decl, mod: *Module) u32 {
assert(decl.has_tv);
if (decl.@"align" != 0) {
// Explicit alignment.
return decl.@"align";
} else {
// Natural alignment.
return decl.ty.abiAlignment(mod);
}
return @intCast(u32, decl.alignment.toByteUnitsOptional() orelse decl.ty.abiAlignment(mod));
}
pub fn intern(decl: *Decl, mod: *Module) Allocator.Error!void {
@ -955,7 +950,7 @@ pub const Struct = struct {
/// Uses `none` to indicate no default.
default_val: InternPool.Index,
/// Zero means to use the ABI alignment of the type.
abi_align: u32,
abi_align: Alignment,
/// undefined until `status` is `have_layout`.
offset: u32,
/// If true then `default_val` is the comptime field value.
@ -967,9 +962,9 @@ pub const Struct = struct {
mod: *Module,
layout: std.builtin.Type.ContainerLayout,
) u32 {
if (field.abi_align != 0) {
if (field.abi_align.toByteUnitsOptional()) |abi_align| {
assert(layout != .Packed);
return field.abi_align;
return @intCast(u32, abi_align);
}
const target = mod.getTarget();
@ -1150,17 +1145,13 @@ pub const Union = struct {
/// undefined until `status` is `have_field_types` or `have_layout`.
ty: Type,
/// 0 means the ABI alignment of the type.
abi_align: u32,
abi_align: Alignment,
/// Returns the field alignment, assuming the union is not packed.
/// Keep implementation in sync with `Sema.unionFieldAlignment`.
/// Prefer to call that function instead of this one during Sema.
pub fn normalAlignment(field: Field, mod: *Module) u32 {
if (field.abi_align == 0) {
return field.ty.abiAlignment(mod);
} else {
return field.abi_align;
}
return @intCast(u32, field.abi_align.toByteUnitsOptional() orelse field.ty.abiAlignment(mod));
}
};
@ -1272,20 +1263,14 @@ pub const Union = struct {
for (fields, 0..) |field, i| {
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_align = a: {
if (field.abi_align == 0) {
break :a field.ty.abiAlignment(mod);
} else {
break :a field.abi_align;
}
};
const field_align = field.abi_align.toByteUnitsOptional() orelse field.ty.abiAlignment(mod);
const field_size = field.ty.abiSize(mod);
if (field_size > payload_size) {
payload_size = field_size;
biggest_field = @intCast(u32, i);
}
if (field_align > payload_align) {
payload_align = field_align;
payload_align = @intCast(u32, field_align);
most_aligned_field = @intCast(u32, i);
most_aligned_field_size = field_size;
}
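A recurring rewrite in this file replaces the old `abi_align == 0` convention with `Alignment.none`, read as "explicit alignment if present, otherwise the type's ABI alignment". A one-line sketch of that idiom, assuming the surrounding Module.zig declarations; the helper name is made up.

// Sketch only: `.none` (formerly 0) means fall back to the natural ABI alignment.
fn effectiveAlignment(explicit: Alignment, ty: Type, mod: *Module) u64 {
    return explicit.toByteUnitsOptional() orelse ty.abiAlignment(mod);
}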
@ -4394,7 +4379,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
new_decl.has_linksection_or_addrspace = false;
new_decl.ty = Type.type;
new_decl.val = struct_ty.toValue();
new_decl.@"align" = 0;
new_decl.alignment = .none;
new_decl.@"linksection" = .none;
new_decl.has_tv = true;
new_decl.owns_tv = true;
@ -4584,7 +4569,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
decl.ty = InternPool.Index.type_type.toType();
decl.val = ty.toValue();
decl.@"align" = 0;
decl.alignment = .none;
decl.@"linksection" = .none;
decl.has_tv = true;
decl.owns_tv = false;
@ -4665,9 +4650,9 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
decl.ty = decl_tv.ty;
decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue();
decl.@"align" = blk: {
decl.alignment = blk: {
const align_ref = decl.zirAlignRef(mod);
if (align_ref == .none) break :blk 0;
if (align_ref == .none) break :blk .none;
break :blk try sema.resolveAlign(&block_scope, align_src, align_ref);
};
decl.@"linksection" = blk: {
@ -5758,7 +5743,7 @@ pub fn allocateNewDecl(
.owns_tv = false,
.ty = undefined,
.val = undefined,
.@"align" = undefined,
.alignment = undefined,
.@"linksection" = .none,
.@"addrspace" = .generic,
.analysis = .unreferenced,
@ -5830,7 +5815,7 @@ pub fn initNewAnonDecl(
new_decl.src_line = src_line;
new_decl.ty = typed_value.ty;
new_decl.val = typed_value.val;
new_decl.@"align" = 0;
new_decl.alignment = .none;
new_decl.@"linksection" = .none;
new_decl.has_tv = true;
new_decl.analysis = .complete;
@ -6773,13 +6758,9 @@ pub fn manyConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
}
pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type {
const info = Type.ptrInfoIp(&mod.intern_pool, ptr_ty.toIntern());
return mod.ptrType(.{
.child = new_child.toIntern(),
.sentinel = info.sentinel,
.flags = info.flags,
.packed_offset = info.packed_offset,
});
var info = ptr_ty.ptrInfo(mod);
info.child = new_child.toIntern();
return mod.ptrType(info);
}
pub fn funcType(mod: *Module, info: InternPool.Key.FuncType) Allocator.Error!Type {
@ -7018,7 +6999,7 @@ pub fn atomicPtrAlignment(
mod: *Module,
ty: Type,
diags: *AtomicPtrAlignmentDiagnostics,
) AtomicPtrAlignmentError!u32 {
) AtomicPtrAlignmentError!Alignment {
const target = mod.getTarget();
const max_atomic_bits: u16 = switch (target.cpu.arch) {
.avr,
@ -7104,11 +7085,11 @@ pub fn atomicPtrAlignment(
};
return error.FloatTooBig;
}
return 0;
return .none;
},
.Bool => return 0,
.Bool => return .none,
else => {
if (ty.isPtrAtRuntime(mod)) return 0;
if (ty.isPtrAtRuntime(mod)) return .none;
return error.BadType;
},
};
@ -7122,7 +7103,7 @@ pub fn atomicPtrAlignment(
return error.IntTooBig;
}
return 0;
return .none;
}
pub fn opaqueSrcLoc(mod: *Module, opaque_type: InternPool.Key.OpaqueType) SrcLoc {

File diff suppressed because it is too large.


@ -2005,6 +2005,8 @@ pub const Inst = struct {
/// The tag type is specified so that it is safe to bitcast between `[]u32`
/// and `[]Ref`.
pub const Ref = enum(u32) {
u0_type = @intFromEnum(InternPool.Index.u0_type),
i0_type = @intFromEnum(InternPool.Index.i0_type),
u1_type = @intFromEnum(InternPool.Index.u1_type),
u8_type = @intFromEnum(InternPool.Index.u8_type),
i8_type = @intFromEnum(InternPool.Index.i8_type),
@ -2064,6 +2066,7 @@ pub const Inst = struct {
single_const_pointer_to_comptime_int_type = @intFromEnum(InternPool.Index.single_const_pointer_to_comptime_int_type),
slice_const_u8_type = @intFromEnum(InternPool.Index.slice_const_u8_type),
slice_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.slice_const_u8_sentinel_0_type),
optional_noreturn_type = @intFromEnum(InternPool.Index.optional_noreturn_type),
anyerror_void_error_union_type = @intFromEnum(InternPool.Index.anyerror_void_error_union_type),
generic_poison_type = @intFromEnum(InternPool.Index.generic_poison_type),
empty_struct_type = @intFromEnum(InternPool.Index.empty_struct_type),


@ -2308,25 +2308,25 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
const ptr_info = ptr_ty.ptrInfo(mod);
const ty = ptr_ty.childType(mod);
if (ptr_info.host_size == 0) {
if (ptr_info.packed_offset.host_size == 0) {
try func.store(lhs, rhs, ty, 0);
} else {
// at this point we have a non-natural alignment, we must
// load the value, and then shift+or the rhs into the result location.
const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8);
const int_elem_ty = try mod.intType(.unsigned, ptr_info.packed_offset.host_size * 8);
if (isByRef(int_elem_ty, mod)) {
return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
}
var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(mod))) - 1);
mask <<= @intCast(u6, ptr_info.bit_offset);
mask <<= @intCast(u6, ptr_info.packed_offset.bit_offset);
mask ^= ~@as(u64, 0);
const shift_val = if (ptr_info.host_size <= 4)
WValue{ .imm32 = ptr_info.bit_offset }
const shift_val = if (ptr_info.packed_offset.host_size <= 4)
WValue{ .imm32 = ptr_info.packed_offset.bit_offset }
else
WValue{ .imm64 = ptr_info.bit_offset };
const mask_val = if (ptr_info.host_size <= 4)
WValue{ .imm64 = ptr_info.packed_offset.bit_offset };
const mask_val = if (ptr_info.packed_offset.host_size <= 4)
WValue{ .imm32 = @truncate(u32, mask) }
else
WValue{ .imm64 = mask };
@ -2335,7 +2335,7 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
const loaded = try func.load(lhs, int_elem_ty, 0);
const anded = try func.binOp(loaded, mask_val, int_elem_ty, .@"and");
const extended_value = try func.intcast(rhs, ty, int_elem_ty);
const shifted_value = if (ptr_info.bit_offset > 0) shifted: {
const shifted_value = if (ptr_info.packed_offset.bit_offset > 0) shifted: {
break :shifted try func.binOp(extended_value, shift_val, int_elem_ty, .shl);
} else extended_value;
const result = try func.binOp(anded, shifted_value, int_elem_ty, .@"or");
@ -2468,18 +2468,18 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result new_local;
}
if (ptr_info.host_size == 0) {
if (ptr_info.packed_offset.host_size == 0) {
const stack_loaded = try func.load(operand, ty, 0);
break :result try stack_loaded.toLocal(func, ty);
}
// at this point we have a non-natural alignment, we must
// shift the value to obtain the correct bit.
const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8);
const shift_val = if (ptr_info.host_size <= 4)
WValue{ .imm32 = ptr_info.bit_offset }
else if (ptr_info.host_size <= 8)
WValue{ .imm64 = ptr_info.bit_offset }
const int_elem_ty = try mod.intType(.unsigned, ptr_info.packed_offset.host_size * 8);
const shift_val = if (ptr_info.packed_offset.host_size <= 4)
WValue{ .imm32 = ptr_info.packed_offset.bit_offset }
else if (ptr_info.packed_offset.host_size <= 8)
WValue{ .imm64 = ptr_info.packed_offset.bit_offset }
else
return func.fail("TODO: airLoad where ptr to bitfield exceeds 64 bits", .{});
@ -3699,7 +3699,7 @@ fn structFieldPtr(
const offset = switch (struct_ty.containerLayout(mod)) {
.Packed => switch (struct_ty.zigTypeTag(mod)) {
.Struct => offset: {
if (result_ty.ptrInfo(mod).host_size != 0) {
if (result_ty.ptrInfo(mod).packed_offset.host_size != 0) {
break :offset @as(u32, 0);
}
break :offset struct_ty.packedStructFieldByteOffset(index, mod);
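The packed load/store paths above ("we have a non-natural alignment, we must load the value, and then shift+or the rhs into the result location") all perform the same bit manipulation on a host integer. A small self-contained example of the mask/shift/or scheme with made-up concrete values:

const std = @import("std");

test "packed store mask/shift/or sketch" {
    // Store a 3-bit field value at bit_offset 2 inside an 8-bit host integer,
    // the same scheme airStore uses for pointers with a nonzero host_size.
    const host: u8 = 0b1010_1010; // previously loaded host integer
    const value: u3 = 0b101; // new field value to store
    const bit_offset: u3 = 2;

    const field_mask: u8 = @as(u8, 0b111) << bit_offset; // bits owned by the field
    const cleared = host & ~field_mask; // zero out the destination bits
    const result = cleared | (@as(u8, value) << bit_offset); // or in the new value

    try std.testing.expectEqual(@as(u8, 0b1011_0110), result);
}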


@ -34,8 +34,8 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
if (ty.structFieldCount(mod) > 1) return memory;
// When the struct's alignment is non-natural
const field = ty.structFields(mod).values()[0];
if (field.abi_align != 0) {
if (field.abi_align > field.ty.abiAlignment(mod)) {
if (field.abi_align != .none) {
if (field.abi_align.toByteUnitsOptional().? > field.ty.abiAlignment(mod)) {
return memory;
}
}


@ -694,7 +694,7 @@ pub fn generate(
FrameAlloc.init(.{
.size = 0,
.alignment = if (mod.align_stack_fns.get(module_fn_index)) |set_align_stack|
set_align_stack.alignment
@intCast(u32, set_align_stack.alignment.toByteUnitsOptional().?)
else
1,
}),
@ -5254,12 +5254,12 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn
const mod = self.bin_file.options.module.?;
const ptr_info = ptr_ty.ptrInfo(mod);
const val_ty = ptr_info.pointee_type;
const val_ty = ptr_info.child.toType();
const val_abi_size = @intCast(u32, val_ty.abiSize(mod));
const limb_abi_size: u32 = @min(val_abi_size, 8);
const limb_abi_bits = limb_abi_size * 8;
const val_byte_off = @intCast(i32, ptr_info.bit_offset / limb_abi_bits * limb_abi_size);
const val_bit_off = ptr_info.bit_offset % limb_abi_bits;
const val_byte_off = @intCast(i32, ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size);
const val_bit_off = ptr_info.packed_offset.bit_offset % limb_abi_bits;
const val_extra_bits = self.regExtraBits(val_ty);
if (val_abi_size > 8) return self.fail("TODO implement packed load of {}", .{
@ -5385,7 +5385,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
else
try self.allocRegOrMem(inst, true);
if (ptr_ty.ptrInfo(mod).host_size > 0) {
if (ptr_ty.ptrInfo(mod).packed_offset.host_size > 0) {
try self.packedLoad(dst_mcv, ptr_ty, ptr_mcv);
} else {
try self.load(dst_mcv, ptr_ty, ptr_mcv);
@ -5400,12 +5400,12 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In
const ptr_info = ptr_ty.ptrInfo(mod);
const src_ty = ptr_ty.childType(mod);
const limb_abi_size: u16 = @min(ptr_info.host_size, 8);
const limb_abi_size: u16 = @min(ptr_info.packed_offset.host_size, 8);
const limb_abi_bits = limb_abi_size * 8;
const src_bit_size = src_ty.bitSize(mod);
const src_byte_off = @intCast(i32, ptr_info.bit_offset / limb_abi_bits * limb_abi_size);
const src_bit_off = ptr_info.bit_offset % limb_abi_bits;
const src_byte_off = @intCast(i32, ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size);
const src_bit_off = ptr_info.packed_offset.bit_offset % limb_abi_bits;
const ptr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv);
const ptr_lock = self.register_manager.lockRegAssumeUnused(ptr_reg);
@ -5516,7 +5516,7 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
const ptr_mcv = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.typeOf(bin_op.lhs);
const src_mcv = try self.resolveInst(bin_op.rhs);
if (ptr_ty.ptrInfo(mod).host_size > 0) {
if (ptr_ty.ptrInfo(mod).packed_offset.host_size > 0) {
try self.packedStore(ptr_ty, ptr_mcv, src_mcv);
} else {
try self.store(ptr_ty, ptr_mcv, src_mcv);
@ -5545,7 +5545,7 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
const field_offset = @intCast(i32, switch (container_ty.containerLayout(mod)) {
.Auto, .Extern => container_ty.structFieldOffset(index, mod),
.Packed => if (container_ty.zigTypeTag(mod) == .Struct and
ptr_field_ty.ptrInfo(mod).host_size == 0)
ptr_field_ty.ptrInfo(mod).packed_offset.host_size == 0)
container_ty.packedStructFieldByteOffset(index, mod)
else
0,


@ -223,8 +223,8 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
var byte_i: usize = 0; // out of 8
const fields = ty.structFields(mod);
for (fields.values()) |field| {
if (field.abi_align != 0) {
if (field.abi_align < field.ty.abiAlignment(mod)) {
if (field.abi_align != .none) {
if (field.abi_align.toByteUnitsOptional().? < field.ty.abiAlignment(mod)) {
return memory_class;
}
}
@ -340,8 +340,8 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
const fields = ty.unionFields(mod);
for (fields.values()) |field| {
if (field.abi_align != 0) {
if (field.abi_align < field.ty.abiAlignment(mod)) {
if (field.abi_align != .none) {
if (field.abi_align.toByteUnitsOptional().? < field.ty.abiAlignment(mod)) {
return memory_class;
}
}


@ -1714,7 +1714,7 @@ pub const DeclGen = struct {
ty: Type,
name: CValue,
qualifiers: CQualifiers,
alignment: u32,
alignment: u64,
kind: CType.Kind,
) error{ OutOfMemory, AnalysisFail }!void {
const mod = dg.module;
@ -1733,10 +1733,10 @@ pub const DeclGen = struct {
const store = &dg.ctypes.set;
const mod = dg.module;
switch (std.math.order(alignas.@"align", alignas.abi)) {
.lt => try w.print("zig_under_align({}) ", .{alignas.getAlign()}),
switch (alignas.abiOrder()) {
.lt => try w.print("zig_under_align({}) ", .{alignas.toByteUnits()}),
.eq => {},
.gt => try w.print("zig_align({}) ", .{alignas.getAlign()}),
.gt => try w.print("zig_align({}) ", .{alignas.toByteUnits()}),
}
const trailing =
@ -1840,7 +1840,7 @@ pub const DeclGen = struct {
decl.ty,
.{ .decl = decl_index },
CQualifiers.init(.{ .@"const" = variable.is_const }),
decl.@"align",
@intCast(u32, decl.alignment.toByteUnits(0)),
.complete,
);
try fwd_decl_writer.writeAll(";\n");
@ -2314,10 +2314,10 @@ fn renderAggregateFields(
const fields = cty.fields();
for (fields) |field| {
try writer.writeByteNTimes(' ', indent + 1);
switch (std.math.order(field.alignas.@"align", field.alignas.abi)) {
.lt => try writer.print("zig_under_align({}) ", .{field.alignas.getAlign()}),
switch (field.alignas.abiOrder()) {
.lt => try writer.print("zig_under_align({}) ", .{field.alignas.toByteUnits()}),
.eq => {},
.gt => try writer.print("zig_align({}) ", .{field.alignas.getAlign()}),
.gt => try writer.print("zig_align({}) ", .{field.alignas.toByteUnits()}),
}
const trailing = try renderTypePrefix(.none, store, mod, writer, field.type, .suffix, .{});
try writer.print("{}{ }", .{ trailing, fmtIdent(mem.span(field.name)) });
@ -2639,7 +2639,7 @@ pub fn genFunc(f: *Function) !void {
pub fn lessThan(ctx: @This(), lhs_index: usize, rhs_index: usize) bool {
const lhs_ty = ctx.keys[lhs_index];
const rhs_ty = ctx.keys[rhs_index];
return lhs_ty.alignas.getAlign() > rhs_ty.alignas.getAlign();
return lhs_ty.alignas.order(rhs_ty.alignas).compare(.gt);
}
};
free_locals.sort(SortContext{ .keys = free_locals.keys() });
@ -2690,7 +2690,7 @@ pub fn genDecl(o: *Object) !void {
if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage ");
if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
try w.print("zig_linksection(\"{s}\", ", .{s});
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.@"align", .complete);
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.alignment.toByteUnits(0), .complete);
if (decl.@"linksection" != .none) try w.writeAll(", read, write)");
try w.writeAll(" = ");
try o.dg.renderValue(w, tv.ty, variable.init.toValue(), .StaticInitializer);
@ -2701,14 +2701,14 @@ pub fn genDecl(o: *Object) !void {
const fwd_decl_writer = o.dg.fwd_decl.writer();
try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static ");
try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, Const, decl.@"align", .complete);
try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, Const, decl.alignment.toByteUnits(0), .complete);
try fwd_decl_writer.writeAll(";\n");
const w = o.writer();
if (!is_global) try w.writeAll("static ");
if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
try w.print("zig_linksection(\"{s}\", ", .{s});
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, Const, decl.@"align", .complete);
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, Const, decl.alignment.toByteUnits(0), .complete);
if (decl.@"linksection" != .none) try w.writeAll(", read)");
try w.writeAll(" = ");
try o.dg.renderValue(w, tv.ty, tv.val, .StaticInitializer);
@ -3324,7 +3324,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ptr_ty = f.typeOf(ty_op.operand);
const ptr_scalar_ty = ptr_ty.scalarType(mod);
const ptr_info = ptr_scalar_ty.ptrInfo(mod);
const src_ty = ptr_info.pointee_type;
const src_ty = ptr_info.child.toType();
if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try reap(f, inst, &.{ty_op.operand});
@ -3335,7 +3335,10 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ty_op.operand});
const is_aligned = ptr_info.@"align" == 0 or ptr_info.@"align" >= src_ty.abiAlignment(mod);
const is_aligned = if (ptr_info.flags.alignment.toByteUnitsOptional()) |alignment|
alignment >= src_ty.abiAlignment(mod)
else
true;
const is_array = lowersToArray(src_ty, mod);
const need_memcpy = !is_aligned or is_array;
@ -3354,12 +3357,12 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(", sizeof(");
try f.renderType(writer, src_ty);
try writer.writeAll("))");
} else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) {
const host_bits: u16 = ptr_info.host_size * 8;
} else if (ptr_info.packed_offset.host_size > 0 and ptr_info.flags.vector_index == .none) {
const host_bits: u16 = ptr_info.packed_offset.host_size * 8;
const host_ty = try mod.intType(.unsigned, host_bits);
const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.bit_offset);
const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
const field_ty = try mod.intType(.unsigned, @intCast(u16, src_ty.bitSize(mod)));
@ -3593,20 +3596,22 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
if (val_is_undef) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
if (safety and ptr_info.host_size == 0) {
if (safety and ptr_info.packed_offset.host_size == 0) {
const writer = f.object.writer();
try writer.writeAll("memset(");
try f.writeCValue(writer, ptr_val, .FunctionArgument);
try writer.writeAll(", 0xaa, sizeof(");
try f.renderType(writer, ptr_info.pointee_type);
try f.renderType(writer, ptr_info.child.toType());
try writer.writeAll("));\n");
}
return .none;
}
const is_aligned = ptr_info.@"align" == 0 or
ptr_info.@"align" >= ptr_info.pointee_type.abiAlignment(mod);
const is_array = lowersToArray(ptr_info.pointee_type, mod);
const is_aligned = if (ptr_info.flags.alignment.toByteUnitsOptional()) |alignment|
alignment >= src_ty.abiAlignment(mod)
else
true;
const is_array = lowersToArray(ptr_info.child.toType(), mod);
const need_memcpy = !is_aligned or is_array;
const src_val = try f.resolveInst(bin_op.rhs);
@ -3618,7 +3623,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
if (need_memcpy) {
// For this memcpy to safely work we need the rhs to have the same
// underlying type as the lhs (i.e. they must both be arrays of the same underlying type).
assert(src_ty.eql(ptr_info.pointee_type, f.object.dg.module));
assert(src_ty.eql(ptr_info.child.toType(), f.object.dg.module));
// If the source is a constant, writeCValue will emit a brace initialization
// so work around this by initializing into new local.
@ -3646,12 +3651,12 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
if (src_val == .constant) {
try freeLocal(f, inst, array_src.new_local, 0);
}
} else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) {
const host_bits = ptr_info.host_size * 8;
} else if (ptr_info.packed_offset.host_size > 0 and ptr_info.flags.vector_index == .none) {
const host_bits = ptr_info.packed_offset.host_size * 8;
const host_ty = try mod.intType(.unsigned, host_bits);
const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.bit_offset);
const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
const src_bits = src_ty.bitSize(mod);
@ -3663,7 +3668,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
defer mask.deinit();
try mask.setTwosCompIntLimit(.max, .unsigned, @intCast(usize, src_bits));
try mask.shiftLeft(&mask, ptr_info.bit_offset);
try mask.shiftLeft(&mask, ptr_info.packed_offset.bit_offset);
try mask.bitNotWrap(&mask, .unsigned, host_bits);
const mask_val = try mod.intValue_big(host_ty, mask.toConst());
@ -5201,7 +5206,7 @@ fn fieldLocation(
else
.{ .identifier = ip.stringToSlice(container_ty.structFieldName(next_field_index, mod)) } };
} else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin,
.Packed => if (field_ptr_ty.ptrInfo(mod).host_size == 0)
.Packed => if (field_ptr_ty.ptrInfo(mod).packed_offset.host_size == 0)
.{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) }
else
.begin,


@ -6,6 +6,7 @@ const assert = std.debug.assert;
const autoHash = std.hash.autoHash;
const Target = std.Target;
const Alignment = @import("../../InternPool.zig").Alignment;
const Module = @import("../../Module.zig");
const Type = @import("../../type.zig").Type;
@ -280,16 +281,15 @@ pub const CType = extern union {
};
pub const AlignAs = struct {
@"align": std.math.Log2Int(u32),
abi: std.math.Log2Int(u32),
@"align": Alignment,
abi: Alignment,
pub fn init(alignment: u32, abi_alignment: u32) AlignAs {
const actual_align = if (alignment != 0) alignment else abi_alignment;
assert(std.math.isPowerOfTwo(actual_align));
assert(std.math.isPowerOfTwo(abi_alignment));
pub fn init(alignment: u64, abi_alignment: u32) AlignAs {
const @"align" = Alignment.fromByteUnits(alignment);
const abi_align = Alignment.fromNonzeroByteUnits(abi_alignment);
return .{
.@"align" = std.math.log2_int(u32, actual_align),
.abi = std.math.log2_int(u32, abi_alignment),
.@"align" = if (@"align" != .none) @"align" else abi_align,
.abi = abi_align,
};
}
pub fn abiAlign(ty: Type, mod: *Module) AlignAs {
@ -308,8 +308,14 @@ pub const CType = extern union {
return init(union_payload_align, union_payload_align);
}
pub fn getAlign(self: AlignAs) u32 {
return @as(u32, 1) << self.@"align";
pub fn order(lhs: AlignAs, rhs: AlignAs) std.math.Order {
return lhs.@"align".order(rhs.@"align");
}
pub fn abiOrder(self: AlignAs) std.math.Order {
return self.@"align".order(self.abi);
}
pub fn toByteUnits(self: AlignAs) u64 {
return self.@"align".toByteUnitsOptional().?;
}
};
@ -1298,7 +1304,7 @@ pub const CType = extern union {
const slice = self.storage.anon.fields[0..fields_len];
mem.sort(Field, slice, {}, struct {
fn before(_: void, lhs: Field, rhs: Field) bool {
return lhs.alignas.@"align" > rhs.alignas.@"align";
return lhs.alignas.order(rhs.alignas).compare(.gt);
}
}.before);
return slice;
@ -1424,7 +1430,7 @@ pub const CType = extern union {
.Pointer => {
const info = ty.ptrInfo(mod);
switch (info.size) {
switch (info.flags.size) {
.Slice => {
if (switch (kind) {
.forward, .forward_parameter => @as(Index, undefined),
@ -1454,27 +1460,24 @@ pub const CType = extern union {
},
.One, .Many, .C => {
const t: Tag = switch (info.@"volatile") {
false => switch (info.mutable) {
true => .pointer,
false => .pointer_const,
const t: Tag = switch (info.flags.is_volatile) {
false => switch (info.flags.is_const) {
false => .pointer,
true => .pointer_const,
},
true => switch (info.mutable) {
true => .pointer_volatile,
false => .pointer_const_volatile,
true => switch (info.flags.is_const) {
false => .pointer_volatile,
true => .pointer_const_volatile,
},
};
const pointee_ty = if (info.host_size > 0 and info.vector_index == .none)
try mod.intType(.unsigned, info.host_size * 8)
const pointee_ty = if (info.packed_offset.host_size > 0 and
info.flags.vector_index == .none)
try mod.intType(.unsigned, info.packed_offset.host_size * 8)
else
info.pointee_type;
info.child.toType();
if (if (info.size == .C and pointee_ty.ip_index == .u8_type)
Tag.char.toIndex()
else
try lookup.typeToIndex(pointee_ty, .forward)) |child_idx|
{
if (try lookup.typeToIndex(pointee_ty, .forward)) |child_idx| {
self.storage = .{ .child = .{
.base = .{ .tag = t },
.data = child_idx,
@ -1586,7 +1589,7 @@ pub const CType = extern union {
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_align = AlignAs.fieldAlign(ty, field_i, mod);
if (field_align.@"align" < field_align.abi) {
if (field_align.abiOrder().compare(.lt)) {
is_packed = true;
if (!lookup.isMutable()) break;
}


@ -885,7 +885,7 @@ pub const Object = struct {
const llvm_func = try dg.resolveLlvmFunction(decl_index);
if (mod.align_stack_fns.get(func_index)) |align_info| {
dg.addFnAttrInt(llvm_func, "alignstack", align_info.alignment);
dg.addFnAttrInt(llvm_func, "alignstack", align_info.alignment.toByteUnitsOptional().?);
dg.addFnAttr(llvm_func, "noinline");
} else {
DeclGen.removeFnAttr(llvm_func, "alignstack");
@ -1063,15 +1063,12 @@ pub const Object = struct {
if (param_ty.zigTypeTag(mod) != .Optional) {
dg.addArgAttr(llvm_func, llvm_arg_i, "nonnull");
}
if (!ptr_info.mutable) {
if (ptr_info.flags.is_const) {
dg.addArgAttr(llvm_func, llvm_arg_i, "readonly");
}
if (ptr_info.@"align" != 0) {
dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", ptr_info.@"align");
} else {
const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", elem_align);
}
const elem_align = ptr_info.flags.alignment.toByteUnitsOptional() orelse
@max(ptr_info.child.toType().abiAlignment(mod), 1);
dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", elem_align);
const ptr_param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
const len_param = llvm_func.getParam(llvm_arg_i);
@ -1474,7 +1471,7 @@ pub const Object = struct {
.Int => {
const info = ty.intInfo(mod);
assert(info.bits != 0);
const name = try ty.nameAlloc(gpa, o.module);
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const dwarf_encoding: c_uint = switch (info.signedness) {
.signed => DW.ATE.signed,
@ -1536,7 +1533,7 @@ pub const Object = struct {
const di_file = try o.getDIFile(gpa, mod.namespacePtr(owner_decl.src_namespace).file_scope);
const di_scope = try o.namespaceToDebugScope(owner_decl.src_namespace);
const name = try ty.nameAlloc(gpa, o.module);
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const enum_di_ty = dib.createEnumerationType(
@ -1557,7 +1554,7 @@ pub const Object = struct {
},
.Float => {
const bits = ty.floatBits(target);
const name = try ty.nameAlloc(gpa, o.module);
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const di_type = dib.createBasicType(name, bits, DW.ATE.float);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
@ -1571,7 +1568,7 @@ pub const Object = struct {
},
.Pointer => {
// Normalize everything that the debug info does not represent.
const ptr_info = Type.ptrInfoIp(&mod.intern_pool, ty.toIntern());
const ptr_info = ty.ptrInfo(mod);
if (ptr_info.sentinel != .none or
ptr_info.flags.address_space != .generic or
@ -1607,7 +1604,7 @@ pub const Object = struct {
const ptr_ty = ty.slicePtrFieldType(mod);
const len_ty = Type.usize;
const name = try ty.nameAlloc(gpa, o.module);
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const di_file: ?*llvm.DIFile = null;
const line = 0;
@ -1683,7 +1680,7 @@ pub const Object = struct {
}
const elem_di_ty = try o.lowerDebugType(ptr_info.child.toType(), .fwd);
const name = try ty.nameAlloc(gpa, o.module);
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const ptr_di_ty = dib.createPointerType(
elem_di_ty,
@ -1701,7 +1698,7 @@ pub const Object = struct {
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
return di_ty;
}
const name = try ty.nameAlloc(gpa, o.module);
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const owner_decl_index = ty.getOwnerDecl(mod);
const owner_decl = o.module.declPtr(owner_decl_index);
@ -1738,7 +1735,7 @@ pub const Object = struct {
.Int => blk: {
const info = elem_ty.intInfo(mod);
assert(info.bits != 0);
const name = try ty.nameAlloc(gpa, o.module);
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const dwarf_encoding: c_uint = switch (info.signedness) {
.signed => DW.ATE.signed,
@ -1761,7 +1758,7 @@ pub const Object = struct {
return vector_di_ty;
},
.Optional => {
const name = try ty.nameAlloc(gpa, o.module);
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const child_ty = ty.optionalChild(mod);
if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) {
@ -1857,7 +1854,7 @@ pub const Object = struct {
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(err_set_di_ty));
return err_set_di_ty;
}
const name = try ty.nameAlloc(gpa, o.module);
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const di_file: ?*llvm.DIFile = null;
const line = 0;
@ -1949,7 +1946,7 @@ pub const Object = struct {
},
.Struct => {
const compile_unit_scope = o.di_compile_unit.?.toScope();
const name = try ty.nameAlloc(gpa, o.module);
const name = try o.allocTypeName(ty);
defer gpa.free(name);
if (mod.typeToStruct(ty)) |struct_obj| {
@ -2128,7 +2125,7 @@ pub const Object = struct {
const compile_unit_scope = o.di_compile_unit.?.toScope();
const owner_decl_index = ty.getOwnerDecl(mod);
const name = try ty.nameAlloc(gpa, o.module);
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const fwd_decl = opt_fwd_decl orelse blk: {
@ -2417,6 +2414,13 @@ pub const Object = struct {
assert(stack_trace_decl.has_tv);
return stack_trace_decl.val.toType();
}
fn allocTypeName(o: *Object, ty: Type) Allocator.Error![:0]const u8 {
var buffer = std.ArrayList(u8).init(o.gpa);
errdefer buffer.deinit();
try ty.print(buffer.writer(), o.module);
return buffer.toOwnedSliceSentinel(0);
}
};
pub const DeclGen = struct {
@ -2792,7 +2796,7 @@ pub const DeclGen = struct {
return dg.context.structType(&fields, fields.len, .False);
}
const ptr_info = t.ptrInfo(mod);
const llvm_addrspace = toLlvmAddressSpace(ptr_info.@"addrspace", target);
const llvm_addrspace = toLlvmAddressSpace(ptr_info.flags.address_space, target);
return dg.context.pointerType(llvm_addrspace);
},
.Opaque => {
@ -3392,7 +3396,7 @@ pub const DeclGen = struct {
.opt_payload,
.elem,
.field,
=> try dg.lowerParentPtr(ptr_tv.val, ptr_tv.ty.ptrInfo(mod).bit_offset % 8 == 0),
=> try dg.lowerParentPtr(ptr_tv.val, ptr_tv.ty.ptrInfo(mod).packed_offset.bit_offset % 8 == 0),
.comptime_field => unreachable,
};
switch (ptr.len) {
@ -4034,7 +4038,6 @@ pub const DeclGen = struct {
fn lowerPtrToVoid(dg: *DeclGen, ptr_ty: Type) !*llvm.Value {
const mod = dg.module;
const alignment = ptr_ty.ptrInfo(mod).@"align";
// Even though we are pointing at something which has zero bits (e.g. `void`),
// Pointers are defined to have bits. So we must return something here.
// The value cannot be undefined, because we use the `nonnull` annotation
@ -4042,7 +4045,7 @@ pub const DeclGen = struct {
// the address will never be dereferenced.
const llvm_usize = try dg.lowerType(Type.usize);
const llvm_ptr_ty = try dg.lowerType(ptr_ty);
if (alignment != 0) {
if (ptr_ty.ptrInfo(mod).flags.alignment.toByteUnitsOptional()) |alignment| {
return llvm_usize.constInt(alignment, .False).constIntToPtr(llvm_ptr_ty);
}
// Note that these 0xaa values are appropriate even in release-optimized builds
@ -4163,18 +4166,15 @@ pub const DeclGen = struct {
dg.addArgAttr(llvm_fn, llvm_arg_i, "noalias");
}
}
if (!param_ty.isPtrLikeOptional(mod) and !ptr_info.@"allowzero") {
if (!param_ty.isPtrLikeOptional(mod) and !ptr_info.flags.is_allowzero) {
dg.addArgAttr(llvm_fn, llvm_arg_i, "nonnull");
}
if (!ptr_info.mutable) {
if (ptr_info.flags.is_const) {
dg.addArgAttr(llvm_fn, llvm_arg_i, "readonly");
}
if (ptr_info.@"align" != 0) {
dg.addArgAttrInt(llvm_fn, llvm_arg_i, "align", ptr_info.@"align");
} else {
const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
dg.addArgAttrInt(llvm_fn, llvm_arg_i, "align", elem_align);
}
const elem_align = ptr_info.flags.alignment.toByteUnitsOptional() orelse
@max(ptr_info.child.toType().abiAlignment(mod), 1);
dg.addArgAttrInt(llvm_fn, llvm_arg_i, "align", elem_align);
} else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) {
.signed => dg.addArgAttr(llvm_fn, llvm_arg_i, "signext"),
.unsigned => dg.addArgAttr(llvm_fn, llvm_arg_i, "zeroext"),
@ -4806,15 +4806,12 @@ pub const FuncGen = struct {
if (param_ty.zigTypeTag(mod) != .Optional) {
self.dg.addArgAttr(call, llvm_arg_i, "nonnull");
}
if (!ptr_info.mutable) {
if (ptr_info.flags.is_const) {
self.dg.addArgAttr(call, llvm_arg_i, "readonly");
}
if (ptr_info.@"align" != 0) {
self.dg.addArgAttrInt(call, llvm_arg_i, "align", ptr_info.@"align");
} else {
const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
self.dg.addArgAttrInt(call, llvm_arg_i, "align", elem_align);
}
const elem_align = ptr_info.flags.alignment.toByteUnitsOptional() orelse
@max(ptr_info.child.toType().abiAlignment(mod), 1);
self.dg.addArgAttrInt(call, llvm_arg_i, "align", elem_align);
},
};
}
@ -5737,7 +5734,7 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const elem_ptr = self.air.getRefType(ty_pl.ty);
if (elem_ptr.ptrInfo(mod).vector_index != .none) return base_ptr;
if (elem_ptr.ptrInfo(mod).flags.vector_index != .none) return base_ptr;
const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty);
if (ptr_ty.isSinglePointer(mod)) {
@ -8062,7 +8059,7 @@ pub const FuncGen = struct {
const ptr = try fg.resolveInst(ty_op.operand);
elide: {
if (!isByRef(ptr_info.pointee_type, mod)) break :elide;
if (!isByRef(ptr_info.child.toType(), mod)) break :elide;
if (!canElideLoad(fg, body_tail)) break :elide;
return ptr;
}
@ -8235,13 +8232,14 @@ pub const FuncGen = struct {
const ptr = try self.resolveInst(atomic_load.ptr);
const ptr_ty = self.typeOf(atomic_load.ptr);
const ptr_info = ptr_ty.ptrInfo(mod);
const elem_ty = ptr_info.pointee_type;
const elem_ty = ptr_info.child.toType();
if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod))
return null;
const ordering = toLlvmAtomicOrdering(atomic_load.order);
const opt_abi_llvm_ty = self.dg.getAtomicAbiType(elem_ty, false);
const ptr_alignment = ptr_info.alignment(mod);
const ptr_volatile = llvm.Bool.fromBool(ptr_info.@"volatile");
const ptr_alignment = @intCast(u32, ptr_info.flags.alignment.toByteUnitsOptional() orelse
ptr_info.child.toType().abiAlignment(mod));
const ptr_volatile = llvm.Bool.fromBool(ptr_info.flags.is_volatile);
const elem_llvm_ty = try self.dg.lowerType(elem_ty);
if (opt_abi_llvm_ty) |abi_llvm_ty| {
@ -9567,7 +9565,7 @@ pub const FuncGen = struct {
const result_ty = self.typeOfIndex(inst);
const result_ty_info = result_ty.ptrInfo(mod);
if (result_ty_info.host_size != 0) {
if (result_ty_info.packed_offset.host_size != 0) {
// From LLVM's perspective, a pointer to a packed struct and a pointer
// to a field of a packed struct are the same. The difference is in the
// Zig pointer type which provides information for how to mask and shift
@ -9651,16 +9649,18 @@ pub const FuncGen = struct {
fn load(self: *FuncGen, ptr: *llvm.Value, ptr_ty: Type) !?*llvm.Value {
const mod = self.dg.module;
const info = ptr_ty.ptrInfo(mod);
if (!info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) return null;
const elem_ty = info.child.toType();
if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
const ptr_alignment = info.alignment(mod);
const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr(mod));
const ptr_alignment = @intCast(u32, info.flags.alignment.toByteUnitsOptional() orelse
elem_ty.abiAlignment(mod));
const ptr_volatile = llvm.Bool.fromBool(info.flags.is_volatile);
assert(info.vector_index != .runtime);
if (info.vector_index != .none) {
const index_u32 = self.context.intType(32).constInt(@intFromEnum(info.vector_index), .False);
const vec_elem_ty = try self.dg.lowerType(info.pointee_type);
const vec_ty = vec_elem_ty.vectorType(info.host_size);
assert(info.flags.vector_index != .runtime);
if (info.flags.vector_index != .none) {
const index_u32 = self.context.intType(32).constInt(@intFromEnum(info.flags.vector_index), .False);
const vec_elem_ty = try self.dg.lowerType(elem_ty);
const vec_ty = vec_elem_ty.vectorType(info.packed_offset.host_size);
const loaded_vector = self.builder.buildLoad(vec_ty, ptr, "");
loaded_vector.setAlignment(ptr_alignment);
@ -9669,29 +9669,29 @@ pub const FuncGen = struct {
return self.builder.buildExtractElement(loaded_vector, index_u32, "");
}
if (info.host_size == 0) {
if (isByRef(info.pointee_type, mod)) {
return self.loadByRef(ptr, info.pointee_type, ptr_alignment, info.@"volatile");
if (info.packed_offset.host_size == 0) {
if (isByRef(elem_ty, mod)) {
return self.loadByRef(ptr, elem_ty, ptr_alignment, info.flags.is_volatile);
}
const elem_llvm_ty = try self.dg.lowerType(info.pointee_type);
const elem_llvm_ty = try self.dg.lowerType(elem_ty);
const llvm_inst = self.builder.buildLoad(elem_llvm_ty, ptr, "");
llvm_inst.setAlignment(ptr_alignment);
llvm_inst.setVolatile(ptr_volatile);
return llvm_inst;
}
const int_elem_ty = self.context.intType(info.host_size * 8);
const int_elem_ty = self.context.intType(info.packed_offset.host_size * 8);
const containing_int = self.builder.buildLoad(int_elem_ty, ptr, "");
containing_int.setAlignment(ptr_alignment);
containing_int.setVolatile(ptr_volatile);
const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod));
const shift_amt = containing_int.typeOf().constInt(info.bit_offset, .False);
const shift_amt = containing_int.typeOf().constInt(info.packed_offset.bit_offset, .False);
const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
const elem_llvm_ty = try self.dg.lowerType(info.pointee_type);
const elem_llvm_ty = try self.dg.lowerType(elem_ty);
if (isByRef(info.pointee_type, mod)) {
const result_align = info.pointee_type.abiAlignment(mod);
if (isByRef(elem_ty, mod)) {
const result_align = elem_ty.abiAlignment(mod);
const result_ptr = self.buildAlloca(elem_llvm_ty, result_align);
const same_size_int = self.context.intType(elem_bits);
@ -9701,13 +9701,13 @@ pub const FuncGen = struct {
return result_ptr;
}
if (info.pointee_type.zigTypeTag(mod) == .Float or info.pointee_type.zigTypeTag(mod) == .Vector) {
if (elem_ty.zigTypeTag(mod) == .Float or elem_ty.zigTypeTag(mod) == .Vector) {
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
}
if (info.pointee_type.isPtrAtRuntime(mod)) {
if (elem_ty.isPtrAtRuntime(mod)) {
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@ -9725,18 +9725,18 @@ pub const FuncGen = struct {
) !void {
const mod = self.dg.module;
const info = ptr_ty.ptrInfo(mod);
const elem_ty = info.pointee_type;
const elem_ty = info.child.toType();
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
return;
}
const ptr_alignment = ptr_ty.ptrAlignment(mod);
const ptr_volatile = llvm.Bool.fromBool(info.@"volatile");
const ptr_volatile = llvm.Bool.fromBool(info.flags.is_volatile);
assert(info.vector_index != .runtime);
if (info.vector_index != .none) {
const index_u32 = self.context.intType(32).constInt(@intFromEnum(info.vector_index), .False);
assert(info.flags.vector_index != .runtime);
if (info.flags.vector_index != .none) {
const index_u32 = self.context.intType(32).constInt(@intFromEnum(info.flags.vector_index), .False);
const vec_elem_ty = try self.dg.lowerType(elem_ty);
const vec_ty = vec_elem_ty.vectorType(info.host_size);
const vec_ty = vec_elem_ty.vectorType(info.packed_offset.host_size);
const loaded_vector = self.builder.buildLoad(vec_ty, ptr, "");
loaded_vector.setAlignment(ptr_alignment);
@ -9751,15 +9751,15 @@ pub const FuncGen = struct {
return;
}
if (info.host_size != 0) {
const int_elem_ty = self.context.intType(info.host_size * 8);
if (info.packed_offset.host_size != 0) {
const int_elem_ty = self.context.intType(info.packed_offset.host_size * 8);
const containing_int = self.builder.buildLoad(int_elem_ty, ptr, "");
assert(ordering == .NotAtomic);
containing_int.setAlignment(ptr_alignment);
containing_int.setVolatile(ptr_volatile);
const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod));
const containing_int_ty = containing_int.typeOf();
const shift_amt = containing_int_ty.constInt(info.bit_offset, .False);
const shift_amt = containing_int_ty.constInt(info.packed_offset.bit_offset, .False);
// Convert to equally-sized integer type in order to perform the bit
// operations on the value to store
const value_bits_type = self.context.intType(elem_bits);
@ -9799,7 +9799,7 @@ pub const FuncGen = struct {
elem,
elem_ty.abiAlignment(mod),
self.context.intType(Type.usize.intInfo(mod).bits).constInt(size_bytes, .False),
info.@"volatile",
info.flags.is_volatile,
);
}


@ -1210,13 +1210,13 @@ pub const DeclGen = struct {
.Pointer => {
const ptr_info = ty.ptrInfo(mod);
const storage_class = spvStorageClass(ptr_info.@"addrspace");
const child_ty_ref = try self.resolveType(ptr_info.pointee_type, .indirect);
const storage_class = spvStorageClass(ptr_info.flags.address_space);
const child_ty_ref = try self.resolveType(ptr_info.child.toType(), .indirect);
const ptr_ty_ref = try self.spv.resolve(.{ .ptr_type = .{
.storage_class = storage_class,
.child_type = child_ty_ref,
} });
if (ptr_info.size != .Slice) {
if (ptr_info.flags.size != .Slice) {
return ptr_ty_ref;
}
@ -1573,7 +1573,7 @@ pub const DeclGen = struct {
init_val,
actual_storage_class,
final_storage_class == .Generic,
decl.@"align",
@intCast(u32, decl.alignment.toByteUnits(0)),
);
}
}


@ -163,7 +163,6 @@ pub const DeclState = struct {
atom_index: Atom.Index,
ty: Type,
) error{OutOfMemory}!void {
const arena = self.abbrev_type_arena.allocator();
const dbg_info_buffer = &self.dbg_info;
const target = mod.getTarget();
const target_endian = target.cpu.arch.endian();
@ -344,10 +343,8 @@ pub const DeclState = struct {
.struct_type => |struct_type| s: {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s;
// DW.AT.name, DW.FORM.string
const struct_name = try ty.nameAllocArena(arena, mod);
try dbg_info_buffer.ensureUnusedCapacity(struct_name.len + 1);
dbg_info_buffer.appendSliceAssumeCapacity(struct_name);
dbg_info_buffer.appendAssumeCapacity(0);
try ty.print(dbg_info_buffer.writer(), mod);
try dbg_info_buffer.append(0);
if (struct_obj.layout == .Packed) {
log.debug("TODO implement .debug_info for packed structs", .{});
@ -388,10 +385,8 @@ pub const DeclState = struct {
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
// DW.AT.name, DW.FORM.string
const enum_name = try ty.nameAllocArena(arena, mod);
try dbg_info_buffer.ensureUnusedCapacity(enum_name.len + 1);
dbg_info_buffer.appendSliceAssumeCapacity(enum_name);
dbg_info_buffer.appendAssumeCapacity(0);
try ty.print(dbg_info_buffer.writer(), mod);
try dbg_info_buffer.append(0);
const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
for (enum_type.names, 0..) |field_name_index, field_i| {
@ -422,21 +417,18 @@ pub const DeclState = struct {
const union_obj = mod.typeToUnion(ty).?;
const payload_offset = if (layout.tag_align >= layout.payload_align) layout.tag_size else 0;
const tag_offset = if (layout.tag_align >= layout.payload_align) 0 else layout.payload_size;
const is_tagged = layout.tag_size > 0;
const union_name = try ty.nameAllocArena(arena, mod);
// TODO this is temporary to match current state of unions in Zig - we don't yet have
// safety checks implemented meaning the implicit tag is not yet stored and generated
// for untagged unions.
const is_tagged = layout.tag_size > 0;
if (is_tagged) {
// DW.AT.structure_type
try dbg_info_buffer.append(@intFromEnum(AbbrevKind.struct_type));
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), layout.abi_size);
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.ensureUnusedCapacity(union_name.len + 1);
dbg_info_buffer.appendSliceAssumeCapacity(union_name);
dbg_info_buffer.appendAssumeCapacity(0);
try ty.print(dbg_info_buffer.writer(), mod);
try dbg_info_buffer.append(0);
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(9);
@ -460,7 +452,8 @@ pub const DeclState = struct {
if (is_tagged) {
try dbg_info_buffer.writer().print("AnonUnion\x00", .{});
} else {
try dbg_info_buffer.writer().print("{s}\x00", .{union_name});
try ty.print(dbg_info_buffer.writer(), mod);
try dbg_info_buffer.append(0);
}
const fields = ty.unionFields(mod);
@ -500,15 +493,7 @@ pub const DeclState = struct {
try dbg_info_buffer.append(0);
}
},
.ErrorSet => {
try addDbgInfoErrorSet(
self.abbrev_type_arena.allocator(),
mod,
ty,
target,
&self.dbg_info,
);
},
.ErrorSet => try addDbgInfoErrorSet(mod, ty, target, &self.dbg_info),
.ErrorUnion => {
const error_ty = ty.errorUnionSet(mod);
const payload_ty = ty.errorUnionPayload(mod);
@ -523,8 +508,8 @@ pub const DeclState = struct {
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
const name = try ty.nameAllocArena(arena, mod);
try dbg_info_buffer.writer().print("{s}\x00", .{name});
try ty.print(dbg_info_buffer.writer(), mod);
try dbg_info_buffer.append(0);
if (!payload_ty.isNoReturn(mod)) {
// DW.AT.member
@ -2527,7 +2512,7 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
const error_ty = try module.intern(.{ .error_set_type = .{ .names = names } });
var dbg_info_buffer = std.ArrayList(u8).init(arena);
try addDbgInfoErrorSet(arena, module, error_ty.toType(), self.target, &dbg_info_buffer);
try addDbgInfoErrorSet(module, error_ty.toType(), self.target, &dbg_info_buffer);
const di_atom_index = try self.createAtom(.di_atom);
log.debug("updateDeclDebugInfoAllocation in flushModule", .{});
@ -2644,7 +2629,6 @@ fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct {
}
fn addDbgInfoErrorSet(
arena: Allocator,
mod: *Module,
ty: Type,
target: std.Target,
@ -2658,8 +2642,8 @@ fn addDbgInfoErrorSet(
const abi_size = Type.anyerror.abiSize(mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
const name = try ty.nameAllocArena(arena, mod);
try dbg_info_buffer.writer().print("{s}\x00", .{name});
try ty.print(dbg_info_buffer.writer(), mod);
try dbg_info_buffer.append(0);
// DW.AT.enumerator
const no_error = "(no error)";
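Throughout the hunks above, the arena-allocated type name (`nameAllocArena`) is replaced by printing the name straight into the .debug_info byte buffer and NUL-terminating it afterwards, so no intermediate copy is needed. A minimal sketch of that buffer-writing pattern, with a plain `[]const u8` standing in for `Type.print(writer, mod)`; the helper name and inputs are illustrative only.

const std = @import("std");

// Format a name directly into the growing buffer, then append the
// terminator required by DW.FORM.string.
fn writeName(buffer: *std.ArrayList(u8), name: []const u8) !void {
    try buffer.writer().print("{s}", .{name});
    try buffer.append(0);
}

test "name is written in place with a NUL terminator" {
    var dbg_info = std.ArrayList(u8).init(std.testing.allocator);
    defer dbg_info.deinit();
    try writeName(&dbg_info, "std.ArrayList(u8)");
    try std.testing.expectEqualSlices(u8, "std.ArrayList(u8)\x00", dbg_info.items);
}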

View File

@ -2,7 +2,6 @@ const std = @import("std");
const builtin = @import("builtin");
const Value = @import("value.zig").Value;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Target = std.Target;
const Module = @import("Module.zig");
const log = std.log.scoped(.Type);
@ -102,10 +101,10 @@ pub const Type = struct {
};
}
pub fn ptrInfoIp(ip: *const InternPool, ty: InternPool.Index) InternPool.Key.PtrType {
return switch (ip.indexToKey(ty)) {
pub fn ptrInfo(ty: Type, mod: *const Module) InternPool.Key.PtrType {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |p| p,
.opt_type => |child| switch (ip.indexToKey(child)) {
.opt_type => |child| switch (mod.intern_pool.indexToKey(child)) {
.ptr_type => |p| p,
else => unreachable,
},
@ -113,10 +112,6 @@ pub const Type = struct {
};
}
pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data {
return Payload.Pointer.Data.fromKey(ptrInfoIp(&mod.intern_pool, ty.toIntern()));
}
pub fn eql(a: Type, b: Type, mod: *const Module) bool {
_ = mod; // TODO: remove this parameter
// The InternPool data structure hashes based on Key to make interned objects
@ -181,15 +176,6 @@ pub const Type = struct {
return writer.print("{any}", .{start_type.ip_index});
}
pub const nameAllocArena = nameAlloc;
pub fn nameAlloc(ty: Type, ally: Allocator, module: *Module) Allocator.Error![:0]const u8 {
var buffer = std.ArrayList(u8).init(ally);
defer buffer.deinit();
try ty.print(buffer.writer(), module);
return buffer.toOwnedSliceSentinel(0);
}
/// Prints a name suitable for `@typeName`.
pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void {
switch (mod.intern_pool.indexToKey(ty.toIntern())) {
@ -203,42 +189,44 @@ pub const Type = struct {
.ptr_type => {
const info = ty.ptrInfo(mod);
if (info.sentinel) |s| switch (info.size) {
if (info.sentinel != .none) switch (info.flags.size) {
.One, .C => unreachable,
.Many => try writer.print("[*:{}]", .{s.fmtValue(info.pointee_type, mod)}),
.Slice => try writer.print("[:{}]", .{s.fmtValue(info.pointee_type, mod)}),
} else switch (info.size) {
.Many => try writer.print("[*:{}]", .{info.sentinel.toValue().fmtValue(info.child.toType(), mod)}),
.Slice => try writer.print("[:{}]", .{info.sentinel.toValue().fmtValue(info.child.toType(), mod)}),
} else switch (info.flags.size) {
.One => try writer.writeAll("*"),
.Many => try writer.writeAll("[*]"),
.C => try writer.writeAll("[*c]"),
.Slice => try writer.writeAll("[]"),
}
if (info.@"align" != 0 or info.host_size != 0 or info.vector_index != .none) {
if (info.@"align" != 0) {
try writer.print("align({d}", .{info.@"align"});
} else {
const alignment = info.pointee_type.abiAlignment(mod);
try writer.print("align({d}", .{alignment});
}
if (info.flags.alignment != .none or
info.packed_offset.host_size != 0 or
info.flags.vector_index != .none)
{
const alignment = info.flags.alignment.toByteUnitsOptional() orelse
info.child.toType().abiAlignment(mod);
try writer.print("align({d}", .{alignment});
if (info.bit_offset != 0 or info.host_size != 0) {
try writer.print(":{d}:{d}", .{ info.bit_offset, info.host_size });
if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) {
try writer.print(":{d}:{d}", .{
info.packed_offset.bit_offset, info.packed_offset.host_size,
});
}
if (info.vector_index == .runtime) {
if (info.flags.vector_index == .runtime) {
try writer.writeAll(":?");
} else if (info.vector_index != .none) {
try writer.print(":{d}", .{@intFromEnum(info.vector_index)});
} else if (info.flags.vector_index != .none) {
try writer.print(":{d}", .{@intFromEnum(info.flags.vector_index)});
}
try writer.writeAll(") ");
}
if (info.@"addrspace" != .generic) {
try writer.print("addrspace(.{s}) ", .{@tagName(info.@"addrspace")});
if (info.flags.address_space != .generic) {
try writer.print("addrspace(.{s}) ", .{@tagName(info.flags.address_space)});
}
if (!info.mutable) try writer.writeAll("const ");
if (info.@"volatile") try writer.writeAll("volatile ");
if (info.@"allowzero" and info.size != .C) try writer.writeAll("allowzero ");
if (info.flags.is_const) try writer.writeAll("const ");
if (info.flags.is_volatile) try writer.writeAll("volatile ");
if (info.flags.is_allowzero and info.flags.size != .C) try writer.writeAll("allowzero ");
try print(info.pointee_type, writer, mod);
try print(info.child.toType(), writer, mod);
return;
},
.array_type => |array_type| {
@ -1035,9 +1023,8 @@ pub const Type = struct {
else => |e| return e,
})) continue;
const field_align = if (field.abi_align != 0)
field.abi_align
else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) {
const field_align = @intCast(u32, field.abi_align.toByteUnitsOptional() orelse
switch (try field.ty.abiAlignmentAdvanced(mod, strat)) {
.scalar => |a| a,
.val => switch (strat) {
.eager => unreachable, // struct layout not resolved
@ -1047,7 +1034,7 @@ pub const Type = struct {
.storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
},
};
});
big_align = @max(big_align, field_align);
// This logic is duplicated in Module.Struct.Field.alignment.
@ -1242,9 +1229,8 @@ pub const Type = struct {
else => |e| return e,
})) continue;
const field_align = if (field.abi_align != 0)
field.abi_align
else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) {
const field_align = @intCast(u32, field.abi_align.toByteUnitsOptional() orelse
switch (try field.ty.abiAlignmentAdvanced(mod, strat)) {
.scalar => |a| a,
.val => switch (strat) {
.eager => unreachable, // struct layout not resolved
@ -1254,7 +1240,7 @@ pub const Type = struct {
.storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
},
};
});
max_align = @max(max_align, field_align);
}
return AbiAlignmentAdvanced{ .scalar = max_align };
@ -1883,7 +1869,7 @@ pub const Type = struct {
if (ty.isPtrLikeOptional(mod)) {
return true;
}
return ty.ptrInfo(mod).@"allowzero";
return ty.ptrInfo(mod).flags.is_allowzero;
}
/// See also `isPtrLikeOptional`.
@ -2387,15 +2373,7 @@ pub const Type = struct {
/// Asserts the type is a function or a function pointer.
pub fn fnReturnType(ty: Type, mod: *Module) Type {
return fnReturnTypeIp(ty, &mod.intern_pool);
}
pub fn fnReturnTypeIp(ty: Type, ip: *const InternPool) Type {
return switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ip.indexToKey(ptr_type.child).func_type.return_type,
.func_type => |func_type| func_type.return_type,
else => unreachable,
}.toType();
return mod.intern_pool.funcReturnType(ty.toIntern()).toType();
}
/// Asserts the type is a function.
@ -3355,58 +3333,6 @@ pub const Type = struct {
};
}
pub const Payload = struct {
/// TODO: remove this data structure since we have `InternPool.Key.PtrType`.
pub const Pointer = struct {
pub const Data = struct {
pointee_type: Type,
sentinel: ?Value = null,
/// If zero use pointee_type.abiAlignment()
/// When creating pointer types, if alignment is equal to pointee type
/// abi alignment, this value should be set to 0 instead.
@"align": u32 = 0,
/// See src/target.zig defaultAddressSpace function for how to obtain
/// an appropriate value for this field.
@"addrspace": std.builtin.AddressSpace,
bit_offset: u16 = 0,
/// If this is non-zero it means the pointer points to a sub-byte
/// range of data, which is backed by a "host integer" with this
/// number of bytes.
/// When host_size=pointee_abi_size and bit_offset=0, this must be
/// represented with host_size=0 instead.
host_size: u16 = 0,
vector_index: VectorIndex = .none,
@"allowzero": bool = false,
mutable: bool = true, // TODO rename this to const, not mutable
@"volatile": bool = false,
size: std.builtin.Type.Pointer.Size = .One,
pub const VectorIndex = InternPool.Key.PtrType.VectorIndex;
pub fn alignment(data: Data, mod: *Module) u32 {
if (data.@"align" != 0) return data.@"align";
return abiAlignment(data.pointee_type, mod);
}
pub fn fromKey(p: InternPool.Key.PtrType) Data {
return .{
.pointee_type = p.child.toType(),
.sentinel = if (p.sentinel != .none) p.sentinel.toValue() else null,
.@"align" = @intCast(u32, p.flags.alignment.toByteUnits(0)),
.@"addrspace" = p.flags.address_space,
.bit_offset = p.packed_offset.bit_offset,
.host_size = p.packed_offset.host_size,
.vector_index = p.flags.vector_index,
.@"allowzero" = p.flags.is_allowzero,
.mutable = !p.flags.is_const,
.@"volatile" = p.flags.is_volatile,
.size = p.flags.size,
};
}
};
};
};
pub const @"u1": Type = .{ .ip_index = .u1_type };
pub const @"u8": Type = .{ .ip_index = .u8_type };
pub const @"u16": Type = .{ .ip_index = .u16_type };
@ -3464,80 +3390,6 @@ pub const Type = struct {
pub const err_int = Type.u16;
pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type {
// TODO: update callsites of this function to directly call mod.ptrType
// and then delete this function.
_ = arena;
var d = data;
// Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee
// type, we change it to 0 here. If this causes an assertion trip because the
// pointee type needs to be resolved more, that needs to be done before calling
// this ptr() function.
if (d.@"align" != 0) canonicalize: {
if (!d.pointee_type.layoutIsResolved(mod)) break :canonicalize;
if (d.@"align" == d.pointee_type.abiAlignment(mod)) {
d.@"align" = 0;
}
}
// Canonicalize host_size. If it matches the bit size of the pointee type,
// we change it to 0 here. If this causes an assertion trip, the pointee type
// needs to be resolved before calling this ptr() function.
if (d.host_size != 0) {
assert(d.bit_offset < d.host_size * 8);
if (d.host_size * 8 == d.pointee_type.bitSize(mod)) {
assert(d.bit_offset == 0);
d.host_size = 0;
}
}
return mod.ptrType(.{
.child = d.pointee_type.ip_index,
.sentinel = if (d.sentinel) |s| s.ip_index else .none,
.flags = .{
.alignment = InternPool.Alignment.fromByteUnits(d.@"align"),
.vector_index = d.vector_index,
.size = d.size,
.is_const = !d.mutable,
.is_volatile = d.@"volatile",
.is_allowzero = d.@"allowzero",
.address_space = d.@"addrspace",
},
.packed_offset = .{
.host_size = d.host_size,
.bit_offset = d.bit_offset,
},
});
}
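A small sketch of the canonicalization rule this deleted wrapper applied before interning, which presumably now lives with `Module.ptrType` or its callers: an explicit alignment equal to the pointee's ABI alignment is dropped, and a host integer covering exactly the pointee's bit size collapses to zero, so equivalent pointer types intern to a single key. The standalone helpers and concrete numbers below are illustrative; the real code queries the resolved pointee type.

const std = @import("std");

// Alignment matching the pointee's ABI alignment is stored as "no explicit
// alignment" (0 here), mirroring the canonicalize block above.
fn canonicalAlign(requested_align: u64, abi_align: u64) u64 {
    return if (requested_align == abi_align) 0 else requested_align;
}

// A host integer spanning exactly the pointee's bit size is redundant and
// collapses to host_size = 0, mirroring the host_size block above.
fn canonicalHostSize(host_size: u16, bit_offset: u16, pointee_bit_size: u64) u16 {
    if (host_size == 0) return 0;
    std.debug.assert(bit_offset < @as(u64, host_size) * 8);
    if (@as(u64, host_size) * 8 == pointee_bit_size) {
        std.debug.assert(bit_offset == 0);
        return 0;
    }
    return host_size;
}

test "canonicalization folds redundant pointer attributes" {
    try std.testing.expectEqual(@as(u64, 0), canonicalAlign(4, 4)); // matches ABI alignment
    try std.testing.expectEqual(@as(u64, 16), canonicalAlign(16, 4)); // genuinely over-aligned
    try std.testing.expectEqual(@as(u16, 0), canonicalHostSize(4, 0, 32)); // whole host integer
    try std.testing.expectEqual(@as(u16, 4), canonicalHostSize(4, 8, 16)); // real sub-byte range
}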
pub fn array(
arena: Allocator,
len: u64,
sent: ?Value,
elem_type: Type,
mod: *Module,
) Allocator.Error!Type {
// TODO: update callsites of this function to directly call mod.arrayType
// and then delete this function.
_ = arena;
return mod.arrayType(.{
.len = len,
.child = elem_type.ip_index,
.sentinel = if (sent) |s| s.ip_index else .none,
});
}
pub fn optional(arena: Allocator, child_type: Type, mod: *Module) Allocator.Error!Type {
// TODO: update callsites of this function to directly call
// mod.optionalType and then delete this function.
_ = arena;
return mod.optionalType(child_type.ip_index);
}
pub fn smallestUnsignedBits(max: u64) u16 {
if (max == 0) return 0;
const base = std.math.log2(max);

View File

@ -1823,7 +1823,7 @@ pub const Value = struct {
}
pub fn isRuntimeValue(val: Value, mod: *Module) bool {
return mod.intern_pool.indexToKey(val.toIntern()) == .runtime_value;
return mod.intern_pool.isRuntimeValue(val.toIntern());
}
/// Returns true if a Value is backed by a variable
@ -1851,33 +1851,9 @@ pub const Value = struct {
}
pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool {
return val.ip_index != .none and switch (mod.intern_pool.indexToKey(val.toIntern())) {
.variable => false,
else => val.isPtrToThreadLocalInner(mod),
};
}
pub fn isPtrToThreadLocalInner(val: Value, mod: *Module) bool {
return val.ip_index != .none and switch (mod.intern_pool.indexToKey(val.toIntern())) {
.variable => |variable| variable.is_threadlocal,
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl_index| {
const decl = mod.declPtr(decl_index);
assert(decl.has_tv);
return decl.val.isPtrToThreadLocalInner(mod);
},
.mut_decl => |mut_decl| {
const decl = mod.declPtr(mut_decl.decl);
assert(decl.has_tv);
return decl.val.isPtrToThreadLocalInner(mod);
},
.int => false,
.eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocalInner(mod),
.comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocalInner(mod),
.elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocalInner(mod),
},
else => false,
};
const backing_decl = mod.intern_pool.getBackingDecl(val.toIntern()).unwrap() orelse return false;
const variable = mod.declPtr(backing_decl).getOwnedVariable(mod) orelse return false;
return variable.is_threadlocal;
}
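The rewritten `isPtrToThreadLocal` above leans on `getBackingDecl` instead of recursing through `Value`. A rough sketch of the base-decl walk such a helper performs: strip derived-pointer layers (field, element, payload pointers) until a declaration or a non-decl base is reached. The `Addr` shape and field names below are simplified stand-ins, not the intern pool's actual representation.

const std = @import("std");

const DeclIndex = u32;

// Simplified pointer address: either a declaration, a raw integer, or a
// pointer derived from another address.
const Addr = union(enum) {
    decl: DeclIndex,
    mut_decl: DeclIndex,
    int: u64,
    eu_payload: *const Addr,
    opt_payload: *const Addr,
    elem: *const Addr,
    field: *const Addr,
};

fn getBackingDecl(addr: *const Addr) ?DeclIndex {
    var cur = addr;
    while (true) {
        switch (cur.*) {
            .decl, .mut_decl => |decl| return decl,
            .eu_payload, .opt_payload, .elem, .field => |base| cur = base,
            .int => return null,
        }
    }
}

test "derived pointers resolve to their backing declaration" {
    const base = Addr{ .decl = 42 };
    const field_ptr = Addr{ .field = &base };
    const elem_ptr = Addr{ .elem = &field_ptr };
    try std.testing.expectEqual(@as(?DeclIndex, 42), getBackingDecl(&elem_ptr));
    const int_ptr = Addr{ .int = 0x1000 };
    try std.testing.expectEqual(@as(?DeclIndex, null), getBackingDecl(&int_ptr));
}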
// Asserts that the provided start/end are in-bounds.
@ -2015,12 +1991,7 @@ pub const Value = struct {
}
pub fn isUndef(val: Value, mod: *Module) bool {
if (val.ip_index == .none) return false;
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.undef => true,
.simple_value => |v| v == .undefined,
else => false,
};
return val.ip_index != .none and mod.intern_pool.isUndef(val.toIntern());
}
/// TODO: check for cases such as array that is not marked undef but all the element