mirror of
https://github.com/ziglang/zig.git
synced 2025-12-06 06:13:07 +00:00
Merge pull request #19630 from mlugg/comptime-ptr-access-5
compiler: rework comptime pointer representation and access
This commit is contained in:
commit
1fb2381316
@ -232,7 +232,7 @@ test "icon data size too small" {
|
||||
try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, fbs.reader(), data.len));
|
||||
}
|
||||
|
||||
pub const ImageFormat = enum {
|
||||
pub const ImageFormat = enum(u2) {
|
||||
dib,
|
||||
png,
|
||||
riff,
|
||||
@ -272,7 +272,7 @@ pub const BitmapHeader = extern struct {
|
||||
}
|
||||
|
||||
/// https://en.wikipedia.org/wiki/BMP_file_format#DIB_header_(bitmap_information_header)
|
||||
pub const Version = enum {
|
||||
pub const Version = enum(u3) {
|
||||
unknown,
|
||||
@"win2.0", // Windows 2.0 or later
|
||||
@"nt3.1", // Windows NT, 3.1x or later
|
||||
|
||||
@ -131,7 +131,7 @@ pub const Node = struct {
|
||||
}
|
||||
};
|
||||
|
||||
pub const TableCellAlignment = enum {
|
||||
pub const TableCellAlignment = enum(u2) {
|
||||
unset,
|
||||
left,
|
||||
center,
|
||||
|
||||
@ -271,7 +271,7 @@ pub const Ip4Address = extern struct {
|
||||
sa: posix.sockaddr.in,
|
||||
|
||||
pub fn parse(buf: []const u8, port: u16) IPv4ParseError!Ip4Address {
|
||||
var result = Ip4Address{
|
||||
var result: Ip4Address = .{
|
||||
.sa = .{
|
||||
.port = mem.nativeToBig(u16, port),
|
||||
.addr = undefined,
|
||||
|
||||
@ -565,7 +565,7 @@ pub const OptionalNullTerminatedString = enum(u32) {
|
||||
/// * decl val (so that we can analyze the value lazily)
|
||||
/// * decl ref (so that we can analyze the reference lazily)
|
||||
pub const CaptureValue = packed struct(u32) {
|
||||
tag: enum { @"comptime", runtime, decl_val, decl_ref },
|
||||
tag: enum(u2) { @"comptime", runtime, decl_val, decl_ref },
|
||||
idx: u30,
|
||||
|
||||
pub fn wrap(val: Unwrapped) CaptureValue {
|
||||
@ -1026,22 +1026,76 @@ pub const Key = union(enum) {
|
||||
pub const Ptr = struct {
|
||||
/// This is the pointer type, not the element type.
|
||||
ty: Index,
|
||||
/// The value of the address that the pointer points to.
|
||||
addr: Addr,
|
||||
/// The base address which this pointer is offset from.
|
||||
base_addr: BaseAddr,
|
||||
/// The offset of this pointer from `base_addr` in bytes.
|
||||
byte_offset: u64,
|
||||
|
||||
pub const Addr = union(enum) {
|
||||
const Tag = @typeInfo(Addr).Union.tag_type.?;
|
||||
pub const BaseAddr = union(enum) {
|
||||
const Tag = @typeInfo(BaseAddr).Union.tag_type.?;
|
||||
|
||||
/// Points to the value of a single `Decl`, which may be constant or a `variable`.
|
||||
decl: DeclIndex,
|
||||
|
||||
/// Points to the value of a single comptime alloc stored in `Sema`.
|
||||
comptime_alloc: ComptimeAllocIndex,
|
||||
|
||||
/// Points to a single unnamed constant value.
|
||||
anon_decl: AnonDecl,
|
||||
|
||||
/// Points to a comptime field of a struct. Index is the field's value.
|
||||
///
|
||||
/// TODO: this exists because these fields are semantically mutable. We
|
||||
/// should probably change the language so that this isn't the case.
|
||||
comptime_field: Index,
|
||||
int: Index,
|
||||
|
||||
/// A pointer with a fixed integer address, usually from `@ptrFromInt`.
|
||||
///
|
||||
/// The address is stored entirely by `byte_offset`, which will be positive
|
||||
/// and in-range of a `usize`. The base address is, for all intents and purposes, 0.
|
||||
int,
|
||||
|
||||
/// A pointer to the payload of an error union. Index is the error union pointer.
|
||||
/// To ensure a canonical representation, the type of the base pointer must:
|
||||
/// * be a one-pointer
|
||||
/// * be `const`, `volatile` and `allowzero`
|
||||
/// * have alignment 1
|
||||
/// * have the same address space as this pointer
|
||||
/// * have a host size, bit offset, and vector index of 0
|
||||
/// See `Value.canonicalizeBasePtr` which enforces these properties.
|
||||
eu_payload: Index,
|
||||
|
||||
/// A pointer to the payload of a non-pointer-like optional. Index is the
|
||||
/// optional pointer. To ensure a canonical representation, the base
|
||||
/// pointer is subject to the same restrictions as in `eu_payload`.
|
||||
opt_payload: Index,
|
||||
elem: BaseIndex,
|
||||
|
||||
/// A pointer to a field of a slice, or of an auto-layout struct or union. Slice fields
|
||||
/// are referenced according to `Value.slice_ptr_index` and `Value.slice_len_index`.
|
||||
/// Base is the aggregate pointer, which is subject to the same restrictions as
|
||||
/// in `eu_payload`.
|
||||
field: BaseIndex,
|
||||
|
||||
/// A pointer to an element of a comptime-only array. Base is the
|
||||
/// many-pointer we are indexing into. It is subject to the same restrictions
|
||||
/// as in `eu_payload`, except it must be a many-pointer rather than a one-pointer.
|
||||
///
|
||||
/// The element type of the base pointer must NOT be an array. Additionally, the
|
||||
/// base pointer is guaranteed to not be an `arr_elem` into a pointer with the
|
||||
/// same child type. Thus, since there are no two comptime-only types which are
|
||||
/// IMC to one another, the only case where the base pointer may also be an
|
||||
/// `arr_elem` is when this pointer is semantically invalid (e.g. it reinterprets
|
||||
/// a `type` as a `comptime_int`). These restrictions are in place to ensure
|
||||
/// a canonical representation.
|
||||
///
|
||||
/// This kind of base address differs from others in that it may refer to any
|
||||
/// sequence of values; for instance, an `arr_elem` at index 2 may refer to
|
||||
/// any number of elements starting from index 2.
|
||||
///
|
||||
/// Index must not be 0. To refer to the element at index 0, simply reinterpret
|
||||
/// the aggregate pointer.
|
||||
arr_elem: BaseIndex,
|
||||
|
||||
pub const MutDecl = struct {
|
||||
decl: DeclIndex,
|
||||
runtime_index: RuntimeIndex,
|
||||
@ -1222,10 +1276,11 @@ pub const Key = union(enum) {
|
||||
.ptr => |ptr| {
|
||||
// Int-to-ptr pointers are hashed separately than decl-referencing pointers.
|
||||
// This is sound due to pointer provenance rules.
|
||||
const addr: @typeInfo(Key.Ptr.Addr).Union.tag_type.? = ptr.addr;
|
||||
const seed2 = seed + @intFromEnum(addr);
|
||||
const common = asBytes(&ptr.ty);
|
||||
return switch (ptr.addr) {
|
||||
const addr_tag: Key.Ptr.BaseAddr.Tag = ptr.base_addr;
|
||||
const seed2 = seed + @intFromEnum(addr_tag);
|
||||
const big_offset: i128 = ptr.byte_offset;
|
||||
const common = asBytes(&ptr.ty) ++ asBytes(&big_offset);
|
||||
return switch (ptr.base_addr) {
|
||||
inline .decl,
|
||||
.comptime_alloc,
|
||||
.anon_decl,
|
||||
@ -1235,7 +1290,7 @@ pub const Key = union(enum) {
|
||||
.comptime_field,
|
||||
=> |x| Hash.hash(seed2, common ++ asBytes(&x)),
|
||||
|
||||
.elem, .field => |x| Hash.hash(
|
||||
.arr_elem, .field => |x| Hash.hash(
|
||||
seed2,
|
||||
common ++ asBytes(&x.base) ++ asBytes(&x.index),
|
||||
),
|
||||
@ -1494,21 +1549,21 @@ pub const Key = union(enum) {
|
||||
.ptr => |a_info| {
|
||||
const b_info = b.ptr;
|
||||
if (a_info.ty != b_info.ty) return false;
|
||||
if (a_info.byte_offset != b_info.byte_offset) return false;
|
||||
|
||||
const AddrTag = @typeInfo(Key.Ptr.Addr).Union.tag_type.?;
|
||||
if (@as(AddrTag, a_info.addr) != @as(AddrTag, b_info.addr)) return false;
|
||||
if (@as(Key.Ptr.BaseAddr.Tag, a_info.base_addr) != @as(Key.Ptr.BaseAddr.Tag, b_info.base_addr)) return false;
|
||||
|
||||
return switch (a_info.addr) {
|
||||
.decl => |a_decl| a_decl == b_info.addr.decl,
|
||||
.comptime_alloc => |a_alloc| a_alloc == b_info.addr.comptime_alloc,
|
||||
.anon_decl => |ad| ad.val == b_info.addr.anon_decl.val and
|
||||
ad.orig_ty == b_info.addr.anon_decl.orig_ty,
|
||||
.int => |a_int| a_int == b_info.addr.int,
|
||||
.eu_payload => |a_eu_payload| a_eu_payload == b_info.addr.eu_payload,
|
||||
.opt_payload => |a_opt_payload| a_opt_payload == b_info.addr.opt_payload,
|
||||
.comptime_field => |a_comptime_field| a_comptime_field == b_info.addr.comptime_field,
|
||||
.elem => |a_elem| std.meta.eql(a_elem, b_info.addr.elem),
|
||||
.field => |a_field| std.meta.eql(a_field, b_info.addr.field),
|
||||
return switch (a_info.base_addr) {
|
||||
.decl => |a_decl| a_decl == b_info.base_addr.decl,
|
||||
.comptime_alloc => |a_alloc| a_alloc == b_info.base_addr.comptime_alloc,
|
||||
.anon_decl => |ad| ad.val == b_info.base_addr.anon_decl.val and
|
||||
ad.orig_ty == b_info.base_addr.anon_decl.orig_ty,
|
||||
.int => true,
|
||||
.eu_payload => |a_eu_payload| a_eu_payload == b_info.base_addr.eu_payload,
|
||||
.opt_payload => |a_opt_payload| a_opt_payload == b_info.base_addr.opt_payload,
|
||||
.comptime_field => |a_comptime_field| a_comptime_field == b_info.base_addr.comptime_field,
|
||||
.arr_elem => |a_elem| std.meta.eql(a_elem, b_info.base_addr.arr_elem),
|
||||
.field => |a_field| std.meta.eql(a_field, b_info.base_addr.field),
|
||||
};
|
||||
},
|
||||
|
||||
@ -2271,6 +2326,46 @@ pub const LoadedStructType = struct {
|
||||
.struct_type = s,
|
||||
};
|
||||
}
|
||||
|
||||
pub const ReverseRuntimeOrderIterator = struct {
|
||||
ip: *InternPool,
|
||||
last_index: u32,
|
||||
struct_type: InternPool.LoadedStructType,
|
||||
|
||||
pub fn next(it: *@This()) ?u32 {
|
||||
if (it.last_index == 0)
|
||||
return null;
|
||||
|
||||
if (it.struct_type.hasReorderedFields()) {
|
||||
it.last_index -= 1;
|
||||
const order = it.struct_type.runtime_order.get(it.ip);
|
||||
while (order[it.last_index] == .omitted) {
|
||||
it.last_index -= 1;
|
||||
if (it.last_index == 0)
|
||||
return null;
|
||||
}
|
||||
return order[it.last_index].toInt();
|
||||
}
|
||||
|
||||
it.last_index -= 1;
|
||||
while (it.struct_type.fieldIsComptime(it.ip, it.last_index)) {
|
||||
it.last_index -= 1;
|
||||
if (it.last_index == 0)
|
||||
return null;
|
||||
}
|
||||
|
||||
return it.last_index;
|
||||
}
|
||||
};
|
||||
|
||||
pub fn iterateRuntimeOrderReverse(s: @This(), ip: *InternPool) ReverseRuntimeOrderIterator {
|
||||
assert(s.layout != .@"packed");
|
||||
return .{
|
||||
.ip = ip,
|
||||
.last_index = s.field_types.len,
|
||||
.struct_type = s,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
|
||||
@ -2836,7 +2931,7 @@ pub const Index = enum(u32) {
|
||||
ptr_anon_decl: struct { data: *PtrAnonDecl },
|
||||
ptr_anon_decl_aligned: struct { data: *PtrAnonDeclAligned },
|
||||
ptr_comptime_field: struct { data: *PtrComptimeField },
|
||||
ptr_int: struct { data: *PtrBase },
|
||||
ptr_int: struct { data: *PtrInt },
|
||||
ptr_eu_payload: struct { data: *PtrBase },
|
||||
ptr_opt_payload: struct { data: *PtrBase },
|
||||
ptr_elem: struct { data: *PtrBaseIndex },
|
||||
@ -3304,7 +3399,7 @@ pub const Tag = enum(u8) {
|
||||
/// data is extra index of `PtrComptimeField`, which contains the pointer type and field value.
|
||||
ptr_comptime_field,
|
||||
/// A pointer with an integer value.
|
||||
/// data is extra index of `PtrBase`, which contains the type and address.
|
||||
/// data is extra index of `PtrInt`, which contains the type and address (byte offset from 0).
|
||||
/// Only pointer types are allowed to have this encoding. Optional types must use
|
||||
/// `opt_payload` or `opt_null`.
|
||||
ptr_int,
|
||||
@ -3497,7 +3592,7 @@ pub const Tag = enum(u8) {
|
||||
.ptr_anon_decl => PtrAnonDecl,
|
||||
.ptr_anon_decl_aligned => PtrAnonDeclAligned,
|
||||
.ptr_comptime_field => PtrComptimeField,
|
||||
.ptr_int => PtrBase,
|
||||
.ptr_int => PtrInt,
|
||||
.ptr_eu_payload => PtrBase,
|
||||
.ptr_opt_payload => PtrBase,
|
||||
.ptr_elem => PtrBaseIndex,
|
||||
@ -4153,11 +4248,37 @@ pub const PackedU64 = packed struct(u64) {
|
||||
pub const PtrDecl = struct {
|
||||
ty: Index,
|
||||
decl: DeclIndex,
|
||||
byte_offset_a: u32,
|
||||
byte_offset_b: u32,
|
||||
fn init(ty: Index, decl: DeclIndex, byte_offset: u64) @This() {
|
||||
return .{
|
||||
.ty = ty,
|
||||
.decl = decl,
|
||||
.byte_offset_a = @intCast(byte_offset >> 32),
|
||||
.byte_offset_b = @truncate(byte_offset),
|
||||
};
|
||||
}
|
||||
fn byteOffset(data: @This()) u64 {
|
||||
return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
|
||||
}
|
||||
};
|
||||
|
||||
pub const PtrAnonDecl = struct {
|
||||
ty: Index,
|
||||
val: Index,
|
||||
byte_offset_a: u32,
|
||||
byte_offset_b: u32,
|
||||
fn init(ty: Index, val: Index, byte_offset: u64) @This() {
|
||||
return .{
|
||||
.ty = ty,
|
||||
.val = val,
|
||||
.byte_offset_a = @intCast(byte_offset >> 32),
|
||||
.byte_offset_b = @truncate(byte_offset),
|
||||
};
|
||||
}
|
||||
fn byteOffset(data: @This()) u64 {
|
||||
return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
|
||||
}
|
||||
};
|
||||
|
||||
pub const PtrAnonDeclAligned = struct {
|
||||
@ -4165,27 +4286,110 @@ pub const PtrAnonDeclAligned = struct {
|
||||
val: Index,
|
||||
/// Must be nonequal to `ty`. Only the alignment from this value is important.
|
||||
orig_ty: Index,
|
||||
byte_offset_a: u32,
|
||||
byte_offset_b: u32,
|
||||
fn init(ty: Index, val: Index, orig_ty: Index, byte_offset: u64) @This() {
|
||||
return .{
|
||||
.ty = ty,
|
||||
.val = val,
|
||||
.orig_ty = orig_ty,
|
||||
.byte_offset_a = @intCast(byte_offset >> 32),
|
||||
.byte_offset_b = @truncate(byte_offset),
|
||||
};
|
||||
}
|
||||
fn byteOffset(data: @This()) u64 {
|
||||
return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
|
||||
}
|
||||
};
|
||||
|
||||
pub const PtrComptimeAlloc = struct {
|
||||
ty: Index,
|
||||
index: ComptimeAllocIndex,
|
||||
byte_offset_a: u32,
|
||||
byte_offset_b: u32,
|
||||
fn init(ty: Index, index: ComptimeAllocIndex, byte_offset: u64) @This() {
|
||||
return .{
|
||||
.ty = ty,
|
||||
.index = index,
|
||||
.byte_offset_a = @intCast(byte_offset >> 32),
|
||||
.byte_offset_b = @truncate(byte_offset),
|
||||
};
|
||||
}
|
||||
fn byteOffset(data: @This()) u64 {
|
||||
return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
|
||||
}
|
||||
};
|
||||
|
||||
pub const PtrComptimeField = struct {
|
||||
ty: Index,
|
||||
field_val: Index,
|
||||
byte_offset_a: u32,
|
||||
byte_offset_b: u32,
|
||||
fn init(ty: Index, field_val: Index, byte_offset: u64) @This() {
|
||||
return .{
|
||||
.ty = ty,
|
||||
.field_val = field_val,
|
||||
.byte_offset_a = @intCast(byte_offset >> 32),
|
||||
.byte_offset_b = @truncate(byte_offset),
|
||||
};
|
||||
}
|
||||
fn byteOffset(data: @This()) u64 {
|
||||
return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
|
||||
}
|
||||
};
|
||||
|
||||
pub const PtrBase = struct {
|
||||
ty: Index,
|
||||
base: Index,
|
||||
byte_offset_a: u32,
|
||||
byte_offset_b: u32,
|
||||
fn init(ty: Index, base: Index, byte_offset: u64) @This() {
|
||||
return .{
|
||||
.ty = ty,
|
||||
.base = base,
|
||||
.byte_offset_a = @intCast(byte_offset >> 32),
|
||||
.byte_offset_b = @truncate(byte_offset),
|
||||
};
|
||||
}
|
||||
fn byteOffset(data: @This()) u64 {
|
||||
return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
|
||||
}
|
||||
};
|
||||
|
||||
pub const PtrBaseIndex = struct {
|
||||
ty: Index,
|
||||
base: Index,
|
||||
index: Index,
|
||||
byte_offset_a: u32,
|
||||
byte_offset_b: u32,
|
||||
fn init(ty: Index, base: Index, index: Index, byte_offset: u64) @This() {
|
||||
return .{
|
||||
.ty = ty,
|
||||
.base = base,
|
||||
.index = index,
|
||||
.byte_offset_a = @intCast(byte_offset >> 32),
|
||||
.byte_offset_b = @truncate(byte_offset),
|
||||
};
|
||||
}
|
||||
fn byteOffset(data: @This()) u64 {
|
||||
return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
|
||||
}
|
||||
};
|
||||
|
||||
pub const PtrInt = struct {
|
||||
ty: Index,
|
||||
byte_offset_a: u32,
|
||||
byte_offset_b: u32,
|
||||
fn init(ty: Index, byte_offset: u64) @This() {
|
||||
return .{
|
||||
.ty = ty,
|
||||
.byte_offset_a = @intCast(byte_offset >> 32),
|
||||
.byte_offset_b = @truncate(byte_offset),
|
||||
};
|
||||
}
|
||||
fn byteOffset(data: @This()) u64 {
|
||||
return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
|
||||
}
|
||||
};
|
||||
|
||||
pub const PtrSlice = struct {
|
||||
@ -4569,78 +4773,55 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
|
||||
},
|
||||
.ptr_decl => {
|
||||
const info = ip.extraData(PtrDecl, data);
|
||||
return .{ .ptr = .{
|
||||
.ty = info.ty,
|
||||
.addr = .{ .decl = info.decl },
|
||||
} };
|
||||
return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .decl = info.decl }, .byte_offset = info.byteOffset() } };
|
||||
},
|
||||
.ptr_comptime_alloc => {
|
||||
const info = ip.extraData(PtrComptimeAlloc, data);
|
||||
return .{ .ptr = .{
|
||||
.ty = info.ty,
|
||||
.addr = .{ .comptime_alloc = info.index },
|
||||
} };
|
||||
return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .comptime_alloc = info.index }, .byte_offset = info.byteOffset() } };
|
||||
},
|
||||
.ptr_anon_decl => {
|
||||
const info = ip.extraData(PtrAnonDecl, data);
|
||||
return .{ .ptr = .{
|
||||
.ty = info.ty,
|
||||
.addr = .{ .anon_decl = .{
|
||||
.val = info.val,
|
||||
.orig_ty = info.ty,
|
||||
} },
|
||||
} };
|
||||
return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .anon_decl = .{
|
||||
.val = info.val,
|
||||
.orig_ty = info.ty,
|
||||
} }, .byte_offset = info.byteOffset() } };
|
||||
},
|
||||
.ptr_anon_decl_aligned => {
|
||||
const info = ip.extraData(PtrAnonDeclAligned, data);
|
||||
return .{ .ptr = .{
|
||||
.ty = info.ty,
|
||||
.addr = .{ .anon_decl = .{
|
||||
.val = info.val,
|
||||
.orig_ty = info.orig_ty,
|
||||
} },
|
||||
} };
|
||||
return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .anon_decl = .{
|
||||
.val = info.val,
|
||||
.orig_ty = info.orig_ty,
|
||||
} }, .byte_offset = info.byteOffset() } };
|
||||
},
|
||||
.ptr_comptime_field => {
|
||||
const info = ip.extraData(PtrComptimeField, data);
|
||||
return .{ .ptr = .{
|
||||
.ty = info.ty,
|
||||
.addr = .{ .comptime_field = info.field_val },
|
||||
} };
|
||||
return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .comptime_field = info.field_val }, .byte_offset = info.byteOffset() } };
|
||||
},
|
||||
.ptr_int => {
|
||||
const info = ip.extraData(PtrBase, data);
|
||||
const info = ip.extraData(PtrInt, data);
|
||||
return .{ .ptr = .{
|
||||
.ty = info.ty,
|
||||
.addr = .{ .int = info.base },
|
||||
.base_addr = .int,
|
||||
.byte_offset = info.byteOffset(),
|
||||
} };
|
||||
},
|
||||
.ptr_eu_payload => {
|
||||
const info = ip.extraData(PtrBase, data);
|
||||
return .{ .ptr = .{
|
||||
.ty = info.ty,
|
||||
.addr = .{ .eu_payload = info.base },
|
||||
} };
|
||||
return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .eu_payload = info.base }, .byte_offset = info.byteOffset() } };
|
||||
},
|
||||
.ptr_opt_payload => {
|
||||
const info = ip.extraData(PtrBase, data);
|
||||
return .{ .ptr = .{
|
||||
.ty = info.ty,
|
||||
.addr = .{ .opt_payload = info.base },
|
||||
} };
|
||||
return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .opt_payload = info.base }, .byte_offset = info.byteOffset() } };
|
||||
},
|
||||
.ptr_elem => {
|
||||
// Avoid `indexToKey` recursion by asserting the tag encoding.
|
||||
const info = ip.extraData(PtrBaseIndex, data);
|
||||
const index_item = ip.items.get(@intFromEnum(info.index));
|
||||
return switch (index_item.tag) {
|
||||
.int_usize => .{ .ptr = .{
|
||||
.ty = info.ty,
|
||||
.addr = .{ .elem = .{
|
||||
.base = info.base,
|
||||
.index = index_item.data,
|
||||
} },
|
||||
} },
|
||||
.int_usize => .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .arr_elem = .{
|
||||
.base = info.base,
|
||||
.index = index_item.data,
|
||||
} }, .byte_offset = info.byteOffset() } },
|
||||
.int_positive => @panic("TODO"), // implement along with behavior test coverage
|
||||
else => unreachable,
|
||||
};
|
||||
@ -4650,13 +4831,10 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
|
||||
const info = ip.extraData(PtrBaseIndex, data);
|
||||
const index_item = ip.items.get(@intFromEnum(info.index));
|
||||
return switch (index_item.tag) {
|
||||
.int_usize => .{ .ptr = .{
|
||||
.ty = info.ty,
|
||||
.addr = .{ .field = .{
|
||||
.base = info.base,
|
||||
.index = index_item.data,
|
||||
} },
|
||||
} },
|
||||
.int_usize => .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .field = .{
|
||||
.base = info.base,
|
||||
.index = index_item.data,
|
||||
} }, .byte_offset = info.byteOffset() } },
|
||||
.int_positive => @panic("TODO"), // implement along with behavior test coverage
|
||||
else => unreachable,
|
||||
};
|
||||
@ -5211,57 +5389,40 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
|
||||
.ptr => |ptr| {
|
||||
const ptr_type = ip.indexToKey(ptr.ty).ptr_type;
|
||||
assert(ptr_type.flags.size != .Slice);
|
||||
ip.items.appendAssumeCapacity(switch (ptr.addr) {
|
||||
ip.items.appendAssumeCapacity(switch (ptr.base_addr) {
|
||||
.decl => |decl| .{
|
||||
.tag = .ptr_decl,
|
||||
.data = try ip.addExtra(gpa, PtrDecl{
|
||||
.ty = ptr.ty,
|
||||
.decl = decl,
|
||||
}),
|
||||
.data = try ip.addExtra(gpa, PtrDecl.init(ptr.ty, decl, ptr.byte_offset)),
|
||||
},
|
||||
.comptime_alloc => |alloc_index| .{
|
||||
.tag = .ptr_comptime_alloc,
|
||||
.data = try ip.addExtra(gpa, PtrComptimeAlloc{
|
||||
.ty = ptr.ty,
|
||||
.index = alloc_index,
|
||||
}),
|
||||
.data = try ip.addExtra(gpa, PtrComptimeAlloc.init(ptr.ty, alloc_index, ptr.byte_offset)),
|
||||
},
|
||||
.anon_decl => |anon_decl| if (ptrsHaveSameAlignment(ip, ptr.ty, ptr_type, anon_decl.orig_ty)) item: {
|
||||
if (ptr.ty != anon_decl.orig_ty) {
|
||||
_ = ip.map.pop();
|
||||
var new_key = key;
|
||||
new_key.ptr.addr.anon_decl.orig_ty = ptr.ty;
|
||||
new_key.ptr.base_addr.anon_decl.orig_ty = ptr.ty;
|
||||
const new_gop = try ip.map.getOrPutAdapted(gpa, new_key, adapter);
|
||||
if (new_gop.found_existing) return @enumFromInt(new_gop.index);
|
||||
}
|
||||
break :item .{
|
||||
.tag = .ptr_anon_decl,
|
||||
.data = try ip.addExtra(gpa, PtrAnonDecl{
|
||||
.ty = ptr.ty,
|
||||
.val = anon_decl.val,
|
||||
}),
|
||||
.data = try ip.addExtra(gpa, PtrAnonDecl.init(ptr.ty, anon_decl.val, ptr.byte_offset)),
|
||||
};
|
||||
} else .{
|
||||
.tag = .ptr_anon_decl_aligned,
|
||||
.data = try ip.addExtra(gpa, PtrAnonDeclAligned{
|
||||
.ty = ptr.ty,
|
||||
.val = anon_decl.val,
|
||||
.orig_ty = anon_decl.orig_ty,
|
||||
}),
|
||||
.data = try ip.addExtra(gpa, PtrAnonDeclAligned.init(ptr.ty, anon_decl.val, anon_decl.orig_ty, ptr.byte_offset)),
|
||||
},
|
||||
.comptime_field => |field_val| item: {
|
||||
assert(field_val != .none);
|
||||
break :item .{
|
||||
.tag = .ptr_comptime_field,
|
||||
.data = try ip.addExtra(gpa, PtrComptimeField{
|
||||
.ty = ptr.ty,
|
||||
.field_val = field_val,
|
||||
}),
|
||||
.data = try ip.addExtra(gpa, PtrComptimeField.init(ptr.ty, field_val, ptr.byte_offset)),
|
||||
};
|
||||
},
|
||||
.int, .eu_payload, .opt_payload => |base| item: {
|
||||
switch (ptr.addr) {
|
||||
.int => assert(ip.typeOf(base) == .usize_type),
|
||||
.eu_payload, .opt_payload => |base| item: {
|
||||
switch (ptr.base_addr) {
|
||||
.eu_payload => assert(ip.indexToKey(
|
||||
ip.indexToKey(ip.typeOf(base)).ptr_type.child,
|
||||
) == .error_union_type),
|
||||
@ -5271,40 +5432,40 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
|
||||
else => unreachable,
|
||||
}
|
||||
break :item .{
|
||||
.tag = switch (ptr.addr) {
|
||||
.int => .ptr_int,
|
||||
.tag = switch (ptr.base_addr) {
|
||||
.eu_payload => .ptr_eu_payload,
|
||||
.opt_payload => .ptr_opt_payload,
|
||||
else => unreachable,
|
||||
},
|
||||
.data = try ip.addExtra(gpa, PtrBase{
|
||||
.ty = ptr.ty,
|
||||
.base = base,
|
||||
}),
|
||||
.data = try ip.addExtra(gpa, PtrBase.init(ptr.ty, base, ptr.byte_offset)),
|
||||
};
|
||||
},
|
||||
.elem, .field => |base_index| item: {
|
||||
.int => .{
|
||||
.tag = .ptr_int,
|
||||
.data = try ip.addExtra(gpa, PtrInt.init(ptr.ty, ptr.byte_offset)),
|
||||
},
|
||||
.arr_elem, .field => |base_index| item: {
|
||||
const base_ptr_type = ip.indexToKey(ip.typeOf(base_index.base)).ptr_type;
|
||||
switch (ptr.addr) {
|
||||
.elem => assert(base_ptr_type.flags.size == .Many),
|
||||
switch (ptr.base_addr) {
|
||||
.arr_elem => assert(base_ptr_type.flags.size == .Many),
|
||||
.field => {
|
||||
assert(base_ptr_type.flags.size == .One);
|
||||
switch (ip.indexToKey(base_ptr_type.child)) {
|
||||
.anon_struct_type => |anon_struct_type| {
|
||||
assert(ptr.addr == .field);
|
||||
assert(ptr.base_addr == .field);
|
||||
assert(base_index.index < anon_struct_type.types.len);
|
||||
},
|
||||
.struct_type => {
|
||||
assert(ptr.addr == .field);
|
||||
assert(ptr.base_addr == .field);
|
||||
assert(base_index.index < ip.loadStructType(base_ptr_type.child).field_types.len);
|
||||
},
|
||||
.union_type => {
|
||||
const union_type = ip.loadUnionType(base_ptr_type.child);
|
||||
assert(ptr.addr == .field);
|
||||
assert(ptr.base_addr == .field);
|
||||
assert(base_index.index < union_type.field_types.len);
|
||||
},
|
||||
.ptr_type => |slice_type| {
|
||||
assert(ptr.addr == .field);
|
||||
assert(ptr.base_addr == .field);
|
||||
assert(slice_type.flags.size == .Slice);
|
||||
assert(base_index.index < 2);
|
||||
},
|
||||
@ -5321,16 +5482,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
|
||||
assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing);
|
||||
try ip.items.ensureUnusedCapacity(gpa, 1);
|
||||
break :item .{
|
||||
.tag = switch (ptr.addr) {
|
||||
.elem => .ptr_elem,
|
||||
.tag = switch (ptr.base_addr) {
|
||||
.arr_elem => .ptr_elem,
|
||||
.field => .ptr_field,
|
||||
else => unreachable,
|
||||
},
|
||||
.data = try ip.addExtra(gpa, PtrBaseIndex{
|
||||
.ty = ptr.ty,
|
||||
.base = base_index.base,
|
||||
.index = index_index,
|
||||
}),
|
||||
.data = try ip.addExtra(gpa, PtrBaseIndex.init(ptr.ty, base_index.base, index_index, ptr.byte_offset)),
|
||||
};
|
||||
},
|
||||
});
|
||||
@ -7584,13 +7741,15 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
|
||||
if (ip.isPointerType(new_ty)) switch (ip.indexToKey(new_ty).ptr_type.flags.size) {
|
||||
.One, .Many, .C => return ip.get(gpa, .{ .ptr = .{
|
||||
.ty = new_ty,
|
||||
.addr = .{ .int = .zero_usize },
|
||||
.base_addr = .int,
|
||||
.byte_offset = 0,
|
||||
} }),
|
||||
.Slice => return ip.get(gpa, .{ .slice = .{
|
||||
.ty = new_ty,
|
||||
.ptr = try ip.get(gpa, .{ .ptr = .{
|
||||
.ty = ip.slicePtrType(new_ty),
|
||||
.addr = .{ .int = .zero_usize },
|
||||
.base_addr = .int,
|
||||
.byte_offset = 0,
|
||||
} }),
|
||||
.len = try ip.get(gpa, .{ .undef = .usize_type }),
|
||||
} }),
|
||||
@ -7630,10 +7789,15 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
|
||||
.ty = new_ty,
|
||||
.int = try ip.getCoerced(gpa, val, ip.loadEnumType(new_ty).tag_ty),
|
||||
} }),
|
||||
.ptr_type => return ip.get(gpa, .{ .ptr = .{
|
||||
.ty = new_ty,
|
||||
.addr = .{ .int = try ip.getCoerced(gpa, val, .usize_type) },
|
||||
} }),
|
||||
.ptr_type => switch (int.storage) {
|
||||
inline .u64, .i64 => |int_val| return ip.get(gpa, .{ .ptr = .{
|
||||
.ty = new_ty,
|
||||
.base_addr = .int,
|
||||
.byte_offset = @intCast(int_val),
|
||||
} }),
|
||||
.big_int => unreachable, // must be a usize
|
||||
.lazy_align, .lazy_size => {},
|
||||
},
|
||||
else => if (ip.isIntegerType(new_ty))
|
||||
return getCoercedInts(ip, gpa, int, new_ty),
|
||||
},
|
||||
@ -7684,11 +7848,15 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
|
||||
.ptr => |ptr| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size != .Slice)
|
||||
return ip.get(gpa, .{ .ptr = .{
|
||||
.ty = new_ty,
|
||||
.addr = ptr.addr,
|
||||
.base_addr = ptr.base_addr,
|
||||
.byte_offset = ptr.byte_offset,
|
||||
} })
|
||||
else if (ip.isIntegerType(new_ty))
|
||||
switch (ptr.addr) {
|
||||
.int => |int| return ip.getCoerced(gpa, int, new_ty),
|
||||
switch (ptr.base_addr) {
|
||||
.int => return ip.get(gpa, .{ .int = .{
|
||||
.ty = .usize_type,
|
||||
.storage = .{ .u64 = @intCast(ptr.byte_offset) },
|
||||
} }),
|
||||
else => {},
|
||||
},
|
||||
.opt => |opt| switch (ip.indexToKey(new_ty)) {
|
||||
@ -7696,13 +7864,15 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
|
||||
.none => switch (ptr_type.flags.size) {
|
||||
.One, .Many, .C => try ip.get(gpa, .{ .ptr = .{
|
||||
.ty = new_ty,
|
||||
.addr = .{ .int = .zero_usize },
|
||||
.base_addr = .int,
|
||||
.byte_offset = 0,
|
||||
} }),
|
||||
.Slice => try ip.get(gpa, .{ .slice = .{
|
||||
.ty = new_ty,
|
||||
.ptr = try ip.get(gpa, .{ .ptr = .{
|
||||
.ty = ip.slicePtrType(new_ty),
|
||||
.addr = .{ .int = .zero_usize },
|
||||
.base_addr = .int,
|
||||
.byte_offset = 0,
|
||||
} }),
|
||||
.len = try ip.get(gpa, .{ .undef = .usize_type }),
|
||||
} }),
|
||||
@ -8181,7 +8351,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
|
||||
.ptr_anon_decl => @sizeOf(PtrAnonDecl),
|
||||
.ptr_anon_decl_aligned => @sizeOf(PtrAnonDeclAligned),
|
||||
.ptr_comptime_field => @sizeOf(PtrComptimeField),
|
||||
.ptr_int => @sizeOf(PtrBase),
|
||||
.ptr_int => @sizeOf(PtrInt),
|
||||
.ptr_eu_payload => @sizeOf(PtrBase),
|
||||
.ptr_opt_payload => @sizeOf(PtrBase),
|
||||
.ptr_elem => @sizeOf(PtrBaseIndex),
|
||||
@ -8854,13 +9024,15 @@ pub fn getBackingDecl(ip: *const InternPool, val: Index) OptionalDeclIndex {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.Addr.Tag {
|
||||
pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.BaseAddr.Tag {
|
||||
var base = @intFromEnum(val);
|
||||
while (true) {
|
||||
switch (ip.items.items(.tag)[base]) {
|
||||
.ptr_decl => return .decl,
|
||||
.ptr_comptime_alloc => return .comptime_alloc,
|
||||
.ptr_anon_decl, .ptr_anon_decl_aligned => return .anon_decl,
|
||||
.ptr_anon_decl,
|
||||
.ptr_anon_decl_aligned,
|
||||
=> return .anon_decl,
|
||||
.ptr_comptime_field => return .comptime_field,
|
||||
.ptr_int => return .int,
|
||||
inline .ptr_eu_payload,
|
||||
|
||||
@ -528,21 +528,6 @@ pub const Decl = struct {
|
||||
return zcu.namespacePtrUnwrap(decl.getInnerNamespaceIndex(zcu));
|
||||
}
|
||||
|
||||
pub fn dump(decl: *Decl) void {
|
||||
const loc = std.zig.findLineColumn(decl.scope.source.bytes, decl.src);
|
||||
std.debug.print("{s}:{d}:{d} name={d} status={s}", .{
|
||||
decl.scope.sub_file_path,
|
||||
loc.line + 1,
|
||||
loc.column + 1,
|
||||
@intFromEnum(decl.name),
|
||||
@tagName(decl.analysis),
|
||||
});
|
||||
if (decl.has_tv) {
|
||||
std.debug.print(" val={}", .{decl.val});
|
||||
}
|
||||
std.debug.print("\n", .{});
|
||||
}
|
||||
|
||||
pub fn getFileScope(decl: Decl, zcu: *Zcu) *File {
|
||||
return zcu.namespacePtr(decl.src_namespace).file_scope;
|
||||
}
|
||||
@ -660,6 +645,22 @@ pub const Decl = struct {
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
pub fn declPtrType(decl: Decl, zcu: *Zcu) !Type {
|
||||
assert(decl.has_tv);
|
||||
const decl_ty = decl.typeOf(zcu);
|
||||
return zcu.ptrType(.{
|
||||
.child = decl_ty.toIntern(),
|
||||
.flags = .{
|
||||
.alignment = if (decl.alignment == decl_ty.abiAlignment(zcu))
|
||||
.none
|
||||
else
|
||||
decl.alignment,
|
||||
.address_space = decl.@"addrspace",
|
||||
.is_const = decl.getOwnedVariable(zcu) == null,
|
||||
},
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
/// This state is attached to every Decl when Module emit_h is non-null.
|
||||
@ -3535,6 +3536,10 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
|
||||
}
|
||||
|
||||
log.debug("semaDecl '{d}'", .{@intFromEnum(decl_index)});
|
||||
log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(mod)).fmt(ip)});
|
||||
defer blk: {
|
||||
log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(mod) catch break :blk).fmt(ip)});
|
||||
}
|
||||
|
||||
const old_has_tv = decl.has_tv;
|
||||
// The following values are ignored if `!old_has_tv`
|
||||
@ -4122,10 +4127,11 @@ fn newEmbedFile(
|
||||
})).toIntern();
|
||||
const ptr_val = try ip.get(gpa, .{ .ptr = .{
|
||||
.ty = ptr_ty,
|
||||
.addr = .{ .anon_decl = .{
|
||||
.base_addr = .{ .anon_decl = .{
|
||||
.val = array_val,
|
||||
.orig_ty = ptr_ty,
|
||||
} },
|
||||
.byte_offset = 0,
|
||||
} });
|
||||
|
||||
result.* = new_file;
|
||||
@ -4489,6 +4495,11 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
|
||||
const decl_index = func.owner_decl;
|
||||
const decl = mod.declPtr(decl_index);
|
||||
|
||||
log.debug("func name '{}'", .{(try decl.fullyQualifiedName(mod)).fmt(ip)});
|
||||
defer blk: {
|
||||
log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(mod) catch break :blk).fmt(ip)});
|
||||
}
|
||||
|
||||
mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.Depender.wrap(.{ .func = func_index }));
|
||||
|
||||
var comptime_err_ret_trace = std.ArrayList(SrcLoc).init(gpa);
|
||||
@ -5332,7 +5343,7 @@ pub fn populateTestFunctions(
|
||||
const decl = mod.declPtr(decl_index);
|
||||
const test_fn_ty = decl.typeOf(mod).slicePtrFieldType(mod).childType(mod);
|
||||
|
||||
const array_anon_decl: InternPool.Key.Ptr.Addr.AnonDecl = array: {
|
||||
const array_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = array: {
|
||||
// Add mod.test_functions to an array decl then make the test_functions
|
||||
// decl reference it as a slice.
|
||||
const test_fn_vals = try gpa.alloc(InternPool.Index, mod.test_functions.count());
|
||||
@ -5342,7 +5353,7 @@ pub fn populateTestFunctions(
|
||||
const test_decl = mod.declPtr(test_decl_index);
|
||||
const test_decl_name = try test_decl.fullyQualifiedName(mod);
|
||||
const test_decl_name_len = test_decl_name.length(ip);
|
||||
const test_name_anon_decl: InternPool.Key.Ptr.Addr.AnonDecl = n: {
|
||||
const test_name_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = n: {
|
||||
const test_name_ty = try mod.arrayType(.{
|
||||
.len = test_decl_name_len,
|
||||
.child = .u8_type,
|
||||
@ -5363,7 +5374,8 @@ pub fn populateTestFunctions(
|
||||
.ty = .slice_const_u8_type,
|
||||
.ptr = try mod.intern(.{ .ptr = .{
|
||||
.ty = .manyptr_const_u8_type,
|
||||
.addr = .{ .anon_decl = test_name_anon_decl },
|
||||
.base_addr = .{ .anon_decl = test_name_anon_decl },
|
||||
.byte_offset = 0,
|
||||
} }),
|
||||
.len = try mod.intern(.{ .int = .{
|
||||
.ty = .usize_type,
|
||||
@ -5378,7 +5390,8 @@ pub fn populateTestFunctions(
|
||||
.is_const = true,
|
||||
},
|
||||
} }),
|
||||
.addr = .{ .decl = test_decl_index },
|
||||
.base_addr = .{ .decl = test_decl_index },
|
||||
.byte_offset = 0,
|
||||
} }),
|
||||
};
|
||||
test_fn_val.* = try mod.intern(.{ .aggregate = .{
|
||||
@ -5415,7 +5428,8 @@ pub fn populateTestFunctions(
|
||||
.ty = new_ty.toIntern(),
|
||||
.ptr = try mod.intern(.{ .ptr = .{
|
||||
.ty = new_ty.slicePtrFieldType(mod).toIntern(),
|
||||
.addr = .{ .anon_decl = array_anon_decl },
|
||||
.base_addr = .{ .anon_decl = array_anon_decl },
|
||||
.byte_offset = 0,
|
||||
} }),
|
||||
.len = (try mod.intValue(Type.usize, mod.test_functions.count())).toIntern(),
|
||||
} });
|
||||
@ -5680,9 +5694,11 @@ pub fn errorSetFromUnsortedNames(
|
||||
/// Supports only pointers, not pointer-like optionals.
|
||||
pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
|
||||
assert(ty.zigTypeTag(mod) == .Pointer and !ty.isSlice(mod));
|
||||
assert(x != 0 or ty.isAllowzeroPtr(mod));
|
||||
const i = try intern(mod, .{ .ptr = .{
|
||||
.ty = ty.toIntern(),
|
||||
.addr = .{ .int = (try mod.intValue_u64(Type.usize, x)).toIntern() },
|
||||
.base_addr = .int,
|
||||
.byte_offset = x,
|
||||
} });
|
||||
return Value.fromInterned(i);
|
||||
}
|
||||
|
||||
2410
src/Sema.zig
2410
src/Sema.zig
File diff suppressed because it is too large
Load Diff
772
src/Sema/bitcast.zig
Normal file
772
src/Sema/bitcast.zig
Normal file
@ -0,0 +1,772 @@
|
||||
//! This file contains logic for bit-casting arbitrary values at comptime, including splicing
|
||||
//! bits together for comptime stores of bit-pointers. The strategy is to "flatten" values to
|
||||
//! a sequence of values in *packed* memory, and then unflatten through a combination of special
|
||||
//! cases (particularly for pointers and `undefined` values) and in-memory buffer reinterprets.
|
||||
//!
|
||||
//! This is a little awkward on big-endian targets, as non-packed datastructures (e.g. `extern struct`)
|
||||
//! have their fields reversed when represented as packed memory on such targets.
|
||||
|
||||
/// If `host_bits` is `0`, attempts to convert the memory at offset
|
||||
/// `byte_offset` into `val` to a non-packed value of type `dest_ty`,
|
||||
/// ignoring `bit_offset`.
|
||||
///
|
||||
/// Otherwise, `byte_offset` is an offset in bytes into `val` to a
|
||||
/// non-packed value consisting of `host_bits` bits. A value of type
|
||||
/// `dest_ty` will be interpreted at a packed offset of `bit_offset`
|
||||
/// into this value.
|
||||
///
|
||||
/// Returns `null` if the operation must be performed at runtime.
|
||||
pub fn bitCast(
|
||||
sema: *Sema,
|
||||
val: Value,
|
||||
dest_ty: Type,
|
||||
byte_offset: u64,
|
||||
host_bits: u64,
|
||||
bit_offset: u64,
|
||||
) CompileError!?Value {
|
||||
return bitCastInner(sema, val, dest_ty, byte_offset, host_bits, bit_offset) catch |err| switch (err) {
|
||||
error.ReinterpretDeclRef => return null,
|
||||
error.IllDefinedMemoryLayout => unreachable,
|
||||
error.Unimplemented => @panic("unimplemented bitcast"),
|
||||
else => |e| return e,
|
||||
};
|
||||
}
|
||||
|
||||
/// Uses bitcasting to splice the value `splice_val` into `val`,
|
||||
/// replacing overlapping bits and returning the modified value.
|
||||
///
|
||||
/// If `host_bits` is `0`, splices `splice_val` at an offset
|
||||
/// `byte_offset` bytes into the virtual memory of `val`, ignoring
|
||||
/// `bit_offset`.
|
||||
///
|
||||
/// Otherwise, `byte_offset` is an offset into bytes into `val` to
|
||||
/// a non-packed value consisting of `host_bits` bits. The value
|
||||
/// `splice_val` will be placed at a packed offset of `bit_offset`
|
||||
/// into this value.
|
||||
pub fn bitCastSplice(
|
||||
sema: *Sema,
|
||||
val: Value,
|
||||
splice_val: Value,
|
||||
byte_offset: u64,
|
||||
host_bits: u64,
|
||||
bit_offset: u64,
|
||||
) CompileError!?Value {
|
||||
return bitCastSpliceInner(sema, val, splice_val, byte_offset, host_bits, bit_offset) catch |err| switch (err) {
|
||||
error.ReinterpretDeclRef => return null,
|
||||
error.IllDefinedMemoryLayout => unreachable,
|
||||
error.Unimplemented => @panic("unimplemented bitcast"),
|
||||
else => |e| return e,
|
||||
};
|
||||
}
|
||||
|
||||
const BitCastError = CompileError || error{ ReinterpretDeclRef, IllDefinedMemoryLayout, Unimplemented };
|
||||
|
||||
fn bitCastInner(
|
||||
sema: *Sema,
|
||||
val: Value,
|
||||
dest_ty: Type,
|
||||
byte_offset: u64,
|
||||
host_bits: u64,
|
||||
bit_offset: u64,
|
||||
) BitCastError!Value {
|
||||
const zcu = sema.mod;
|
||||
const endian = zcu.getTarget().cpu.arch.endian();
|
||||
|
||||
if (dest_ty.toIntern() == val.typeOf(zcu).toIntern() and bit_offset == 0) {
|
||||
return val;
|
||||
}
|
||||
|
||||
const val_ty = val.typeOf(zcu);
|
||||
|
||||
try sema.resolveTypeLayout(val_ty);
|
||||
try sema.resolveTypeLayout(dest_ty);
|
||||
|
||||
assert(val_ty.hasWellDefinedLayout(zcu));
|
||||
|
||||
const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
|
||||
.{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
|
||||
else
|
||||
.{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
|
||||
|
||||
const skip_bits = switch (endian) {
|
||||
.little => bit_offset + byte_offset * 8,
|
||||
.big => if (host_bits > 0)
|
||||
val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
|
||||
else
|
||||
val_ty.abiSize(zcu) * 8 - byte_offset * 8 - dest_ty.bitSize(zcu),
|
||||
};
|
||||
|
||||
var unpack: UnpackValueBits = .{
|
||||
.zcu = zcu,
|
||||
.arena = sema.arena,
|
||||
.skip_bits = skip_bits,
|
||||
.remaining_bits = dest_ty.bitSize(zcu),
|
||||
.unpacked = std.ArrayList(InternPool.Index).init(sema.arena),
|
||||
};
|
||||
switch (endian) {
|
||||
.little => {
|
||||
try unpack.add(val);
|
||||
try unpack.padding(abi_pad_bits);
|
||||
},
|
||||
.big => {
|
||||
try unpack.padding(abi_pad_bits);
|
||||
try unpack.add(val);
|
||||
},
|
||||
}
|
||||
try unpack.padding(host_pad_bits);
|
||||
|
||||
var pack: PackValueBits = .{
|
||||
.zcu = zcu,
|
||||
.arena = sema.arena,
|
||||
.unpacked = unpack.unpacked.items,
|
||||
};
|
||||
return pack.get(dest_ty);
|
||||
}
|
||||
|
||||
fn bitCastSpliceInner(
|
||||
sema: *Sema,
|
||||
val: Value,
|
||||
splice_val: Value,
|
||||
byte_offset: u64,
|
||||
host_bits: u64,
|
||||
bit_offset: u64,
|
||||
) BitCastError!Value {
|
||||
const zcu = sema.mod;
|
||||
const endian = zcu.getTarget().cpu.arch.endian();
|
||||
const val_ty = val.typeOf(zcu);
|
||||
const splice_val_ty = splice_val.typeOf(zcu);
|
||||
|
||||
try sema.resolveTypeLayout(val_ty);
|
||||
try sema.resolveTypeLayout(splice_val_ty);
|
||||
|
||||
const splice_bits = splice_val_ty.bitSize(zcu);
|
||||
|
||||
const splice_offset = switch (endian) {
|
||||
.little => bit_offset + byte_offset * 8,
|
||||
.big => if (host_bits > 0)
|
||||
val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
|
||||
else
|
||||
val_ty.abiSize(zcu) * 8 - byte_offset * 8 - splice_bits,
|
||||
};
|
||||
|
||||
assert(splice_offset + splice_bits <= val_ty.abiSize(zcu) * 8);
|
||||
|
||||
const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
|
||||
.{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
|
||||
else
|
||||
.{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
|
||||
|
||||
var unpack: UnpackValueBits = .{
|
||||
.zcu = zcu,
|
||||
.arena = sema.arena,
|
||||
.skip_bits = 0,
|
||||
.remaining_bits = splice_offset,
|
||||
.unpacked = std.ArrayList(InternPool.Index).init(sema.arena),
|
||||
};
|
||||
switch (endian) {
|
||||
.little => {
|
||||
try unpack.add(val);
|
||||
try unpack.padding(abi_pad_bits);
|
||||
},
|
||||
.big => {
|
||||
try unpack.padding(abi_pad_bits);
|
||||
try unpack.add(val);
|
||||
},
|
||||
}
|
||||
try unpack.padding(host_pad_bits);
|
||||
|
||||
unpack.remaining_bits = splice_bits;
|
||||
try unpack.add(splice_val);
|
||||
|
||||
unpack.skip_bits = splice_offset + splice_bits;
|
||||
unpack.remaining_bits = val_ty.abiSize(zcu) * 8 - splice_offset - splice_bits;
|
||||
switch (endian) {
|
||||
.little => {
|
||||
try unpack.add(val);
|
||||
try unpack.padding(abi_pad_bits);
|
||||
},
|
||||
.big => {
|
||||
try unpack.padding(abi_pad_bits);
|
||||
try unpack.add(val);
|
||||
},
|
||||
}
|
||||
try unpack.padding(host_pad_bits);
|
||||
|
||||
var pack: PackValueBits = .{
|
||||
.zcu = zcu,
|
||||
.arena = sema.arena,
|
||||
.unpacked = unpack.unpacked.items,
|
||||
};
|
||||
switch (endian) {
|
||||
.little => {},
|
||||
.big => try pack.padding(abi_pad_bits),
|
||||
}
|
||||
return pack.get(val_ty);
|
||||
}
|
||||
|
||||
/// Recurses through struct fields, array elements, etc, to get a sequence of "primitive" values
|
||||
/// which are bit-packed in memory to represent a single value. `unpacked` represents a series
|
||||
/// of values in *packed* memory - therefore, on big-endian targets, the first element of this
|
||||
/// list contains bits from the *final* byte of the value.
|
||||
const UnpackValueBits = struct {
|
||||
zcu: *Zcu,
|
||||
arena: Allocator,
|
||||
skip_bits: u64,
|
||||
remaining_bits: u64,
|
||||
extra_bits: u64 = undefined,
|
||||
unpacked: std.ArrayList(InternPool.Index),
|
||||
|
||||
fn add(unpack: *UnpackValueBits, val: Value) BitCastError!void {
|
||||
const zcu = unpack.zcu;
|
||||
const endian = zcu.getTarget().cpu.arch.endian();
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
if (unpack.remaining_bits == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const ty = val.typeOf(zcu);
|
||||
const bit_size = ty.bitSize(zcu);
|
||||
|
||||
if (unpack.skip_bits >= bit_size) {
|
||||
unpack.skip_bits -= bit_size;
|
||||
return;
|
||||
}
|
||||
|
||||
switch (ip.indexToKey(val.toIntern())) {
|
||||
.int_type,
|
||||
.ptr_type,
|
||||
.array_type,
|
||||
.vector_type,
|
||||
.opt_type,
|
||||
.anyframe_type,
|
||||
.error_union_type,
|
||||
.simple_type,
|
||||
.struct_type,
|
||||
.anon_struct_type,
|
||||
.union_type,
|
||||
.opaque_type,
|
||||
.enum_type,
|
||||
.func_type,
|
||||
.error_set_type,
|
||||
.inferred_error_set_type,
|
||||
.variable,
|
||||
.extern_func,
|
||||
.func,
|
||||
.err,
|
||||
.error_union,
|
||||
.enum_literal,
|
||||
.slice,
|
||||
.memoized_call,
|
||||
=> unreachable, // ill-defined layout or not real values
|
||||
|
||||
.undef,
|
||||
.int,
|
||||
.enum_tag,
|
||||
.simple_value,
|
||||
.empty_enum_value,
|
||||
.float,
|
||||
.ptr,
|
||||
.opt,
|
||||
=> try unpack.primitive(val),
|
||||
|
||||
.aggregate => switch (ty.zigTypeTag(zcu)) {
|
||||
.Vector => {
|
||||
const len: usize = @intCast(ty.arrayLen(zcu));
|
||||
for (0..len) |i| {
|
||||
// We reverse vector elements in packed memory on BE targets.
|
||||
const real_idx = switch (endian) {
|
||||
.little => i,
|
||||
.big => len - i - 1,
|
||||
};
|
||||
const elem_val = try val.elemValue(zcu, real_idx);
|
||||
try unpack.add(elem_val);
|
||||
}
|
||||
},
|
||||
.Array => {
|
||||
// Each element is padded up to its ABI size. Padding bits are undefined.
|
||||
// The final element does not have trailing padding.
|
||||
// Elements are reversed in packed memory on BE targets.
|
||||
const elem_ty = ty.childType(zcu);
|
||||
const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
|
||||
const len = ty.arrayLen(zcu);
|
||||
const maybe_sent = ty.sentinel(zcu);
|
||||
|
||||
if (endian == .big) if (maybe_sent) |s| {
|
||||
try unpack.add(s);
|
||||
if (len != 0) try unpack.padding(pad_bits);
|
||||
};
|
||||
|
||||
for (0..@intCast(len)) |i| {
|
||||
// We reverse array elements in packed memory on BE targets.
|
||||
const real_idx = switch (endian) {
|
||||
.little => i,
|
||||
.big => len - i - 1,
|
||||
};
|
||||
const elem_val = try val.elemValue(zcu, @intCast(real_idx));
|
||||
try unpack.add(elem_val);
|
||||
if (i != len - 1) try unpack.padding(pad_bits);
|
||||
}
|
||||
|
||||
if (endian == .little) if (maybe_sent) |s| {
|
||||
if (len != 0) try unpack.padding(pad_bits);
|
||||
try unpack.add(s);
|
||||
};
|
||||
},
|
||||
.Struct => switch (ty.containerLayout(zcu)) {
|
||||
.auto => unreachable, // ill-defined layout
|
||||
.@"extern" => switch (endian) {
|
||||
.little => {
|
||||
var cur_bit_off: u64 = 0;
|
||||
var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
|
||||
while (it.next()) |field_idx| {
|
||||
const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
|
||||
const pad_bits = want_bit_off - cur_bit_off;
|
||||
const field_val = try val.fieldValue(zcu, field_idx);
|
||||
try unpack.padding(pad_bits);
|
||||
try unpack.add(field_val);
|
||||
cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(zcu);
|
||||
}
|
||||
// Add trailing padding bits.
|
||||
try unpack.padding(bit_size - cur_bit_off);
|
||||
},
|
||||
.big => {
|
||||
var cur_bit_off: u64 = bit_size;
|
||||
var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
|
||||
while (it.next()) |field_idx| {
|
||||
const field_val = try val.fieldValue(zcu, field_idx);
|
||||
const field_ty = field_val.typeOf(zcu);
|
||||
const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
|
||||
const pad_bits = cur_bit_off - want_bit_off;
|
||||
try unpack.padding(pad_bits);
|
||||
try unpack.add(field_val);
|
||||
cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
|
||||
}
|
||||
assert(cur_bit_off == 0);
|
||||
},
|
||||
},
|
||||
.@"packed" => {
|
||||
// Just add all fields in order. There are no padding bits.
|
||||
// This is identical between LE and BE targets.
|
||||
for (0..ty.structFieldCount(zcu)) |i| {
|
||||
const field_val = try val.fieldValue(zcu, i);
|
||||
try unpack.add(field_val);
|
||||
}
|
||||
},
|
||||
},
|
||||
else => unreachable,
|
||||
},
|
||||
|
||||
.un => |un| {
|
||||
// We actually don't care about the tag here!
|
||||
// Instead, we just need to write the payload value, plus any necessary padding.
|
||||
// This correctly handles the case where `tag == .none`, since the payload is then
|
||||
// either an integer or a byte array, both of which we can unpack.
|
||||
const payload_val = Value.fromInterned(un.val);
|
||||
const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(zcu);
|
||||
if (endian == .little or ty.containerLayout(zcu) == .@"packed") {
|
||||
try unpack.add(payload_val);
|
||||
try unpack.padding(pad_bits);
|
||||
} else {
|
||||
try unpack.padding(pad_bits);
|
||||
try unpack.add(payload_val);
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn padding(unpack: *UnpackValueBits, pad_bits: u64) BitCastError!void {
|
||||
if (pad_bits == 0) return;
|
||||
const zcu = unpack.zcu;
|
||||
// Figure out how many full bytes and leftover bits there are.
|
||||
const bytes = pad_bits / 8;
|
||||
const bits = pad_bits % 8;
|
||||
// Add undef u8 values for the bytes...
|
||||
const undef_u8 = try zcu.undefValue(Type.u8);
|
||||
for (0..@intCast(bytes)) |_| {
|
||||
try unpack.primitive(undef_u8);
|
||||
}
|
||||
// ...and an undef int for the leftover bits.
|
||||
if (bits == 0) return;
|
||||
const bits_ty = try zcu.intType(.unsigned, @intCast(bits));
|
||||
const bits_val = try zcu.undefValue(bits_ty);
|
||||
try unpack.primitive(bits_val);
|
||||
}
|
||||
|
||||
fn primitive(unpack: *UnpackValueBits, val: Value) BitCastError!void {
|
||||
const zcu = unpack.zcu;
|
||||
|
||||
if (unpack.remaining_bits == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const ty = val.typeOf(zcu);
|
||||
const bit_size = ty.bitSize(zcu);
|
||||
|
||||
// Note that this skips all zero-bit types.
|
||||
if (unpack.skip_bits >= bit_size) {
|
||||
unpack.skip_bits -= bit_size;
|
||||
return;
|
||||
}
|
||||
|
||||
if (unpack.skip_bits > 0) {
|
||||
const skip = unpack.skip_bits;
|
||||
unpack.skip_bits = 0;
|
||||
return unpack.splitPrimitive(val, skip, bit_size - skip);
|
||||
}
|
||||
|
||||
if (unpack.remaining_bits < bit_size) {
|
||||
return unpack.splitPrimitive(val, 0, unpack.remaining_bits);
|
||||
}
|
||||
|
||||
unpack.remaining_bits -|= bit_size;
|
||||
|
||||
try unpack.unpacked.append(val.toIntern());
|
||||
}
|
||||
|
||||
fn splitPrimitive(unpack: *UnpackValueBits, val: Value, bit_offset: u64, bit_count: u64) BitCastError!void {
|
||||
const zcu = unpack.zcu;
|
||||
const ty = val.typeOf(zcu);
|
||||
|
||||
const val_bits = ty.bitSize(zcu);
|
||||
assert(bit_offset + bit_count <= val_bits);
|
||||
|
||||
switch (zcu.intern_pool.indexToKey(val.toIntern())) {
|
||||
// In the `ptr` case, this will return `error.ReinterpretDeclRef`
|
||||
// if we're trying to split a non-integer pointer value.
|
||||
.int, .float, .enum_tag, .ptr, .opt => {
|
||||
// This @intCast is okay because no primitive can exceed the size of a u16.
|
||||
const int_ty = try zcu.intType(.unsigned, @intCast(bit_count));
|
||||
const buf = try unpack.arena.alloc(u8, @intCast((val_bits + 7) / 8));
|
||||
try val.writeToPackedMemory(ty, zcu, buf, 0);
|
||||
const sub_val = try Value.readFromPackedMemory(int_ty, zcu, buf, @intCast(bit_offset), unpack.arena);
|
||||
try unpack.primitive(sub_val);
|
||||
},
|
||||
.undef => try unpack.padding(bit_count),
|
||||
// The only values here with runtime bits are `true` and `false.
|
||||
// These are both 1 bit, so will never need truncating.
|
||||
.simple_value => unreachable,
|
||||
.empty_enum_value => unreachable, // zero-bit
|
||||
else => unreachable, // zero-bit or not primitives
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
/// Given a sequence of bit-packed values in packed memory (see `UnpackValueBits`),
|
||||
/// reconstructs a value of an arbitrary type, with correct handling of `undefined`
|
||||
/// values and of pointers which align in virtual memory.
|
||||
const PackValueBits = struct {
|
||||
zcu: *Zcu,
|
||||
arena: Allocator,
|
||||
bit_offset: u64 = 0,
|
||||
unpacked: []const InternPool.Index,
|
||||
|
||||
fn get(pack: *PackValueBits, ty: Type) BitCastError!Value {
|
||||
const zcu = pack.zcu;
|
||||
const endian = zcu.getTarget().cpu.arch.endian();
|
||||
const ip = &zcu.intern_pool;
|
||||
const arena = pack.arena;
|
||||
switch (ty.zigTypeTag(zcu)) {
|
||||
.Vector => {
|
||||
// Elements are bit-packed.
|
||||
const len = ty.arrayLen(zcu);
|
||||
const elem_ty = ty.childType(zcu);
|
||||
const elems = try arena.alloc(InternPool.Index, @intCast(len));
|
||||
// We reverse vector elements in packed memory on BE targets.
|
||||
switch (endian) {
|
||||
.little => for (elems) |*elem| {
|
||||
elem.* = (try pack.get(elem_ty)).toIntern();
|
||||
},
|
||||
.big => {
|
||||
var i = elems.len;
|
||||
while (i > 0) {
|
||||
i -= 1;
|
||||
elems[i] = (try pack.get(elem_ty)).toIntern();
|
||||
}
|
||||
},
|
||||
}
|
||||
return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = elems },
|
||||
} }));
|
||||
},
|
||||
.Array => {
|
||||
// Each element is padded up to its ABI size. The final element does not have trailing padding.
|
||||
const len = ty.arrayLen(zcu);
|
||||
const elem_ty = ty.childType(zcu);
|
||||
const maybe_sent = ty.sentinel(zcu);
|
||||
const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
|
||||
const elems = try arena.alloc(InternPool.Index, @intCast(len));
|
||||
|
||||
if (endian == .big and maybe_sent != null) {
|
||||
// TODO: validate sentinel was preserved!
|
||||
try pack.padding(elem_ty.bitSize(zcu));
|
||||
if (len != 0) try pack.padding(pad_bits);
|
||||
}
|
||||
|
||||
for (0..elems.len) |i| {
|
||||
const real_idx = switch (endian) {
|
||||
.little => i,
|
||||
.big => len - i - 1,
|
||||
};
|
||||
elems[@intCast(real_idx)] = (try pack.get(elem_ty)).toIntern();
|
||||
if (i != len - 1) try pack.padding(pad_bits);
|
||||
}
|
||||
|
||||
if (endian == .little and maybe_sent != null) {
|
||||
// TODO: validate sentinel was preserved!
|
||||
if (len != 0) try pack.padding(pad_bits);
|
||||
try pack.padding(elem_ty.bitSize(zcu));
|
||||
}
|
||||
|
||||
return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = elems },
|
||||
} }));
|
||||
},
|
||||
.Struct => switch (ty.containerLayout(zcu)) {
|
||||
.auto => unreachable, // ill-defined layout
|
||||
.@"extern" => {
|
||||
const elems = try arena.alloc(InternPool.Index, ty.structFieldCount(zcu));
|
||||
@memset(elems, .none);
|
||||
switch (endian) {
|
||||
.little => {
|
||||
var cur_bit_off: u64 = 0;
|
||||
var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
|
||||
while (it.next()) |field_idx| {
|
||||
const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
|
||||
try pack.padding(want_bit_off - cur_bit_off);
|
||||
const field_ty = ty.structFieldType(field_idx, zcu);
|
||||
elems[field_idx] = (try pack.get(field_ty)).toIntern();
|
||||
cur_bit_off = want_bit_off + field_ty.bitSize(zcu);
|
||||
}
|
||||
try pack.padding(ty.bitSize(zcu) - cur_bit_off);
|
||||
},
|
||||
.big => {
|
||||
var cur_bit_off: u64 = ty.bitSize(zcu);
|
||||
var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
|
||||
while (it.next()) |field_idx| {
|
||||
const field_ty = ty.structFieldType(field_idx, zcu);
|
||||
const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
|
||||
try pack.padding(cur_bit_off - want_bit_off);
|
||||
elems[field_idx] = (try pack.get(field_ty)).toIntern();
|
||||
cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
|
||||
}
|
||||
assert(cur_bit_off == 0);
|
||||
},
|
||||
}
|
||||
// Any fields which do not have runtime bits should be OPV or comptime fields.
|
||||
// Fill those values now.
|
||||
for (elems, 0..) |*elem, field_idx| {
|
||||
if (elem.* != .none) continue;
|
||||
const val = (try ty.structFieldValueComptime(zcu, field_idx)).?;
|
||||
elem.* = val.toIntern();
|
||||
}
|
||||
return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = elems },
|
||||
} }));
|
||||
},
|
||||
.@"packed" => {
|
||||
// All fields are in order with no padding.
|
||||
// This is identical between LE and BE targets.
|
||||
const elems = try arena.alloc(InternPool.Index, ty.structFieldCount(zcu));
|
||||
for (elems, 0..) |*elem, i| {
|
||||
const field_ty = ty.structFieldType(i, zcu);
|
||||
elem.* = (try pack.get(field_ty)).toIntern();
|
||||
}
|
||||
return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = elems },
|
||||
} }));
|
||||
},
|
||||
},
|
||||
.Union => {
|
||||
// We will attempt to read as the backing representation. If this emits
|
||||
// `error.ReinterpretDeclRef`, we will try each union field, preferring larger ones.
|
||||
// We will also attempt smaller fields when we get `undefined`, as if some bits are
|
||||
// defined we want to include them.
|
||||
// TODO: this is very very bad. We need a more sophisticated union representation.
|
||||
|
||||
const prev_unpacked = pack.unpacked;
|
||||
const prev_bit_offset = pack.bit_offset;
|
||||
|
||||
const backing_ty = try ty.unionBackingType(zcu);
|
||||
|
||||
backing: {
|
||||
const backing_val = pack.get(backing_ty) catch |err| switch (err) {
|
||||
error.ReinterpretDeclRef => {
|
||||
pack.unpacked = prev_unpacked;
|
||||
pack.bit_offset = prev_bit_offset;
|
||||
break :backing;
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
if (backing_val.isUndef(zcu)) {
|
||||
pack.unpacked = prev_unpacked;
|
||||
pack.bit_offset = prev_bit_offset;
|
||||
break :backing;
|
||||
}
|
||||
return Value.fromInterned(try zcu.intern(.{ .un = .{
|
||||
.ty = ty.toIntern(),
|
||||
.tag = .none,
|
||||
.val = backing_val.toIntern(),
|
||||
} }));
|
||||
}
|
||||
|
||||
const field_order = try pack.arena.alloc(u32, ty.unionTagTypeHypothetical(zcu).enumFieldCount(zcu));
|
||||
for (field_order, 0..) |*f, i| f.* = @intCast(i);
|
||||
// Sort `field_order` to put the fields with the largest bit sizes first.
|
||||
const SizeSortCtx = struct {
|
||||
zcu: *Zcu,
|
||||
field_types: []const InternPool.Index,
|
||||
fn lessThan(ctx: @This(), a_idx: u32, b_idx: u32) bool {
|
||||
const a_ty = Type.fromInterned(ctx.field_types[a_idx]);
|
||||
const b_ty = Type.fromInterned(ctx.field_types[b_idx]);
|
||||
return a_ty.bitSize(ctx.zcu) > b_ty.bitSize(ctx.zcu);
|
||||
}
|
||||
};
|
||||
std.mem.sortUnstable(u32, field_order, SizeSortCtx{
|
||||
.zcu = zcu,
|
||||
.field_types = zcu.typeToUnion(ty).?.field_types.get(ip),
|
||||
}, SizeSortCtx.lessThan);
|
||||
|
||||
const padding_after = endian == .little or ty.containerLayout(zcu) == .@"packed";
|
||||
|
||||
for (field_order) |field_idx| {
|
||||
const field_ty = Type.fromInterned(zcu.typeToUnion(ty).?.field_types.get(ip)[field_idx]);
|
||||
const pad_bits = ty.bitSize(zcu) - field_ty.bitSize(zcu);
|
||||
if (!padding_after) try pack.padding(pad_bits);
|
||||
const field_val = pack.get(field_ty) catch |err| switch (err) {
|
||||
error.ReinterpretDeclRef => {
|
||||
pack.unpacked = prev_unpacked;
|
||||
pack.bit_offset = prev_bit_offset;
|
||||
continue;
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
if (padding_after) try pack.padding(pad_bits);
|
||||
if (field_val.isUndef(zcu)) {
|
||||
pack.unpacked = prev_unpacked;
|
||||
pack.bit_offset = prev_bit_offset;
|
||||
continue;
|
||||
}
|
||||
const tag_val = try zcu.enumValueFieldIndex(ty.unionTagTypeHypothetical(zcu), field_idx);
|
||||
return Value.fromInterned(try zcu.intern(.{ .un = .{
|
||||
.ty = ty.toIntern(),
|
||||
.tag = tag_val.toIntern(),
|
||||
.val = field_val.toIntern(),
|
||||
} }));
|
||||
}
|
||||
|
||||
// No field could represent the value. Just do whatever happens when we try to read
|
||||
// the backing type - either `undefined` or `error.ReinterpretDeclRef`.
|
||||
const backing_val = try pack.get(backing_ty);
|
||||
return Value.fromInterned(try zcu.intern(.{ .un = .{
|
||||
.ty = ty.toIntern(),
|
||||
.tag = .none,
|
||||
.val = backing_val.toIntern(),
|
||||
} }));
|
||||
},
|
||||
else => return pack.primitive(ty),
|
||||
}
|
||||
}
|
||||
|
||||
fn padding(pack: *PackValueBits, pad_bits: u64) BitCastError!void {
|
||||
_ = pack.prepareBits(pad_bits);
|
||||
}
|
||||
|
||||
fn primitive(pack: *PackValueBits, want_ty: Type) BitCastError!Value {
|
||||
const zcu = pack.zcu;
|
||||
const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(zcu));
|
||||
|
||||
for (vals) |val| {
|
||||
if (!Value.fromInterned(val).isUndef(zcu)) break;
|
||||
} else {
|
||||
// All bits of the value are `undefined`.
|
||||
return zcu.undefValue(want_ty);
|
||||
}
|
||||
|
||||
// TODO: we need to decide how to handle partially-undef values here.
|
||||
// Currently, a value with some undefined bits becomes `0xAA` so that we
|
||||
// preserve the well-defined bits, because we can't currently represent
|
||||
// a partially-undefined primitive (e.g. an int with some undef bits).
|
||||
// In future, we probably want to take one of these two routes:
|
||||
// * Define that if any bits are `undefined`, the entire value is `undefined`.
|
||||
// This is a major breaking change, and probably a footgun.
|
||||
// * Introduce tracking for partially-undef values at comptime.
|
||||
// This would complicate a lot of operations in Sema, such as basic
|
||||
// arithmetic.
|
||||
// This design complexity is tracked by #19634.
|
||||
|
||||
ptr_cast: {
|
||||
if (vals.len != 1) break :ptr_cast;
|
||||
const val = Value.fromInterned(vals[0]);
|
||||
if (!val.typeOf(zcu).isPtrAtRuntime(zcu)) break :ptr_cast;
|
||||
if (!want_ty.isPtrAtRuntime(zcu)) break :ptr_cast;
|
||||
return zcu.getCoerced(val, want_ty);
|
||||
}
|
||||
|
||||
// Reinterpret via an in-memory buffer.
|
||||
|
||||
var buf_bits: u64 = 0;
|
||||
for (vals) |ip_val| {
|
||||
const val = Value.fromInterned(ip_val);
|
||||
const ty = val.typeOf(zcu);
|
||||
buf_bits += ty.bitSize(zcu);
|
||||
}
|
||||
|
||||
const buf = try pack.arena.alloc(u8, @intCast((buf_bits + 7) / 8));
|
||||
// We will skip writing undefined values, so mark the buffer as `0xAA` so we get "undefined" bits.
|
||||
@memset(buf, 0xAA);
|
||||
var cur_bit_off: usize = 0;
|
||||
for (vals) |ip_val| {
|
||||
const val = Value.fromInterned(ip_val);
|
||||
const ty = val.typeOf(zcu);
|
||||
if (!val.isUndef(zcu)) {
|
||||
try val.writeToPackedMemory(ty, zcu, buf, cur_bit_off);
|
||||
}
|
||||
cur_bit_off += @intCast(ty.bitSize(zcu));
|
||||
}
|
||||
|
||||
return Value.readFromPackedMemory(want_ty, zcu, buf, @intCast(bit_offset), pack.arena);
|
||||
}
|
||||
|
||||
fn prepareBits(pack: *PackValueBits, need_bits: u64) struct { []const InternPool.Index, u64 } {
|
||||
if (need_bits == 0) return .{ &.{}, 0 };
|
||||
|
||||
const zcu = pack.zcu;
|
||||
|
||||
var bits: u64 = 0;
|
||||
var len: usize = 0;
|
||||
while (bits < pack.bit_offset + need_bits) {
|
||||
bits += Value.fromInterned(pack.unpacked[len]).typeOf(zcu).bitSize(zcu);
|
||||
len += 1;
|
||||
}
|
||||
|
||||
const result_vals = pack.unpacked[0..len];
|
||||
const result_offset = pack.bit_offset;
|
||||
|
||||
const extra_bits = bits - pack.bit_offset - need_bits;
|
||||
if (extra_bits == 0) {
|
||||
pack.unpacked = pack.unpacked[len..];
|
||||
pack.bit_offset = 0;
|
||||
} else {
|
||||
pack.unpacked = pack.unpacked[len - 1 ..];
|
||||
pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(zcu).bitSize(zcu) - extra_bits;
|
||||
}
|
||||
|
||||
return .{ result_vals, result_offset };
|
||||
}
|
||||
};
|
||||
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const Sema = @import("../Sema.zig");
|
||||
const Zcu = @import("../Module.zig");
|
||||
const InternPool = @import("../InternPool.zig");
|
||||
const Type = @import("../type.zig").Type;
|
||||
const Value = @import("../Value.zig");
|
||||
const CompileError = Zcu.CompileError;
|
||||
1059
src/Sema/comptime_ptr_access.zig
Normal file
1059
src/Sema/comptime_ptr_access.zig
Normal file
File diff suppressed because it is too large
Load Diff
863
src/Value.zig
863
src/Value.zig
File diff suppressed because it is too large
Load Diff
@ -2206,7 +2206,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
|
||||
);
|
||||
break :blk extern_func.decl;
|
||||
} else switch (mod.intern_pool.indexToKey(func_val.ip_index)) {
|
||||
.ptr => |ptr| switch (ptr.addr) {
|
||||
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
|
||||
.decl => |decl| {
|
||||
_ = try func.bin_file.getOrCreateAtomForDecl(decl);
|
||||
break :blk decl;
|
||||
@ -3058,72 +3058,59 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
|
||||
return WValue{ .stack = {} };
|
||||
}
|
||||
|
||||
fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue {
|
||||
const mod = func.bin_file.base.comp.module.?;
|
||||
const ptr = mod.intern_pool.indexToKey(ptr_val.ip_index).ptr;
|
||||
switch (ptr.addr) {
|
||||
.decl => |decl_index| {
|
||||
return func.lowerParentPtrDecl(ptr_val, decl_index, offset);
|
||||
},
|
||||
.anon_decl => |ad| return func.lowerAnonDeclRef(ad, offset),
|
||||
.eu_payload => |tag| return func.fail("TODO: Implement lowerParentPtr for {}", .{tag}),
|
||||
.int => |base| return func.lowerConstant(Value.fromInterned(base), Type.usize),
|
||||
.opt_payload => |base_ptr| return func.lowerParentPtr(Value.fromInterned(base_ptr), offset),
|
||||
.comptime_field, .comptime_alloc => unreachable,
|
||||
.elem => |elem| {
|
||||
const index = elem.index;
|
||||
const elem_type = Type.fromInterned(mod.intern_pool.typeOf(elem.base)).elemType2(mod);
|
||||
const elem_offset = index * elem_type.abiSize(mod);
|
||||
return func.lowerParentPtr(Value.fromInterned(elem.base), @as(u32, @intCast(elem_offset + offset)));
|
||||
},
|
||||
fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerError!WValue {
|
||||
const zcu = func.bin_file.base.comp.module.?;
|
||||
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
|
||||
const offset: u64 = prev_offset + ptr.byte_offset;
|
||||
return switch (ptr.base_addr) {
|
||||
.decl => |decl| return func.lowerDeclRefValue(decl, @intCast(offset)),
|
||||
.anon_decl => |ad| return func.lowerAnonDeclRef(ad, @intCast(offset)),
|
||||
.int => return func.lowerConstant(try zcu.intValue(Type.usize, offset), Type.usize),
|
||||
.eu_payload => return func.fail("Wasm TODO: lower error union payload pointer", .{}),
|
||||
.opt_payload => |opt_ptr| return func.lowerPtr(opt_ptr, offset),
|
||||
.field => |field| {
|
||||
const parent_ptr_ty = Type.fromInterned(mod.intern_pool.typeOf(field.base));
|
||||
const parent_ty = parent_ptr_ty.childType(mod);
|
||||
const field_index: u32 = @intCast(field.index);
|
||||
|
||||
const field_offset = switch (parent_ty.zigTypeTag(mod)) {
|
||||
.Struct => blk: {
|
||||
if (mod.typeToPackedStruct(parent_ty)) |struct_type| {
|
||||
if (Type.fromInterned(ptr.ty).ptrInfo(mod).packed_offset.host_size == 0)
|
||||
break :blk @divExact(mod.structPackedFieldBitOffset(struct_type, field_index) + parent_ptr_ty.ptrInfo(mod).packed_offset.bit_offset, 8)
|
||||
else
|
||||
break :blk 0;
|
||||
}
|
||||
break :blk parent_ty.structFieldOffset(field_index, mod);
|
||||
},
|
||||
.Union => switch (parent_ty.containerLayout(mod)) {
|
||||
.@"packed" => 0,
|
||||
else => blk: {
|
||||
const layout: Module.UnionLayout = parent_ty.unionGetLayout(mod);
|
||||
if (layout.payload_size == 0) break :blk 0;
|
||||
if (layout.payload_align.compare(.gt, layout.tag_align)) break :blk 0;
|
||||
|
||||
// tag is stored first so calculate offset from where payload starts
|
||||
break :blk layout.tag_align.forward(layout.tag_size);
|
||||
},
|
||||
},
|
||||
.Pointer => switch (parent_ty.ptrSize(mod)) {
|
||||
.Slice => switch (field.index) {
|
||||
0 => 0,
|
||||
1 => func.ptrSize(),
|
||||
const base_ptr = Value.fromInterned(field.base);
|
||||
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
|
||||
const field_off: u64 = switch (base_ty.zigTypeTag(zcu)) {
|
||||
.Pointer => off: {
|
||||
assert(base_ty.isSlice(zcu));
|
||||
break :off switch (field.index) {
|
||||
Value.slice_ptr_index => 0,
|
||||
Value.slice_len_index => @divExact(zcu.getTarget().ptrBitWidth(), 8),
|
||||
else => unreachable,
|
||||
};
|
||||
},
|
||||
.Struct => switch (base_ty.containerLayout(zcu)) {
|
||||
.auto => base_ty.structFieldOffset(@intCast(field.index), zcu),
|
||||
.@"extern", .@"packed" => unreachable,
|
||||
},
|
||||
.Union => switch (base_ty.containerLayout(zcu)) {
|
||||
.auto => off: {
|
||||
// Keep in sync with the `un` case of `generateSymbol`.
|
||||
const layout = base_ty.unionGetLayout(zcu);
|
||||
if (layout.payload_size == 0) break :off 0;
|
||||
if (layout.tag_size == 0) break :off 0;
|
||||
if (layout.tag_align.compare(.gte, layout.payload_align)) {
|
||||
// Tag first.
|
||||
break :off layout.tag_size;
|
||||
} else {
|
||||
// Payload first.
|
||||
break :off 0;
|
||||
}
|
||||
},
|
||||
else => unreachable,
|
||||
.@"extern", .@"packed" => unreachable,
|
||||
},
|
||||
else => unreachable,
|
||||
};
|
||||
return func.lowerParentPtr(Value.fromInterned(field.base), @as(u32, @intCast(offset + field_offset)));
|
||||
return func.lowerPtr(field.base, offset + field_off);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: InternPool.DeclIndex, offset: u32) InnerError!WValue {
|
||||
return func.lowerDeclRefValue(ptr_val, decl_index, offset);
|
||||
.arr_elem, .comptime_field, .comptime_alloc => unreachable,
|
||||
};
|
||||
}
|
||||
|
||||
fn lowerAnonDeclRef(
|
||||
func: *CodeGen,
|
||||
anon_decl: InternPool.Key.Ptr.Addr.AnonDecl,
|
||||
anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
|
||||
offset: u32,
|
||||
) InnerError!WValue {
|
||||
const mod = func.bin_file.base.comp.module.?;
|
||||
@ -3153,7 +3140,7 @@ fn lowerAnonDeclRef(
|
||||
} else return WValue{ .memory_offset = .{ .pointer = target_sym_index, .offset = offset } };
|
||||
}
|
||||
|
||||
fn lowerDeclRefValue(func: *CodeGen, val: Value, decl_index: InternPool.DeclIndex, offset: u32) InnerError!WValue {
|
||||
fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u32) InnerError!WValue {
|
||||
const mod = func.bin_file.base.comp.module.?;
|
||||
|
||||
const decl = mod.declPtr(decl_index);
|
||||
@ -3161,11 +3148,11 @@ fn lowerDeclRefValue(func: *CodeGen, val: Value, decl_index: InternPool.DeclInde
|
||||
// want to lower the actual decl, rather than the alias itself.
|
||||
if (decl.val.getFunction(mod)) |func_val| {
|
||||
if (func_val.owner_decl != decl_index) {
|
||||
return func.lowerDeclRefValue(val, func_val.owner_decl, offset);
|
||||
return func.lowerDeclRefValue(func_val.owner_decl, offset);
|
||||
}
|
||||
} else if (decl.val.getExternFunc(mod)) |func_val| {
|
||||
if (func_val.decl != decl_index) {
|
||||
return func.lowerDeclRefValue(val, func_val.decl, offset);
|
||||
return func.lowerDeclRefValue(func_val.decl, offset);
|
||||
}
|
||||
}
|
||||
const decl_ty = decl.typeOf(mod);
|
||||
@ -3309,23 +3296,16 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
|
||||
},
|
||||
.slice => |slice| {
|
||||
var ptr = ip.indexToKey(slice.ptr).ptr;
|
||||
const owner_decl = while (true) switch (ptr.addr) {
|
||||
const owner_decl = while (true) switch (ptr.base_addr) {
|
||||
.decl => |decl| break decl,
|
||||
.int, .anon_decl => return func.fail("Wasm TODO: lower slice where ptr is not owned by decl", .{}),
|
||||
.opt_payload, .eu_payload => |base| ptr = ip.indexToKey(base).ptr,
|
||||
.elem, .field => |base_index| ptr = ip.indexToKey(base_index.base).ptr,
|
||||
.comptime_field, .comptime_alloc => unreachable,
|
||||
.field => |base_index| ptr = ip.indexToKey(base_index.base).ptr,
|
||||
.arr_elem, .comptime_field, .comptime_alloc => unreachable,
|
||||
};
|
||||
return .{ .memory = try func.bin_file.lowerUnnamedConst(val, owner_decl) };
|
||||
},
|
||||
.ptr => |ptr| switch (ptr.addr) {
|
||||
.decl => |decl| return func.lowerDeclRefValue(val, decl, 0),
|
||||
.int => |int| return func.lowerConstant(Value.fromInterned(int), Type.fromInterned(ip.typeOf(int))),
|
||||
.opt_payload, .elem, .field => return func.lowerParentPtr(val, 0),
|
||||
.anon_decl => |ad| return func.lowerAnonDeclRef(ad, 0),
|
||||
.comptime_field, .comptime_alloc => unreachable,
|
||||
else => return func.fail("Wasm TODO: lowerConstant for other const addr tag {}", .{ptr.addr}),
|
||||
},
|
||||
.ptr => return func.lowerPtr(val.toIntern(), 0),
|
||||
.opt => if (ty.optionalReprIsPayload(mod)) {
|
||||
const pl_ty = ty.optionalChild(mod);
|
||||
if (val.optionalValue(mod)) |payload| {
|
||||
@ -3435,7 +3415,10 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
|
||||
else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
|
||||
.enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod),
|
||||
.int => |int| intStorageAsI32(int.storage, mod),
|
||||
.ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod),
|
||||
.ptr => |ptr| {
|
||||
assert(ptr.base_addr == .int);
|
||||
return @intCast(ptr.byte_offset);
|
||||
},
|
||||
.err => |err| @as(i32, @bitCast(@as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(err.name).?)))),
|
||||
else => unreachable,
|
||||
},
|
||||
|
||||
@ -12249,10 +12249,10 @@ fn genCall(self: *Self, info: union(enum) {
|
||||
const func_key = mod.intern_pool.indexToKey(func_value.ip_index);
|
||||
switch (switch (func_key) {
|
||||
else => func_key,
|
||||
.ptr => |ptr| switch (ptr.addr) {
|
||||
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
|
||||
.decl => |decl| mod.intern_pool.indexToKey(mod.declPtr(decl).val.toIntern()),
|
||||
else => func_key,
|
||||
},
|
||||
} else func_key,
|
||||
}) {
|
||||
.func => |func| {
|
||||
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
|
||||
@ -17877,8 +17877,8 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
|
||||
|
||||
break :result null;
|
||||
}) orelse return self.fail("TODO implement airShuffle from {} and {} to {} with {}", .{
|
||||
lhs_ty.fmt(mod), rhs_ty.fmt(mod), dst_ty.fmt(mod),
|
||||
Value.fromInterned(extra.mask).fmtValue(mod),
|
||||
lhs_ty.fmt(mod), rhs_ty.fmt(mod), dst_ty.fmt(mod),
|
||||
Value.fromInterned(extra.mask).fmtValue(mod, null),
|
||||
});
|
||||
return self.finishAir(inst, result, .{ extra.a, extra.b, .none });
|
||||
}
|
||||
|
||||
134
src/codegen.zig
134
src/codegen.zig
@ -16,7 +16,8 @@ const Compilation = @import("Compilation.zig");
|
||||
const ErrorMsg = Module.ErrorMsg;
|
||||
const InternPool = @import("InternPool.zig");
|
||||
const Liveness = @import("Liveness.zig");
|
||||
const Module = @import("Module.zig");
|
||||
const Zcu = @import("Module.zig");
|
||||
const Module = Zcu;
|
||||
const Target = std.Target;
|
||||
const Type = @import("type.zig").Type;
|
||||
const Value = @import("Value.zig");
|
||||
@ -185,7 +186,7 @@ pub fn generateSymbol(
|
||||
const target = mod.getTarget();
|
||||
const endian = target.cpu.arch.endian();
|
||||
|
||||
log.debug("generateSymbol: val = {}", .{val.fmtValue(mod)});
|
||||
log.debug("generateSymbol: val = {}", .{val.fmtValue(mod, null)});
|
||||
|
||||
if (val.isUndefDeep(mod)) {
|
||||
const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow;
|
||||
@ -314,7 +315,7 @@ pub fn generateSymbol(
|
||||
},
|
||||
.f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)),
|
||||
},
|
||||
.ptr => switch (try lowerParentPtr(bin_file, src_loc, val.toIntern(), code, debug_output, reloc_info)) {
|
||||
.ptr => switch (try lowerPtr(bin_file, src_loc, val.toIntern(), code, debug_output, reloc_info, 0)) {
|
||||
.ok => {},
|
||||
.fail => |em| return .{ .fail = em },
|
||||
},
|
||||
@ -614,111 +615,79 @@ pub fn generateSymbol(
|
||||
return .ok;
|
||||
}
|
||||
|
||||
fn lowerParentPtr(
|
||||
fn lowerPtr(
|
||||
bin_file: *link.File,
|
||||
src_loc: Module.SrcLoc,
|
||||
parent_ptr: InternPool.Index,
|
||||
ptr_val: InternPool.Index,
|
||||
code: *std.ArrayList(u8),
|
||||
debug_output: DebugInfoOutput,
|
||||
reloc_info: RelocInfo,
|
||||
prev_offset: u64,
|
||||
) CodeGenError!Result {
|
||||
const mod = bin_file.comp.module.?;
|
||||
const ip = &mod.intern_pool;
|
||||
const ptr = ip.indexToKey(parent_ptr).ptr;
|
||||
return switch (ptr.addr) {
|
||||
.decl => |decl| try lowerDeclRef(bin_file, src_loc, decl, code, debug_output, reloc_info),
|
||||
.anon_decl => |ad| try lowerAnonDeclRef(bin_file, src_loc, ad, code, debug_output, reloc_info),
|
||||
.int => |int| try generateSymbol(bin_file, src_loc, Value.fromInterned(int), code, debug_output, reloc_info),
|
||||
.eu_payload => |eu_payload| try lowerParentPtr(
|
||||
const zcu = bin_file.comp.module.?;
|
||||
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
|
||||
const offset: u64 = prev_offset + ptr.byte_offset;
|
||||
return switch (ptr.base_addr) {
|
||||
.decl => |decl| try lowerDeclRef(bin_file, src_loc, decl, code, debug_output, reloc_info, offset),
|
||||
.anon_decl => |ad| try lowerAnonDeclRef(bin_file, src_loc, ad, code, debug_output, reloc_info, offset),
|
||||
.int => try generateSymbol(bin_file, src_loc, try zcu.intValue(Type.usize, offset), code, debug_output, reloc_info),
|
||||
.eu_payload => |eu_ptr| try lowerPtr(
|
||||
bin_file,
|
||||
src_loc,
|
||||
eu_payload,
|
||||
code,
|
||||
debug_output,
|
||||
reloc_info.offset(@intCast(errUnionPayloadOffset(
|
||||
Type.fromInterned(ip.typeOf(eu_payload)),
|
||||
mod,
|
||||
))),
|
||||
),
|
||||
.opt_payload => |opt_payload| try lowerParentPtr(
|
||||
bin_file,
|
||||
src_loc,
|
||||
opt_payload,
|
||||
eu_ptr,
|
||||
code,
|
||||
debug_output,
|
||||
reloc_info,
|
||||
offset + errUnionPayloadOffset(
|
||||
Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu).errorUnionPayload(zcu),
|
||||
zcu,
|
||||
),
|
||||
),
|
||||
.elem => |elem| try lowerParentPtr(
|
||||
.opt_payload => |opt_ptr| try lowerPtr(
|
||||
bin_file,
|
||||
src_loc,
|
||||
elem.base,
|
||||
opt_ptr,
|
||||
code,
|
||||
debug_output,
|
||||
reloc_info.offset(@intCast(elem.index *
|
||||
Type.fromInterned(ip.typeOf(elem.base)).elemType2(mod).abiSize(mod))),
|
||||
reloc_info,
|
||||
offset,
|
||||
),
|
||||
.field => |field| {
|
||||
const base_ptr_ty = ip.typeOf(field.base);
|
||||
const base_ty = ip.indexToKey(base_ptr_ty).ptr_type.child;
|
||||
return lowerParentPtr(
|
||||
bin_file,
|
||||
src_loc,
|
||||
field.base,
|
||||
code,
|
||||
debug_output,
|
||||
reloc_info.offset(switch (ip.indexToKey(base_ty)) {
|
||||
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
|
||||
.One, .Many, .C => unreachable,
|
||||
.Slice => switch (field.index) {
|
||||
0 => 0,
|
||||
1 => @divExact(mod.getTarget().ptrBitWidth(), 8),
|
||||
else => unreachable,
|
||||
},
|
||||
},
|
||||
.struct_type,
|
||||
.anon_struct_type,
|
||||
.union_type,
|
||||
=> switch (Type.fromInterned(base_ty).containerLayout(mod)) {
|
||||
.auto, .@"extern" => @intCast(Type.fromInterned(base_ty).structFieldOffset(
|
||||
@intCast(field.index),
|
||||
mod,
|
||||
)),
|
||||
.@"packed" => if (mod.typeToStruct(Type.fromInterned(base_ty))) |struct_obj|
|
||||
if (Type.fromInterned(ptr.ty).ptrInfo(mod).packed_offset.host_size == 0)
|
||||
@divExact(Type.fromInterned(base_ptr_ty).ptrInfo(mod)
|
||||
.packed_offset.bit_offset + mod.structPackedFieldBitOffset(
|
||||
struct_obj,
|
||||
@intCast(field.index),
|
||||
), 8)
|
||||
else
|
||||
0
|
||||
else
|
||||
0,
|
||||
},
|
||||
else => unreachable,
|
||||
}),
|
||||
);
|
||||
const base_ptr = Value.fromInterned(field.base);
|
||||
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
|
||||
const field_off: u64 = switch (base_ty.zigTypeTag(zcu)) {
|
||||
.Pointer => off: {
|
||||
assert(base_ty.isSlice(zcu));
|
||||
break :off switch (field.index) {
|
||||
Value.slice_ptr_index => 0,
|
||||
Value.slice_len_index => @divExact(zcu.getTarget().ptrBitWidth(), 8),
|
||||
else => unreachable,
|
||||
};
|
||||
},
|
||||
.Struct, .Union => switch (base_ty.containerLayout(zcu)) {
|
||||
.auto => base_ty.structFieldOffset(@intCast(field.index), zcu),
|
||||
.@"extern", .@"packed" => unreachable,
|
||||
},
|
||||
else => unreachable,
|
||||
};
|
||||
return lowerPtr(bin_file, src_loc, field.base, code, debug_output, reloc_info, offset + field_off);
|
||||
},
|
||||
.comptime_field, .comptime_alloc => unreachable,
|
||||
.arr_elem, .comptime_field, .comptime_alloc => unreachable,
|
||||
};
|
||||
}
|
||||
|
||||
const RelocInfo = struct {
|
||||
parent_atom_index: u32,
|
||||
addend: ?u32 = null,
|
||||
|
||||
fn offset(ri: RelocInfo, addend: u32) RelocInfo {
|
||||
return .{ .parent_atom_index = ri.parent_atom_index, .addend = (ri.addend orelse 0) + addend };
|
||||
}
|
||||
};
|
||||
|
||||
fn lowerAnonDeclRef(
|
||||
lf: *link.File,
|
||||
src_loc: Module.SrcLoc,
|
||||
anon_decl: InternPool.Key.Ptr.Addr.AnonDecl,
|
||||
anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
|
||||
code: *std.ArrayList(u8),
|
||||
debug_output: DebugInfoOutput,
|
||||
reloc_info: RelocInfo,
|
||||
offset: u64,
|
||||
) CodeGenError!Result {
|
||||
_ = debug_output;
|
||||
const zcu = lf.comp.module.?;
|
||||
@ -745,7 +714,7 @@ fn lowerAnonDeclRef(
|
||||
const vaddr = try lf.getAnonDeclVAddr(decl_val, .{
|
||||
.parent_atom_index = reloc_info.parent_atom_index,
|
||||
.offset = code.items.len,
|
||||
.addend = reloc_info.addend orelse 0,
|
||||
.addend = @intCast(offset),
|
||||
});
|
||||
const endian = target.cpu.arch.endian();
|
||||
switch (ptr_width_bytes) {
|
||||
@ -765,6 +734,7 @@ fn lowerDeclRef(
|
||||
code: *std.ArrayList(u8),
|
||||
debug_output: DebugInfoOutput,
|
||||
reloc_info: RelocInfo,
|
||||
offset: u64,
|
||||
) CodeGenError!Result {
|
||||
_ = src_loc;
|
||||
_ = debug_output;
|
||||
@ -783,7 +753,7 @@ fn lowerDeclRef(
|
||||
const vaddr = try lf.getDeclVAddr(decl_index, .{
|
||||
.parent_atom_index = reloc_info.parent_atom_index,
|
||||
.offset = code.items.len,
|
||||
.addend = reloc_info.addend orelse 0,
|
||||
.addend = @intCast(offset),
|
||||
});
|
||||
const endian = target.cpu.arch.endian();
|
||||
switch (ptr_width) {
|
||||
@ -861,7 +831,7 @@ fn genDeclRef(
|
||||
const zcu = lf.comp.module.?;
|
||||
const ip = &zcu.intern_pool;
|
||||
const ty = val.typeOf(zcu);
|
||||
log.debug("genDeclRef: val = {}", .{val.fmtValue(zcu)});
|
||||
log.debug("genDeclRef: val = {}", .{val.fmtValue(zcu, null)});
|
||||
|
||||
const ptr_decl = zcu.declPtr(ptr_decl_index);
|
||||
const namespace = zcu.namespacePtr(ptr_decl.src_namespace);
|
||||
@ -966,7 +936,7 @@ fn genUnnamedConst(
|
||||
) CodeGenError!GenResult {
|
||||
const zcu = lf.comp.module.?;
|
||||
const gpa = lf.comp.gpa;
|
||||
log.debug("genUnnamedConst: val = {}", .{val.fmtValue(zcu)});
|
||||
log.debug("genUnnamedConst: val = {}", .{val.fmtValue(zcu, null)});
|
||||
|
||||
const local_sym_index = lf.lowerUnnamedConst(val, owner_decl_index) catch |err| {
|
||||
return GenResult.fail(gpa, src_loc, "lowering unnamed constant failed: {s}", .{@errorName(err)});
|
||||
@ -1007,7 +977,7 @@ pub fn genTypedValue(
|
||||
const ip = &zcu.intern_pool;
|
||||
const ty = val.typeOf(zcu);
|
||||
|
||||
log.debug("genTypedValue: val = {}", .{val.fmtValue(zcu)});
|
||||
log.debug("genTypedValue: val = {}", .{val.fmtValue(zcu, null)});
|
||||
|
||||
if (val.isUndef(zcu))
|
||||
return GenResult.mcv(.undef);
|
||||
@ -1018,7 +988,7 @@ pub fn genTypedValue(
|
||||
const ptr_bits = target.ptrBitWidth();
|
||||
|
||||
if (!ty.isSlice(zcu)) switch (ip.indexToKey(val.toIntern())) {
|
||||
.ptr => |ptr| switch (ptr.addr) {
|
||||
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
|
||||
.decl => |decl| return genDeclRef(lf, src_loc, val, decl),
|
||||
else => {},
|
||||
},
|
||||
|
||||
@ -646,8 +646,7 @@ pub const DeclGen = struct {
|
||||
fn renderAnonDeclValue(
|
||||
dg: *DeclGen,
|
||||
writer: anytype,
|
||||
ptr_val: Value,
|
||||
anon_decl: InternPool.Key.Ptr.Addr.AnonDecl,
|
||||
anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
|
||||
location: ValueRenderLocation,
|
||||
) error{ OutOfMemory, AnalysisFail }!void {
|
||||
const zcu = dg.zcu;
|
||||
@ -657,16 +656,16 @@ pub const DeclGen = struct {
|
||||
const decl_ty = decl_val.typeOf(zcu);
|
||||
|
||||
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
|
||||
const ptr_ty = ptr_val.typeOf(zcu);
|
||||
const ptr_ty = Type.fromInterned(anon_decl.orig_ty);
|
||||
if (ptr_ty.isPtrAtRuntime(zcu) and !decl_ty.isFnOrHasRuntimeBits(zcu)) {
|
||||
return dg.writeCValue(writer, .{ .undef = ptr_ty });
|
||||
}
|
||||
|
||||
// Chase function values in order to be able to reference the original function.
|
||||
if (decl_val.getFunction(zcu)) |func|
|
||||
return dg.renderDeclValue(writer, ptr_val, func.owner_decl, location);
|
||||
return dg.renderDeclValue(writer, func.owner_decl, location);
|
||||
if (decl_val.getExternFunc(zcu)) |extern_func|
|
||||
return dg.renderDeclValue(writer, ptr_val, extern_func.decl, location);
|
||||
return dg.renderDeclValue(writer, extern_func.decl, location);
|
||||
|
||||
assert(decl_val.getVariable(zcu) == null);
|
||||
|
||||
@ -712,7 +711,6 @@ pub const DeclGen = struct {
|
||||
fn renderDeclValue(
|
||||
dg: *DeclGen,
|
||||
writer: anytype,
|
||||
val: Value,
|
||||
decl_index: InternPool.DeclIndex,
|
||||
location: ValueRenderLocation,
|
||||
) error{ OutOfMemory, AnalysisFail }!void {
|
||||
@ -722,17 +720,17 @@ pub const DeclGen = struct {
|
||||
assert(decl.has_tv);
|
||||
|
||||
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
|
||||
const ty = val.typeOf(zcu);
|
||||
const decl_ty = decl.typeOf(zcu);
|
||||
if (ty.isPtrAtRuntime(zcu) and !decl_ty.isFnOrHasRuntimeBits(zcu)) {
|
||||
return dg.writeCValue(writer, .{ .undef = ty });
|
||||
const ptr_ty = try decl.declPtrType(zcu);
|
||||
if (!decl_ty.isFnOrHasRuntimeBits(zcu)) {
|
||||
return dg.writeCValue(writer, .{ .undef = ptr_ty });
|
||||
}
|
||||
|
||||
// Chase function values in order to be able to reference the original function.
|
||||
if (decl.val.getFunction(zcu)) |func| if (func.owner_decl != decl_index)
|
||||
return dg.renderDeclValue(writer, val, func.owner_decl, location);
|
||||
return dg.renderDeclValue(writer, func.owner_decl, location);
|
||||
if (decl.val.getExternFunc(zcu)) |extern_func| if (extern_func.decl != decl_index)
|
||||
return dg.renderDeclValue(writer, val, extern_func.decl, location);
|
||||
return dg.renderDeclValue(writer, extern_func.decl, location);
|
||||
|
||||
if (decl.val.getVariable(zcu)) |variable| try dg.renderFwdDecl(decl_index, variable, .tentative);
|
||||
|
||||
@ -740,7 +738,7 @@ pub const DeclGen = struct {
|
||||
// them). The analysis until now should ensure that the C function
|
||||
// pointers are compatible. If they are not, then there is a bug
|
||||
// somewhere and we should let the C compiler tell us about it.
|
||||
const ctype = try dg.ctypeFromType(ty, .complete);
|
||||
const ctype = try dg.ctypeFromType(ptr_ty, .complete);
|
||||
const elem_ctype = ctype.info(ctype_pool).pointer.elem_ctype;
|
||||
const decl_ctype = try dg.ctypeFromType(decl_ty, .complete);
|
||||
const need_cast = !elem_ctype.eql(decl_ctype) and
|
||||
@ -755,125 +753,108 @@ pub const DeclGen = struct {
|
||||
if (need_cast) try writer.writeByte(')');
|
||||
}
|
||||
|
||||
/// Renders a "parent" pointer by recursing to the root decl/variable
|
||||
/// that its contents are defined with respect to.
|
||||
fn renderParentPtr(
|
||||
fn renderPointer(
|
||||
dg: *DeclGen,
|
||||
writer: anytype,
|
||||
ptr_val: InternPool.Index,
|
||||
derivation: Value.PointerDeriveStep,
|
||||
location: ValueRenderLocation,
|
||||
) error{ OutOfMemory, AnalysisFail }!void {
|
||||
const zcu = dg.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const ptr_ty = Type.fromInterned(ip.typeOf(ptr_val));
|
||||
const ptr_ctype = try dg.ctypeFromType(ptr_ty, .complete);
|
||||
const ptr_child_ctype = ptr_ctype.info(&dg.ctype_pool).pointer.elem_ctype;
|
||||
const ptr = ip.indexToKey(ptr_val).ptr;
|
||||
switch (ptr.addr) {
|
||||
.decl => |d| try dg.renderDeclValue(writer, Value.fromInterned(ptr_val), d, location),
|
||||
.anon_decl => |anon_decl| try dg.renderAnonDeclValue(writer, Value.fromInterned(ptr_val), anon_decl, location),
|
||||
switch (derivation) {
|
||||
.comptime_alloc_ptr, .comptime_field_ptr => unreachable,
|
||||
.int => |int| {
|
||||
const ptr_ctype = try dg.ctypeFromType(int.ptr_ty, .complete);
|
||||
const addr_val = try zcu.intValue(Type.usize, int.addr);
|
||||
try writer.writeByte('(');
|
||||
try dg.renderCType(writer, ptr_ctype);
|
||||
try writer.print("){x}", .{try dg.fmtIntLiteral(Value.fromInterned(int), .Other)});
|
||||
try writer.print("){x}", .{try dg.fmtIntLiteral(addr_val, .Other)});
|
||||
},
|
||||
.eu_payload, .opt_payload => |base| {
|
||||
const ptr_base_ty = Type.fromInterned(ip.typeOf(base));
|
||||
const base_ty = ptr_base_ty.childType(zcu);
|
||||
// Ensure complete type definition is visible before accessing fields.
|
||||
_ = try dg.ctypeFromType(base_ty, .complete);
|
||||
const payload_ty = switch (ptr.addr) {
|
||||
.eu_payload => base_ty.errorUnionPayload(zcu),
|
||||
.opt_payload => base_ty.optionalChild(zcu),
|
||||
else => unreachable,
|
||||
};
|
||||
const payload_ctype = try dg.ctypeFromType(payload_ty, .forward);
|
||||
if (!ptr_child_ctype.eql(payload_ctype)) {
|
||||
try writer.writeByte('(');
|
||||
try dg.renderCType(writer, ptr_ctype);
|
||||
try writer.writeByte(')');
|
||||
}
|
||||
|
||||
.decl_ptr => |decl| try dg.renderDeclValue(writer, decl, location),
|
||||
.anon_decl_ptr => |ad| try dg.renderAnonDeclValue(writer, ad, location),
|
||||
|
||||
inline .eu_payload_ptr, .opt_payload_ptr => |info| {
|
||||
try writer.writeAll("&(");
|
||||
try dg.renderParentPtr(writer, base, location);
|
||||
try dg.renderPointer(writer, info.parent.*, location);
|
||||
try writer.writeAll(")->payload");
|
||||
},
|
||||
.elem => |elem| {
|
||||
const ptr_base_ty = Type.fromInterned(ip.typeOf(elem.base));
|
||||
const elem_ty = ptr_base_ty.elemType2(zcu);
|
||||
const elem_ctype = try dg.ctypeFromType(elem_ty, .forward);
|
||||
if (!ptr_child_ctype.eql(elem_ctype)) {
|
||||
try writer.writeByte('(');
|
||||
try dg.renderCType(writer, ptr_ctype);
|
||||
try writer.writeByte(')');
|
||||
}
|
||||
try writer.writeAll("&(");
|
||||
if (ip.indexToKey(ptr_base_ty.toIntern()).ptr_type.flags.size == .One)
|
||||
try writer.writeByte('*');
|
||||
try dg.renderParentPtr(writer, elem.base, location);
|
||||
try writer.print(")[{d}]", .{elem.index});
|
||||
},
|
||||
.field => |field| {
|
||||
const ptr_base_ty = Type.fromInterned(ip.typeOf(field.base));
|
||||
const base_ty = ptr_base_ty.childType(zcu);
|
||||
|
||||
.field_ptr => |field| {
|
||||
const parent_ptr_ty = try field.parent.ptrType(zcu);
|
||||
|
||||
// Ensure complete type definition is available before accessing fields.
|
||||
_ = try dg.ctypeFromType(base_ty, .complete);
|
||||
switch (fieldLocation(ptr_base_ty, ptr_ty, @as(u32, @intCast(field.index)), zcu)) {
|
||||
_ = try dg.ctypeFromType(parent_ptr_ty.childType(zcu), .complete);
|
||||
|
||||
switch (fieldLocation(parent_ptr_ty, field.result_ptr_ty, field.field_idx, zcu)) {
|
||||
.begin => {
|
||||
const ptr_base_ctype = try dg.ctypeFromType(ptr_base_ty, .complete);
|
||||
if (!ptr_ctype.eql(ptr_base_ctype)) {
|
||||
try writer.writeByte('(');
|
||||
try dg.renderCType(writer, ptr_ctype);
|
||||
try writer.writeByte(')');
|
||||
}
|
||||
try dg.renderParentPtr(writer, field.base, location);
|
||||
const ptr_ctype = try dg.ctypeFromType(field.result_ptr_ty, .complete);
|
||||
try writer.writeByte('(');
|
||||
try dg.renderCType(writer, ptr_ctype);
|
||||
try writer.writeByte(')');
|
||||
try dg.renderPointer(writer, field.parent.*, location);
|
||||
},
|
||||
.field => |name| {
|
||||
const field_ty = switch (ip.indexToKey(base_ty.toIntern())) {
|
||||
.anon_struct_type,
|
||||
.struct_type,
|
||||
.union_type,
|
||||
=> base_ty.structFieldType(@as(usize, @intCast(field.index)), zcu),
|
||||
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
|
||||
.One, .Many, .C => unreachable,
|
||||
.Slice => switch (field.index) {
|
||||
Value.slice_ptr_index => base_ty.slicePtrFieldType(zcu),
|
||||
Value.slice_len_index => Type.usize,
|
||||
else => unreachable,
|
||||
},
|
||||
},
|
||||
else => unreachable,
|
||||
};
|
||||
const field_ctype = try dg.ctypeFromType(field_ty, .forward);
|
||||
if (!ptr_child_ctype.eql(field_ctype)) {
|
||||
try writer.writeByte('(');
|
||||
try dg.renderCType(writer, ptr_ctype);
|
||||
try writer.writeByte(')');
|
||||
}
|
||||
try writer.writeAll("&(");
|
||||
try dg.renderParentPtr(writer, field.base, location);
|
||||
try dg.renderPointer(writer, field.parent.*, location);
|
||||
try writer.writeAll(")->");
|
||||
try dg.writeCValue(writer, name);
|
||||
},
|
||||
.byte_offset => |byte_offset| {
|
||||
const u8_ptr_ty = try zcu.adjustPtrTypeChild(ptr_ty, Type.u8);
|
||||
const u8_ptr_ctype = try dg.ctypeFromType(u8_ptr_ty, .complete);
|
||||
|
||||
if (!ptr_ctype.eql(u8_ptr_ctype)) {
|
||||
try writer.writeByte('(');
|
||||
try dg.renderCType(writer, ptr_ctype);
|
||||
try writer.writeByte(')');
|
||||
}
|
||||
try writer.writeAll("((");
|
||||
try dg.renderCType(writer, u8_ptr_ctype);
|
||||
const ptr_ctype = try dg.ctypeFromType(field.result_ptr_ty, .complete);
|
||||
try writer.writeByte('(');
|
||||
try dg.renderCType(writer, ptr_ctype);
|
||||
try writer.writeByte(')');
|
||||
try dg.renderParentPtr(writer, field.base, location);
|
||||
try writer.print(" + {})", .{
|
||||
try dg.fmtIntLiteral(try zcu.intValue(Type.usize, byte_offset), .Other),
|
||||
});
|
||||
const offset_val = try zcu.intValue(Type.usize, byte_offset);
|
||||
try writer.writeAll("((char *)");
|
||||
try dg.renderPointer(writer, field.parent.*, location);
|
||||
try writer.print(" + {})", .{try dg.fmtIntLiteral(offset_val, .Other)});
|
||||
},
|
||||
}
|
||||
},
|
||||
.comptime_field, .comptime_alloc => unreachable,
|
||||
|
||||
.elem_ptr => |elem| if (!(try elem.parent.ptrType(zcu)).childType(zcu).hasRuntimeBits(zcu)) {
|
||||
// Element type is zero-bit, so lowers to `void`. The index is irrelevant; just cast the pointer.
|
||||
const ptr_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete);
|
||||
try writer.writeByte('(');
|
||||
try dg.renderCType(writer, ptr_ctype);
|
||||
try writer.writeByte(')');
|
||||
try dg.renderPointer(writer, elem.parent.*, location);
|
||||
} else {
|
||||
const index_val = try zcu.intValue(Type.usize, elem.elem_idx);
|
||||
// We want to do pointer arithmetic on a pointer to the element type.
|
||||
// We might have a pointer-to-array. In this case, we must cast first.
|
||||
const result_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete);
|
||||
const parent_ctype = try dg.ctypeFromType(try elem.parent.ptrType(zcu), .complete);
|
||||
if (result_ctype.eql(parent_ctype)) {
|
||||
// The pointer already has an appropriate type - just do the arithmetic.
|
||||
try writer.writeByte('(');
|
||||
try dg.renderPointer(writer, elem.parent.*, location);
|
||||
try writer.print(" + {})", .{try dg.fmtIntLiteral(index_val, .Other)});
|
||||
} else {
|
||||
// We probably have an array pointer `T (*)[n]`. Cast to an element pointer,
|
||||
// and *then* apply the index.
|
||||
try writer.writeAll("((");
|
||||
try dg.renderCType(writer, result_ctype);
|
||||
try writer.writeByte(')');
|
||||
try dg.renderPointer(writer, elem.parent.*, location);
|
||||
try writer.print(" + {})", .{try dg.fmtIntLiteral(index_val, .Other)});
|
||||
}
|
||||
},
|
||||
|
||||
.offset_and_cast => |oac| {
|
||||
const ptr_ctype = try dg.ctypeFromType(oac.new_ptr_ty, .complete);
|
||||
try writer.writeByte('(');
|
||||
try dg.renderCType(writer, ptr_ctype);
|
||||
try writer.writeByte(')');
|
||||
if (oac.byte_offset == 0) {
|
||||
try dg.renderPointer(writer, oac.parent.*, location);
|
||||
} else {
|
||||
const offset_val = try zcu.intValue(Type.usize, oac.byte_offset);
|
||||
try writer.writeAll("((char *)");
|
||||
try dg.renderPointer(writer, oac.parent.*, location);
|
||||
try writer.print(" + {})", .{try dg.fmtIntLiteral(offset_val, .Other)});
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@ -1103,20 +1084,11 @@ pub const DeclGen = struct {
|
||||
}
|
||||
try writer.writeByte('}');
|
||||
},
|
||||
.ptr => |ptr| switch (ptr.addr) {
|
||||
.decl => |d| try dg.renderDeclValue(writer, val, d, location),
|
||||
.anon_decl => |decl_val| try dg.renderAnonDeclValue(writer, val, decl_val, location),
|
||||
.int => |int| {
|
||||
try writer.writeAll("((");
|
||||
try dg.renderCType(writer, ctype);
|
||||
try writer.print("){x})", .{try dg.fmtIntLiteral(Value.fromInterned(int), location)});
|
||||
},
|
||||
.eu_payload,
|
||||
.opt_payload,
|
||||
.elem,
|
||||
.field,
|
||||
=> try dg.renderParentPtr(writer, val.toIntern(), location),
|
||||
.comptime_field, .comptime_alloc => unreachable,
|
||||
.ptr => {
|
||||
var arena = std.heap.ArenaAllocator.init(zcu.gpa);
|
||||
defer arena.deinit();
|
||||
const derivation = try val.pointerDerivation(arena.allocator(), zcu);
|
||||
try dg.renderPointer(writer, derivation, location);
|
||||
},
|
||||
.opt => |opt| switch (ctype.info(ctype_pool)) {
|
||||
.basic => if (ctype.isBool()) try writer.writeAll(switch (opt.val) {
|
||||
@ -4574,10 +4546,10 @@ fn airCall(
|
||||
break :fn_decl switch (zcu.intern_pool.indexToKey(callee_val.toIntern())) {
|
||||
.extern_func => |extern_func| extern_func.decl,
|
||||
.func => |func| func.owner_decl,
|
||||
.ptr => |ptr| switch (ptr.addr) {
|
||||
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
|
||||
.decl => |decl| decl,
|
||||
else => break :known,
|
||||
},
|
||||
} else break :known,
|
||||
else => break :known,
|
||||
};
|
||||
};
|
||||
@ -5147,10 +5119,10 @@ fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool
|
||||
'I' => !target.cpu.arch.isArmOrThumb(),
|
||||
else => switch (value) {
|
||||
.constant => |val| switch (f.object.dg.zcu.intern_pool.indexToKey(val.toIntern())) {
|
||||
.ptr => |ptr| switch (ptr.addr) {
|
||||
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
|
||||
.decl => false,
|
||||
else => true,
|
||||
},
|
||||
} else true,
|
||||
else => true,
|
||||
},
|
||||
else => false,
|
||||
|
||||
@ -3262,6 +3262,7 @@ pub const Object = struct {
|
||||
try o.lowerType(Type.fromInterned(vector_type.child)),
|
||||
),
|
||||
.opt_type => |child_ty| {
|
||||
// Must stay in sync with `opt_payload` logic in `lowerPtr`.
|
||||
if (!Type.fromInterned(child_ty).hasRuntimeBitsIgnoreComptime(mod)) return .i8;
|
||||
|
||||
const payload_ty = try o.lowerType(Type.fromInterned(child_ty));
|
||||
@ -3281,6 +3282,8 @@ pub const Object = struct {
|
||||
},
|
||||
.anyframe_type => @panic("TODO implement lowerType for AnyFrame types"),
|
||||
.error_union_type => |error_union_type| {
|
||||
// Must stay in sync with `codegen.errUnionPayloadOffset`.
|
||||
// See logic in `lowerPtr`.
|
||||
const error_type = try o.errorIntType();
|
||||
if (!Type.fromInterned(error_union_type.payload_type).hasRuntimeBitsIgnoreComptime(mod))
|
||||
return error_type;
|
||||
@ -3792,17 +3795,7 @@ pub const Object = struct {
|
||||
128 => try o.builder.fp128Const(val.toFloat(f128, mod)),
|
||||
else => unreachable,
|
||||
},
|
||||
.ptr => |ptr| return switch (ptr.addr) {
|
||||
.decl => |decl| try o.lowerDeclRefValue(ty, decl),
|
||||
.anon_decl => |anon_decl| try o.lowerAnonDeclRef(ty, anon_decl),
|
||||
.int => |int| try o.lowerIntAsPtr(int),
|
||||
.eu_payload,
|
||||
.opt_payload,
|
||||
.elem,
|
||||
.field,
|
||||
=> try o.lowerParentPtr(val),
|
||||
.comptime_field, .comptime_alloc => unreachable,
|
||||
},
|
||||
.ptr => try o.lowerPtr(arg_val, 0),
|
||||
.slice => |slice| return o.builder.structConst(try o.lowerType(ty), &.{
|
||||
try o.lowerValue(slice.ptr),
|
||||
try o.lowerValue(slice.len),
|
||||
@ -4223,20 +4216,6 @@ pub const Object = struct {
|
||||
};
|
||||
}
|
||||
|
||||
fn lowerIntAsPtr(o: *Object, val: InternPool.Index) Allocator.Error!Builder.Constant {
|
||||
const mod = o.module;
|
||||
switch (mod.intern_pool.indexToKey(val)) {
|
||||
.undef => return o.builder.undefConst(.ptr),
|
||||
.int => {
|
||||
var bigint_space: Value.BigIntSpace = undefined;
|
||||
const bigint = Value.fromInterned(val).toBigInt(&bigint_space, mod);
|
||||
const llvm_int = try lowerBigInt(o, Type.usize, bigint);
|
||||
return o.builder.castConst(.inttoptr, llvm_int, .ptr);
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
|
||||
fn lowerBigInt(
|
||||
o: *Object,
|
||||
ty: Type,
|
||||
@ -4246,129 +4225,60 @@ pub const Object = struct {
|
||||
return o.builder.bigIntConst(try o.builder.intType(ty.intInfo(mod).bits), bigint);
|
||||
}
|
||||
|
||||
fn lowerParentPtrDecl(o: *Object, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant {
|
||||
const mod = o.module;
|
||||
const decl = mod.declPtr(decl_index);
|
||||
const ptr_ty = try mod.singleMutPtrType(decl.typeOf(mod));
|
||||
return o.lowerDeclRefValue(ptr_ty, decl_index);
|
||||
}
|
||||
|
||||
fn lowerParentPtr(o: *Object, ptr_val: Value) Error!Builder.Constant {
|
||||
const mod = o.module;
|
||||
const ip = &mod.intern_pool;
|
||||
const ptr = ip.indexToKey(ptr_val.toIntern()).ptr;
|
||||
return switch (ptr.addr) {
|
||||
.decl => |decl| try o.lowerParentPtrDecl(decl),
|
||||
.anon_decl => |ad| try o.lowerAnonDeclRef(Type.fromInterned(ad.orig_ty), ad),
|
||||
.int => |int| try o.lowerIntAsPtr(int),
|
||||
.eu_payload => |eu_ptr| {
|
||||
const parent_ptr = try o.lowerParentPtr(Value.fromInterned(eu_ptr));
|
||||
|
||||
const eu_ty = Type.fromInterned(ip.typeOf(eu_ptr)).childType(mod);
|
||||
const payload_ty = eu_ty.errorUnionPayload(mod);
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
|
||||
// In this case, we represent pointer to error union the same as pointer
|
||||
// to the payload.
|
||||
return parent_ptr;
|
||||
}
|
||||
|
||||
const err_int_ty = try mod.errorIntType();
|
||||
const payload_align = payload_ty.abiAlignment(mod);
|
||||
const err_align = err_int_ty.abiAlignment(mod);
|
||||
const index: u32 = if (payload_align.compare(.gt, err_align)) 2 else 1;
|
||||
return o.builder.gepConst(.inbounds, try o.lowerType(eu_ty), parent_ptr, null, &.{
|
||||
.@"0", try o.builder.intConst(.i32, index),
|
||||
fn lowerPtr(
|
||||
o: *Object,
|
||||
ptr_val: InternPool.Index,
|
||||
prev_offset: u64,
|
||||
) Error!Builder.Constant {
|
||||
const zcu = o.module;
|
||||
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
|
||||
const offset: u64 = prev_offset + ptr.byte_offset;
|
||||
return switch (ptr.base_addr) {
|
||||
.decl => |decl| {
|
||||
const base_ptr = try o.lowerDeclRefValue(decl);
|
||||
return o.builder.gepConst(.inbounds, .i8, base_ptr, null, &.{
|
||||
try o.builder.intConst(.i64, offset),
|
||||
});
|
||||
},
|
||||
.opt_payload => |opt_ptr| {
|
||||
const parent_ptr = try o.lowerParentPtr(Value.fromInterned(opt_ptr));
|
||||
|
||||
const opt_ty = Type.fromInterned(ip.typeOf(opt_ptr)).childType(mod);
|
||||
const payload_ty = opt_ty.optionalChild(mod);
|
||||
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
|
||||
payload_ty.optionalReprIsPayload(mod))
|
||||
{
|
||||
// In this case, we represent pointer to optional the same as pointer
|
||||
// to the payload.
|
||||
return parent_ptr;
|
||||
}
|
||||
|
||||
return o.builder.gepConst(.inbounds, try o.lowerType(opt_ty), parent_ptr, null, &.{ .@"0", .@"0" });
|
||||
},
|
||||
.comptime_field, .comptime_alloc => unreachable,
|
||||
.elem => |elem_ptr| {
|
||||
const parent_ptr = try o.lowerParentPtr(Value.fromInterned(elem_ptr.base));
|
||||
const elem_ty = Type.fromInterned(ip.typeOf(elem_ptr.base)).elemType2(mod);
|
||||
|
||||
return o.builder.gepConst(.inbounds, try o.lowerType(elem_ty), parent_ptr, null, &.{
|
||||
try o.builder.intConst(try o.lowerType(Type.usize), elem_ptr.index),
|
||||
.anon_decl => |ad| {
|
||||
const base_ptr = try o.lowerAnonDeclRef(ad);
|
||||
return o.builder.gepConst(.inbounds, .i8, base_ptr, null, &.{
|
||||
try o.builder.intConst(.i64, offset),
|
||||
});
|
||||
},
|
||||
.field => |field_ptr| {
|
||||
const parent_ptr = try o.lowerParentPtr(Value.fromInterned(field_ptr.base));
|
||||
const parent_ptr_ty = Type.fromInterned(ip.typeOf(field_ptr.base));
|
||||
const parent_ty = parent_ptr_ty.childType(mod);
|
||||
const field_index: u32 = @intCast(field_ptr.index);
|
||||
switch (parent_ty.zigTypeTag(mod)) {
|
||||
.Union => {
|
||||
if (parent_ty.containerLayout(mod) == .@"packed") {
|
||||
return parent_ptr;
|
||||
}
|
||||
|
||||
const layout = parent_ty.unionGetLayout(mod);
|
||||
if (layout.payload_size == 0) {
|
||||
// In this case a pointer to the union and a pointer to any
|
||||
// (void) payload is the same.
|
||||
return parent_ptr;
|
||||
}
|
||||
|
||||
const parent_llvm_ty = try o.lowerType(parent_ty);
|
||||
return o.builder.gepConst(.inbounds, parent_llvm_ty, parent_ptr, null, &.{
|
||||
.@"0",
|
||||
try o.builder.intConst(.i32, @intFromBool(
|
||||
layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align),
|
||||
)),
|
||||
});
|
||||
.int => try o.builder.castConst(
|
||||
.inttoptr,
|
||||
try o.builder.intConst(try o.lowerType(Type.usize), offset),
|
||||
.ptr,
|
||||
),
|
||||
.eu_payload => |eu_ptr| try o.lowerPtr(
|
||||
eu_ptr,
|
||||
offset + @import("../codegen.zig").errUnionPayloadOffset(
|
||||
Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu),
|
||||
zcu,
|
||||
),
|
||||
),
|
||||
.opt_payload => |opt_ptr| try o.lowerPtr(opt_ptr, offset),
|
||||
.field => |field| {
|
||||
const agg_ty = Value.fromInterned(field.base).typeOf(zcu).childType(zcu);
|
||||
const field_off: u64 = switch (agg_ty.zigTypeTag(zcu)) {
|
||||
.Pointer => off: {
|
||||
assert(agg_ty.isSlice(zcu));
|
||||
break :off switch (field.index) {
|
||||
Value.slice_ptr_index => 0,
|
||||
Value.slice_len_index => @divExact(zcu.getTarget().ptrBitWidth(), 8),
|
||||
else => unreachable,
|
||||
};
|
||||
},
|
||||
.Struct => {
|
||||
if (mod.typeToPackedStruct(parent_ty)) |struct_type| {
|
||||
const ptr_info = Type.fromInterned(ptr.ty).ptrInfo(mod);
|
||||
if (ptr_info.packed_offset.host_size != 0) return parent_ptr;
|
||||
|
||||
const parent_ptr_info = parent_ptr_ty.ptrInfo(mod);
|
||||
const bit_offset = mod.structPackedFieldBitOffset(struct_type, field_index) + parent_ptr_info.packed_offset.bit_offset;
|
||||
const llvm_usize = try o.lowerType(Type.usize);
|
||||
const base_addr = try o.builder.castConst(.ptrtoint, parent_ptr, llvm_usize);
|
||||
const byte_offset = try o.builder.intConst(llvm_usize, @divExact(bit_offset, 8));
|
||||
const field_addr = try o.builder.binConst(.add, base_addr, byte_offset);
|
||||
return o.builder.castConst(.inttoptr, field_addr, .ptr);
|
||||
}
|
||||
|
||||
return o.builder.gepConst(
|
||||
.inbounds,
|
||||
try o.lowerType(parent_ty),
|
||||
parent_ptr,
|
||||
null,
|
||||
if (o.llvmFieldIndex(parent_ty, field_index)) |llvm_field_index| &.{
|
||||
.@"0",
|
||||
try o.builder.intConst(.i32, llvm_field_index),
|
||||
} else &.{
|
||||
try o.builder.intConst(.i32, @intFromBool(
|
||||
parent_ty.hasRuntimeBitsIgnoreComptime(mod),
|
||||
)),
|
||||
},
|
||||
);
|
||||
},
|
||||
.Pointer => {
|
||||
assert(parent_ty.isSlice(mod));
|
||||
const parent_llvm_ty = try o.lowerType(parent_ty);
|
||||
return o.builder.gepConst(.inbounds, parent_llvm_ty, parent_ptr, null, &.{
|
||||
.@"0", try o.builder.intConst(.i32, field_index),
|
||||
});
|
||||
.Struct, .Union => switch (agg_ty.containerLayout(zcu)) {
|
||||
.auto => agg_ty.structFieldOffset(@intCast(field.index), zcu),
|
||||
.@"extern", .@"packed" => unreachable,
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
};
|
||||
return o.lowerPtr(field.base, offset + field_off);
|
||||
},
|
||||
.arr_elem, .comptime_field, .comptime_alloc => unreachable,
|
||||
};
|
||||
}
|
||||
|
||||
@ -4376,8 +4286,7 @@ pub const Object = struct {
|
||||
/// Maybe the logic could be unified.
|
||||
fn lowerAnonDeclRef(
|
||||
o: *Object,
|
||||
ptr_ty: Type,
|
||||
anon_decl: InternPool.Key.Ptr.Addr.AnonDecl,
|
||||
anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
|
||||
) Error!Builder.Constant {
|
||||
const mod = o.module;
|
||||
const ip = &mod.intern_pool;
|
||||
@ -4393,6 +4302,8 @@ pub const Object = struct {
|
||||
@panic("TODO");
|
||||
}
|
||||
|
||||
const ptr_ty = Type.fromInterned(anon_decl.orig_ty);
|
||||
|
||||
const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
|
||||
if ((!is_fn_body and !decl_ty.hasRuntimeBits(mod)) or
|
||||
(is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty);
|
||||
@ -4400,9 +4311,8 @@ pub const Object = struct {
|
||||
if (is_fn_body)
|
||||
@panic("TODO");
|
||||
|
||||
const orig_ty = Type.fromInterned(anon_decl.orig_ty);
|
||||
const llvm_addr_space = toLlvmAddressSpace(orig_ty.ptrAddressSpace(mod), target);
|
||||
const alignment = orig_ty.ptrAlignment(mod);
|
||||
const llvm_addr_space = toLlvmAddressSpace(ptr_ty.ptrAddressSpace(mod), target);
|
||||
const alignment = ptr_ty.ptrAlignment(mod);
|
||||
const llvm_global = (try o.resolveGlobalAnonDecl(decl_val, llvm_addr_space, alignment)).ptrConst(&o.builder).global;
|
||||
|
||||
const llvm_val = try o.builder.convConst(
|
||||
@ -4411,13 +4321,10 @@ pub const Object = struct {
|
||||
try o.builder.ptrType(llvm_addr_space),
|
||||
);
|
||||
|
||||
return o.builder.convConst(if (ptr_ty.isAbiInt(mod)) switch (ptr_ty.intInfo(mod).signedness) {
|
||||
.signed => .signed,
|
||||
.unsigned => .unsigned,
|
||||
} else .unneeded, llvm_val, try o.lowerType(ptr_ty));
|
||||
return o.builder.convConst(.unneeded, llvm_val, try o.lowerType(ptr_ty));
|
||||
}
|
||||
|
||||
fn lowerDeclRefValue(o: *Object, ty: Type, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant {
|
||||
fn lowerDeclRefValue(o: *Object, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant {
|
||||
const mod = o.module;
|
||||
|
||||
// In the case of something like:
|
||||
@ -4428,18 +4335,23 @@ pub const Object = struct {
|
||||
const decl = mod.declPtr(decl_index);
|
||||
if (decl.val.getFunction(mod)) |func| {
|
||||
if (func.owner_decl != decl_index) {
|
||||
return o.lowerDeclRefValue(ty, func.owner_decl);
|
||||
return o.lowerDeclRefValue(func.owner_decl);
|
||||
}
|
||||
} else if (decl.val.getExternFunc(mod)) |func| {
|
||||
if (func.decl != decl_index) {
|
||||
return o.lowerDeclRefValue(ty, func.decl);
|
||||
return o.lowerDeclRefValue(func.decl);
|
||||
}
|
||||
}
|
||||
|
||||
const decl_ty = decl.typeOf(mod);
|
||||
const ptr_ty = try decl.declPtrType(mod);
|
||||
|
||||
const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
|
||||
if ((!is_fn_body and !decl_ty.hasRuntimeBits(mod)) or
|
||||
(is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) return o.lowerPtrToVoid(ty);
|
||||
(is_fn_body and mod.typeToFunc(decl_ty).?.is_generic))
|
||||
{
|
||||
return o.lowerPtrToVoid(ptr_ty);
|
||||
}
|
||||
|
||||
const llvm_global = if (is_fn_body)
|
||||
(try o.resolveLlvmFunction(decl_index)).ptrConst(&o.builder).global
|
||||
@ -4452,10 +4364,7 @@ pub const Object = struct {
|
||||
try o.builder.ptrType(toLlvmAddressSpace(decl.@"addrspace", mod.getTarget())),
|
||||
);
|
||||
|
||||
return o.builder.convConst(if (ty.isAbiInt(mod)) switch (ty.intInfo(mod).signedness) {
|
||||
.signed => .signed,
|
||||
.unsigned => .unsigned,
|
||||
} else .unneeded, llvm_val, try o.lowerType(ty));
|
||||
return o.builder.convConst(.unneeded, llvm_val, try o.lowerType(ptr_ty));
|
||||
}
|
||||
|
||||
fn lowerPtrToVoid(o: *Object, ptr_ty: Type) Allocator.Error!Builder.Constant {
|
||||
|
||||
@ -863,7 +863,7 @@ const DeclGen = struct {
|
||||
const result_ty_id = try self.resolveType(ty, repr);
|
||||
const ip = &mod.intern_pool;
|
||||
|
||||
log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(mod), val.fmtValue(mod) });
|
||||
log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(mod), val.fmtValue(mod, null) });
|
||||
if (val.isUndefDeep(mod)) {
|
||||
return self.spv.constUndef(result_ty_id);
|
||||
}
|
||||
@ -983,10 +983,10 @@ const DeclGen = struct {
|
||||
const int_ty = ty.intTagType(mod);
|
||||
break :cache try self.constant(int_ty, int_val, repr);
|
||||
},
|
||||
.ptr => return self.constantPtr(ty, val),
|
||||
.ptr => return self.constantPtr(val),
|
||||
.slice => |slice| {
|
||||
const ptr_ty = ty.slicePtrFieldType(mod);
|
||||
const ptr_id = try self.constantPtr(ptr_ty, Value.fromInterned(slice.ptr));
|
||||
const ptr_id = try self.constantPtr(Value.fromInterned(slice.ptr));
|
||||
const len_id = try self.constant(Type.usize, Value.fromInterned(slice.len), .indirect);
|
||||
return self.constructStruct(
|
||||
ty,
|
||||
@ -1107,62 +1107,86 @@ const DeclGen = struct {
|
||||
return cacheable_id;
|
||||
}
|
||||
|
||||
fn constantPtr(self: *DeclGen, ptr_ty: Type, ptr_val: Value) Error!IdRef {
|
||||
fn constantPtr(self: *DeclGen, ptr_val: Value) Error!IdRef {
|
||||
// TODO: Caching??
|
||||
|
||||
const result_ty_id = try self.resolveType(ptr_ty, .direct);
|
||||
const mod = self.module;
|
||||
const zcu = self.module;
|
||||
|
||||
if (ptr_val.isUndef(mod)) return self.spv.constUndef(result_ty_id);
|
||||
if (ptr_val.isUndef(zcu)) {
|
||||
const result_ty = ptr_val.typeOf(zcu);
|
||||
const result_ty_id = try self.resolveType(result_ty, .direct);
|
||||
return self.spv.constUndef(result_ty_id);
|
||||
}
|
||||
|
||||
switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) {
|
||||
.decl => |decl| return try self.constantDeclRef(ptr_ty, decl),
|
||||
.anon_decl => |anon_decl| return try self.constantAnonDeclRef(ptr_ty, anon_decl),
|
||||
var arena = std.heap.ArenaAllocator.init(self.gpa);
|
||||
defer arena.deinit();
|
||||
|
||||
const derivation = try ptr_val.pointerDerivation(arena.allocator(), zcu);
|
||||
return self.derivePtr(derivation);
|
||||
}
|
||||
|
||||
fn derivePtr(self: *DeclGen, derivation: Value.PointerDeriveStep) Error!IdRef {
|
||||
const zcu = self.module;
|
||||
switch (derivation) {
|
||||
.comptime_alloc_ptr, .comptime_field_ptr => unreachable,
|
||||
.int => |int| {
|
||||
const ptr_id = self.spv.allocId();
|
||||
const result_ty_id = try self.resolveType(int.ptr_ty, .direct);
|
||||
// TODO: This can probably be an OpSpecConstantOp Bitcast, but
|
||||
// that is not implemented by Mesa yet. Therefore, just generate it
|
||||
// as a runtime operation.
|
||||
const result_ptr_id = self.spv.allocId();
|
||||
try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
|
||||
.id_result_type = result_ty_id,
|
||||
.id_result = ptr_id,
|
||||
.integer_value = try self.constant(Type.usize, Value.fromInterned(int), .direct),
|
||||
.id_result = result_ptr_id,
|
||||
.integer_value = try self.constant(Type.usize, try zcu.intValue(Type.usize, int.addr), .direct),
|
||||
});
|
||||
return ptr_id;
|
||||
return result_ptr_id;
|
||||
},
|
||||
.eu_payload => unreachable, // TODO
|
||||
.opt_payload => unreachable, // TODO
|
||||
.comptime_field, .comptime_alloc => unreachable,
|
||||
.elem => |elem_ptr| {
|
||||
const parent_ptr_ty = Type.fromInterned(mod.intern_pool.typeOf(elem_ptr.base));
|
||||
const parent_ptr_id = try self.constantPtr(parent_ptr_ty, Value.fromInterned(elem_ptr.base));
|
||||
const index_id = try self.constInt(Type.usize, elem_ptr.index, .direct);
|
||||
.decl_ptr => |decl| {
|
||||
const result_ptr_ty = try zcu.declPtr(decl).declPtrType(zcu);
|
||||
return self.constantDeclRef(result_ptr_ty, decl);
|
||||
},
|
||||
.anon_decl_ptr => |ad| {
|
||||
const result_ptr_ty = Type.fromInterned(ad.orig_ty);
|
||||
return self.constantAnonDeclRef(result_ptr_ty, ad);
|
||||
},
|
||||
.eu_payload_ptr => @panic("TODO"),
|
||||
.opt_payload_ptr => @panic("TODO"),
|
||||
.field_ptr => |field| {
|
||||
const parent_ptr_id = try self.derivePtr(field.parent.*);
|
||||
const parent_ptr_ty = try field.parent.ptrType(zcu);
|
||||
return self.structFieldPtr(field.result_ptr_ty, parent_ptr_ty, parent_ptr_id, field.field_idx);
|
||||
},
|
||||
.elem_ptr => |elem| {
|
||||
const parent_ptr_id = try self.derivePtr(elem.parent.*);
|
||||
const parent_ptr_ty = try elem.parent.ptrType(zcu);
|
||||
const index_id = try self.constInt(Type.usize, elem.elem_idx, .direct);
|
||||
return self.ptrElemPtr(parent_ptr_ty, parent_ptr_id, index_id);
|
||||
},
|
||||
.offset_and_cast => |oac| {
|
||||
const parent_ptr_id = try self.derivePtr(oac.parent.*);
|
||||
const parent_ptr_ty = try oac.parent.ptrType(zcu);
|
||||
disallow: {
|
||||
if (oac.byte_offset != 0) break :disallow;
|
||||
// Allow changing the pointer type child only to restructure arrays.
|
||||
// e.g. [3][2]T to T is fine, as is [2]T -> [2][1]T.
|
||||
const src_base_ty = parent_ptr_ty.arrayBase(zcu)[0];
|
||||
const dest_base_ty = oac.new_ptr_ty.arrayBase(zcu)[0];
|
||||
if (self.getTarget().os.tag == .vulkan and src_base_ty.toIntern() != dest_base_ty.toIntern()) break :disallow;
|
||||
|
||||
const elem_ptr_id = try self.ptrElemPtr(parent_ptr_ty, parent_ptr_id, index_id);
|
||||
|
||||
// TODO: Can we consolidate this in ptrElemPtr?
|
||||
const elem_ty = parent_ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T.
|
||||
const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(parent_ptr_ty.ptrAddressSpace(mod)));
|
||||
|
||||
// TODO: Can we remove this ID comparison?
|
||||
if (elem_ptr_ty_id == result_ty_id) {
|
||||
return elem_ptr_id;
|
||||
const result_ty_id = try self.resolveType(oac.new_ptr_ty, .direct);
|
||||
const result_ptr_id = self.spv.allocId();
|
||||
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
|
||||
.id_result_type = result_ty_id,
|
||||
.id_result = result_ptr_id,
|
||||
.operand = parent_ptr_id,
|
||||
});
|
||||
return result_ptr_id;
|
||||
}
|
||||
// This may happen when we have pointer-to-array and the result is
|
||||
// another pointer-to-array instead of a pointer-to-element.
|
||||
const result_id = self.spv.allocId();
|
||||
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
|
||||
.id_result_type = result_ty_id,
|
||||
.id_result = result_id,
|
||||
.operand = elem_ptr_id,
|
||||
return self.fail("Cannot perform pointer cast: '{}' to '{}'", .{
|
||||
parent_ptr_ty.fmt(zcu),
|
||||
oac.new_ptr_ty.fmt(zcu),
|
||||
});
|
||||
return result_id;
|
||||
},
|
||||
.field => |field| {
|
||||
const base_ptr_ty = Type.fromInterned(mod.intern_pool.typeOf(field.base));
|
||||
const base_ptr = try self.constantPtr(base_ptr_ty, Value.fromInterned(field.base));
|
||||
const field_index: u32 = @intCast(field.index);
|
||||
return try self.structFieldPtr(ptr_ty, base_ptr_ty, base_ptr, field_index);
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -1170,7 +1194,7 @@ const DeclGen = struct {
|
||||
fn constantAnonDeclRef(
|
||||
self: *DeclGen,
|
||||
ty: Type,
|
||||
anon_decl: InternPool.Key.Ptr.Addr.AnonDecl,
|
||||
anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
|
||||
) !IdRef {
|
||||
// TODO: Merge this function with constantDeclRef.
|
||||
|
||||
@ -4456,16 +4480,20 @@ const DeclGen = struct {
|
||||
) !IdRef {
|
||||
const result_ty_id = try self.resolveType(result_ptr_ty, .direct);
|
||||
|
||||
const mod = self.module;
|
||||
const object_ty = object_ptr_ty.childType(mod);
|
||||
switch (object_ty.zigTypeTag(mod)) {
|
||||
.Struct => switch (object_ty.containerLayout(mod)) {
|
||||
const zcu = self.module;
|
||||
const object_ty = object_ptr_ty.childType(zcu);
|
||||
switch (object_ty.zigTypeTag(zcu)) {
|
||||
.Pointer => {
|
||||
assert(object_ty.isSlice(zcu));
|
||||
return self.accessChain(result_ty_id, object_ptr, &.{field_index});
|
||||
},
|
||||
.Struct => switch (object_ty.containerLayout(zcu)) {
|
||||
.@"packed" => unreachable, // TODO
|
||||
else => {
|
||||
return try self.accessChain(result_ty_id, object_ptr, &.{field_index});
|
||||
},
|
||||
},
|
||||
.Union => switch (object_ty.containerLayout(mod)) {
|
||||
.Union => switch (object_ty.containerLayout(zcu)) {
|
||||
.@"packed" => unreachable, // TODO
|
||||
else => {
|
||||
const layout = self.unionLayout(object_ty);
|
||||
@ -4475,7 +4503,7 @@ const DeclGen = struct {
|
||||
return try self.spv.constUndef(result_ty_id);
|
||||
}
|
||||
|
||||
const storage_class = self.spvStorageClass(object_ptr_ty.ptrAddressSpace(mod));
|
||||
const storage_class = self.spvStorageClass(object_ptr_ty.ptrAddressSpace(zcu));
|
||||
const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, storage_class);
|
||||
const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, object_ptr, &.{layout.payload_index});
|
||||
|
||||
|
||||
@ -539,7 +539,6 @@ fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: V
|
||||
.none,
|
||||
.{
|
||||
.parent_atom_index = @intFromEnum(atom.sym_index),
|
||||
.addend = null,
|
||||
},
|
||||
);
|
||||
break :code switch (result) {
|
||||
|
||||
@ -54,22 +54,22 @@ pub const MutableValue = union(enum) {
|
||||
payload: *MutableValue,
|
||||
};
|
||||
|
||||
pub fn intern(mv: MutableValue, zcu: *Zcu, arena: Allocator) Allocator.Error!InternPool.Index {
|
||||
pub fn intern(mv: MutableValue, zcu: *Zcu, arena: Allocator) Allocator.Error!Value {
|
||||
const ip = &zcu.intern_pool;
|
||||
const gpa = zcu.gpa;
|
||||
return switch (mv) {
|
||||
return Value.fromInterned(switch (mv) {
|
||||
.interned => |ip_index| ip_index,
|
||||
.eu_payload => |sv| try ip.get(gpa, .{ .error_union = .{
|
||||
.ty = sv.ty,
|
||||
.val = .{ .payload = try sv.child.intern(zcu, arena) },
|
||||
.val = .{ .payload = (try sv.child.intern(zcu, arena)).toIntern() },
|
||||
} }),
|
||||
.opt_payload => |sv| try ip.get(gpa, .{ .opt = .{
|
||||
.ty = sv.ty,
|
||||
.val = try sv.child.intern(zcu, arena),
|
||||
.val = (try sv.child.intern(zcu, arena)).toIntern(),
|
||||
} }),
|
||||
.repeated => |sv| try ip.get(gpa, .{ .aggregate = .{
|
||||
.ty = sv.ty,
|
||||
.storage = .{ .repeated_elem = try sv.child.intern(zcu, arena) },
|
||||
.storage = .{ .repeated_elem = (try sv.child.intern(zcu, arena)).toIntern() },
|
||||
} }),
|
||||
.bytes => |b| try ip.get(gpa, .{ .aggregate = .{
|
||||
.ty = b.ty,
|
||||
@ -78,24 +78,24 @@ pub const MutableValue = union(enum) {
|
||||
.aggregate => |a| {
|
||||
const elems = try arena.alloc(InternPool.Index, a.elems.len);
|
||||
for (a.elems, elems) |mut_elem, *interned_elem| {
|
||||
interned_elem.* = try mut_elem.intern(zcu, arena);
|
||||
interned_elem.* = (try mut_elem.intern(zcu, arena)).toIntern();
|
||||
}
|
||||
return ip.get(gpa, .{ .aggregate = .{
|
||||
return Value.fromInterned(try ip.get(gpa, .{ .aggregate = .{
|
||||
.ty = a.ty,
|
||||
.storage = .{ .elems = elems },
|
||||
} });
|
||||
} }));
|
||||
},
|
||||
.slice => |s| try ip.get(gpa, .{ .slice = .{
|
||||
.ty = s.ty,
|
||||
.ptr = try s.ptr.intern(zcu, arena),
|
||||
.len = try s.len.intern(zcu, arena),
|
||||
.ptr = (try s.ptr.intern(zcu, arena)).toIntern(),
|
||||
.len = (try s.len.intern(zcu, arena)).toIntern(),
|
||||
} }),
|
||||
.un => |u| try ip.get(gpa, .{ .un = .{
|
||||
.ty = u.ty,
|
||||
.tag = u.tag,
|
||||
.val = try u.payload.intern(zcu, arena),
|
||||
.val = (try u.payload.intern(zcu, arena)).toIntern(),
|
||||
} }),
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
/// Un-interns the top level of this `MutableValue`, if applicable.
|
||||
@ -248,9 +248,11 @@ pub const MutableValue = union(enum) {
|
||||
},
|
||||
.Union => {
|
||||
const payload = try arena.create(MutableValue);
|
||||
// HACKHACK: this logic is silly, but Sema detects it and reverts the change where needed.
|
||||
// See comment at the top of `Sema.beginComptimePtrMutationInner`.
|
||||
payload.* = .{ .interned = .undef };
|
||||
const backing_ty = try Type.fromInterned(ty_ip).unionBackingType(zcu);
|
||||
payload.* = .{ .interned = try ip.get(
|
||||
gpa,
|
||||
.{ .undef = backing_ty.toIntern() },
|
||||
) };
|
||||
mv.* = .{ .un = .{
|
||||
.ty = ty_ip,
|
||||
.tag = .none,
|
||||
@ -294,7 +296,6 @@ pub const MutableValue = union(enum) {
|
||||
/// Get a pointer to the `MutableValue` associated with a field/element.
/// The returned pointer can safely be mutated through to modify the field value.
/// The returned pointer is valid until the representation of `mv` changes.
/// This function does *not* support accessing the ptr/len field of slices.
pub fn elem(
mv: *MutableValue,
zcu: *Zcu,
@ -304,18 +305,18 @@ pub const MutableValue = union(enum) {
|
||||
const ip = &zcu.intern_pool;
|
||||
const gpa = zcu.gpa;
|
||||
// Convert to the `aggregate` representation.
|
||||
switch (mv) {
|
||||
.eu_payload, .opt_payload, .slice, .un => unreachable,
|
||||
switch (mv.*) {
|
||||
.eu_payload, .opt_payload, .un => unreachable,
|
||||
.interned => {
|
||||
try mv.unintern(zcu, arena, false, false);
|
||||
},
|
||||
.bytes => |bytes| {
|
||||
const elems = try arena.alloc(MutableValue, bytes.data.len);
|
||||
for (bytes.data, elems) |byte, interned_byte| {
|
||||
interned_byte.* = try ip.get(gpa, .{ .int = .{
|
||||
for (bytes.data, elems) |byte, *interned_byte| {
|
||||
interned_byte.* = .{ .interned = try ip.get(gpa, .{ .int = .{
|
||||
.ty = .u8_type,
|
||||
.storage = .{ .u64 = byte },
|
||||
} });
|
||||
} }) };
|
||||
}
|
||||
mv.* = .{ .aggregate = .{
|
||||
.ty = bytes.ty,
|
||||
@ -331,9 +332,17 @@ pub const MutableValue = union(enum) {
|
||||
.elems = elems,
|
||||
} };
|
||||
},
|
||||
.aggregate => {},
|
||||
.slice, .aggregate => {},
|
||||
}
|
||||
switch (mv.*) {
|
||||
.aggregate => |*agg| return &agg.elems[field_idx],
|
||||
.slice => |*slice| return switch (field_idx) {
|
||||
Value.slice_ptr_index => slice.ptr,
|
||||
Value.slice_len_index => slice.len,
|
||||
else => unreachable,
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
return &mv.aggregate.elems[field_idx];
|
||||
}
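For orientation, a minimal sketch of how this accessor is meant to be used; the exact parameter list after `zcu` (allocator, index) and the values `some_array_val`/`new_elem_val` are assumptions for illustration, not taken from this commit:

// Hypothetical usage from inside the compiler:
var mv: MutableValue = .{ .interned = some_array_val };
const elem_ptr = try mv.elem(zcu, arena, 2); // may first switch `mv` to the `aggregate` repr
elem_ptr.* = .{ .interned = new_elem_val }; // mutate element 2 in place
const result: Value = try mv.intern(zcu, arena); // re-intern once the mutation is done
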
/// Modify a single field of a `MutableValue` which represents an aggregate or slice, leaving others
|
||||
@ -349,43 +358,44 @@ pub const MutableValue = union(enum) {
|
||||
) Allocator.Error!void {
|
||||
const ip = &zcu.intern_pool;
|
||||
const is_trivial_int = field_val.isTrivialInt(zcu);
|
||||
try mv.unintern(arena, is_trivial_int, true);
|
||||
switch (mv) {
|
||||
try mv.unintern(zcu, arena, is_trivial_int, true);
|
||||
switch (mv.*) {
|
||||
.interned,
|
||||
.eu_payload,
|
||||
.opt_payload,
|
||||
.un,
|
||||
=> unreachable,
|
||||
.slice => |*s| switch (field_idx) {
|
||||
Value.slice_ptr_index => s.ptr = field_val,
|
||||
Value.slice_len_index => s.len = field_val,
|
||||
Value.slice_ptr_index => s.ptr.* = field_val,
|
||||
Value.slice_len_index => s.len.* = field_val,
|
||||
else => unreachable,
|
||||
},
|
||||
.bytes => |b| {
|
||||
assert(is_trivial_int);
|
||||
assert(field_val.typeOf() == Type.u8);
|
||||
b.data[field_idx] = Value.fromInterned(field_val.interned).toUnsignedInt(zcu);
|
||||
assert(field_val.typeOf(zcu).toIntern() == .u8_type);
|
||||
b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
|
||||
},
|
||||
.repeated => |r| {
|
||||
if (field_val.eqlTrivial(r.child.*)) return;
|
||||
// We must switch to either the `aggregate` or the `bytes` representation.
|
||||
const len_inc_sent = ip.aggregateTypeLenIncludingSentinel(r.ty);
|
||||
if (ip.zigTypeTag(r.ty) != .Struct and
|
||||
if (Type.fromInterned(r.ty).zigTypeTag(zcu) != .Struct and
|
||||
is_trivial_int and
|
||||
Type.fromInterned(r.ty).childType(zcu) == .u8_type and
|
||||
Type.fromInterned(r.ty).childType(zcu).toIntern() == .u8_type and
|
||||
r.child.isTrivialInt(zcu))
|
||||
{
|
||||
// We can use the `bytes` representation.
|
||||
const bytes = try arena.alloc(u8, @intCast(len_inc_sent));
|
||||
const repeated_byte = Value.fromInterned(r.child.interned).getUnsignedInt(zcu);
|
||||
@memset(bytes, repeated_byte);
|
||||
bytes[field_idx] = Value.fromInterned(field_val.interned).getUnsignedInt(zcu);
|
||||
const repeated_byte = Value.fromInterned(r.child.interned).toUnsignedInt(zcu);
|
||||
@memset(bytes, @intCast(repeated_byte));
|
||||
bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
|
||||
mv.* = .{ .bytes = .{
|
||||
.ty = r.ty,
|
||||
.data = bytes,
|
||||
} };
|
||||
} else {
|
||||
// We must use the `aggregate` representation.
|
||||
const mut_elems = try arena.alloc(u8, @intCast(len_inc_sent));
|
||||
const mut_elems = try arena.alloc(MutableValue, @intCast(len_inc_sent));
|
||||
@memset(mut_elems, r.child.*);
|
||||
mut_elems[field_idx] = field_val;
|
||||
mv.* = .{ .aggregate = .{
|
||||
@ -396,12 +406,12 @@ pub const MutableValue = union(enum) {
|
||||
},
|
||||
.aggregate => |a| {
|
||||
a.elems[field_idx] = field_val;
|
||||
const is_struct = ip.zigTypeTag(a.ty) == .Struct;
|
||||
const is_struct = Type.fromInterned(a.ty).zigTypeTag(zcu) == .Struct;
|
||||
// Attempt to switch to a more efficient representation.
|
||||
const is_repeated = for (a.elems) |e| {
|
||||
if (!e.eqlTrivial(field_val)) break false;
|
||||
} else true;
|
||||
if (is_repeated) {
|
||||
if (!is_struct and is_repeated) {
|
||||
// Switch to `repeated` repr
|
||||
const mut_repeated = try arena.create(MutableValue);
|
||||
mut_repeated.* = field_val;
|
||||
@ -425,7 +435,7 @@ pub const MutableValue = union(enum) {
|
||||
} else {
|
||||
const bytes = try arena.alloc(u8, a.elems.len);
|
||||
for (a.elems, bytes) |elem_val, *b| {
|
||||
b.* = Value.fromInterned(elem_val.interned).toUnsignedInt(zcu);
|
||||
b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(zcu));
|
||||
}
|
||||
mv.* = .{ .bytes = .{
|
||||
.ty = a.ty,
|
||||
@ -505,4 +515,67 @@ pub const MutableValue = union(enum) {
|
||||
inline else => |x| Type.fromInterned(x.ty),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn unpackOptional(mv: MutableValue, zcu: *Zcu) union(enum) {
|
||||
undef,
|
||||
null,
|
||||
payload: MutableValue,
|
||||
} {
|
||||
return switch (mv) {
|
||||
.opt_payload => |pl| return .{ .payload = pl.child.* },
|
||||
.interned => |ip_index| switch (zcu.intern_pool.indexToKey(ip_index)) {
|
||||
.undef => return .undef,
|
||||
.opt => |opt| if (opt.val == .none) .null else .{ .payload = .{ .interned = opt.val } },
|
||||
else => unreachable,
|
||||
},
|
||||
else => unreachable,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn unpackErrorUnion(mv: MutableValue, zcu: *Zcu) union(enum) {
|
||||
undef,
|
||||
err: InternPool.NullTerminatedString,
|
||||
payload: MutableValue,
|
||||
} {
|
||||
return switch (mv) {
|
||||
.eu_payload => |pl| return .{ .payload = pl.child.* },
|
||||
.interned => |ip_index| switch (zcu.intern_pool.indexToKey(ip_index)) {
|
||||
.undef => return .undef,
|
||||
.error_union => |eu| switch (eu.val) {
|
||||
.err_name => |name| .{ .err = name },
|
||||
.payload => |pl| .{ .payload = .{ .interned = pl } },
|
||||
},
|
||||
else => unreachable,
|
||||
},
|
||||
else => unreachable,
|
||||
};
|
||||
}
|
||||
|
||||
/// Fast equality checking which may return false negatives.
|
||||
/// Used for deciding when to switch aggregate representations without fully
|
||||
/// interning many values.
|
||||
fn eqlTrivial(a: MutableValue, b: MutableValue) bool {
|
||||
const Tag = @typeInfo(MutableValue).Union.tag_type.?;
|
||||
if (@as(Tag, a) != @as(Tag, b)) return false;
|
||||
return switch (a) {
|
||||
.interned => |a_ip| a_ip == b.interned,
|
||||
.eu_payload => |a_pl| a_pl.ty == b.eu_payload.ty and a_pl.child.eqlTrivial(b.eu_payload.child.*),
|
||||
.opt_payload => |a_pl| a_pl.ty == b.opt_payload.ty and a_pl.child.eqlTrivial(b.opt_payload.child.*),
|
||||
.repeated => |a_rep| a_rep.ty == b.repeated.ty and a_rep.child.eqlTrivial(b.repeated.child.*),
|
||||
.bytes => |a_bytes| a_bytes.ty == b.bytes.ty and std.mem.eql(u8, a_bytes.data, b.bytes.data),
|
||||
.aggregate => |a_agg| {
|
||||
const b_agg = b.aggregate;
|
||||
if (a_agg.ty != b_agg.ty) return false;
|
||||
if (a_agg.elems.len != b_agg.elems.len) return false;
|
||||
for (a_agg.elems, b_agg.elems) |a_elem, b_elem| {
|
||||
if (!a_elem.eqlTrivial(b_elem)) return false;
|
||||
}
|
||||
return true;
|
||||
},
|
||||
.slice => |a_slice| a_slice.ty == b.slice.ty and
|
||||
a_slice.ptr.interned == b.slice.ptr.interned and
|
||||
a_slice.len.interned == b.slice.len.interned,
|
||||
.un => |a_un| a_un.ty == b.un.ty and a_un.tag == b.un.tag and a_un.payload.eqlTrivial(b.un.payload.*),
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
@ -951,7 +951,7 @@ const Writer = struct {
|
||||
const ty = Type.fromInterned(mod.intern_pool.indexToKey(ip_index).typeOf());
|
||||
try s.print("<{}, {}>", .{
|
||||
ty.fmt(mod),
|
||||
Value.fromInterned(ip_index).fmtValue(mod),
|
||||
Value.fromInterned(ip_index).fmtValue(mod, null),
|
||||
});
|
||||
} else {
|
||||
return w.writeInstIndex(s, operand.toIndex().?, dies);
|
||||
|
||||
@ -17,6 +17,7 @@ const max_string_len = 256;
|
||||
const FormatContext = struct {
|
||||
val: Value,
|
||||
mod: *Module,
|
||||
opt_sema: ?*Sema,
|
||||
};
|
||||
|
||||
pub fn format(
|
||||
@ -27,10 +28,10 @@ pub fn format(
|
||||
) !void {
|
||||
_ = options;
|
||||
comptime std.debug.assert(fmt.len == 0);
|
||||
return print(ctx.val, writer, 3, ctx.mod, null) catch |err| switch (err) {
|
||||
return print(ctx.val, writer, 3, ctx.mod, ctx.opt_sema) catch |err| switch (err) {
|
||||
error.OutOfMemory => @panic("OOM"), // We're not allowed to return this from a format function
|
||||
error.ComptimeBreak, error.ComptimeReturn => unreachable,
|
||||
error.AnalysisFail, error.NeededSourceLocation => unreachable, // TODO: re-evaluate when we actually pass `opt_sema`
|
||||
error.AnalysisFail, error.NeededSourceLocation => unreachable, // TODO: re-evaluate when we use `opt_sema` more fully
|
||||
else => |e| return e,
|
||||
};
|
||||
}
|
||||
@ -117,7 +118,7 @@ pub fn print(
|
||||
},
|
||||
.slice => |slice| {
|
||||
const print_contents = switch (ip.getBackingAddrTag(slice.ptr).?) {
|
||||
.field, .elem, .eu_payload, .opt_payload => unreachable,
|
||||
.field, .arr_elem, .eu_payload, .opt_payload => unreachable,
|
||||
.anon_decl, .comptime_alloc, .comptime_field => true,
|
||||
.decl, .int => false,
|
||||
};
|
||||
@ -125,7 +126,7 @@ pub fn print(
|
||||
// TODO: eventually we want to load the slice as an array with `opt_sema`, but that's
|
||||
// currently not possible without e.g. triggering compile errors.
|
||||
}
|
||||
try printPtr(slice.ptr, writer, false, false, 0, level, mod, opt_sema);
|
||||
try printPtr(Value.fromInterned(slice.ptr), writer, level, mod, opt_sema);
|
||||
try writer.writeAll("[0..");
|
||||
if (level == 0) {
|
||||
try writer.writeAll("(...)");
|
||||
@ -136,7 +137,7 @@ pub fn print(
|
||||
},
|
||||
.ptr => {
|
||||
const print_contents = switch (ip.getBackingAddrTag(val.toIntern()).?) {
|
||||
.field, .elem, .eu_payload, .opt_payload => unreachable,
|
||||
.field, .arr_elem, .eu_payload, .opt_payload => unreachable,
|
||||
.anon_decl, .comptime_alloc, .comptime_field => true,
|
||||
.decl, .int => false,
|
||||
};
|
||||
@ -144,13 +145,13 @@ pub fn print(
|
||||
// TODO: eventually we want to load the pointer with `opt_sema`, but that's
|
||||
// currently not possible without e.g. triggering compile errors.
|
||||
}
|
||||
try printPtr(val.toIntern(), writer, false, false, 0, level, mod, opt_sema);
|
||||
try printPtr(val, writer, level, mod, opt_sema);
|
||||
},
|
||||
.opt => |opt| switch (opt.val) {
|
||||
.none => try writer.writeAll("null"),
|
||||
else => |payload| try print(Value.fromInterned(payload), writer, level, mod, opt_sema),
|
||||
},
|
||||
.aggregate => |aggregate| try printAggregate(val, aggregate, writer, level, false, mod, opt_sema),
|
||||
.aggregate => |aggregate| try printAggregate(val, aggregate, false, writer, level, mod, opt_sema),
|
||||
.un => |un| {
|
||||
if (level == 0) {
|
||||
try writer.writeAll(".{ ... }");
|
||||
@ -176,13 +177,14 @@ pub fn print(
|
||||
fn printAggregate(
|
||||
val: Value,
|
||||
aggregate: InternPool.Key.Aggregate,
|
||||
is_ref: bool,
|
||||
writer: anytype,
|
||||
level: u8,
|
||||
is_ref: bool,
|
||||
zcu: *Zcu,
|
||||
opt_sema: ?*Sema,
|
||||
) (@TypeOf(writer).Error || Module.CompileError)!void {
|
||||
if (level == 0) {
|
||||
if (is_ref) try writer.writeByte('&');
|
||||
return writer.writeAll(".{ ... }");
|
||||
}
|
||||
const ip = &zcu.intern_pool;
|
||||
@ -257,101 +259,87 @@ fn printAggregate(
|
||||
return writer.writeAll(" }");
|
||||
}
|
||||
|
||||
fn printPtr(
|
||||
ptr_val: InternPool.Index,
|
||||
writer: anytype,
|
||||
force_type: bool,
|
||||
force_addrof: bool,
|
||||
leading_parens: u32,
|
||||
level: u8,
|
||||
zcu: *Zcu,
|
||||
opt_sema: ?*Sema,
|
||||
) (@TypeOf(writer).Error || Module.CompileError)!void {
|
||||
const ip = &zcu.intern_pool;
|
||||
const ptr = switch (ip.indexToKey(ptr_val)) {
|
||||
.undef => |ptr_ty| {
|
||||
if (force_addrof) try writer.writeAll("&");
|
||||
try writer.writeByteNTimes('(', leading_parens);
|
||||
try writer.print("@as({}, undefined)", .{Type.fromInterned(ptr_ty).fmt(zcu)});
|
||||
return;
|
||||
},
|
||||
fn printPtr(ptr_val: Value, writer: anytype, level: u8, zcu: *Zcu, opt_sema: ?*Sema) (@TypeOf(writer).Error || Module.CompileError)!void {
|
||||
const ptr = switch (zcu.intern_pool.indexToKey(ptr_val.toIntern())) {
|
||||
.undef => return writer.writeAll("undefined"),
|
||||
.ptr => |ptr| ptr,
|
||||
else => unreachable,
|
||||
};
|
||||
if (level == 0) {
|
||||
return writer.writeAll("&...");
|
||||
}
|
||||
switch (ptr.addr) {
|
||||
.int => |int| {
|
||||
if (force_addrof) try writer.writeAll("&");
|
||||
try writer.writeByteNTimes('(', leading_parens);
|
||||
if (force_type) {
|
||||
try writer.print("@as({}, @ptrFromInt(", .{Type.fromInterned(ptr.ty).fmt(zcu)});
|
||||
try print(Value.fromInterned(int), writer, level - 1, zcu, opt_sema);
|
||||
try writer.writeAll("))");
|
||||
} else {
|
||||
try writer.writeAll("@ptrFromInt(");
|
||||
try print(Value.fromInterned(int), writer, level - 1, zcu, opt_sema);
|
||||
try writer.writeAll(")");
|
||||
}
|
||||
},
|
||||
.decl => |index| {
|
||||
try writer.writeAll("&");
|
||||
try zcu.declPtr(index).renderFullyQualifiedName(zcu, writer);
|
||||
},
|
||||
.comptime_alloc => try writer.writeAll("&(comptime alloc)"),
|
||||
.anon_decl => |anon| switch (ip.indexToKey(anon.val)) {
|
||||
.aggregate => |aggregate| try printAggregate(
|
||||
Value.fromInterned(anon.val),
|
||||
aggregate,
|
||||
writer,
|
||||
level - 1,
|
||||
|
||||
if (ptr.base_addr == .anon_decl) {
|
||||
// If the value is an aggregate, we can potentially print it more nicely.
|
||||
switch (zcu.intern_pool.indexToKey(ptr.base_addr.anon_decl.val)) {
|
||||
.aggregate => |agg| return printAggregate(
|
||||
Value.fromInterned(ptr.base_addr.anon_decl.val),
|
||||
agg,
|
||||
true,
|
||||
writer,
|
||||
level,
|
||||
zcu,
|
||||
opt_sema,
|
||||
),
|
||||
else => {
|
||||
const ty = Type.fromInterned(ip.typeOf(anon.val));
|
||||
try writer.print("&@as({}, ", .{ty.fmt(zcu)});
|
||||
try print(Value.fromInterned(anon.val), writer, level - 1, zcu, opt_sema);
|
||||
try writer.writeAll(")");
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
|
||||
var arena = std.heap.ArenaAllocator.init(zcu.gpa);
|
||||
defer arena.deinit();
|
||||
const derivation = try ptr_val.pointerDerivationAdvanced(arena.allocator(), zcu, opt_sema);
|
||||
try printPtrDerivation(derivation, writer, level, zcu, opt_sema);
|
||||
}
|
||||
|
||||
/// Print `derivation` as an lvalue, i.e. such that writing `&` before this gives the pointer value.
|
||||
fn printPtrDerivation(derivation: Value.PointerDeriveStep, writer: anytype, level: u8, zcu: *Zcu, opt_sema: ?*Sema) (@TypeOf(writer).Error || Module.CompileError)!void {
|
||||
const ip = &zcu.intern_pool;
|
||||
switch (derivation) {
|
||||
.int => |int| try writer.print("@as({}, @ptrFromInt({x})).*", .{
|
||||
int.ptr_ty.fmt(zcu),
|
||||
int.addr,
|
||||
}),
|
||||
.decl_ptr => |decl| {
|
||||
try zcu.declPtr(decl).renderFullyQualifiedName(zcu, writer);
|
||||
},
|
||||
.comptime_field => |val| {
|
||||
const ty = Type.fromInterned(ip.typeOf(val));
|
||||
try writer.print("&@as({}, ", .{ty.fmt(zcu)});
|
||||
try print(Value.fromInterned(val), writer, level - 1, zcu, opt_sema);
|
||||
try writer.writeAll(")");
|
||||
.anon_decl_ptr => |anon| {
|
||||
const ty = Value.fromInterned(anon.val).typeOf(zcu);
|
||||
try writer.print("@as({}, ", .{ty.fmt(zcu)});
|
||||
try print(Value.fromInterned(anon.val), writer, level - 1, zcu, opt_sema);
|
||||
try writer.writeByte(')');
|
||||
},
|
||||
.eu_payload => |base| {
|
||||
try printPtr(base, writer, true, true, leading_parens, level, zcu, opt_sema);
|
||||
.comptime_alloc_ptr => |info| {
|
||||
try writer.print("@as({}, ", .{info.val.typeOf(zcu).fmt(zcu)});
|
||||
try print(info.val, writer, level - 1, zcu, opt_sema);
|
||||
try writer.writeByte(')');
|
||||
},
|
||||
.comptime_field_ptr => |val| {
|
||||
const ty = val.typeOf(zcu);
|
||||
try writer.print("@as({}, ", .{ty.fmt(zcu)});
|
||||
try print(val, writer, level - 1, zcu, opt_sema);
|
||||
try writer.writeByte(')');
|
||||
},
|
||||
.eu_payload_ptr => |info| {
|
||||
try writer.writeByte('(');
|
||||
try printPtrDerivation(info.parent.*, writer, level, zcu, opt_sema);
|
||||
try writer.writeAll(" catch unreachable)");
|
||||
},
|
||||
.opt_payload_ptr => |info| {
|
||||
try printPtrDerivation(info.parent.*, writer, level, zcu, opt_sema);
|
||||
try writer.writeAll(".?");
|
||||
},
|
||||
.opt_payload => |base| {
|
||||
try writer.writeAll("(");
|
||||
try printPtr(base, writer, true, true, leading_parens + 1, level, zcu, opt_sema);
|
||||
try writer.writeAll(" catch unreachable");
|
||||
},
|
||||
.elem => |elem| {
|
||||
try printPtr(elem.base, writer, true, true, leading_parens, level, zcu, opt_sema);
|
||||
try writer.print("[{d}]", .{elem.index});
|
||||
},
|
||||
.field => |field| {
|
||||
try printPtr(field.base, writer, true, true, leading_parens, level, zcu, opt_sema);
|
||||
const base_ty = Type.fromInterned(ip.typeOf(field.base)).childType(zcu);
|
||||
switch (base_ty.zigTypeTag(zcu)) {
|
||||
.Struct => if (base_ty.isTuple(zcu)) {
|
||||
try writer.print("[{d}]", .{field.index});
|
||||
} else {
|
||||
const field_name = base_ty.structFieldName(@intCast(field.index), zcu).unwrap().?;
|
||||
.field_ptr => |field| {
|
||||
try printPtrDerivation(field.parent.*, writer, level, zcu, opt_sema);
|
||||
const agg_ty = (try field.parent.ptrType(zcu)).childType(zcu);
|
||||
switch (agg_ty.zigTypeTag(zcu)) {
|
||||
.Struct => if (agg_ty.structFieldName(field.field_idx, zcu).unwrap()) |field_name| {
|
||||
try writer.print(".{i}", .{field_name.fmt(ip)});
|
||||
} else {
|
||||
try writer.print("[{d}]", .{field.field_idx});
|
||||
},
|
||||
.Union => {
|
||||
const tag_ty = base_ty.unionTagTypeHypothetical(zcu);
|
||||
const field_name = tag_ty.enumFieldName(@intCast(field.index), zcu);
|
||||
const tag_ty = agg_ty.unionTagTypeHypothetical(zcu);
|
||||
const field_name = tag_ty.enumFieldName(field.field_idx, zcu);
|
||||
try writer.print(".{i}", .{field_name.fmt(ip)});
|
||||
},
|
||||
.Pointer => switch (field.index) {
|
||||
.Pointer => switch (field.field_idx) {
|
||||
Value.slice_ptr_index => try writer.writeAll(".ptr"),
|
||||
Value.slice_len_index => try writer.writeAll(".len"),
|
||||
else => unreachable,
|
||||
@ -359,5 +347,18 @@ fn printPtr(
|
||||
else => unreachable,
|
||||
}
|
||||
},
|
||||
.elem_ptr => |elem| {
|
||||
try printPtrDerivation(elem.parent.*, writer, level, zcu, opt_sema);
|
||||
try writer.print("[{d}]", .{elem.elem_idx});
|
||||
},
|
||||
.offset_and_cast => |oac| if (oac.byte_offset == 0) {
|
||||
try writer.print("@as({}, @ptrCast(", .{oac.new_ptr_ty.fmt(zcu)});
|
||||
try printPtrDerivation(oac.parent.*, writer, level, zcu, opt_sema);
|
||||
try writer.writeAll("))");
|
||||
} else {
|
||||
try writer.print("@as({}, @ptrFromInt(@intFromPtr(", .{oac.new_ptr_ty.fmt(zcu)});
|
||||
try printPtrDerivation(oac.parent.*, writer, level, zcu, opt_sema);
|
||||
try writer.print(") + {d}))", .{oac.byte_offset});
|
||||
},
|
||||
}
|
||||
}
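To make the rendering concrete, here are the rough output shapes the cases above produce; the declaration, field, and type names are hypothetical:

// .decl_ptr                     -> example.some_global                (fully-qualified decl name)
// .field_ptr on that decl       -> example.some_global.inner          (or "[i]" for unnamed/tuple fields)
// .elem_ptr on that field       -> example.some_global.inner[3]
// .opt_payload_ptr              -> example.some_global.inner[3].?
// .offset_and_cast, zero offset -> @as(*u8, @ptrCast(example.some_global.inner[3]))
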
102
src/type.zig
@ -172,6 +172,7 @@ pub const Type = struct {
|
||||
}
|
||||
|
||||
/// Prints a name suitable for `@typeName`.
|
||||
/// TODO: take an `opt_sema` to pass to `fmtValue` when printing sentinels.
|
||||
pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void {
|
||||
const ip = &mod.intern_pool;
|
||||
switch (ip.indexToKey(ty.toIntern())) {
|
||||
@ -187,8 +188,8 @@ pub const Type = struct {
|
||||
|
||||
if (info.sentinel != .none) switch (info.flags.size) {
|
||||
.One, .C => unreachable,
|
||||
.Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod)}),
|
||||
.Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod)}),
|
||||
.Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}),
|
||||
.Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}),
|
||||
} else switch (info.flags.size) {
|
||||
.One => try writer.writeAll("*"),
|
||||
.Many => try writer.writeAll("[*]"),
|
||||
@ -234,7 +235,7 @@ pub const Type = struct {
|
||||
} else {
|
||||
try writer.print("[{d}:{}]", .{
|
||||
array_type.len,
|
||||
Value.fromInterned(array_type.sentinel).fmtValue(mod),
|
||||
Value.fromInterned(array_type.sentinel).fmtValue(mod, null),
|
||||
});
|
||||
try print(Type.fromInterned(array_type.child), writer, mod);
|
||||
}
|
||||
@ -352,7 +353,7 @@ pub const Type = struct {
|
||||
try print(Type.fromInterned(field_ty), writer, mod);
|
||||
|
||||
if (val != .none) {
|
||||
try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(mod)});
|
||||
try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(mod, null)});
|
||||
}
|
||||
}
|
||||
try writer.writeAll("}");
|
||||
@ -1965,6 +1966,12 @@ pub const Type = struct {
|
||||
return Type.fromInterned(union_fields[index]);
|
||||
}
|
||||
|
||||
pub fn unionFieldTypeByIndex(ty: Type, index: usize, mod: *Module) Type {
|
||||
const ip = &mod.intern_pool;
|
||||
const union_obj = mod.typeToUnion(ty).?;
|
||||
return Type.fromInterned(union_obj.field_types.get(ip)[index]);
|
||||
}
|
||||
|
||||
pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
|
||||
const union_obj = mod.typeToUnion(ty).?;
|
||||
return mod.unionTagFieldIndex(union_obj, enum_tag);
|
||||
@ -3049,22 +3056,34 @@ pub const Type = struct {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) Alignment {
|
||||
const ip = &mod.intern_pool;
|
||||
pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment {
|
||||
return ty.structFieldAlignAdvanced(index, zcu, null) catch unreachable;
|
||||
}
|
||||
|
||||
pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, opt_sema: ?*Sema) !Alignment {
|
||||
const ip = &zcu.intern_pool;
|
||||
switch (ip.indexToKey(ty.toIntern())) {
|
||||
.struct_type => {
|
||||
const struct_type = ip.loadStructType(ty.toIntern());
|
||||
assert(struct_type.layout != .@"packed");
|
||||
const explicit_align = struct_type.fieldAlign(ip, index);
|
||||
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
|
||||
return mod.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
|
||||
if (opt_sema) |sema| {
|
||||
return sema.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
|
||||
} else {
|
||||
return zcu.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
|
||||
}
|
||||
},
|
||||
.anon_struct_type => |anon_struct| {
|
||||
return Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignment(mod);
|
||||
return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, if (opt_sema) |sema| .{ .sema = sema } else .eager)).scalar;
|
||||
},
|
||||
.union_type => {
|
||||
const union_obj = ip.loadUnionType(ty.toIntern());
|
||||
return mod.unionFieldNormalAlignment(union_obj, @intCast(index));
|
||||
if (opt_sema) |sema| {
|
||||
return sema.unionFieldAlignment(union_obj, @intCast(index));
|
||||
} else {
|
||||
return zcu.unionFieldNormalAlignment(union_obj, @intCast(index));
|
||||
}
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
@ -3301,6 +3320,71 @@ pub const Type = struct {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn arrayBase(ty: Type, zcu: *const Zcu) struct { Type, u64 } {
|
||||
var cur_ty: Type = ty;
|
||||
var cur_len: u64 = 1;
|
||||
while (cur_ty.zigTypeTag(zcu) == .Array) {
|
||||
cur_len *= cur_ty.arrayLenIncludingSentinel(zcu);
|
||||
cur_ty = cur_ty.childType(zcu);
|
||||
}
|
||||
return .{ cur_ty, cur_len };
|
||||
}
|
||||
|
||||
pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: u32, zcu: *Zcu) union(enum) {
|
||||
/// The result is a bit-pointer with the same value and a new packed offset.
|
||||
bit_ptr: InternPool.Key.PtrType.PackedOffset,
|
||||
/// The result is a standard pointer.
|
||||
byte_ptr: struct {
|
||||
/// The byte offset of the field pointer from the parent pointer value.
|
||||
offset: u64,
|
||||
/// The alignment of the field pointer type.
|
||||
alignment: InternPool.Alignment,
|
||||
},
|
||||
} {
|
||||
comptime assert(Type.packed_struct_layout_version == 2);
|
||||
|
||||
const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu);
|
||||
const field_ty = struct_ty.structFieldType(field_idx, zcu);
|
||||
|
||||
var bit_offset: u16 = 0;
|
||||
var running_bits: u16 = 0;
|
||||
for (0..struct_ty.structFieldCount(zcu)) |i| {
|
||||
const f_ty = struct_ty.structFieldType(i, zcu);
|
||||
if (i == field_idx) {
|
||||
bit_offset = running_bits;
|
||||
}
|
||||
running_bits += @intCast(f_ty.bitSize(zcu));
|
||||
}
|
||||
|
||||
const res_host_size: u16, const res_bit_offset: u16 = if (parent_ptr_info.packed_offset.host_size != 0)
|
||||
.{ parent_ptr_info.packed_offset.host_size, parent_ptr_info.packed_offset.bit_offset + bit_offset }
|
||||
else
|
||||
.{ (running_bits + 7) / 8, bit_offset };
|
||||
|
||||
// If the field happens to be byte-aligned, simplify the pointer type.
|
||||
// We can only do this if the pointee's bit size matches its ABI byte size,
|
||||
// so that loads and stores do not interfere with surrounding packed bits.
|
||||
//
|
||||
// TODO: we do not attempt this with big-endian targets yet because of nested
|
||||
// structs and floats. I need to double-check the desired behavior for big endian
|
||||
// targets before adding the necessary complications to this code. This will not
|
||||
// cause miscompilations; it only means the field pointer uses bit masking when it
|
||||
// might not be strictly necessary.
|
||||
if (res_bit_offset % 8 == 0 and field_ty.bitSize(zcu) == field_ty.abiSize(zcu) * 8 and zcu.getTarget().cpu.arch.endian() == .little) {
|
||||
const byte_offset = res_bit_offset / 8;
|
||||
const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(zcu).toByteUnits().?));
|
||||
return .{ .byte_ptr = .{
|
||||
.offset = byte_offset,
|
||||
.alignment = new_align,
|
||||
} };
|
||||
}
|
||||
|
||||
return .{ .bit_ptr = .{
|
||||
.host_size = res_host_size,
|
||||
.bit_offset = res_bit_offset,
|
||||
} };
|
||||
}
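A small worked example of the two result shapes, assuming a little-endian target, a parent pointer with no pre-existing packed offset, and this hypothetical struct:

// const P = packed struct { a: u3, b: u5, c: u8 }; // 16 bits total, so host_size = 2
// Field `b` starts at bit offset 3, which is not byte-aligned, so its pointer stays a
// bit-pointer: .{ .bit_ptr = .{ .host_size = 2, .bit_offset = 3 } }
// Field `c` starts at bit offset 8 and u8's bit size equals its ABI size in bits, so the
// pointer simplifies to a plain byte pointer: .{ .byte_ptr = .{ .offset = 1, .alignment = 1 } }
// (alignment 1 because the byte offset is odd).
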
pub const @"u1": Type = .{ .ip_index = .u1_type };
|
||||
pub const @"u8": Type = .{ .ip_index = .u8_type };
|
||||
pub const @"u16": Type = .{ .ip_index = .u16_type };
|
||||
|
||||
@ -517,3 +517,61 @@ test "@bitCast of packed struct of bools all false" {
|
||||
p.b3 = false;
|
||||
try expect(@as(u8, @as(u4, @bitCast(p))) == 0);
|
||||
}
|
||||
|
||||
test "@bitCast of packed struct containing pointer" {
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
|
||||
|
||||
const S = struct {
|
||||
const A = packed struct {
|
||||
ptr: *const u32,
|
||||
};
|
||||
|
||||
const B = packed struct {
|
||||
ptr: *const i32,
|
||||
};
|
||||
|
||||
fn doTheTest() !void {
|
||||
const x: u32 = 123;
|
||||
var a: A = undefined;
|
||||
a = .{ .ptr = &x };
|
||||
const b: B = @bitCast(a);
|
||||
try expect(b.ptr.* == 123);
|
||||
}
|
||||
};
|
||||
|
||||
try S.doTheTest();
|
||||
try comptime S.doTheTest();
|
||||
}
|
||||
|
||||
test "@bitCast of extern struct containing pointer" {
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
|
||||
|
||||
const S = struct {
|
||||
const A = extern struct {
|
||||
ptr: *const u32,
|
||||
};
|
||||
|
||||
const B = extern struct {
|
||||
ptr: *const i32,
|
||||
};
|
||||
|
||||
fn doTheTest() !void {
|
||||
const x: u32 = 123;
|
||||
var a: A = undefined;
|
||||
a = .{ .ptr = &x };
|
||||
const b: B = @bitCast(a);
|
||||
try expect(b.ptr.* == 123);
|
||||
}
|
||||
};
|
||||
|
||||
try S.doTheTest();
|
||||
try comptime S.doTheTest();
|
||||
}
|
||||
|
||||
@ -139,8 +139,8 @@ const Piece = packed struct {
|
||||
color: Color,
|
||||
type: Type,
|
||||
|
||||
const Type = enum { KING, QUEEN, BISHOP, KNIGHT, ROOK, PAWN };
|
||||
const Color = enum { WHITE, BLACK };
|
||||
const Type = enum(u3) { KING, QUEEN, BISHOP, KNIGHT, ROOK, PAWN };
|
||||
const Color = enum(u1) { WHITE, BLACK };
|
||||
|
||||
fn charToPiece(c: u8) !@This() {
|
||||
return .{
|
||||
|
||||
@ -32,32 +32,22 @@ test "type pun signed and unsigned as array pointer" {
|
||||
}
|
||||
|
||||
test "type pun signed and unsigned as offset many pointer" {
|
||||
if (true) {
|
||||
// TODO https://github.com/ziglang/zig/issues/9646
|
||||
return error.SkipZigTest;
|
||||
}
|
||||
|
||||
comptime {
|
||||
var x: u32 = 0;
|
||||
var y = @as([*]i32, @ptrCast(&x));
|
||||
var x: [11]u32 = undefined;
|
||||
var y: [*]i32 = @ptrCast(&x[10]);
|
||||
y -= 10;
|
||||
y[10] = -1;
|
||||
try testing.expectEqual(@as(u32, 0xFFFFFFFF), x);
|
||||
try testing.expectEqual(@as(u32, 0xFFFFFFFF), x[10]);
|
||||
}
|
||||
}
|
||||
|
||||
test "type pun signed and unsigned as array pointer with pointer arithemtic" {
|
||||
if (true) {
|
||||
// TODO https://github.com/ziglang/zig/issues/9646
|
||||
return error.SkipZigTest;
|
||||
}
|
||||
|
||||
comptime {
|
||||
var x: u32 = 0;
|
||||
const y = @as([*]i32, @ptrCast(&x)) - 10;
|
||||
var x: [11]u32 = undefined;
|
||||
const y = @as([*]i32, @ptrCast(&x[10])) - 10;
|
||||
const z: *[15]i32 = y[0..15];
|
||||
z[10] = -1;
|
||||
try testing.expectEqual(@as(u32, 0xFFFFFFFF), x);
|
||||
try testing.expectEqual(@as(u32, 0xFFFFFFFF), x[10]);
|
||||
}
|
||||
}
|
||||
|
||||
@ -171,10 +161,13 @@ fn doTypePunBitsTest(as_bits: *Bits) !void {
|
||||
|
||||
test "type pun bits" {
|
||||
if (true) {
|
||||
// TODO https://github.com/ziglang/zig/issues/9646
|
||||
// TODO: currently, marking one bit of `Bits` as `undefined` does
|
||||
// mark the whole value as `undefined`, since the pointer interpretation
|
||||
// logic reads it back in as a `u32`, which is partially-undef and thus
|
||||
// has value `undefined`. We need an improved comptime memory representation
|
||||
// to make this work.
|
||||
return error.SkipZigTest;
|
||||
}
|
||||
|
||||
comptime {
|
||||
var v: u32 = undefined;
|
||||
try doTypePunBitsTest(@as(*Bits, @ptrCast(&v)));
|
||||
@ -296,11 +289,6 @@ test "dance on linker values" {
|
||||
}
|
||||
|
||||
test "offset array ptr by element size" {
|
||||
if (true) {
|
||||
// TODO https://github.com/ziglang/zig/issues/9646
|
||||
return error.SkipZigTest;
|
||||
}
|
||||
|
||||
comptime {
|
||||
const VirtualStruct = struct { x: u32 };
|
||||
var arr: [4]VirtualStruct = .{
|
||||
@ -310,15 +298,10 @@ test "offset array ptr by element size" {
|
||||
.{ .x = bigToNativeEndian(u32, 0x03070b0f) },
|
||||
};
|
||||
|
||||
const address = @intFromPtr(&arr);
|
||||
try testing.expectEqual(@intFromPtr(&arr[0]), address);
|
||||
try testing.expectEqual(@intFromPtr(&arr[0]) + 10, address + 10);
|
||||
try testing.expectEqual(@intFromPtr(&arr[1]), address + @sizeOf(VirtualStruct));
|
||||
try testing.expectEqual(@intFromPtr(&arr[2]), address + 2 * @sizeOf(VirtualStruct));
|
||||
try testing.expectEqual(@intFromPtr(&arr[3]), address + @sizeOf(VirtualStruct) * 3);
|
||||
const buf: [*]align(@alignOf(VirtualStruct)) u8 = @ptrCast(&arr);
|
||||
|
||||
const secondElement = @as(*VirtualStruct, @ptrFromInt(@intFromPtr(&arr[0]) + 2 * @sizeOf(VirtualStruct)));
|
||||
try testing.expectEqual(bigToNativeEndian(u32, 0x02060a0e), secondElement.x);
|
||||
const second_element: *VirtualStruct = @ptrCast(buf + 2 * @sizeOf(VirtualStruct));
|
||||
try testing.expectEqual(bigToNativeEndian(u32, 0x02060a0e), second_element.x);
|
||||
}
|
||||
}
|
||||
|
||||
@ -364,7 +347,7 @@ test "offset field ptr by enclosing array element size" {
|
||||
|
||||
var i: usize = 0;
|
||||
while (i < 4) : (i += 1) {
|
||||
var ptr: [*]u8 = @as([*]u8, @ptrCast(&arr[0]));
|
||||
var ptr: [*]u8 = @ptrCast(&arr[0]);
|
||||
ptr += i;
|
||||
ptr += @offsetOf(VirtualStruct, "x");
|
||||
var j: usize = 0;
|
||||
@ -400,23 +383,18 @@ test "accessing reinterpreted memory of parent object" {
|
||||
}
|
||||
|
||||
test "bitcast packed union to integer" {
|
||||
if (true) {
|
||||
// https://github.com/ziglang/zig/issues/19384
|
||||
return error.SkipZigTest;
|
||||
}
|
||||
const U = packed union {
|
||||
x: u1,
|
||||
x: i2,
|
||||
y: u2,
|
||||
};
|
||||
|
||||
comptime {
|
||||
const a = U{ .x = 1 };
|
||||
const b = U{ .y = 2 };
|
||||
const cast_a = @as(u2, @bitCast(a));
|
||||
const cast_b = @as(u2, @bitCast(b));
|
||||
const a: U = .{ .x = -1 };
|
||||
const b: U = .{ .y = 2 };
|
||||
const cast_a: u2 = @bitCast(a);
|
||||
const cast_b: u2 = @bitCast(b);
|
||||
|
||||
// truncated because the upper bit is garbage memory that we don't care about
|
||||
try testing.expectEqual(@as(u1, 1), @as(u1, @truncate(cast_a)));
|
||||
try testing.expectEqual(@as(u2, 3), cast_a);
|
||||
try testing.expectEqual(@as(u2, 2), cast_b);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1054,3 +1054,26 @@ test "errorCast from error sets to error unions" {
|
||||
const err_union: Set1!void = @errorCast(error.A);
|
||||
try expectError(error.A, err_union);
|
||||
}
|
||||
|
||||
test "result location initialization of error union with OPV payload" {
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
|
||||
const S = struct {
|
||||
x: u0,
|
||||
};
|
||||
|
||||
const a: anyerror!S = .{ .x = 0 };
|
||||
comptime assert((a catch unreachable).x == 0);
|
||||
|
||||
comptime {
|
||||
var b: anyerror!S = .{ .x = 0 };
|
||||
_ = &b;
|
||||
assert((b catch unreachable).x == 0);
|
||||
}
|
||||
|
||||
var c: anyerror!S = .{ .x = 0 };
|
||||
_ = &c;
|
||||
try expectEqual(0, (c catch return error.TestFailed).x);
|
||||
}
|
||||
|
||||
@ -1731,6 +1731,7 @@ test "@fieldParentPtr extern union" {
|
||||
test "@fieldParentPtr packed union" {
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
||||
if (builtin.target.cpu.arch.endian() == .big) return error.SkipZigTest; // TODO
|
||||
|
||||
const C = packed union {
|
||||
a: bool,
|
||||
|
||||
@ -92,13 +92,11 @@ test "optional with zero-bit type" {
|
||||
|
||||
var two: ?struct { ZeroBit, ZeroBit } = undefined;
|
||||
two = .{ with_runtime.zero_bit, with_runtime.zero_bit };
|
||||
if (!@inComptime()) {
|
||||
try expect(two != null);
|
||||
try expect(two.?[0] == zero_bit);
|
||||
try expect(two.?[0] == with_runtime.zero_bit);
|
||||
try expect(two.?[1] == zero_bit);
|
||||
try expect(two.?[1] == with_runtime.zero_bit);
|
||||
}
|
||||
try expect(two != null);
|
||||
try expect(two.?[0] == zero_bit);
|
||||
try expect(two.?[0] == with_runtime.zero_bit);
|
||||
try expect(two.?[1] == zero_bit);
|
||||
try expect(two.?[1] == with_runtime.zero_bit);
|
||||
}
|
||||
};
|
||||
|
||||
@ -610,3 +608,27 @@ test "copied optional doesn't alias source" {
|
||||
|
||||
try expect(x[0] == 0.0);
|
||||
}
|
||||
|
||||
test "result location initialization of optional with OPV payload" {
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||
|
||||
const S = struct {
|
||||
x: u0,
|
||||
};
|
||||
|
||||
const a: ?S = .{ .x = 0 };
|
||||
comptime assert(a.?.x == 0);
|
||||
|
||||
comptime {
|
||||
var b: ?S = .{ .x = 0 };
|
||||
_ = &b;
|
||||
assert(b.?.x == 0);
|
||||
}
|
||||
|
||||
var c: ?S = .{ .x = 0 };
|
||||
_ = &c;
|
||||
try expectEqual(0, (c orelse return error.TestFailed).x);
|
||||
}
|
||||
|
||||
@ -1025,7 +1025,7 @@ test "modify nested packed struct aligned field" {
|
||||
pretty_print: packed struct {
|
||||
enabled: bool = false,
|
||||
num_spaces: u4 = 4,
|
||||
space_char: enum { space, tab } = .space,
|
||||
space_char: enum(u1) { space, tab } = .space,
|
||||
indent: u8 = 0,
|
||||
} = .{},
|
||||
baz: bool = false,
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const assert = std.debug.assert;
|
||||
const expectEqual = std.testing.expectEqual;
|
||||
|
||||
test "flags in packed union" {
|
||||
@ -106,7 +107,7 @@ test "packed union in packed struct" {
|
||||
|
||||
fn testPackedUnionInPackedStruct() !void {
|
||||
const ReadRequest = packed struct { key: i32 };
|
||||
const RequestType = enum {
|
||||
const RequestType = enum(u1) {
|
||||
read,
|
||||
insert,
|
||||
};
|
||||
@ -169,3 +170,15 @@ test "assigning to non-active field at comptime" {
|
||||
test_bits.bits = .{};
|
||||
}
|
||||
}
|
||||
|
||||
test "comptime packed union of pointers" {
|
||||
const U = packed union {
|
||||
a: *const u32,
|
||||
b: *const [1]u32,
|
||||
};
|
||||
|
||||
const x: u32 = 123;
|
||||
const u: U = .{ .a = &x };
|
||||
|
||||
comptime assert(u.b[0] == 123);
|
||||
}
|
||||
|
||||
@ -621,3 +621,39 @@ test "cast pointers with zero sized elements" {
|
||||
const d: []u8 = c;
|
||||
_ = d;
|
||||
}
|
||||
|
||||
test "comptime pointer equality through distinct fields with well-defined layout" {
|
||||
const A = extern struct {
|
||||
x: u32,
|
||||
z: u16,
|
||||
};
|
||||
const B = extern struct {
|
||||
x: u16,
|
||||
y: u16,
|
||||
z: u16,
|
||||
};
|
||||
|
||||
const a: A = .{
|
||||
.x = undefined,
|
||||
.z = 123,
|
||||
};
|
||||
|
||||
const ap: *const A = &a;
|
||||
const bp: *const B = @ptrCast(ap);
|
||||
|
||||
comptime assert(&ap.z == &bp.z);
|
||||
comptime assert(ap.z == 123);
|
||||
comptime assert(bp.z == 123);
|
||||
}
|
||||
|
||||
test "comptime pointer equality through distinct elements with well-defined layout" {
|
||||
const buf: [2]u32 = .{ 123, 456 };
|
||||
|
||||
const ptr: *const [2]u32 = &buf;
|
||||
const byte_ptr: *align(4) const [8]u8 = @ptrCast(ptr);
|
||||
const second_elem: *const u32 = @ptrCast(byte_ptr[4..8]);
|
||||
|
||||
comptime assert(&buf[1] == second_elem);
|
||||
comptime assert(buf[1] == 456);
|
||||
comptime assert(second_elem.* == 456);
|
||||
}
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const expect = std.testing.expect;
|
||||
const assert = std.debug.assert;
|
||||
const native_endian = builtin.target.cpu.arch.endian();
|
||||
|
||||
test "reinterpret bytes as integer with nonzero offset" {
|
||||
@ -277,7 +278,7 @@ test "@ptrCast undefined value at comptime" {
|
||||
}
|
||||
};
|
||||
comptime {
|
||||
const x = S.transmute([]u8, i32, undefined);
|
||||
const x = S.transmute(u64, i32, undefined);
|
||||
_ = x;
|
||||
}
|
||||
}
|
||||
@ -292,3 +293,60 @@ test "comptime @ptrCast with packed struct leaves value unmodified" {
|
||||
try expect(p.*[0] == 6);
|
||||
try expect(st.three == 6);
|
||||
}
|
||||
|
||||
test "@ptrCast restructures comptime-only array" {
|
||||
{
|
||||
const a3a2: [3][2]comptime_int = .{
|
||||
.{ 1, 2 },
|
||||
.{ 3, 4 },
|
||||
.{ 5, 6 },
|
||||
};
|
||||
const a2a3: *const [2][3]comptime_int = @ptrCast(&a3a2);
|
||||
comptime assert(a2a3[0][0] == 1);
|
||||
comptime assert(a2a3[0][1] == 2);
|
||||
comptime assert(a2a3[0][2] == 3);
|
||||
comptime assert(a2a3[1][0] == 4);
|
||||
comptime assert(a2a3[1][1] == 5);
|
||||
comptime assert(a2a3[1][2] == 6);
|
||||
}
|
||||
|
||||
{
|
||||
const a6a1: [6][1]comptime_int = .{
|
||||
.{1}, .{2}, .{3}, .{4}, .{5}, .{6},
|
||||
};
|
||||
const a1a2a3: *const [1][2][3]comptime_int = @ptrCast(&a6a1);
|
||||
comptime assert(a1a2a3[0][0][0] == 1);
|
||||
comptime assert(a1a2a3[0][0][1] == 2);
|
||||
comptime assert(a1a2a3[0][0][2] == 3);
|
||||
comptime assert(a1a2a3[0][1][0] == 4);
|
||||
comptime assert(a1a2a3[0][1][1] == 5);
|
||||
comptime assert(a1a2a3[0][1][2] == 6);
|
||||
}
|
||||
|
||||
{
|
||||
const a1: [1]comptime_int = .{123};
|
||||
const raw: *const comptime_int = @ptrCast(&a1);
|
||||
comptime assert(raw.* == 123);
|
||||
}
|
||||
|
||||
{
|
||||
const raw: comptime_int = 123;
|
||||
const a1: *const [1]comptime_int = @ptrCast(&raw);
|
||||
comptime assert(a1[0] == 123);
|
||||
}
|
||||
}
|
||||
|
||||
test "@ptrCast restructures sliced comptime-only array" {
|
||||
const a3a2: [4][2]comptime_int = .{
|
||||
.{ 1, 2 },
|
||||
.{ 3, 4 },
|
||||
.{ 5, 6 },
|
||||
.{ 7, 8 },
|
||||
};
|
||||
|
||||
const sub: *const [4]comptime_int = @ptrCast(a3a2[1..]);
|
||||
comptime assert(sub[0] == 3);
|
||||
comptime assert(sub[1] == 4);
|
||||
comptime assert(sub[2] == 5);
|
||||
comptime assert(sub[3] == 6);
|
||||
}
|
||||
|
||||
@ -758,3 +758,24 @@ test "matching captures causes opaque equivalence" {
|
||||
comptime assert(@TypeOf(a) == @TypeOf(b));
|
||||
try testing.expect(a == b);
|
||||
}
|
||||
|
||||
test "reify enum where fields refers to part of array" {
|
||||
const fields: [3]std.builtin.Type.EnumField = .{
|
||||
.{ .name = "foo", .value = 0 },
|
||||
.{ .name = "bar", .value = 1 },
|
||||
undefined,
|
||||
};
|
||||
const E = @Type(.{ .Enum = .{
|
||||
.tag_type = u8,
|
||||
.fields = fields[0..2],
|
||||
.decls = &.{},
|
||||
.is_exhaustive = true,
|
||||
} });
|
||||
var a: E = undefined;
|
||||
var b: E = undefined;
|
||||
a = .foo;
|
||||
b = .bar;
|
||||
try testing.expect(a == .foo);
|
||||
try testing.expect(b == .bar);
|
||||
try testing.expect(a != b);
|
||||
}
|
||||
|
||||
@ -1532,7 +1532,7 @@ test "reinterpreting enum value inside packed union" {
|
||||
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
||||
|
||||
const U = packed union {
|
||||
tag: enum { a, b },
|
||||
tag: enum(u8) { a, b },
|
||||
val: u8,
|
||||
|
||||
fn doTest() !void {
|
||||
@ -1850,9 +1850,8 @@ test "reinterpret packed union" {
|
||||
|
||||
{
|
||||
// Union initialization
|
||||
var u: U = .{
|
||||
.qux = 0xe2a,
|
||||
};
|
||||
var u: U = .{ .baz = 0 }; // ensure all bits are defined
|
||||
u.qux = 0xe2a;
|
||||
try expectEqual(@as(u8, 0x2a), u.foo);
|
||||
try expectEqual(@as(u12, 0xe2a), u.qux);
|
||||
try expectEqual(@as(u29, 0xe2a), u.bar & 0xfff);
|
||||
|
||||
@ -0,0 +1,31 @@
|
||||
//! The full test name would be:
//! struct field type resolution marks transitive error from bad usingnamespace in @typeInfo call from non-initial field type
//!
//! This test is rather esoteric. It's ensuring that errors triggered by `@typeInfo` analyzing
//! a bad `usingnamespace` correctly trigger transitive errors when analyzed by struct field type
//! resolution, meaning we don't incorrectly analyze code past the uses of `S`.

const S = struct {
|
||||
ok: u32,
|
||||
bad: @typeInfo(T),
|
||||
};
|
||||
|
||||
const T = struct {
|
||||
pub usingnamespace @compileError("usingnamespace analyzed");
|
||||
};
|
||||
|
||||
comptime {
|
||||
const a: S = .{ .ok = 123, .bad = undefined };
|
||||
_ = a;
|
||||
@compileError("should not be reached");
|
||||
}
|
||||
|
||||
comptime {
|
||||
const b: S = .{ .ok = 123, .bad = undefined };
|
||||
_ = b;
|
||||
@compileError("should not be reached");
|
||||
}
|
||||
|
||||
// error
|
||||
//
|
||||
// :14:24: error: usingnamespace analyzed
|
||||
22
test/cases/compile_errors/bit_ptr_non_packed.zig
Normal file
@ -0,0 +1,22 @@
|
||||
export fn entry1() void {
|
||||
const S = extern struct { x: u32 };
|
||||
_ = *align(1:2:8) S;
|
||||
}
|
||||
|
||||
export fn entry2() void {
|
||||
const S = struct { x: u32 };
|
||||
_ = *align(1:2:@sizeOf(S) * 2) S;
|
||||
}
|
||||
|
||||
export fn entry3() void {
|
||||
const E = enum { implicit, backing, type };
|
||||
_ = *align(1:2:8) E;
|
||||
}
|
||||
|
||||
// error
|
||||
//
|
||||
// :3:23: error: bit-pointer cannot refer to value of type 'tmp.entry1.S'
|
||||
// :3:23: note: only packed structs layout are allowed in packed types
|
||||
// :8:36: error: bit-pointer cannot refer to value of type 'tmp.entry2.S'
|
||||
// :8:36: note: only packed structs layout are allowed in packed types
|
||||
// :13:23: error: bit-pointer cannot refer to value of type 'tmp.entry3.E'
|
||||
12
test/cases/compile_errors/bitcast_undef.zig
Normal file
@ -0,0 +1,12 @@
export fn entry1() void {
const x: i32 = undefined;
const y: u32 = @bitCast(x);
@compileLog(y);
}

// error
//
// :4:5: error: found compile log statement
//
// Compile Log Output:
// @as(u32, undefined)
@ -9,4 +9,4 @@ export fn entry() void {
// :2:5: error: found compile log statement
//
// Compile Log Output:
// @as(*const anyopaque, &tmp.entry)
// @as(*const anyopaque, @as(*const anyopaque, @ptrCast(tmp.entry)))

@ -1,13 +0,0 @@
const MyStruct = struct { x: bool = false };

comptime {
const x = &[_]MyStruct{ .{}, .{} };
const y = x[0..1] ++ &[_]MyStruct{};
_ = y;
}

// error
// backend=stage2
// target=native
//
// :5:16: error: comptime dereference requires '[1]tmp.MyStruct' to have a well-defined layout, but it does not.
@ -6,7 +6,7 @@ comptime {

const payload_ptr = &opt_ptr.?;
opt_ptr = null;
_ = payload_ptr.*.*;
_ = payload_ptr.*.*; // TODO: this case was regressed by #19630
}
comptime {
var opt: ?u8 = 15;
@ -28,6 +28,5 @@ comptime {
// backend=stage2
// target=native
//
// :9:20: error: attempt to use null value
// :16:20: error: attempt to use null value
// :24:20: error: attempt to unwrap error: Foo

@ -11,4 +11,5 @@ fn concat() [16]f32 {
// target=native
//
// :3:17: error: expected type '[4]f32', found '[16]f32'
// :3:17: note: array of length 16 cannot cast into an array of length 4
// :3:17: note: destination has length 4
// :3:17: note: source has length 16

@ -8,7 +8,5 @@ export fn foo_slice_len_increment_beyond_bounds() void {
}

// error
// backend=stage2
// target=native
//
// :6:16: error: comptime store of index 8 out of bounds of array length 8
// :6:16: error: dereference of '*u8' exceeds bounds of containing decl of type '[8]u8'

@ -0,0 +1,26 @@
comptime {
const a: @Vector(3, u8) = .{ 1, 200, undefined };
@compileLog(@addWithOverflow(a, a));
}

comptime {
const a: @Vector(3, u8) = .{ 1, 2, undefined };
const b: @Vector(3, u8) = .{ 0, 3, 10 };
@compileLog(@subWithOverflow(a, b));
}

comptime {
const a: @Vector(3, u8) = .{ 1, 200, undefined };
@compileLog(@mulWithOverflow(a, a));
}

// error
//
// :3:5: error: found compile log statement
// :9:5: note: also here
// :14:5: note: also here
//
// Compile Log Output:
// @as(struct{@Vector(3, u8), @Vector(3, u1)}, .{ .{ 2, 144, undefined }, .{ 0, 1, undefined } })
// @as(struct{@Vector(3, u8), @Vector(3, u1)}, .{ .{ 1, 255, undefined }, .{ 0, 1, undefined } })
// @as(struct{@Vector(3, u8), @Vector(3, u1)}, .{ .{ 1, 64, undefined }, .{ 0, 1, undefined } })
@ -30,7 +30,7 @@ export fn entry6() void {
}
export fn entry7() void {
_ = @sizeOf(packed struct {
x: enum { A, B },
x: enum(u1) { A, B },
});
}
export fn entry8() void {
@ -70,6 +70,12 @@ export fn entry13() void {
x: *type,
});
}
export fn entry14() void {
const E = enum { implicit, backing, type };
_ = @sizeOf(packed struct {
x: E,
});
}

// error
// backend=llvm
@ -97,3 +103,5 @@ export fn entry13() void {
// :70:12: error: packed structs cannot contain fields of type '*type'
// :70:12: note: comptime-only pointer has no guaranteed in-memory representation
// :70:12: note: types are not available at runtime
// :76:12: error: packed structs cannot contain fields of type 'tmp.entry14.E'
// :74:15: note: enum declared here

@ -0,0 +1,19 @@
export fn entry1() void {
const x: u32 = 123;
const ptr: [*]const u32 = @ptrCast(&x);
_ = ptr - 1;
}

export fn entry2() void {
const S = extern struct { x: u32, y: u32 };
const y: u32 = 123;
const parent_ptr: *const S = @fieldParentPtr("y", &y);
_ = parent_ptr;
}

// error
//
// :4:13: error: pointer computation here causes undefined behavior
// :4:13: note: resulting pointer exceeds bounds of containing value which may trigger overflow
// :10:55: error: pointer computation here causes undefined behavior
// :10:55: note: resulting pointer exceeds bounds of containing value which may trigger overflow
@ -5,9 +5,17 @@ comptime {
const deref = int_ptr.*;
_ = deref;
}
comptime {
const array: [4]u8 = "aoeu".*;
const sub_array = array[1..];
const int_ptr: *const u32 = @ptrCast(@alignCast(sub_array));
const deref = int_ptr.*;
_ = deref;
}

// error
// backend=stage2
// target=native
//
// :5:26: error: dereference of '*const u24' exceeds bounds of containing decl of type '[4]u8'
// :12:26: error: dereference of '*const u32' exceeds bounds of containing decl of type '[4]u8'

@ -7,4 +7,4 @@ export fn foo() void {
// backend=stage2
// target=native
//
// :3:49: error: comptime dereference requires '[]const u8' to have a well-defined layout, but it does not.
// :3:49: error: comptime dereference requires '[]const u8' to have a well-defined layout

@ -31,5 +31,5 @@ pub fn main() !void {}
// :20:5: error: found compile log statement
//
// Compile Log Output:
// @as([]i32, &(comptime alloc).buf[0..2])
// @as([]i32, &(comptime alloc).buf[0..2])
// @as([]i32, @as([*]i32, @ptrCast(@as(tmp.UnionContainer, .{ .buf = .{ 1, 2 } }).buf[0]))[0..2])
// @as([]i32, @as([*]i32, @ptrCast(@as(tmp.StructContainer, .{ .buf = .{ 3, 4 } }).buf[0]))[0..2])