We've got a big one here! This commit reworks how we represent pointers in the InternPool, and rewrites the logic for loading from and storing to them at comptime.

Firstly, the pointer representation. Previously, pointers were represented in a highly structured manner: pointers to fields, array elements, etc. were explicitly represented. This works well for simple cases, but is quite difficult to handle in the case of unusual reinterpretations, pointer casts, offsets, etc. Therefore, pointers are now represented in a more "flat" manner. For types without well-defined layouts -- such as comptime-only types, automatic-layout aggregates, and so on -- we still use this "hierarchical" structure. However, for types with well-defined layouts, we use a byte offset associated with the pointer. This allows the comptime pointer access logic to deal with reinterpreted pointers far more gracefully, because the "base address" of a pointer -- for instance, a `field` -- is a single value which pointer accesses cannot exceed, since the parent has undefined layout.

This strategy is also more useful to most backends -- see the updated logic in `codegen.zig` and `codegen/llvm.zig`. For backends which do prefer a chain of field and element accesses for lowering pointer values, such as SPIR-V, there is a helpful function in `Value` which creates a strategy to derive a pointer value using, ideally, only field and element accesses. This is actually more correct than the previous logic, since it correctly handles pointer casts which, after the dust has settled, end up referring exactly to an aggregate field or array element.

In terms of the pointer access code, it has been rewritten from the ground up. The old logic had become rather a mess of special cases added whenever bugs were hit, and was still riddled with bugs. The new logic was written to handle the "difficult" cases correctly, the most notable of which is restructuring of a comptime-only array (for instance, converting a `[3][2]comptime_int` to a `[2][3]comptime_int`). Currently, the loading and storing logic work somewhat differently from one another, but a future change will likely improve the loading logic to bring it more in line with the store strategy. As far as I can tell, the rewrite has fixed all bugs exposed by #19414.

As a part of this, the comptime bitcast logic has also been rewritten. Previously, bitcasts simply worked by serializing the entire value into an in-memory buffer, then deserializing it. This strategy has two key weaknesses: pointers, and undefined values. Representations of these values at comptime cannot be easily serialized/deserialized whilst preserving data, which means many bitcasts would become runtime-known if pointers were involved, or would turn `undefined` values into `0xAA`. The new logic works by "flattening" the data structure to be cast into a sequence of bit-packed atomic values, and then "unflattening" it, using serialization when necessary, but with special handling for `undefined` values and for pointers which align in virtual memory. The resulting code is definitely slower -- more on this later -- but it is correct.

The pointer access and bitcast logic required some helper functions and types which are not generally useful elsewhere, so I opted to split them into separate files `Sema/comptime_ptr_access.zig` and `Sema/bitcast.zig`, with simple re-exports in `Sema.zig` for their small public APIs.

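As a minimal sketch of the `undefined` weakness described above (illustrative only, not code from this commit), consider a comptime bitcast of an undefined value. Under the old serialize/deserialize strategy, the undefined bits would be materialized as `0xAA` bytes, producing a concrete but meaningless value; the flatten/unflatten approach can instead propagate `undefined` through the cast:

test "comptime bitcast of an undefined value" {
    comptime {
        const x: u64 = undefined;
        // Old strategy: x would be serialized as 0xAA bytes, yielding a
        // concrete (wrong) value. New strategy: the result stays undefined.
        const y: f64 = @bitCast(x);
        _ = y;
    }
}
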
Whilst working on this branch, I caught various unrelated bugs with transitive Sema errors, and with the handling of `undefined` values. These bugs have been fixed, and corresponding behavior tests added.

In terms of performance, I do anticipate that this commit will regress performance somewhat, because the new pointer access and bitcast logic is necessarily more complex. I have not yet taken performance measurements, but will do so shortly, and post the results in this PR. If the performance regression is severe, I will work to optimize the new logic before merge.

Resolves: #19452
Resolves: #19460

353 lines
11 KiB
Zig

const std = @import("std");
|
|
const builtin = @import("builtin");
|
|
const expect = std.testing.expect;
|
|
const assert = std.debug.assert;
|
|
const native_endian = builtin.target.cpu.arch.endian();
|
|
|
|
test "reinterpret bytes as integer with nonzero offset" {
|
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
|
|
|
try testReinterpretBytesAsInteger();
|
|
try comptime testReinterpretBytesAsInteger();
|
|
}
|
|
|
|
fn testReinterpretBytesAsInteger() !void {
|
|
const bytes = "\x12\x34\x56\x78\xab";
|
|
const expected = switch (native_endian) {
|
|
.little => 0xab785634,
|
|
.big => 0x345678ab,
|
|
};
|
|
try expect(@as(*align(1) const u32, @ptrCast(bytes[1..5])).* == expected);
|
|
}
|
|
|
|
test "reinterpret an array over multiple elements, with no well-defined layout" {
|
|
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
|
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
|
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
|
|
|
try testReinterpretWithOffsetAndNoWellDefinedLayout();
|
|
try comptime testReinterpretWithOffsetAndNoWellDefinedLayout();
|
|
}
|
|
|
|
fn testReinterpretWithOffsetAndNoWellDefinedLayout() !void {
|
|
const bytes: ?[5]?u8 = [5]?u8{ 0x12, 0x34, 0x56, 0x78, 0x9a };
|
|
const ptr = &bytes.?[1];
|
|
const copy: [4]?u8 = @as(*const [4]?u8, @ptrCast(ptr)).*;
|
|
_ = copy;
|
|
//try expect(@ptrCast(*align(1)?u8, bytes[1..5]).* == );
|
|
}
|
|
|
|
test "reinterpret bytes inside auto-layout struct as integer with nonzero offset" {
|
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
|
|
|
try testReinterpretStructWrappedBytesAsInteger();
|
|
try comptime testReinterpretStructWrappedBytesAsInteger();
|
|
}
|
|
|
|
fn testReinterpretStructWrappedBytesAsInteger() !void {
|
|
const S = struct { bytes: [5:0]u8 };
|
|
const obj = S{ .bytes = "\x12\x34\x56\x78\xab".* };
|
|
const expected = switch (native_endian) {
|
|
.little => 0xab785634,
|
|
.big => 0x345678ab,
|
|
};
|
|
try expect(@as(*align(1) const u32, @ptrCast(obj.bytes[1..5])).* == expected);
|
|
}
|
|
|
|
test "reinterpret bytes of an array into an extern struct" {
|
|
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
|
|
|
try testReinterpretBytesAsExternStruct();
|
|
try comptime testReinterpretBytesAsExternStruct();
|
|
}
|
|
|
|
fn testReinterpretBytesAsExternStruct() !void {
|
|
var bytes align(2) = [_]u8{ 1, 2, 3, 4, 5, 6 };
|
|
|
|
const S = extern struct {
|
|
a: u8,
|
|
b: u16,
|
|
c: u8,
|
|
};
|
|
|
|
const ptr: *const S = @ptrCast(&bytes);
|
|
const val = ptr.c;
|
|
try expect(val == 5);
|
|
}
|
|
|
|
test "reinterpret bytes of an extern struct (with under-aligned fields) into another" {
|
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
|
|
|
try testReinterpretExternStructAsExternStruct();
|
|
try comptime testReinterpretExternStructAsExternStruct();
|
|
}
|
|
|
|
fn testReinterpretExternStructAsExternStruct() !void {
|
|
const S1 = extern struct {
|
|
a: u8,
|
|
b: u16,
|
|
c: u8,
|
|
};
|
|
comptime var bytes align(2) = S1{ .a = 0, .b = 0, .c = 5 };
|
|
|
|
const S2 = extern struct {
|
|
a: u32 align(2),
|
|
c: u8,
|
|
};
|
|
const ptr: *const S2 = @ptrCast(&bytes);
|
|
const val = ptr.c;
|
|
try expect(val == 5);
|
|
}
|
|
|
|
test "reinterpret bytes of an extern struct into another" {
|
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
|
|
|
try testReinterpretOverAlignedExternStructAsExternStruct();
|
|
try comptime testReinterpretOverAlignedExternStructAsExternStruct();
|
|
}
|
|
|
|
fn testReinterpretOverAlignedExternStructAsExternStruct() !void {
|
|
const S1 = extern struct {
|
|
a: u32,
|
|
b: u32,
|
|
c: u8,
|
|
};
|
|
comptime var bytes: S1 = .{ .a = 0, .b = 0, .c = 5 };
|
|
|
|
const S2 = extern struct {
|
|
a0: u32,
|
|
a1: u16,
|
|
a2: u16,
|
|
c: u8,
|
|
};
|
|
const ptr: *const S2 = @ptrCast(&bytes);
|
|
const val = ptr.c;
|
|
try expect(val == 5);
|
|
}
|
|
|
|
test "lower reinterpreted comptime field ptr (with under-aligned fields)" {
|
|
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
|
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
|
|
|
// Test lowering a field ptr
|
|
comptime var bytes align(2) = [_]u8{ 1, 2, 3, 4, 5, 6 };
|
|
const S = extern struct {
|
|
a: u32 align(2),
|
|
c: u8,
|
|
};
|
|
comptime var ptr = @as(*const S, @ptrCast(&bytes));
|
|
const val = &ptr.c;
|
|
try expect(val.* == 5);
|
|
|
|
// Test lowering an elem ptr
|
|
comptime var src_value = S{ .a = 15, .c = 5 };
|
|
comptime var ptr2 = @as(*[@sizeOf(S)]u8, @ptrCast(&src_value));
|
|
const val2 = &ptr2[4];
|
|
try expect(val2.* == 5);
|
|
}
|
|
|
|
test "lower reinterpreted comptime field ptr" {
|
|
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
|
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
|
|
|
// Test lowering a field ptr
|
|
comptime var bytes align(4) = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
|
|
const S = extern struct {
|
|
a: u32,
|
|
c: u8,
|
|
};
|
|
comptime var ptr = @as(*const S, @ptrCast(&bytes));
|
|
const val = &ptr.c;
|
|
try expect(val.* == 5);
|
|
|
|
// Test lowering an elem ptr
|
|
comptime var src_value = S{ .a = 15, .c = 5 };
|
|
comptime var ptr2 = @as(*[@sizeOf(S)]u8, @ptrCast(&src_value));
|
|
const val2 = &ptr2[4];
|
|
try expect(val2.* == 5);
|
|
}
|
|
|
|
test "reinterpret struct field at comptime" {
|
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
|
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
|
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
|
|
|
const numNative = comptime Bytes.init(0x12345678);
|
|
if (native_endian != .little) {
|
|
try expect(std.mem.eql(u8, &[_]u8{ 0x12, 0x34, 0x56, 0x78 }, &numNative.bytes));
|
|
} else {
|
|
try expect(std.mem.eql(u8, &[_]u8{ 0x78, 0x56, 0x34, 0x12 }, &numNative.bytes));
|
|
}
|
|
}
|
|
|
|
const Bytes = struct {
|
|
bytes: [4]u8,
|
|
|
|
pub fn init(v: u32) Bytes {
|
|
var res: Bytes = undefined;
|
|
@as(*align(1) u32, @ptrCast(&res.bytes)).* = v;
|
|
|
|
return res;
|
|
}
|
|
};
|
|
|
|
test "ptrcast of const integer has the correct object size" {
|
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
|
|
|
const is_value = ~@as(isize, @intCast(std.math.minInt(isize)));
|
|
const is_bytes = @as([*]const u8, @ptrCast(&is_value))[0..@sizeOf(isize)];
|
|
if (@sizeOf(isize) == 8) {
|
|
switch (native_endian) {
|
|
.little => {
|
|
try expect(is_bytes[0] == 0xff);
|
|
try expect(is_bytes[1] == 0xff);
|
|
try expect(is_bytes[2] == 0xff);
|
|
try expect(is_bytes[3] == 0xff);
|
|
|
|
try expect(is_bytes[4] == 0xff);
|
|
try expect(is_bytes[5] == 0xff);
|
|
try expect(is_bytes[6] == 0xff);
|
|
try expect(is_bytes[7] == 0x7f);
|
|
},
|
|
.big => {
|
|
try expect(is_bytes[0] == 0x7f);
|
|
try expect(is_bytes[1] == 0xff);
|
|
try expect(is_bytes[2] == 0xff);
|
|
try expect(is_bytes[3] == 0xff);
|
|
|
|
try expect(is_bytes[4] == 0xff);
|
|
try expect(is_bytes[5] == 0xff);
|
|
try expect(is_bytes[6] == 0xff);
|
|
try expect(is_bytes[7] == 0xff);
|
|
},
|
|
}
|
|
}
|
|
}
|
|
|
|
test "implicit optional pointer to optional anyopaque pointer" {
|
|
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
|
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
|
|
|
var buf: [4]u8 = "aoeu".*;
|
|
const x: ?[*]u8 = &buf;
|
|
const y: ?*anyopaque = x;
|
|
const z: *[4]u8 = @ptrCast(y);
|
|
try expect(std.mem.eql(u8, z, "aoeu"));
|
|
}
|
|
|
|
test "@ptrCast slice to slice" {
|
|
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
|
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
|
|
|
const S = struct {
|
|
fn foo(slice: []u32) []i32 {
|
|
return @as([]i32, @ptrCast(slice));
|
|
}
|
|
};
|
|
var buf: [4]u32 = .{ 0, 0, 0, 0 };
|
|
const alias = S.foo(&buf);
|
|
alias[1] = 42;
|
|
try expect(buf[1] == 42);
|
|
try expect(alias.len == 4);
|
|
}
|
|
|
|
test "comptime @ptrCast a subset of an array, then write through it" {
|
|
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
|
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
|
|
|
comptime {
|
|
var buff: [16]u8 align(4) = undefined;
|
|
const len_bytes = @as(*u32, @ptrCast(&buff));
|
|
len_bytes.* = 16;
|
|
const source = "abcdef";
|
|
@memcpy(buff[4 .. 4 + source.len], source);
|
|
}
|
|
}
|
|
|
|
test "@ptrCast undefined value at comptime" {
|
|
const S = struct {
|
|
fn transmute(comptime T: type, comptime U: type, value: T) U {
|
|
return @as(*const U, @ptrCast(&value)).*;
|
|
}
|
|
};
|
|
comptime {
|
|
const x = S.transmute(u64, i32, undefined);
|
|
_ = x;
|
|
}
|
|
}
|
|
|
|
test "comptime @ptrCast with packed struct leaves value unmodified" {
|
|
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
|
|
|
const S = packed struct { three: u3 };
|
|
const st: S = .{ .three = 6 };
|
|
try expect(st.three == 6);
|
|
const p: *const [1]u3 = @ptrCast(&st);
|
|
try expect(p.*[0] == 6);
|
|
try expect(st.three == 6);
|
|
}
|
|
|
|
test "@ptrCast restructures comptime-only array" {
|
|
{
|
|
const a3a2: [3][2]comptime_int = .{
|
|
.{ 1, 2 },
|
|
.{ 3, 4 },
|
|
.{ 5, 6 },
|
|
};
|
|
const a2a3: *const [2][3]comptime_int = @ptrCast(&a3a2);
|
|
comptime assert(a2a3[0][0] == 1);
|
|
comptime assert(a2a3[0][1] == 2);
|
|
comptime assert(a2a3[0][2] == 3);
|
|
comptime assert(a2a3[1][0] == 4);
|
|
comptime assert(a2a3[1][1] == 5);
|
|
comptime assert(a2a3[1][2] == 6);
|
|
}
|
|
|
|
{
|
|
const a6a1: [6][1]comptime_int = .{
|
|
.{1}, .{2}, .{3}, .{4}, .{5}, .{6},
|
|
};
|
|
const a1a2a3: *const [1][2][3]comptime_int = @ptrCast(&a6a1);
|
|
comptime assert(a1a2a3[0][0][0] == 1);
|
|
comptime assert(a1a2a3[0][0][1] == 2);
|
|
comptime assert(a1a2a3[0][0][2] == 3);
|
|
comptime assert(a1a2a3[0][1][0] == 4);
|
|
comptime assert(a1a2a3[0][1][1] == 5);
|
|
comptime assert(a1a2a3[0][1][2] == 6);
|
|
}
|
|
|
|
{
|
|
const a1: [1]comptime_int = .{123};
|
|
const raw: *const comptime_int = @ptrCast(&a1);
|
|
comptime assert(raw.* == 123);
|
|
}
|
|
|
|
{
|
|
const raw: comptime_int = 123;
|
|
const a1: *const [1]comptime_int = @ptrCast(&raw);
|
|
comptime assert(a1[0] == 123);
|
|
}
|
|
}
|
|
|
|
test "@ptrCast restructures sliced comptime-only array" {
|
|
const a3a2: [4][2]comptime_int = .{
|
|
.{ 1, 2 },
|
|
.{ 3, 4 },
|
|
.{ 5, 6 },
|
|
.{ 7, 8 },
|
|
};
|
|
|
|
const sub: *const [4]comptime_int = @ptrCast(a3a2[1..]);
|
|
comptime assert(sub[0] == 3);
|
|
comptime assert(sub[1] == 4);
|
|
comptime assert(sub[2] == 5);
|
|
comptime assert(sub[3] == 6);
|
|
}
|