stage2: implement comptime loads through casted pointers

Author: Andrew Kelley
Date:   2021-10-21 22:56:11 -07:00
parent 1bce0ed046
commit 0a6851cc6d
8 changed files with 314 additions and 193 deletions
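
In effect: stage2 semantic analysis can now resolve a comptime load even when the pointer's element type disagrees with the type of the Decl it points into (because the pointer came from a @ptrCast), by walking the pointer back to its owning Decl and bitcasting the stored value. A minimal sketch of the kind of code this enables, adapted from the behavior test moved in this commit (using the two-argument @ptrCast form current at the time):

    const expect = @import("std").testing.expect;
    const native_endian = @import("builtin").target.cpu.arch.endian();

    test "comptime load through a casted pointer" {
        comptime try reinterpretFloatBits();
    }

    fn reinterpretFloatBits() !void {
        // The f64 bit pattern is 0x3fe3333333333303.
        const float: f64 = 5.99999999999994648725e-01;
        const int_ptr = @ptrCast(*const i32, &float);
        const int_val = int_ptr.*; // loads an i32 through memory that holds an f64
        if (native_endian == .Little)
            try expect(int_val == 0x33333303)
        else
            try expect(int_val == 0x3fe33333);
    }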

View File

@ -4570,7 +4570,7 @@ fn zirOptionalPayloadPtr(
});
if (try sema.resolveDefinedValue(block, src, optional_ptr)) |pointer_val| {
- if (try pointer_val.pointerDeref(sema.arena)) |val| {
+ if (try sema.pointerDeref(block, src, pointer_val, optional_ptr_ty)) |val| {
if (val.isNull()) {
return sema.fail(block, src, "unable to unwrap null", .{});
}
@ -4689,7 +4689,7 @@ fn zirErrUnionPayloadPtr(
});
if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
- if (try pointer_val.pointerDeref(sema.arena)) |val| {
+ if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
if (val.getError()) |name| {
return sema.fail(block, src, "caught unexpected error '{s}'", .{name});
}
@ -4748,7 +4748,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const result_ty = operand_ty.elemType().errorUnionSet();
if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
- if (try pointer_val.pointerDeref(sema.arena)) |val| {
+ if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
assert(val.getError() != null);
return sema.addConstant(result_ty, val);
}
@ -6912,8 +6912,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const final_len = lhs_info.len + rhs_info.len;
const final_len_including_sent = final_len + @boolToInt(res_sent != null);
const is_pointer = lhs_ty.zigTypeTag() == .Pointer;
- const lhs_sub_val = if (is_pointer) (try lhs_val.pointerDeref(sema.arena)).? else lhs_val;
- const rhs_sub_val = if (is_pointer) (try rhs_val.pointerDeref(sema.arena)).? else rhs_val;
+ const lhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val;
+ const rhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).? else rhs_val;
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
@ -6992,7 +6992,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const final_len_including_sent = final_len + @boolToInt(mulinfo.sentinel != null);
if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
- const lhs_sub_val = if (lhs_ty.zigTypeTag() == .Pointer) (try lhs_val.pointerDeref(sema.arena)).? else lhs_val;
+ const lhs_sub_val = if (lhs_ty.zigTypeTag() == .Pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val;
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
@ -10092,7 +10092,8 @@ fn zirCmpxchg(
const failure_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg5 = inst_data.src_node };
// zig fmt: on
const ptr = sema.resolveInst(extra.ptr);
- const elem_ty = sema.typeOf(ptr).elemType();
+ const ptr_ty = sema.typeOf(ptr);
+ const elem_ty = ptr_ty.elemType();
try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty);
if (elem_ty.zigTypeTag() == .Float) {
return sema.fail(
@ -10135,7 +10136,7 @@ fn zirCmpxchg(
// to become undef as well
return sema.addConstUndef(result_ty);
}
- const stored_val = (try ptr_val.pointerDeref(sema.arena)) orelse break :rs ptr_src;
+ const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
const result_val = if (stored_val.eql(expected_val, elem_ty)) blk: {
try sema.storePtr(block, src, ptr, new_value);
break :blk Value.@"null";
@ -10197,7 +10198,8 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
// zig fmt: on
const ptr = sema.resolveInst(extra.lhs);
- const elem_ty = sema.typeOf(ptr).elemType();
+ const ptr_ty = sema.typeOf(ptr);
+ const elem_ty = ptr_ty.elemType();
try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty);
const order = try sema.resolveAtomicOrder(block, order_src, extra.rhs);
@ -10218,7 +10220,7 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
- if (try ptr_val.pointerDeref(sema.arena)) |elem_val| {
+ if (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) |elem_val| {
return sema.addConstant(elem_ty, elem_val);
}
}
@ -10245,7 +10247,8 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node };
// zig fmt: on
const ptr = sema.resolveInst(extra.ptr);
- const operand_ty = sema.typeOf(ptr).elemType();
+ const ptr_ty = sema.typeOf(ptr);
+ const operand_ty = ptr_ty.elemType();
try sema.checkAtomicOperandType(block, operand_ty_src, operand_ty);
const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation);
@ -10282,7 +10285,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
};
if (ptr_val.isComptimeMutablePtr()) {
const target = sema.mod.getTarget();
- const stored_val = (try ptr_val.pointerDeref(sema.arena)) orelse break :rs ptr_src;
+ const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
const new_val = switch (op) {
// zig fmt: off
.Xchg => operand_val,
@ -11785,7 +11788,7 @@ fn elemVal(
const ptr_val = maybe_ptr_val orelse break :rs array_src;
const index_val = maybe_index_val orelse break :rs elem_index_src;
const index = @intCast(usize, index_val.toUnsignedInt());
- const maybe_array_val = try ptr_val.pointerDeref(sema.arena);
+ const maybe_array_val = try sema.pointerDeref(block, array_src, ptr_val, array_ty);
const array_val = maybe_array_val orelse break :rs array_src;
const elem_val = try array_val.elemValue(sema.arena, index);
return sema.addConstant(array_ty.elemType2(), elem_val);
@ -11887,6 +11890,8 @@ fn coerce(
assert(inst_ty.zigTypeTag() != .Undefined);
// comptime known number to other number
+ // TODO why is this a separate function? should just be flattened into the
+ // switch expression below.
if (try sema.coerceNum(block, dest_ty, inst, inst_src)) |some|
return some;
@ -12514,6 +12519,122 @@ fn beginComptimePtrMutation(
}
}
+ const ComptimePtrLoadKit = struct {
+ /// The Value of the Decl that owns this memory.
+ root_val: Value,
+ /// Parent Value.
+ val: Value,
+ /// The Type of the parent Value.
+ ty: Type,
+ /// The starting byte offset of `val` from `root_val`.
+ byte_offset: usize,
+ /// Whether the `root_val` could be mutated by further
+ /// semantic analysis and a copy must be performed.
+ is_mutable: bool,
+ };
+
+ const ComptimePtrLoadError = CompileError || error{
+ RuntimeLoad,
+ };
+
+ fn beginComptimePtrLoad(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ ptr_val: Value,
+ ) ComptimePtrLoadError!ComptimePtrLoadKit {
+ const target = sema.mod.getTarget();
+ switch (ptr_val.tag()) {
+ .decl_ref => {
+ const decl = ptr_val.castTag(.decl_ref).?.data;
+ const decl_val = try decl.value();
+ if (decl_val.tag() == .variable) return error.RuntimeLoad;
+ return ComptimePtrLoadKit{
+ .root_val = decl_val,
+ .val = decl_val,
+ .ty = decl.ty,
+ .byte_offset = 0,
+ .is_mutable = false,
+ };
+ },
+ .decl_ref_mut => {
+ const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl;
+ const decl_val = try decl.value();
+ if (decl_val.tag() == .variable) return error.RuntimeLoad;
+ return ComptimePtrLoadKit{
+ .root_val = decl_val,
+ .val = decl_val,
+ .ty = decl.ty,
+ .byte_offset = 0,
+ .is_mutable = true,
+ };
+ },
+ .elem_ptr => {
+ const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
+ const parent = try beginComptimePtrLoad(sema, block, src, elem_ptr.array_ptr);
+ const elem_ty = parent.ty.childType();
+ const elem_size = elem_ty.abiSize(target);
+ return ComptimePtrLoadKit{
+ .root_val = parent.root_val,
+ .val = try parent.val.elemValue(sema.arena, elem_ptr.index),
+ .ty = elem_ty,
+ .byte_offset = parent.byte_offset + elem_size * elem_ptr.index,
+ .is_mutable = parent.is_mutable,
+ };
+ },
+ .field_ptr => {
+ const field_ptr = ptr_val.castTag(.field_ptr).?.data;
+ const parent = try beginComptimePtrLoad(sema, block, src, field_ptr.container_ptr);
+ const field_index = @intCast(u32, field_ptr.field_index);
+ try sema.resolveTypeLayout(block, src, parent.ty);
+ const field_offset = parent.ty.structFieldOffset(field_index, target);
+ return ComptimePtrLoadKit{
+ .root_val = parent.root_val,
+ .val = try parent.val.fieldValue(sema.arena, field_index),
+ .ty = parent.ty.structFieldType(field_index),
+ .byte_offset = parent.byte_offset + field_offset,
+ .is_mutable = parent.is_mutable,
+ };
+ },
+ .eu_payload_ptr => {
+ const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
+ const parent = try beginComptimePtrLoad(sema, block, src, err_union_ptr);
+ return ComptimePtrLoadKit{
+ .root_val = parent.root_val,
+ .val = parent.val.castTag(.eu_payload).?.data,
+ .ty = parent.ty.errorUnionPayload(),
+ .byte_offset = undefined,
+ .is_mutable = parent.is_mutable,
+ };
+ },
+ .opt_payload_ptr => {
+ const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
+ const parent = try beginComptimePtrLoad(sema, block, src, opt_ptr);
+ var buf: Type.Payload.ElemType = undefined;
+ return ComptimePtrLoadKit{
+ .root_val = parent.root_val,
+ .val = parent.val.castTag(.opt_payload).?.data,
+ .ty = parent.ty.optionalChild(&buf),
+ .byte_offset = undefined,
+ .is_mutable = parent.is_mutable,
+ };
+ },
+ .zero,
+ .one,
+ .int_u64,
+ .int_i64,
+ .int_big_positive,
+ .int_big_negative,
+ .variable,
+ .extern_fn,
+ .function,
+ => return error.RuntimeLoad,
+ else => unreachable,
+ }
+ }
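
For intuition about the recursion above, consider a hypothetical input (values illustrative, not from the commit):

    const arr = [2]u32{ 10, 20 };
    const p = &arr[1];
    // `p` is an elem_ptr whose array_ptr is a decl_ref to `arr`.
    // beginComptimePtrLoad resolves the decl_ref first, then the elem_ptr
    // case refines it, yielding roughly:
    //   root_val    = the comptime value of `arr`
    //   val         = 20 (arr[1])
    //   ty          = u32
    //   byte_offset = 0 + abiSize(u32) * 1 = 4
    //   is_mutable  = false (a decl_ref_mut root would set it true)

The byte_offset bookkeeping is what will later allow reinterpreting memory starting partway into a Decl; the eu_payload_ptr and opt_payload_ptr cases leave it undefined, presumably because no byte offset is computed for payloads here.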
fn bitCast(
sema: *Sema,
block: *Block,
@ -12819,7 +12940,7 @@ fn analyzeLoad(
else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty}),
};
if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
- if (try ptr_val.pointerDeref(sema.arena)) |elem_val| {
+ if (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) |elem_val| {
return sema.addConstant(elem_ty, elem_val);
}
}
@ -14539,3 +14660,45 @@ pub fn analyzeAddrspace(
return address_space;
}
+ /// Asserts the value is a pointer and dereferences it.
+ /// Returns `null` if the pointer contents cannot be loaded at comptime.
+ fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value {
+ const target = sema.mod.getTarget();
+ const load_ty = ptr_ty.childType();
+ const parent = sema.beginComptimePtrLoad(block, src, ptr_val) catch |err| switch (err) {
+ error.RuntimeLoad => return null,
+ else => |e| return e,
+ };
+ // We have a Value that lines up in virtual memory exactly with what we want to load.
+ // If the Type is in-memory coercible to `load_ty`, it may be returned without modifications.
+ const coerce_in_mem_ok =
+ coerceInMemoryAllowed(load_ty, parent.ty, false, target) == .ok or
+ coerceInMemoryAllowed(parent.ty, load_ty, false, target) == .ok;
+ if (coerce_in_mem_ok) {
+ if (parent.is_mutable) {
+ // The decl whose value we are obtaining here may be overwritten with
+ // a different value upon further semantic analysis, which would
+ // invalidate this memory. So we must copy here.
+ return try parent.val.copy(sema.arena);
+ }
+ return parent.val;
+ }
+ // The type is not in-memory coercible, so it must be bitcast according
+ // to the pointer type we are performing the load through.
+ // TODO emit a compile error if the types are not allowed to be bitcast
+ if (parent.ty.abiSize(target) >= load_ty.abiSize(target)) {
+ // The Type it is stored as in the compiler has an ABI size greater than or equal to
+ // the ABI size of `load_ty`. We may perform the bitcast based on
+ // `parent.val` alone (more efficient).
+ return try parent.val.bitCast(parent.ty, load_ty, target, sema.gpa, sema.arena);
+ }
+ // The Type it is stored as in the compiler has an ABI size less than the ABI size
+ // of `load_ty`. The bitcast must be performed based on `parent.root_val`,
+ // reinterpreted starting at `parent.byte_offset`.
+ return sema.fail(block, src, "TODO: implement bitcast with index offset", .{});
+ }
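
To make the branches concrete, a sketch (not part of the commit) of comptime loads taking each path:

    comptime {
        const a: u32 = 0x12345678;

        // In-memory coercible: the load type matches the stored type, so
        // parent.val is returned as-is (copied first only when the root
        // decl is comptime-mutable).
        const p1: *const u32 = &a;
        _ = p1.*;

        // Not coercible, but abiSize(u32) >= abiSize(u8): the stored value
        // is bitcast down, yielding 0x78 on a little-endian target.
        const p2 = @ptrCast(*const u8, &a);
        _ = p2.*;
    }

A load whose type has a larger ABI size than the value's stored type falls into the final branch and, for now, reports the "TODO: implement bitcast with index offset" error.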

View File

@ -1764,6 +1764,7 @@ pub const Type = extern union {
}
/// Asserts the type has the ABI size already resolved.
+ /// Types that return false for hasCodeGenBits() return 0.
pub fn abiSize(self: Type, target: Target) u64 {
return switch (self.tag()) {
.fn_noreturn_no_args => unreachable, // represents machine code; not a pointer
@ -1771,53 +1772,32 @@ pub const Type = extern union {
.fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer
.fn_ccc_void_no_args => unreachable, // represents machine code; not a pointer
.function => unreachable, // represents machine code; not a pointer
- .c_void => unreachable,
- .type => unreachable,
- .comptime_int => unreachable,
- .comptime_float => unreachable,
+ .@"opaque" => unreachable, // no size available
+ .bound_fn => unreachable, // TODO remove from the language
.noreturn => unreachable,
- .@"null" => unreachable,
- .@"undefined" => unreachable,
- .enum_literal => unreachable,
- .single_const_pointer_to_comptime_int => unreachable,
- .empty_struct_literal => unreachable,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
- .@"opaque" => unreachable,
.var_args_param => unreachable,
.generic_poison => unreachable,
- .type_info => unreachable,
- .bound_fn => unreachable,
+ .call_options => unreachable, // missing call to resolveTypeFields
+ .export_options => unreachable, // missing call to resolveTypeFields
+ .extern_options => unreachable, // missing call to resolveTypeFields
+ .type_info => unreachable, // missing call to resolveTypeFields
- .empty_struct, .void => 0,
+ .c_void,
+ .type,
+ .comptime_int,
+ .comptime_float,
+ .@"null",
+ .@"undefined",
+ .enum_literal,
+ .single_const_pointer_to_comptime_int,
+ .empty_struct_literal,
+ .empty_struct,
+ .void,
+ => 0,
- .@"struct" => {
- const fields = self.structFields();
- if (self.castTag(.@"struct")) |payload| {
- const struct_obj = payload.data;
- assert(struct_obj.status == .have_layout);
- const is_packed = struct_obj.layout == .Packed;
- if (is_packed) @panic("TODO packed structs");
- }
- var size: u64 = 0;
- var big_align: u32 = 0;
- for (fields.values()) |field| {
- if (!field.ty.hasCodeGenBits()) continue;
- const field_align = a: {
- if (field.abi_align.tag() == .abi_align_default) {
- break :a field.ty.abiAlignment(target);
- } else {
- break :a @intCast(u32, field.abi_align.toUnsignedInt());
- }
- };
- big_align = @maximum(big_align, field_align);
- size = std.mem.alignForwardGeneric(u64, size, field_align);
- size += field.ty.abiSize(target);
- }
- size = std.mem.alignForwardGeneric(u64, size, big_align);
- return size;
- },
+ .@"struct" => return self.structFieldOffset(self.structFieldCount(), target),
.enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = self.intTagType(&buffer);
@ -1837,9 +1817,6 @@ pub const Type = extern union {
.address_space,
.float_mode,
.reduce_op,
- .call_options,
- .export_options,
- .extern_options,
=> return 1,
.array_u8 => self.castTag(.array_u8).?.data,
@ -3414,6 +3391,36 @@ pub const Type = extern union {
}
}
+ pub fn structFieldOffset(ty: Type, index: usize, target: Target) u64 {
+ const fields = ty.structFields();
+ if (ty.castTag(.@"struct")) |payload| {
+ const struct_obj = payload.data;
+ assert(struct_obj.status == .have_layout);
+ const is_packed = struct_obj.layout == .Packed;
+ if (is_packed) @panic("TODO packed structs");
+ }
+ var offset: u64 = 0;
+ var big_align: u32 = 0;
+ for (fields.values()) |field, i| {
+ if (!field.ty.hasCodeGenBits()) continue;
+ const field_align = a: {
+ if (field.abi_align.tag() == .abi_align_default) {
+ break :a field.ty.abiAlignment(target);
+ } else {
+ break :a @intCast(u32, field.abi_align.toUnsignedInt());
+ }
+ };
+ big_align = @maximum(big_align, field_align);
+ offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+ if (i == index) return offset;
+ offset += field.ty.abiSize(target);
+ }
+ offset = std.mem.alignForwardGeneric(u64, offset, big_align);
+ return offset;
+ }
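
A worked example of this loop (consistent with the extern-struct reinterpret test elsewhere in this commit, assuming natural 1/2/1 field alignments):

    const std = @import("std");
    const S = extern struct { a: u8, b: u16, c: u8 };
    comptime {
        // offset(a) = 0; align forward to 2 for b, so offset(b) = 2;
        // offset(c) = 2 + 2 = 4; total size rounded up to big_align (2) = 6.
        std.debug.assert(@offsetOf(S, "a") == 0);
        std.debug.assert(@offsetOf(S, "b") == 2);
        std.debug.assert(@offsetOf(S, "c") == 4);
        std.debug.assert(@sizeOf(S) == 6);
    }

Note that passing index == field count never hits the early return, so the call falls through to the final alignForwardGeneric and returns the struct's total ABI size; the new one-line .@"struct" case in abiSize relies on exactly that.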
pub fn declSrcLoc(ty: Type) Module.SrcLoc {
return declSrcLocOrNull(ty).?;
}

View File

@ -1607,60 +1607,6 @@ pub const Value = extern union {
}
};
- /// Asserts the value is a pointer and dereferences it.
- /// Returns error.AnalysisFail if the pointer points to a Decl that failed semantic analysis.
- pub fn pointerDeref(val: Value, arena: *Allocator) error{ AnalysisFail, OutOfMemory }!?Value {
- const sub_val: Value = switch (val.tag()) {
- .decl_ref_mut => sub_val: {
- // The decl whose value we are obtaining here may be overwritten with
- // a different value, which would invalidate this memory. So we must
- // copy here.
- const sub_val = try val.castTag(.decl_ref_mut).?.data.decl.value();
- break :sub_val try sub_val.copy(arena);
- },
- .decl_ref => try val.castTag(.decl_ref).?.data.value(),
- .elem_ptr => blk: {
- const elem_ptr = val.castTag(.elem_ptr).?.data;
- const array_val = (try elem_ptr.array_ptr.pointerDeref(arena)) orelse return null;
- break :blk try array_val.elemValue(arena, elem_ptr.index);
- },
- .field_ptr => blk: {
- const field_ptr = val.castTag(.field_ptr).?.data;
- const container_val = (try field_ptr.container_ptr.pointerDeref(arena)) orelse return null;
- break :blk try container_val.fieldValue(arena, field_ptr.field_index);
- },
- .eu_payload_ptr => blk: {
- const err_union_ptr = val.castTag(.eu_payload_ptr).?.data;
- const err_union_val = (try err_union_ptr.pointerDeref(arena)) orelse return null;
- break :blk err_union_val.castTag(.eu_payload).?.data;
- },
- .opt_payload_ptr => blk: {
- const opt_ptr = val.castTag(.opt_payload_ptr).?.data;
- const opt_val = (try opt_ptr.pointerDeref(arena)) orelse return null;
- break :blk opt_val.castTag(.opt_payload).?.data;
- },
- .zero,
- .one,
- .int_u64,
- .int_i64,
- .int_big_positive,
- .int_big_negative,
- .variable,
- .extern_fn,
- .function,
- => return null,
- else => unreachable,
- };
- if (sub_val.tag() == .variable) {
- // This would be loading a runtime value at compile-time so we return
- // the indicator that this pointer dereference requires being done at runtime.
- return null;
- }
- return sub_val;
- }
pub fn isComptimeMutablePtr(val: Value) bool {
return switch (val.tag()) {
.decl_ref_mut => true,

View File

@ -45,6 +45,7 @@ test {
_ = @import("behavior/null.zig");
_ = @import("behavior/optional.zig");
_ = @import("behavior/pointers.zig");
+ _ = @import("behavior/ptrcast.zig");
_ = @import("behavior/pub_enum.zig");
_ = @import("behavior/sizeof_and_typeof.zig");
_ = @import("behavior/slice.zig");
@ -146,7 +147,7 @@ test {
_ = @import("behavior/optional_stage1.zig");
_ = @import("behavior/pointers_stage1.zig");
_ = @import("behavior/popcount.zig");
- _ = @import("behavior/ptrcast.zig");
+ _ = @import("behavior/ptrcast_stage1.zig");
_ = @import("behavior/ref_var_in_if_after_if_2nd_switch_prong.zig");
_ = @import("behavior/reflection.zig");
{

View File

@ -65,3 +65,15 @@ test "implicit cast comptime_int to comptime_float" {
comptime try expect(@as(comptime_float, 10) == @as(f32, 10));
try expect(2 == 2.0);
}
+
+ test "pointer reinterpret const float to int" {
+ // The hex representation is 0x3fe3333333333303.
+ const float: f64 = 5.99999999999994648725e-01;
+ const float_ptr = &float;
+ const int_ptr = @ptrCast(*const i32, float_ptr);
+ const int_val = int_ptr.*;
+ if (native_endian == .Little)
+ try expect(int_val == 0x33333303)
+ else
+ try expect(int_val == 0x3fe33333);
+ }
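
(The expected constants follow from the byte layout: 0x3fe3333333333303 stored little-endian is 03 33 33 33 33 33 e3 3f in memory, so the first four bytes read back as the i32 0x33333303; on a big-endian target the first four bytes are 3f e3 33 33, i.e. 0x3fe33333.)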

View File

@ -5,18 +5,6 @@ const maxInt = std.math.maxInt;
const Vector = std.meta.Vector;
const native_endian = @import("builtin").target.cpu.arch.endian();
- test "pointer reinterpret const float to int" {
- // The hex representation is 0x3fe3333333333303.
- const float: f64 = 5.99999999999994648725e-01;
- const float_ptr = &float;
- const int_ptr = @ptrCast(*const i32, float_ptr);
- const int_val = int_ptr.*;
- if (native_endian == .Little)
- try expect(int_val == 0x33333303)
- else
- try expect(int_val == 0x3fe33333);
- }
test "implicitly cast indirect pointer to maybe-indirect pointer" {
const S = struct {
const Self = @This();

View File

@ -2,72 +2,3 @@ const std = @import("std");
const builtin = @import("builtin");
const expect = std.testing.expect;
const native_endian = builtin.target.cpu.arch.endian();
- test "reinterpret bytes as integer with nonzero offset" {
- try testReinterpretBytesAsInteger();
- comptime try testReinterpretBytesAsInteger();
- }
- fn testReinterpretBytesAsInteger() !void {
- const bytes = "\x12\x34\x56\x78\xab";
- const expected = switch (native_endian) {
- .Little => 0xab785634,
- .Big => 0x345678ab,
- };
- try expect(@ptrCast(*align(1) const u32, bytes[1..5]).* == expected);
- }
- test "reinterpret bytes of an array into an extern struct" {
- try testReinterpretBytesAsExternStruct();
- comptime try testReinterpretBytesAsExternStruct();
- }
- fn testReinterpretBytesAsExternStruct() !void {
- var bytes align(2) = [_]u8{ 1, 2, 3, 4, 5, 6 };
- const S = extern struct {
- a: u8,
- b: u16,
- c: u8,
- };
- var ptr = @ptrCast(*const S, &bytes);
- var val = ptr.c;
- try expect(val == 5);
- }
- test "reinterpret struct field at comptime" {
- const numNative = comptime Bytes.init(0x12345678);
- if (native_endian != .Little) {
- try expect(std.mem.eql(u8, &[_]u8{ 0x12, 0x34, 0x56, 0x78 }, &numNative.bytes));
- } else {
- try expect(std.mem.eql(u8, &[_]u8{ 0x78, 0x56, 0x34, 0x12 }, &numNative.bytes));
- }
- }
- const Bytes = struct {
- bytes: [4]u8,
- pub fn init(v: u32) Bytes {
- var res: Bytes = undefined;
- @ptrCast(*align(1) u32, &res.bytes).* = v;
- return res;
- }
- };
- test "comptime ptrcast keeps larger alignment" {
- comptime {
- const a: u32 = 1234;
- const p = @ptrCast([*]const u8, &a);
- try expect(@TypeOf(p) == [*]align(@alignOf(u32)) const u8);
- }
- }
- test "implicit optional pointer to optional c_void pointer" {
- var buf: [4]u8 = "aoeu".*;
- var x: ?[*]u8 = &buf;
- var y: ?*c_void = x;
- var z = @ptrCast(*[4]u8, y);
- try expect(std.mem.eql(u8, z, "aoeu"));
- }

View File

@ -0,0 +1,73 @@
+ const std = @import("std");
+ const builtin = @import("builtin");
+ const expect = std.testing.expect;
+ const native_endian = builtin.target.cpu.arch.endian();
+
+ test "reinterpret bytes as integer with nonzero offset" {
+ try testReinterpretBytesAsInteger();
+ comptime try testReinterpretBytesAsInteger();
+ }
+ fn testReinterpretBytesAsInteger() !void {
+ const bytes = "\x12\x34\x56\x78\xab";
+ const expected = switch (native_endian) {
+ .Little => 0xab785634,
+ .Big => 0x345678ab,
+ };
+ try expect(@ptrCast(*align(1) const u32, bytes[1..5]).* == expected);
+ }
+ test "reinterpret bytes of an array into an extern struct" {
+ try testReinterpretBytesAsExternStruct();
+ comptime try testReinterpretBytesAsExternStruct();
+ }
+ fn testReinterpretBytesAsExternStruct() !void {
+ var bytes align(2) = [_]u8{ 1, 2, 3, 4, 5, 6 };
+ const S = extern struct {
+ a: u8,
+ b: u16,
+ c: u8,
+ };
+ var ptr = @ptrCast(*const S, &bytes);
+ var val = ptr.c;
+ try expect(val == 5);
+ }
+ test "reinterpret struct field at comptime" {
+ const numNative = comptime Bytes.init(0x12345678);
+ if (native_endian != .Little) {
+ try expect(std.mem.eql(u8, &[_]u8{ 0x12, 0x34, 0x56, 0x78 }, &numNative.bytes));
+ } else {
+ try expect(std.mem.eql(u8, &[_]u8{ 0x78, 0x56, 0x34, 0x12 }, &numNative.bytes));
+ }
+ }
+ const Bytes = struct {
+ bytes: [4]u8,
+ pub fn init(v: u32) Bytes {
+ var res: Bytes = undefined;
+ @ptrCast(*align(1) u32, &res.bytes).* = v;
+ return res;
+ }
+ };
+ test "comptime ptrcast keeps larger alignment" {
+ comptime {
+ const a: u32 = 1234;
+ const p = @ptrCast([*]const u8, &a);
+ try expect(@TypeOf(p) == [*]align(@alignOf(u32)) const u8);
+ }
+ }
+ test "implicit optional pointer to optional c_void pointer" {
+ var buf: [4]u8 = "aoeu".*;
+ var x: ?[*]u8 = &buf;
+ var y: ?*c_void = x;
+ var z = @ptrCast(*[4]u8, y);
+ try expect(std.mem.eql(u8, z, "aoeu"));
+ }