From bbd750ff05895f29be646bf51e8932c3c9fb14f3 Mon Sep 17 00:00:00 2001 From: Cody Tapscott Date: Thu, 10 Mar 2022 18:10:41 -0700 Subject: [PATCH 1/7] stage2: Add container_ty/elem_ty to elem_ptr, field_ptr, *_payload_ptr Values --- src/Sema.zig | 45 +++++++++++++------- src/codegen/llvm.zig | 4 +- src/value.zig | 98 ++++++++++++++++++++++++++------------------ 3 files changed, 92 insertions(+), 55 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index b339e4374d..5fa7866586 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5568,7 +5568,10 @@ fn analyzeOptionalPayloadPtr( } return sema.addConstant( child_pointer, - try Value.Tag.opt_payload_ptr.create(sema.arena, ptr_val), + try Value.Tag.opt_payload_ptr.create(sema.arena, .{ + .container_ptr = ptr_val, + .container_ty = optional_ptr_ty.childType(), + }), ); } if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| { @@ -5578,7 +5581,10 @@ fn analyzeOptionalPayloadPtr( // The same Value represents the pointer to the optional and the payload. 
return sema.addConstant( child_pointer, - try Value.Tag.opt_payload_ptr.create(sema.arena, ptr_val), + try Value.Tag.opt_payload_ptr.create(sema.arena, .{ + .container_ptr = ptr_val, + .container_ty = optional_ptr_ty.childType(), + }), ); } } @@ -5733,7 +5739,10 @@ fn analyzeErrUnionPayloadPtr( } return sema.addConstant( operand_pointer_ty, - try Value.Tag.eu_payload_ptr.create(sema.arena, ptr_val), + try Value.Tag.eu_payload_ptr.create(sema.arena, .{ + .container_ptr = ptr_val, + .container_ty = operand_ty.elemType(), + }), ); } if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| { @@ -5743,7 +5752,10 @@ fn analyzeErrUnionPayloadPtr( return sema.addConstant( operand_pointer_ty, - try Value.Tag.eu_payload_ptr.create(sema.arena, ptr_val), + try Value.Tag.eu_payload_ptr.create(sema.arena, .{ + .container_ptr = ptr_val, + .container_ty = operand_ty.elemType(), + }), ); } } @@ -6652,6 +6664,7 @@ fn zirSwitchCapture( field_ty_ptr, try Value.Tag.field_ptr.create(sema.arena, .{ .container_ptr = op_ptr_val, + .container_ty = operand_ty, .field_index = field_index, }), ); @@ -9638,7 +9651,7 @@ fn analyzePtrArithmetic( if (air_tag == .ptr_sub) { return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{}); } - const new_ptr_val = try ptr_val.elemPtr(sema.arena, offset_int); + const new_ptr_val = try ptr_val.elemPtr(ptr_ty, sema.arena, offset_int); return sema.addConstant(new_ptr_ty, new_ptr_val); } else break :rs offset_src; } else break :rs ptr_src; @@ -15903,6 +15916,7 @@ fn finishFieldCallBind( ptr_field_ty, try Value.Tag.field_ptr.create(arena, .{ .container_ptr = struct_ptr_val, + .container_ty = ptr_ty.childType(), .field_index = field_index, }), ); @@ -16065,6 +16079,7 @@ fn structFieldPtrByIndex( ptr_field_ty, try Value.Tag.field_ptr.create(sema.arena, .{ .container_ptr = struct_ptr_val, + .container_ty = struct_ptr_ty.childType(), .field_index = field_index, }), ); @@ -16241,6 +16256,7 @@ fn unionFieldPtr( ptr_field_ty, try 
Value.Tag.field_ptr.create(arena, .{ .container_ptr = union_ptr_val, + .container_ty = union_ty, .field_index = field_index, }), ); @@ -16333,7 +16349,7 @@ fn elemPtr( const runtime_src = if (maybe_slice_val) |slice_val| rs: { const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt()); - const elem_ptr = try slice_val.elemPtr(sema.arena, index); + const elem_ptr = try slice_val.elemPtr(array_ty, sema.arena, index); return sema.addConstant(result_ty, elem_ptr); } else array_ptr_src; @@ -16348,7 +16364,7 @@ fn elemPtr( const ptr_val = maybe_ptr_val orelse break :rs array_ptr_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt()); - const elem_ptr = try ptr_val.elemPtr(sema.arena, index); + const elem_ptr = try ptr_val.elemPtr(array_ty, sema.arena, index); return sema.addConstant(result_ty, elem_ptr); }; @@ -16473,6 +16489,7 @@ fn tupleFieldPtr( ptr_field_ty, try Value.Tag.field_ptr.create(sema.arena, .{ .container_ptr = tuple_ptr_val, + .container_ty = tuple_ty, .field_index = field_index, }), ); @@ -16563,7 +16580,7 @@ fn elemPtrArray( const index_u64 = index_val.toUnsignedInt(); // @intCast here because it would have been impossible to construct a value that // required a larger index. 
- const elem_ptr = try array_ptr_val.elemPtr(sema.arena, @intCast(usize, index_u64)); + const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, sema.arena, @intCast(usize, index_u64)); return sema.addConstant(result_ty, elem_ptr); } } @@ -17757,8 +17774,8 @@ fn beginComptimePtrMutation( } }, .eu_payload_ptr => { - const eu_ptr_val = ptr_val.castTag(.eu_payload_ptr).?.data; - var parent = try beginComptimePtrMutation(sema, block, src, eu_ptr_val); + const eu_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; + var parent = try beginComptimePtrMutation(sema, block, src, eu_ptr.container_ptr); const payload_ty = parent.ty.errorUnionPayload(); switch (parent.val.tag()) { else => { @@ -17790,8 +17807,8 @@ fn beginComptimePtrMutation( } }, .opt_payload_ptr => { - const opt_ptr_val = ptr_val.castTag(.opt_payload_ptr).?.data; - var parent = try beginComptimePtrMutation(sema, block, src, opt_ptr_val); + const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; + var parent = try beginComptimePtrMutation(sema, block, src, opt_ptr.container_ptr); const payload_ty = try parent.ty.optionalChildAlloc(sema.arena); switch (parent.val.tag()) { .undef, .null_value => { @@ -17965,7 +17982,7 @@ fn beginComptimePtrLoad( }, .eu_payload_ptr => { const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - const parent = try beginComptimePtrLoad(sema, block, src, err_union_ptr); + const parent = try beginComptimePtrLoad(sema, block, src, err_union_ptr.container_ptr); return ComptimePtrLoadKit{ .root_val = parent.root_val, .root_ty = parent.root_ty, @@ -17977,7 +17994,7 @@ fn beginComptimePtrLoad( }, .opt_payload_ptr => { const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - const parent = try beginComptimePtrLoad(sema, block, src, opt_ptr); + const parent = try beginComptimePtrLoad(sema, block, src, opt_ptr.container_ptr); return ComptimePtrLoadKit{ .root_val = parent.root_val, .root_ty = parent.root_ty, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 
7c4455f296..01f7d60a53 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2901,7 +2901,7 @@ pub const DeclGen = struct { }, .opt_payload_ptr => { const opt_payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - const parent = try dg.lowerParentPtr(opt_payload_ptr, base_ty); + const parent = try dg.lowerParentPtr(opt_payload_ptr.container_ptr, base_ty); var buf: Type.Payload.ElemType = undefined; const payload_ty = parent.ty.optionalChild(&buf); if (!payload_ty.hasRuntimeBitsIgnoreComptime() or parent.ty.isPtrLikeOptional()) { @@ -2925,7 +2925,7 @@ pub const DeclGen = struct { }, .eu_payload_ptr => { const eu_payload_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - const parent = try dg.lowerParentPtr(eu_payload_ptr, base_ty); + const parent = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, base_ty); const payload_ty = parent.ty.errorUnionPayload(); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { // In this case, we represent pointer to error union the same as pointer diff --git a/src/value.zig b/src/value.zig index c6e201d3f8..f997b554a3 100644 --- a/src/value.zig +++ b/src/value.zig @@ -268,12 +268,14 @@ pub const Value = extern union { .repeated, .eu_payload, - .eu_payload_ptr, .opt_payload, - .opt_payload_ptr, .empty_array_sentinel, => Payload.SubValue, + .eu_payload_ptr, + .opt_payload_ptr, + => Payload.PayloadPtr, + .bytes, .enum_literal, => Payload.Bytes, @@ -479,6 +481,20 @@ pub const Value = extern union { .variable => return self.copyPayloadShallow(arena, Payload.Variable), .decl_ref => return self.copyPayloadShallow(arena, Payload.Decl), .decl_ref_mut => return self.copyPayloadShallow(arena, Payload.DeclRefMut), + .eu_payload_ptr, + .opt_payload_ptr, + => { + const payload = self.cast(Payload.PayloadPtr).?; + const new_payload = try arena.create(Payload.PayloadPtr); + new_payload.* = .{ + .base = payload.base, + .data = .{ + .container_ptr = try payload.data.container_ptr.copy(arena), + .container_ty = try 
payload.data.container_ty.copy(arena), + }, + }; + return Value{ .ptr_otherwise = &new_payload.base }; + }, .elem_ptr => { const payload = self.castTag(.elem_ptr).?; const new_payload = try arena.create(Payload.ElemPtr); @@ -486,6 +502,7 @@ pub const Value = extern union { .base = payload.base, .data = .{ .array_ptr = try payload.data.array_ptr.copy(arena), + .elem_ty = try payload.data.elem_ty.copy(arena), .index = payload.data.index, }, }; @@ -498,6 +515,7 @@ pub const Value = extern union { .base = payload.base, .data = .{ .container_ptr = try payload.data.container_ptr.copy(arena), + .container_ty = try payload.data.container_ty.copy(arena), .field_index = payload.data.field_index, }, }; @@ -506,9 +524,7 @@ pub const Value = extern union { .bytes => return self.copyPayloadShallow(arena, Payload.Bytes), .repeated, .eu_payload, - .eu_payload_ptr, .opt_payload, - .opt_payload_ptr, .empty_array_sentinel, => { const payload = self.cast(Payload.SubValue).?; @@ -740,11 +756,11 @@ pub const Value = extern union { .inferred_alloc_comptime => return out_stream.writeAll("(inferred comptime allocation value)"), .eu_payload_ptr => { try out_stream.writeAll("(eu_payload_ptr)"); - val = val.castTag(.eu_payload_ptr).?.data; + val = val.castTag(.eu_payload_ptr).?.data.container_ptr; }, .opt_payload_ptr => { try out_stream.writeAll("(opt_payload_ptr)"); - val = val.castTag(.opt_payload_ptr).?.data; + val = val.castTag(.opt_payload_ptr).?.data.container_ptr; }, .bound_fn => { const bound_func = val.castTag(.bound_fn).?.data; @@ -2162,8 +2178,8 @@ pub const Value = extern union { .decl_ref_mut => true, .elem_ptr => isComptimeMutablePtr(val.castTag(.elem_ptr).?.data.array_ptr), .field_ptr => isComptimeMutablePtr(val.castTag(.field_ptr).?.data.container_ptr), - .eu_payload_ptr => isComptimeMutablePtr(val.castTag(.eu_payload_ptr).?.data), - .opt_payload_ptr => isComptimeMutablePtr(val.castTag(.opt_payload_ptr).?.data), + .eu_payload_ptr => 
isComptimeMutablePtr(val.castTag(.eu_payload_ptr).?.data.container_ptr), + .opt_payload_ptr => isComptimeMutablePtr(val.castTag(.opt_payload_ptr).?.data.container_ptr), else => false, }; @@ -2174,9 +2190,9 @@ pub const Value = extern union { switch (val.tag()) { .repeated => return val.castTag(.repeated).?.data.canMutateComptimeVarState(), .eu_payload => return val.castTag(.eu_payload).?.data.canMutateComptimeVarState(), - .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.canMutateComptimeVarState(), + .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(), .opt_payload => return val.castTag(.opt_payload).?.data.canMutateComptimeVarState(), - .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.canMutateComptimeVarState(), + .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(), .aggregate => { const fields = val.castTag(.aggregate).?.data; for (fields) |field| { @@ -2239,12 +2255,12 @@ pub const Value = extern union { .eu_payload_ptr => { const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; std.hash.autoHash(hasher, Value.Tag.eu_payload_ptr); - hashPtr(err_union_ptr, hasher); + hashPtr(err_union_ptr.container_ptr, hasher); }, .opt_payload_ptr => { const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; std.hash.autoHash(hasher, Value.Tag.opt_payload_ptr); - hashPtr(opt_ptr, hasher); + hashPtr(opt_ptr.container_ptr, hasher); }, .zero, @@ -2272,12 +2288,14 @@ pub const Value = extern union { .repeated, .eu_payload, - .eu_payload_ptr, .opt_payload, - .opt_payload_ptr, .empty_array_sentinel, => return markReferencedDeclsAlive(val.cast(Payload.SubValue).?.data), + .eu_payload_ptr, + .opt_payload_ptr, + => return markReferencedDeclsAlive(val.cast(Payload.PayloadPtr).?.data.container_ptr), + .slice => { const slice = val.cast(Payload.Slice).?.data; markReferencedDeclsAlive(slice.ptr); @@ -2422,36 +2440,28 @@ pub const Value = extern 
union { } /// Returns a pointer to the element value at the index. - pub fn elemPtr(val: Value, arena: Allocator, index: usize) Allocator.Error!Value { - switch (val.tag()) { - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; + pub fn elemPtr(val: Value, ty: Type, arena: Allocator, index: usize) Allocator.Error!Value { + const elem_ty = ty.elemType2(); + const ptr_val = switch (val.tag()) { + .slice => val.slicePtr(), + else => val, + }; + + if (ptr_val.tag() == .elem_ptr) { + const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; + if (elem_ptr.elem_ty.eql(elem_ty)) { return Tag.elem_ptr.create(arena, .{ .array_ptr = elem_ptr.array_ptr, + .elem_ty = elem_ptr.elem_ty, .index = elem_ptr.index + index, }); - }, - .slice => { - const ptr_val = val.castTag(.slice).?.data.ptr; - switch (ptr_val.tag()) { - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - return Tag.elem_ptr.create(arena, .{ - .array_ptr = elem_ptr.array_ptr, - .index = elem_ptr.index + index, - }); - }, - else => return Tag.elem_ptr.create(arena, .{ - .array_ptr = ptr_val, - .index = index, - }), - } - }, - else => return Tag.elem_ptr.create(arena, .{ - .array_ptr = val, - .index = index, - }), + } } + return Tag.elem_ptr.create(arena, .{ + .array_ptr = ptr_val, + .elem_ty = elem_ty, + .index = index, + }); } pub fn isUndef(self: Value) bool { @@ -4144,12 +4154,21 @@ pub const Value = extern union { }; }; + pub const PayloadPtr = struct { + base: Payload, + data: struct { + container_ptr: Value, + container_ty: Type, + }, + }; + pub const ElemPtr = struct { pub const base_tag = Tag.elem_ptr; base: Payload = Payload{ .tag = base_tag }, data: struct { array_ptr: Value, + elem_ty: Type, index: usize, }, }; @@ -4160,6 +4179,7 @@ pub const Value = extern union { base: Payload = Payload{ .tag = base_tag }, data: struct { container_ptr: Value, + container_ty: Type, field_index: usize, }, }; From 34a6fcd88e20a2491bf6a5396d583a7449cac20b Mon Sep 17 00:00:00 2001 From: Cody 
Tapscott Date: Fri, 11 Mar 2022 14:18:23 -0700 Subject: [PATCH 2/7] stage2: Add hasWellDefinedLayout() to type.zig and Sema.zig This follows the same strategy as sema.typeRequiresComptime() and type.comptimeOnly(): Two versions of the function, one which performs resolution just-in-time and another which asserts that resolution is complete. Thankfully, this doesn't cause very viral type resolution, since auto-layout structs and unions are very common and are known to not have a well-defined layout without resolving their fields. --- src/Module.zig | 10 ++- src/Sema.zig | 208 ++++++++++++++++++++++++++++++++++++++++++++++--- src/type.zig | 144 ++++++++++++++++++++++++++++++++++ 3 files changed, 348 insertions(+), 14 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index b6104df232..7c6c654660 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -852,7 +852,7 @@ pub const ErrorSet = struct { } }; -pub const RequiresComptime = enum { no, yes, unknown, wip }; +pub const PropertyBoolean = enum { no, yes, unknown, wip }; /// Represents the data that a struct declaration provides. pub const Struct = struct { @@ -884,7 +884,8 @@ pub const Struct = struct { /// If false, resolving the fields is necessary to determine whether the type has only /// one possible value. known_non_opv: bool, - requires_comptime: RequiresComptime = .unknown, + requires_comptime: PropertyBoolean = .unknown, + has_well_defined_layout: PropertyBoolean = .unknown, pub const Fields = std.StringArrayHashMapUnmanaged(Field); @@ -1079,6 +1080,8 @@ pub const EnumFull = struct { /// An integer type which is used for the numerical value of the enum. /// Whether zig chooses this type or the user specifies it, it is stored here. tag_ty: Type, + /// true if zig inferred this tag type, false if user specified it + tag_ty_inferred: bool, /// Set of field names in declaration order. fields: NameMap, /// Maps integer tag value to field index. 
@@ -1132,7 +1135,8 @@ pub const Union = struct { // which `have_layout` does not ensure. fully_resolved, }, - requires_comptime: RequiresComptime = .unknown, + requires_comptime: PropertyBoolean = .unknown, + has_well_defined_layout: PropertyBoolean = .unknown, pub const Field = struct { /// undefined until `status` is `have_field_types` or `have_layout`. diff --git a/src/Sema.zig b/src/Sema.zig index 5fa7866586..bf1b24145d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1579,6 +1579,8 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const target = sema.mod.getTarget(); const addr_space = target_util.defaultAddressSpace(target, .local); + try sema.resolveTypeLayout(block, src, pointee_ty); + if (Air.refToIndex(ptr)) |ptr_inst| { if (sema.air_instructions.items(.tag)[ptr_inst] == .constant) { const air_datas = sema.air_instructions.items(.data); @@ -1885,6 +1887,7 @@ fn zirEnumDecl( enum_obj.* = .{ .owner_decl = new_decl, .tag_ty = Type.initTag(.@"null"), + .tag_ty_inferred = true, .fields = .{}, .values = .{}, .node_offset = src.node_offset, @@ -1907,6 +1910,7 @@ fn zirEnumDecl( // TODO better source location const ty = try sema.resolveType(block, src, tag_type_ref); enum_obj.tag_ty = try ty.copy(new_decl_arena_allocator); + enum_obj.tag_ty_inferred = false; } try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl); @@ -1956,16 +1960,16 @@ fn zirEnumDecl( try wip_captures.finalize(); - const tag_ty = blk: { - if (tag_type_ref != .none) { - // TODO better source location - const ty = try sema.resolveType(block, src, tag_type_ref); - break :blk try ty.copy(new_decl_arena_allocator); - } + if (tag_type_ref != .none) { + // TODO better source location + const ty = try sema.resolveType(block, src, tag_type_ref); + enum_obj.tag_ty = try ty.copy(new_decl_arena_allocator); + enum_obj.tag_ty_inferred = false; + } else { const bits = std.math.log2_int_ceil(usize, fields_len); - break :blk try 
Type.Tag.int_unsigned.create(new_decl_arena_allocator, bits); - }; - enum_obj.tag_ty = tag_ty; + enum_obj.tag_ty = try Type.Tag.int_unsigned.create(new_decl_arena_allocator, bits); + enum_obj.tag_ty_inferred = true; + } } try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); @@ -2417,13 +2421,13 @@ fn zirAllocExtended( try sema.validateVarType(block, ty_src, var_ty, false); } const target = sema.mod.getTarget(); + try sema.requireRuntimeBlock(block, src); + try sema.resolveTypeLayout(block, src, var_ty); const ptr_type = try Type.ptr(sema.arena, target, .{ .pointee_type = var_ty, .@"align" = alignment, .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); - try sema.requireRuntimeBlock(block, src); - try sema.resolveTypeLayout(block, src, var_ty); return block.addTy(.alloc, ptr_type); } @@ -21209,6 +21213,182 @@ fn typePtrOrOptionalPtrTy( } } +fn typeHasWellDefinedLayout(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool { + return switch (ty.tag()) { + .u1, + .u8, + .i8, + .u16, + .i16, + .u32, + .i32, + .u64, + .i64, + .u128, + .i128, + .usize, + .isize, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .f16, + .f32, + .f64, + .f80, + .f128, + .bool, + .void, + .manyptr_u8, + .manyptr_const_u8, + .manyptr_const_u8_sentinel_0, + .anyerror_void_error_union, + .empty_struct_literal, + .empty_struct, + .array_u8, + .array_u8_sentinel_0, + .int_signed, + .int_unsigned, + .pointer, + .single_const_pointer, + .single_mut_pointer, + .many_const_pointer, + .many_mut_pointer, + .c_const_pointer, + .c_mut_pointer, + .single_const_pointer_to_comptime_int, + .enum_numbered, + => true, + + .anyopaque, + .anyerror, + .noreturn, + .@"null", + .@"anyframe", + .@"undefined", + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_options, + .prefetch_options, + .export_options, + .extern_options, 
+ .error_set, + .error_set_single, + .error_set_inferred, + .error_set_merged, + .@"opaque", + .generic_poison, + .type, + .comptime_int, + .comptime_float, + .enum_literal, + .type_info, + // These are function bodies, not function pointers. + .fn_noreturn_no_args, + .fn_void_no_args, + .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, + .function, + .const_slice_u8, + .const_slice_u8_sentinel_0, + .const_slice, + .mut_slice, + .enum_simple, + .error_union, + .anyframe_T, + .tuple, + .anon_struct, + => false, + + .enum_full, + .enum_nonexhaustive, + => !ty.cast(Type.Payload.EnumFull).?.data.tag_ty_inferred, + + .var_args_param => unreachable, + .inferred_alloc_mut => unreachable, + .inferred_alloc_const => unreachable, + .bound_fn => unreachable, + + .array, + .array_sentinel, + .vector, + => sema.typeHasWellDefinedLayout(block, src, ty.childType()), + + .optional, + .optional_single_mut_pointer, + .optional_single_const_pointer, + => blk: { + var buf: Type.Payload.ElemType = undefined; + break :blk sema.typeHasWellDefinedLayout(block, src, ty.optionalChild(&buf)); + }, + + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + if (struct_obj.layout == .Auto) { + struct_obj.has_well_defined_layout = .no; + return false; + } + switch (struct_obj.has_well_defined_layout) { + .no => return false, + .yes, .wip => return true, + .unknown => { + if (struct_obj.status == .field_types_wip) + return true; + + try sema.resolveTypeFieldsStruct(block, src, ty, struct_obj); + + struct_obj.has_well_defined_layout = .wip; + for (struct_obj.fields.values()) |field| { + if (!(try sema.typeHasWellDefinedLayout(block, src, field.ty))) { + struct_obj.has_well_defined_layout = .no; + return false; + } + } + struct_obj.has_well_defined_layout = .yes; + return true; + }, + } + }, + + .@"union", .union_tagged => { + const union_obj = ty.cast(Type.Payload.Union).?.data; + if (union_obj.layout == .Auto) { + union_obj.has_well_defined_layout = .no; + return false; + } + 
switch (union_obj.has_well_defined_layout) { + .no => return false, + .yes, .wip => return true, + .unknown => { + if (union_obj.status == .field_types_wip) + return true; + + try sema.resolveTypeFieldsUnion(block, src, ty, union_obj); + + union_obj.has_well_defined_layout = .wip; + for (union_obj.fields.values()) |field| { + if (!(try sema.typeHasWellDefinedLayout(block, src, field.ty))) { + union_obj.has_well_defined_layout = .no; + return false; + } + } + union_obj.has_well_defined_layout = .yes; + return true; + }, + } + }, + }; +} + /// `generic_poison` will return false. /// This function returns false negatives when structs and unions are having their /// field types resolved. @@ -21412,6 +21592,12 @@ pub fn typeHasRuntimeBits(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) return true; } +fn typeAbiSize(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !u64 { + try sema.resolveTypeLayout(block, src, ty); + const target = sema.mod.getTarget(); + return ty.abiSize(target); +} + fn typeAbiAlignment(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !u32 { try sema.resolveTypeLayout(block, src, ty); const target = sema.mod.getTarget(); diff --git a/src/type.zig b/src/type.zig index 89f0e84b70..9ea579ee19 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2173,6 +2173,149 @@ pub const Type = extern union { }; } + /// true if and only if the type has a well-defined memory layout + /// readFrom/writeToMemory are supported only for types with a well- + /// defined memory layout + pub fn hasWellDefinedLayout(ty: Type) bool { + return switch (ty.tag()) { + .u1, + .u8, + .i8, + .u16, + .i16, + .u32, + .i32, + .u64, + .i64, + .u128, + .i128, + .usize, + .isize, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .f16, + .f32, + .f64, + .f80, + .f128, + .bool, + .void, + .manyptr_u8, + .manyptr_const_u8, + .manyptr_const_u8_sentinel_0, + .anyerror_void_error_union, + .empty_struct_literal, 
+ .empty_struct, + .array_u8, + .array_u8_sentinel_0, + .int_signed, + .int_unsigned, + .pointer, + .single_const_pointer, + .single_mut_pointer, + .many_const_pointer, + .many_mut_pointer, + .c_const_pointer, + .c_mut_pointer, + .single_const_pointer_to_comptime_int, + .enum_numbered, + => true, + + .anyopaque, + .anyerror, + .noreturn, + .@"null", + .@"anyframe", + .@"undefined", + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_options, + .prefetch_options, + .export_options, + .extern_options, + .error_set, + .error_set_single, + .error_set_inferred, + .error_set_merged, + .@"opaque", + .generic_poison, + .type, + .comptime_int, + .comptime_float, + .enum_literal, + .type_info, + // These are function bodies, not function pointers. + .fn_noreturn_no_args, + .fn_void_no_args, + .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, + .function, + .const_slice_u8, + .const_slice_u8_sentinel_0, + .const_slice, + .mut_slice, + .enum_simple, + .error_union, + .anyframe_T, + .tuple, + .anon_struct, + => false, + + .enum_full, + .enum_nonexhaustive, + => !ty.cast(Payload.EnumFull).?.data.tag_ty_inferred, + + .var_args_param => unreachable, + .inferred_alloc_mut => unreachable, + .inferred_alloc_const => unreachable, + .bound_fn => unreachable, + + .array, + .array_sentinel, + .vector, + => ty.childType().hasWellDefinedLayout(), + + .optional, + .optional_single_mut_pointer, + .optional_single_const_pointer, + => { + var buf: Type.Payload.ElemType = undefined; + return ty.optionalChild(&buf).hasWellDefinedLayout(); + }, + + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + if (struct_obj.layout == .Auto) return false; + switch (struct_obj.has_well_defined_layout) { + .wip, .unknown => unreachable, // This function asserts types already resolved. 
+ .no => return false, + .yes => return true, + } + }, + + .@"union", .union_tagged => { + const union_obj = ty.cast(Type.Payload.Union).?.data; + if (union_obj.layout == .Auto) return false; + switch (union_obj.has_well_defined_layout) { + .wip, .unknown => unreachable, // This function asserts types already resolved. + .no => return false, + .yes => return true, + } + }, + }; + } + pub fn hasRuntimeBits(ty: Type) bool { return hasRuntimeBitsAdvanced(ty, false); } @@ -3263,6 +3406,7 @@ pub const Type = extern union { /// For ?[*]T, returns T. /// For *T, returns T. /// For [*]T, returns T. + /// For [N]T, returns T. /// For []T, returns T. pub fn elemType2(ty: Type) Type { return switch (ty.tag()) { From 54426bdc82ad361bb580f9678dd23417c350282a Mon Sep 17 00:00:00 2001 From: Cody Tapscott Date: Fri, 11 Mar 2022 14:19:45 -0700 Subject: [PATCH 3/7] stage2: Fix assertion in struct field offset when all fields are 0-size --- src/type.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/type.zig b/src/type.zig index 9ea579ee19..b36f480654 100644 --- a/src/type.zig +++ b/src/type.zig @@ -4786,7 +4786,7 @@ pub const Type = extern union { return field_offset.offset; } - return std.mem.alignForwardGeneric(u64, it.offset, it.big_align); + return std.mem.alignForwardGeneric(u64, it.offset, @maximum(it.big_align, 1)); }, .tuple, .anon_struct => { @@ -4809,7 +4809,7 @@ pub const Type = extern union { if (i == index) return offset; offset += field_ty.abiSize(target); } - offset = std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForwardGeneric(u64, offset, @maximum(big_align, 1)); return offset; }, From 5fa057053ce32274b878077e5abff82335530fc8 Mon Sep 17 00:00:00 2001 From: Cody Tapscott Date: Fri, 11 Mar 2022 14:22:53 -0700 Subject: [PATCH 4/7] stage2 sema: Respect container_ty of parent ptrs The core change here is that we no longer blindly trust that parent pointers (.elem_ptr, .field_ptr, .eu_payload_ptr, 
.opt_payload_ptr) were derived from the "true" type of the underlying decl. When types diverge, direct dereference fails and we are forced to bitcast, as usual. In order to maximize our chances to have a successful bitcast, this includes several changes to the dereference procedure: - `root` is now `parent` and is the largest Value containing the dereference target, with the condition that its layout and the byte offset of the target within are both well-defined. - If the target cannot be dereferenced directly, because the pointers were not derived from the true type of the underlying decl, then it is returned as null. - `beginComptimePtrLoad` now accepts an optional array_ty param, which is used to directly dereference an array from an elem_ptr, if necessary. This allows us to dereference array types without well-defined layouts (e.g. `[N]?u8`) at an offset. The array_ty also allows us to correctly "over-read" an .elem_ptr to an array of [N]T, if necessary. This makes direct dereference work for array types even in the presence of an offset, which is necessary if the array has no well-defined layout (e.g.
loading from `[6]?u8`) --- src/Sema.zig | 364 +++++++++++++++++++++++++++++--------------------- src/type.zig | 7 + src/value.zig | 23 ++++ 3 files changed, 241 insertions(+), 153 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index bf1b24145d..ad96aea7ab 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -12609,6 +12609,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I enum_obj.* = .{ .owner_decl = new_decl, .tag_ty = Type.initTag(.@"null"), + .tag_ty_inferred = true, .fields = .{}, .values = .{}, .node_offset = src.node_offset, @@ -17590,6 +17591,14 @@ fn beginComptimePtrMutation( src: LazySrcLoc, ptr_val: Value, ) CompileError!ComptimePtrMutationKit { + + // TODO: Update this to behave like `beginComptimePtrLoad` and properly check/use + // `container_ty` and `array_ty`, instead of trusting that the parent decl type + // matches the type used to derive the elem_ptr/field_ptr/etc. + // + // This is needed because the types will not match if the pointer we're mutating + // through is reinterpreting comptime memory. + switch (ptr_val.tag()) { .decl_ref_mut => { const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data; @@ -17850,163 +17859,191 @@ fn beginComptimePtrMutation( } } -const ComptimePtrLoadKit = struct { - /// The Value of the Decl that owns this memory. - root_val: Value, - /// The Type of the Decl that owns this memory. - root_ty: Type, - /// Parent Value. - val: Value, - /// The Type of the parent Value. - ty: Type, +const TypedValueAndOffset = struct { + tv: TypedValue, /// The starting byte offset of `val` from `root_val`. /// If the type does not have a well-defined memory layout, this is null. - byte_offset: ?usize, - /// Whether the `root_val` could be mutated by further + byte_offset: usize, +}; + +const ComptimePtrLoadKit = struct { + /// The Value and Type corresponding to the target of the provided pointer. + /// If a direct dereference is not possible, this is null. 
+ target: ?TypedValue, + /// The largest parent Value containing `target` and having a well-defined memory layout. + /// This is used for bitcasting, if direct dereferencing failed (i.e. `target` is null). + parent: ?TypedValueAndOffset, + /// Whether the `target` could be mutated by further /// semantic analysis and a copy must be performed. is_mutable: bool, + /// If the root decl could not be used as `parent`, this is the type that + /// caused that by not having a well-defined layout + ty_without_well_defined_layout: ?Type, }; const ComptimePtrLoadError = CompileError || error{ RuntimeLoad, }; +/// If `maybe_array_ty` is provided, it will be used to directly dereference an +/// .elem_ptr of type T to a value of [N]T, if necessary. fn beginComptimePtrLoad( sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, + maybe_array_ty: ?Type, ) ComptimePtrLoadError!ComptimePtrLoadKit { const target = sema.mod.getTarget(); - switch (ptr_val.tag()) { - .decl_ref => { - const decl = ptr_val.castTag(.decl_ref).?.data; - const decl_val = try decl.value(); - if (decl_val.tag() == .variable) return error.RuntimeLoad; - return ComptimePtrLoadKit{ - .root_val = decl_val, - .root_ty = decl.ty, - .val = decl_val, - .ty = decl.ty, - .byte_offset = 0, - .is_mutable = false, + var deref: ComptimePtrLoadKit = switch (ptr_val.tag()) { + .decl_ref, + .decl_ref_mut, + => blk: { + const decl = switch (ptr_val.tag()) { + .decl_ref => ptr_val.castTag(.decl_ref).?.data, + .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl, + else => unreachable, + }; + const is_mutable = ptr_val.tag() == .decl_ref_mut; + const decl_tv = try decl.typedValue(); + if (decl_tv.val.tag() == .variable) return error.RuntimeLoad; + + const layout_defined = try sema.typeHasWellDefinedLayout(block, src, decl.ty); + break :blk ComptimePtrLoadKit{ + .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, + .target = decl_tv, + .is_mutable = is_mutable, + 
.ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, }; }, - .decl_ref_mut => { - const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl; - const decl_val = try decl.value(); - if (decl_val.tag() == .variable) return error.RuntimeLoad; - return ComptimePtrLoadKit{ - .root_val = decl_val, - .root_ty = decl.ty, - .val = decl_val, - .ty = decl.ty, - .byte_offset = 0, - .is_mutable = true, - }; - }, - .elem_ptr => { + + .elem_ptr => blk: { const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const parent = try beginComptimePtrLoad(sema, block, src, elem_ptr.array_ptr); - switch (parent.ty.zigTypeTag()) { - .Array, .Vector => { - const check_len = parent.ty.arrayLenIncludingSentinel(); - if (elem_ptr.index >= check_len) { - // TODO have the parent include the decl so we can say "declared here" - return sema.fail(block, src, "comptime load of index {d} out of bounds of array length {d}", .{ - elem_ptr.index, check_len, - }); + const elem_ty = elem_ptr.elem_ty; + var deref = try beginComptimePtrLoad(sema, block, src, elem_ptr.array_ptr, null); + + if (elem_ptr.index != 0) { + if (try sema.typeHasWellDefinedLayout(block, src, elem_ty)) { + if (deref.parent) |*parent| { + // Update the byte offset (in-place) + const elem_size = try sema.typeAbiSize(block, src, elem_ty); + const offset = parent.byte_offset + elem_size * elem_ptr.index; + parent.byte_offset = try sema.usizeCast(block, src, offset); } - const elem_ty = parent.ty.childType(); - const byte_offset: ?usize = bo: { - if (try sema.typeRequiresComptime(block, src, elem_ty)) { - break :bo null; - } else { - if (parent.byte_offset) |off| { - try sema.resolveTypeLayout(block, src, elem_ty); - const elem_size = elem_ty.abiSize(target); - break :bo try sema.usizeCast(block, src, off + elem_size * elem_ptr.index); - } else { - break :bo null; - } - } - }; - return ComptimePtrLoadKit{ - .root_val = parent.root_val, - .root_ty = parent.root_ty, - .val = try parent.val.elemValue(sema.arena, 
elem_ptr.index), - .ty = elem_ty, - .byte_offset = byte_offset, - .is_mutable = parent.is_mutable, - }; - }, - else => { - if (elem_ptr.index != 0) { - // TODO have the parent include the decl so we can say "declared here" - return sema.fail(block, src, "out of bounds comptime load of index {d}", .{ - elem_ptr.index, - }); - } - return ComptimePtrLoadKit{ - .root_val = parent.root_val, - .root_ty = parent.root_ty, - .val = parent.val, - .ty = parent.ty, - .byte_offset = parent.byte_offset, - .is_mutable = parent.is_mutable, - }; - }, - } - }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const parent = try beginComptimePtrLoad(sema, block, src, field_ptr.container_ptr); - const field_index = @intCast(u32, field_ptr.field_index); - const byte_offset: ?usize = bo: { - if (try sema.typeRequiresComptime(block, src, parent.ty)) { - break :bo null; } else { - if (parent.byte_offset) |off| { - try sema.resolveTypeLayout(block, src, parent.ty); - const field_offset = parent.ty.structFieldOffset(field_index, target); - break :bo try sema.usizeCast(block, src, off + field_offset); - } else { - break :bo null; - } + deref.parent = null; + deref.ty_without_well_defined_layout = elem_ty; } + } + + // If we're loading an elem_ptr that was derived from a different type + // than the true type of the underlying decl, we cannot deref directly + const ty_matches = if (deref.target != null and deref.target.?.ty.isArrayLike()) x: { + const deref_elem_ty = deref.target.?.ty.childType(); + break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; + } else false; + if (!ty_matches) { + deref.target = null; + break :blk deref; + } + + var array_tv = deref.target.?; + const check_len = array_tv.ty.arrayLenIncludingSentinel(); + if (elem_ptr.index >= check_len) { + // TODO have the deref include the decl so we can 
say "declared here" + return sema.fail(block, src, "comptime load of index {d} out of bounds of array length {d}", .{ + elem_ptr.index, check_len, + }); + } + + if (maybe_array_ty) |load_ty| { + // It's possible that we're loading a [N]T, in which case we'd like to slice + // the target array directly from our parent array. + if (load_ty.isArrayLike() and load_ty.childType().eql(elem_ty)) { + const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel()); + deref.target = if (elem_ptr.index + N <= check_len) TypedValue{ + .ty = try Type.array(sema.arena, N, null, elem_ty), + .val = try array_tv.val.sliceArray(sema.arena, elem_ptr.index, elem_ptr.index + N), + } else null; + break :blk deref; + } + } + + deref.target = .{ + .ty = elem_ty, + .val = try array_tv.val.elemValue(sema.arena, elem_ptr.index), }; - return ComptimePtrLoadKit{ - .root_val = parent.root_val, - .root_ty = parent.root_ty, - .val = try parent.val.fieldValue(sema.arena, field_index), - .ty = parent.ty.structFieldType(field_index), - .byte_offset = byte_offset, - .is_mutable = parent.is_mutable, - }; + break :blk deref; }, - .eu_payload_ptr => { - const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - const parent = try beginComptimePtrLoad(sema, block, src, err_union_ptr.container_ptr); - return ComptimePtrLoadKit{ - .root_val = parent.root_val, - .root_ty = parent.root_ty, - .val = parent.val.castTag(.eu_payload).?.data, - .ty = parent.ty.errorUnionPayload(), - .byte_offset = null, - .is_mutable = parent.is_mutable, - }; + + .field_ptr => blk: { + const field_ptr = ptr_val.castTag(.field_ptr).?.data; + const field_index = @intCast(u32, field_ptr.field_index); + const field_ty = field_ptr.container_ty.structFieldType(field_index); + var deref = try beginComptimePtrLoad(sema, block, src, field_ptr.container_ptr, field_ptr.container_ty); + + if (try sema.typeHasWellDefinedLayout(block, src, field_ptr.container_ty)) { + if (deref.parent) |*parent| { + // Update the byte 
offset (in-place) + try sema.resolveTypeLayout(block, src, field_ptr.container_ty); + const field_offset = field_ptr.container_ty.structFieldOffset(field_index, target); + parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); + } + } else { + deref.parent = null; + deref.ty_without_well_defined_layout = field_ptr.container_ty; + } + + if (deref.target) |*tv| { + const coerce_in_mem_ok = + (try sema.coerceInMemoryAllowed(block, field_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, field_ptr.container_ty, false, target, src, src)) == .ok; + if (coerce_in_mem_ok) { + deref.target = TypedValue{ + .ty = field_ty, + .val = try tv.val.fieldValue(sema.arena, field_index), + }; + break :blk deref; + } + } + deref.target = null; + break :blk deref; }, - .opt_payload_ptr => { - const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - const parent = try beginComptimePtrLoad(sema, block, src, opt_ptr.container_ptr); - return ComptimePtrLoadKit{ - .root_val = parent.root_val, - .root_ty = parent.root_ty, - .val = parent.val.castTag(.opt_payload).?.data, - .ty = try parent.ty.optionalChildAlloc(sema.arena), - .byte_offset = null, - .is_mutable = parent.is_mutable, + + .opt_payload_ptr, + .eu_payload_ptr, + => blk: { + const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; + const payload_ty = switch (ptr_val.tag()) { + .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(), + .opt_payload_ptr => try payload_ptr.container_ty.optionalChildAlloc(sema.arena), + else => unreachable, }; + var deref = try beginComptimePtrLoad(sema, block, src, payload_ptr.container_ptr, payload_ptr.container_ty); + + // eu_payload_ptr and opt_payload_ptr never have a well-defined layout + if (deref.parent != null) { + deref.parent = null; + deref.ty_without_well_defined_layout = payload_ptr.container_ty; + } + + if (deref.target) |*tv| { + const coerce_in_mem_ok = + (try 
sema.coerceInMemoryAllowed(block, payload_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, payload_ptr.container_ty, false, target, src, src)) == .ok; + if (coerce_in_mem_ok) { + const payload_val = switch (ptr_val.tag()) { + .eu_payload_ptr => tv.val.castTag(.eu_payload).?.data, + .opt_payload_ptr => tv.val.castTag(.opt_payload).?.data, + else => unreachable, + }; + tv.* = TypedValue{ .ty = payload_ty, .val = payload_val }; + break :blk deref; + } + } + deref.target = null; + break :blk deref; }, .zero, @@ -18021,7 +18058,14 @@ fn beginComptimePtrLoad( => return error.RuntimeLoad, else => unreachable, + }; + + if (deref.target) |tv| { + if (deref.parent == null and tv.ty.hasWellDefinedLayout()) { + deref.parent = .{ .tv = tv, .byte_offset = 0 }; + } } + return deref; } fn bitCast( @@ -21106,39 +21150,53 @@ pub fn analyzeAddrspace( /// Asserts the value is a pointer and dereferences it. /// Returns `null` if the pointer contents cannot be loaded at comptime. fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value { - const target = sema.mod.getTarget(); const load_ty = ptr_ty.childType(); - const parent = sema.beginComptimePtrLoad(block, src, ptr_val) catch |err| switch (err) { + const target = sema.mod.getTarget(); + const deref = sema.beginComptimePtrLoad(block, src, ptr_val, load_ty) catch |err| switch (err) { error.RuntimeLoad => return null, else => |e| return e, }; - // We have a Value that lines up in virtual memory exactly with what we want to load. - // If the Type is in-memory coercable to `load_ty`, it may be returned without modifications. 
- const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, load_ty, parent.ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, parent.ty, load_ty, false, target, src, src)) == .ok; - if (coerce_in_mem_ok) { - if (parent.is_mutable) { - // The decl whose value we are obtaining here may be overwritten with - // a different value upon further semantic analysis, which would - // invalidate this memory. So we must copy here. - return try parent.val.copy(sema.arena); + + if (deref.target) |tv| { + const coerce_in_mem_ok = + (try sema.coerceInMemoryAllowed(block, load_ty, tv.ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, load_ty, false, target, src, src)) == .ok; + if (coerce_in_mem_ok) { + // We have a Value that lines up in virtual memory exactly with what we want to load, + // and it is in-memory coercible to load_ty. It may be returned without modifications. + if (deref.is_mutable) { + // The decl whose value we are obtaining here may be overwritten with + // a different value upon further semantic analysis, which would + // invalidate this memory. So we must copy here. + return try tv.val.copy(sema.arena); + } + return tv.val; } - return parent.val; } - // The type is not in-memory coercable, so it must be bitcasted according - // to the pointer type we are performing the load through. + // The type is not in-memory coercible or the direct dereference failed, so it must + // be bitcast according to the pointer type we are performing the load through. 
+ if (!(try sema.typeHasWellDefinedLayout(block, src, load_ty))) + return sema.fail(block, src, "comptime dereference requires {} to have a well-defined layout, but it does not.", .{load_ty}); - // TODO emit a compile error if the types are not allowed to be bitcasted + const load_sz = try sema.typeAbiSize(block, src, load_ty); - if (parent.ty.abiSize(target) >= load_ty.abiSize(target)) { - // The Type it is stored as in the compiler has an ABI size greater or equal to - // the ABI size of `load_ty`. We may perform the bitcast based on - // `parent.val` alone (more efficient). - return try sema.bitCastVal(block, src, parent.val, parent.ty, load_ty, 0); + // Try the smaller bit-cast first, since that's more efficient than using the larger `parent` + if (deref.target) |tv| if (load_sz <= try sema.typeAbiSize(block, src, tv.ty)) + return try sema.bitCastVal(block, src, tv.val, tv.ty, load_ty, 0); + + // If that fails, try to bit-cast from the largest parent value with a well-defined layout + if (deref.parent) |parent| if (load_sz + parent.byte_offset <= try sema.typeAbiSize(block, src, parent.tv.ty)) + return try sema.bitCastVal(block, src, parent.tv.val, parent.tv.ty, load_ty, parent.byte_offset); + + if (deref.ty_without_well_defined_layout) |bad_ty| { + // We got no parent for bit-casting, or the parent we got was too small. Either way, the problem + // is that some type we encountered when de-referencing does not have a well-defined layout. + return sema.fail(block, src, "comptime dereference requires {} to have a well-defined layout, but it does not.", .{bad_ty}); } else { - return try sema.bitCastVal(block, src, parent.root_val, parent.root_ty, load_ty, parent.byte_offset.?); + // If all encountered types had well-defined layouts, the parent is the root decl and it just + // wasn't big enough for the load. 
+ return sema.fail(block, src, "dereference of {} exceeds bounds of containing decl of type {}", .{ ptr_ty, deref.parent.?.tv.ty }); } } diff --git a/src/type.zig b/src/type.zig index b36f480654..3f6e3ef282 100644 --- a/src/type.zig +++ b/src/type.zig @@ -4400,6 +4400,13 @@ pub const Type = extern union { }; } + pub fn isArrayLike(ty: Type) bool { + return switch (ty.zigTypeTag()) { + .Array, .Vector => true, + else => false, + }; + } + pub fn isIndexable(ty: Type) bool { return switch (ty.zigTypeTag()) { .Array, .Vector => true, diff --git a/src/value.zig b/src/value.zig index f997b554a3..af5ee75737 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2412,6 +2412,29 @@ pub const Value = extern union { } } + // Asserts that the provided start/end are in-bounds. + pub fn sliceArray(val: Value, arena: Allocator, start: usize, end: usize) error{OutOfMemory}!Value { + return switch (val.tag()) { + .empty_array_sentinel => if (start == 0 and end == 1) val else Value.initTag(.empty_array), + .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]), + .array => Tag.array.create(arena, val.castTag(.array).?.data[start..end]), + .slice => sliceArray(val.castTag(.slice).?.data.ptr, arena, start, end), + + .decl_ref => sliceArray(val.castTag(.decl_ref).?.data.val, arena, start, end), + .decl_ref_mut => sliceArray(val.castTag(.decl_ref_mut).?.data.decl.val, arena, start, end), + .elem_ptr => blk: { + const elem_ptr = val.castTag(.elem_ptr).?.data; + break :blk sliceArray(elem_ptr.array_ptr, arena, start + elem_ptr.index, end + elem_ptr.index); + }, + + .repeated, + .the_only_possible_value, + => val, + + else => unreachable, + }; + } + pub fn fieldValue(val: Value, allocator: Allocator, index: usize) error{OutOfMemory}!Value { _ = allocator; switch (val.tag()) { From 1f76b4c6b84c1fd7d370bde75b8e37822bd85086 Mon Sep 17 00:00:00 2001 From: Cody Tapscott Date: Fri, 11 Mar 2022 14:23:29 -0700 Subject: [PATCH 5/7] stage2 llvm: Respect container type when lowering 
parent pointers We need to make sure that we bitcast our pointers correctly before we use get_element_ptr to compute the offset for the parent pointer. This also includes a small fix-up for a problem where ptrs to const i64/u64 were not using the correct type in >1-level decl chains (where we call lowerParentPtr recursively) --- src/codegen/llvm.zig | 160 +++++++++++++++----------------------- test/behavior/cast.zig | 4 +- test/behavior/ptrcast.zig | 92 +++++++++++++++++++++- 3 files changed, 155 insertions(+), 101 deletions(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 01f7d60a53..08cfe4a258 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1518,26 +1518,8 @@ pub const DeclGen = struct { const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(), .False); return llvm_int.constIntToPtr(try dg.llvmType(tv.ty)); }, - .field_ptr, .opt_payload_ptr, .eu_payload_ptr => { - const parent = try dg.lowerParentPtr(tv.val, tv.ty); - return parent.llvm_ptr.constBitCast(try dg.llvmType(tv.ty)); - }, - .elem_ptr => { - const elem_ptr = tv.val.castTag(.elem_ptr).?.data; - const parent = try dg.lowerParentPtr(elem_ptr.array_ptr, tv.ty); - const llvm_usize = try dg.llvmType(Type.usize); - if (parent.llvm_ptr.typeOf().getElementType().getTypeKind() == .Array) { - const indices: [2]*const llvm.Value = .{ - llvm_usize.constInt(0, .False), - llvm_usize.constInt(elem_ptr.index, .False), - }; - return parent.llvm_ptr.constInBoundsGEP(&indices, indices.len); - } else { - const indices: [1]*const llvm.Value = .{ - llvm_usize.constInt(elem_ptr.index, .False), - }; - return parent.llvm_ptr.constInBoundsGEP(&indices, indices.len); - } + .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { + return dg.lowerParentPtr(tv.val, tv.ty.childType()); }, .null_value, .zero => { const llvm_type = try dg.llvmType(tv.ty); @@ -2786,7 +2768,7 @@ pub const DeclGen = struct { llvm_ptr: *const llvm.Value, }; - fn lowerParentPtrDecl(dg: *DeclGen, ptr_val: Value, 
decl: *Module.Decl) Error!ParentPtr { + fn lowerParentPtrDecl(dg: *DeclGen, ptr_val: Value, decl: *Module.Decl, ptr_child_ty: Type) Error!*const llvm.Value { decl.markAlive(); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, @@ -2794,123 +2776,104 @@ pub const DeclGen = struct { }; const ptr_ty = Type.initPayload(&ptr_ty_payload.base); const llvm_ptr = try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl); - return ParentPtr{ - .llvm_ptr = llvm_ptr, - .ty = decl.ty, - }; + + if (ptr_child_ty.eql(decl.ty)) { + return llvm_ptr; + } else { + return llvm_ptr.constBitCast((try dg.llvmType(ptr_child_ty)).pointerType(0)); + } } - fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, base_ty: Type) Error!ParentPtr { - switch (ptr_val.tag()) { + fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, ptr_child_ty: Type) Error!*const llvm.Value { + var bitcast_needed: bool = undefined; + const llvm_ptr = switch (ptr_val.tag()) { .decl_ref_mut => { const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl; - return dg.lowerParentPtrDecl(ptr_val, decl); + return dg.lowerParentPtrDecl(ptr_val, decl, ptr_child_ty); }, .decl_ref => { const decl = ptr_val.castTag(.decl_ref).?.data; - return dg.lowerParentPtrDecl(ptr_val, decl); + return dg.lowerParentPtrDecl(ptr_val, decl, ptr_child_ty); }, .variable => { const decl = ptr_val.castTag(.variable).?.data.owner_decl; - return dg.lowerParentPtrDecl(ptr_val, decl); + return dg.lowerParentPtrDecl(ptr_val, decl, ptr_child_ty); }, .int_i64 => { const int = ptr_val.castTag(.int_i64).?.data; const llvm_usize = try dg.llvmType(Type.usize); const llvm_int = llvm_usize.constInt(@bitCast(u64, int), .False); - return ParentPtr{ - .llvm_ptr = llvm_int.constIntToPtr(try dg.llvmType(base_ty)), - .ty = base_ty, - }; + return llvm_int.constIntToPtr((try dg.llvmType(ptr_child_ty)).pointerType(0)); }, .int_u64 => { const int = ptr_val.castTag(.int_u64).?.data; const llvm_usize = try dg.llvmType(Type.usize); const 
llvm_int = llvm_usize.constInt(int, .False); - return ParentPtr{ - .llvm_ptr = llvm_int.constIntToPtr(try dg.llvmType(base_ty)), - .ty = base_ty, - }; + return llvm_int.constIntToPtr((try dg.llvmType(ptr_child_ty)).pointerType(0)); }, - .field_ptr => { + .field_ptr => blk: { const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const parent = try dg.lowerParentPtr(field_ptr.container_ptr, base_ty); + const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.container_ptr, field_ptr.container_ty); + const parent_ty = field_ptr.container_ty; + const field_index = @intCast(u32, field_ptr.field_index); const llvm_u32 = dg.context.intType(32); const target = dg.module.getTarget(); - switch (parent.ty.zigTypeTag()) { + switch (parent_ty.zigTypeTag()) { .Union => { - const fields = parent.ty.unionFields(); - const layout = parent.ty.unionGetLayout(target); - const field_ty = fields.values()[field_index].ty; + bitcast_needed = true; + + const layout = parent_ty.unionGetLayout(target); if (layout.payload_size == 0) { // In this case a pointer to the union and a pointer to any // (void) payload is the same. 
- return ParentPtr{ - .llvm_ptr = parent.llvm_ptr, - .ty = field_ty, - }; + break :blk parent_llvm_ptr; } - if (layout.tag_size == 0) { - const indices: [2]*const llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(0, .False), - }; - return ParentPtr{ - .llvm_ptr = parent.llvm_ptr.constInBoundsGEP(&indices, indices.len), - .ty = field_ty, - }; - } - const llvm_pl_index = @boolToInt(layout.tag_align >= layout.payload_align); + const llvm_pl_index = if (layout.tag_size == 0) 0 else @boolToInt(layout.tag_align >= layout.payload_align); const indices: [2]*const llvm.Value = .{ llvm_u32.constInt(0, .False), llvm_u32.constInt(llvm_pl_index, .False), }; - return ParentPtr{ - .llvm_ptr = parent.llvm_ptr.constInBoundsGEP(&indices, indices.len), - .ty = field_ty, - }; + break :blk parent_llvm_ptr.constInBoundsGEP(&indices, indices.len); }, .Struct => { + const field_ty = parent_ty.structFieldType(field_index); + bitcast_needed = !field_ty.eql(ptr_child_ty); + var ty_buf: Type.Payload.Pointer = undefined; - const llvm_field_index = llvmFieldIndex(parent.ty, field_index, target, &ty_buf).?; + const llvm_field_index = llvmFieldIndex(parent_ty, field_index, target, &ty_buf).?; const indices: [2]*const llvm.Value = .{ llvm_u32.constInt(0, .False), llvm_u32.constInt(llvm_field_index, .False), }; - return ParentPtr{ - .llvm_ptr = parent.llvm_ptr.constInBoundsGEP(&indices, indices.len), - .ty = parent.ty.structFieldType(field_index), - }; + break :blk parent_llvm_ptr.constInBoundsGEP(&indices, indices.len); }, else => unreachable, } }, - .elem_ptr => { + .elem_ptr => blk: { const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const parent = try dg.lowerParentPtr(elem_ptr.array_ptr, base_ty); + const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr, elem_ptr.elem_ty); + bitcast_needed = !elem_ptr.elem_ty.eql(ptr_child_ty); + const llvm_usize = try dg.llvmType(Type.usize); - const indices: [2]*const llvm.Value = .{ - llvm_usize.constInt(0, .False), + const 
indices: [1]*const llvm.Value = .{ llvm_usize.constInt(elem_ptr.index, .False), }; - return ParentPtr{ - .llvm_ptr = parent.llvm_ptr.constInBoundsGEP(&indices, indices.len), - .ty = parent.ty.childType(), - }; + break :blk parent_llvm_ptr.constInBoundsGEP(&indices, indices.len); }, - .opt_payload_ptr => { + .opt_payload_ptr => blk: { const opt_payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - const parent = try dg.lowerParentPtr(opt_payload_ptr.container_ptr, base_ty); + const parent_llvm_ptr = try dg.lowerParentPtr(opt_payload_ptr.container_ptr, opt_payload_ptr.container_ty); var buf: Type.Payload.ElemType = undefined; - const payload_ty = parent.ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime() or parent.ty.isPtrLikeOptional()) { + + const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf); + bitcast_needed = !payload_ty.eql(ptr_child_ty); + + if (!payload_ty.hasRuntimeBitsIgnoreComptime() or payload_ty.isPtrLikeOptional()) { // In this case, we represent pointer to optional the same as pointer // to the payload. 
- return ParentPtr{ - .llvm_ptr = parent.llvm_ptr, - .ty = payload_ty, - }; + break :blk parent_llvm_ptr; } const llvm_u32 = dg.context.intType(32); @@ -2918,22 +2881,19 @@ pub const DeclGen = struct { llvm_u32.constInt(0, .False), llvm_u32.constInt(0, .False), }; - return ParentPtr{ - .llvm_ptr = parent.llvm_ptr.constInBoundsGEP(&indices, indices.len), - .ty = payload_ty, - }; + break :blk parent_llvm_ptr.constInBoundsGEP(&indices, indices.len); }, - .eu_payload_ptr => { + .eu_payload_ptr => blk: { const eu_payload_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - const parent = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, base_ty); - const payload_ty = parent.ty.errorUnionPayload(); + const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, eu_payload_ptr.container_ty); + + const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload(); + bitcast_needed = !payload_ty.eql(ptr_child_ty); + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { // In this case, we represent pointer to error union the same as pointer // to the payload. 
- return ParentPtr{ - .llvm_ptr = parent.llvm_ptr, - .ty = payload_ty, - }; + break :blk parent_llvm_ptr; } const llvm_u32 = dg.context.intType(32); @@ -2941,12 +2901,14 @@ pub const DeclGen = struct { llvm_u32.constInt(0, .False), llvm_u32.constInt(1, .False), }; - return ParentPtr{ - .llvm_ptr = parent.llvm_ptr.constInBoundsGEP(&indices, indices.len), - .ty = payload_ty, - }; + break :blk parent_llvm_ptr.constInBoundsGEP(&indices, indices.len); }, else => unreachable, + }; + if (bitcast_needed) { + return llvm_ptr.constBitCast((try dg.llvmType(ptr_child_ty)).pointerType(0)); + } else { + return llvm_ptr; } } diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 69e64b8973..417ec06bd7 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -887,7 +887,9 @@ test "peer cast *[N:x]T to *[N]T" { } test "peer cast [*:x]T to [*]T" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig index cfca107d78..211945d5b4 100644 --- a/test/behavior/ptrcast.zig +++ b/test/behavior/ptrcast.zig @@ -21,8 +21,47 @@ fn testReinterpretBytesAsInteger() !void { try expect(@ptrCast(*align(1) const u32, bytes[1..5]).* == expected); } +test "reinterpret an array over multiple elements, with no well-defined layout" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + + try testReinterpretWithOffsetAndNoWellDefinedLayout(); + comptime try testReinterpretWithOffsetAndNoWellDefinedLayout(); +} + +fn 
testReinterpretWithOffsetAndNoWellDefinedLayout() !void { + const bytes: ?[5]?u8 = [5]?u8{ 0x12, 0x34, 0x56, 0x78, 0x9a }; + const ptr = &bytes.?[1]; + const copy: [4]?u8 = @ptrCast(*const [4]?u8, ptr).*; + _ = copy; + //try expect(@ptrCast(*align(1)?u8, bytes[1..5]).* == ); +} + +test "reinterpret bytes inside auto-layout struct as integer with nonzero offset" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + + try testReinterpretStructWrappedBytesAsInteger(); + comptime try testReinterpretStructWrappedBytesAsInteger(); +} + +fn testReinterpretStructWrappedBytesAsInteger() !void { + const S = struct { bytes: [5:0]u8 }; + const obj = S{ .bytes = "\x12\x34\x56\x78\xab".* }; + const expected = switch (native_endian) { + .Little => 0xab785634, + .Big => 0x345678ab, + }; + try expect(@ptrCast(*align(1) const u32, obj.bytes[1..5]).* == expected); +} + test "reinterpret bytes of an array into an extern struct" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; try testReinterpretBytesAsExternStruct(); comptime try testReinterpretBytesAsExternStruct(); @@ -42,6 +81,57 @@ fn testReinterpretBytesAsExternStruct() !void { try expect(val == 5); } +test "reinterpret bytes of an extern struct into another" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + + try testReinterpretExternStructAsExternStruct(); + comptime try testReinterpretExternStructAsExternStruct(); +} + +fn testReinterpretExternStructAsExternStruct() !void { + const S1 = extern 
struct { + a: u8, + b: u16, + c: u8, + }; + comptime var bytes align(2) = S1{ .a = 0, .b = 0, .c = 5 }; + + const S2 = extern struct { + a: u32 align(2), + c: u8, + }; + var ptr = @ptrCast(*const S2, &bytes); + var val = ptr.c; + try expect(val == 5); +} + +test "lower reinterpreted comptime field ptr" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + + // Test lowering a field ptr + comptime var bytes align(2) = [_]u8{ 1, 2, 3, 4, 5, 6 }; + const S = extern struct { + a: u32 align(2), + c: u8, + }; + comptime var ptr = @ptrCast(*const S, &bytes); + var val = &ptr.c; + try expect(val.* == 5); + + // Test lowering an elem ptr + comptime var src_value = S{ .a = 15, .c = 5 }; + comptime var ptr2 = @ptrCast(*[@sizeOf(S)]u8, &src_value); + var val2 = &ptr2[4]; + try expect(val2.* == 5); +} + test "reinterpret struct field at comptime" { if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO From 50a1ca24ca2a4311097132d660b8244f252da82f Mon Sep 17 00:00:00 2001 From: Cody Tapscott Date: Mon, 14 Mar 2022 10:37:23 -0700 Subject: [PATCH 6/7] Add test for issue #11139 --- test/behavior.zig | 1 + test/behavior/bugs/11139.zig | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+) create mode 100644 test/behavior/bugs/11139.zig diff --git a/test/behavior.zig b/test/behavior.zig index 22298ef191..987c8c3473 100644 --- a/test/behavior.zig +++ b/test/behavior.zig @@ -62,6 +62,7 @@ test { _ = @import("behavior/bugs/11100.zig"); _ = @import("behavior/bugs/10970.zig"); _ = @import("behavior/bugs/11046.zig"); + _ = 
@import("behavior/bugs/11139.zig"); _ = @import("behavior/bugs/11165.zig"); _ = @import("behavior/call.zig"); _ = @import("behavior/cast.zig"); diff --git a/test/behavior/bugs/11139.zig b/test/behavior/bugs/11139.zig new file mode 100644 index 0000000000..a3714ec5ba --- /dev/null +++ b/test/behavior/bugs/11139.zig @@ -0,0 +1,25 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const expect = std.testing.expect; + +test "store array of array of structs at comptime" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + + try expect(storeArrayOfArrayOfStructs() == 15); + comptime try expect(storeArrayOfArrayOfStructs() == 15); +} + +fn storeArrayOfArrayOfStructs() u8 { + const S = struct { + x: u8, + }; + + var cases = [_][1]S{ + [_]S{ + S{ .x = 15 }, + }, + }; + return cases[0][0].x; +} From 2f92d1a0264b6827cb67a55726c4c9a082337508 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 14 Mar 2022 21:11:49 -0700 Subject: [PATCH 7/7] stage2: fixups for topolarity-comptime-memory-reinterp branch * don't store `has_well_defined_layout` in memory. * remove struct `hasWellDefinedLayout` logic. it's just `layout != .Auto`. This means we only need one implementation, in Type. * fix some of the cases being wrong in `hasWellDefinedLayout`, such as optional pointers. * move `tag_ty_inferred` field into a position that makes it more obvious how the struct layout will be done. Also we don't have a compiler that intelligently moves fields around so this layout is better. * Sema: don't `resolveTypeLayout` in `zirCoerceResultPtr` unless necessary. * Rename `ComptimePtrLoadKit` `target` field to `pointee` to avoid confusion with `target`. 
--- src/Module.zig | 6 +- src/Sema.zig | 235 +++++------------------------------ src/codegen/llvm.zig | 5 +- src/type.zig | 51 +++----- src/value.zig | 4 +- test/behavior/bugs/11139.zig | 6 +- test/behavior/cast.zig | 8 +- test/behavior/ptrcast.zig | 32 ++--- 8 files changed, 76 insertions(+), 271 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 7c6c654660..20bf25af03 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -885,7 +885,6 @@ pub const Struct = struct { /// one possible value. known_non_opv: bool, requires_comptime: PropertyBoolean = .unknown, - has_well_defined_layout: PropertyBoolean = .unknown, pub const Fields = std.StringArrayHashMapUnmanaged(Field); @@ -1080,8 +1079,6 @@ pub const EnumFull = struct { /// An integer type which is used for the numerical value of the enum. /// Whether zig chooses this type or the user specifies it, it is stored here. tag_ty: Type, - /// true if zig inferred this tag type, false if user specified it - tag_ty_inferred: bool, /// Set of field names in declaration order. fields: NameMap, /// Maps integer tag value to field index. @@ -1092,6 +1089,8 @@ pub const EnumFull = struct { namespace: Namespace, /// Offset from `owner_decl`, points to the enum decl AST node. node_offset: i32, + /// true if zig inferred this tag type, false if user specified it + tag_ty_inferred: bool, pub const NameMap = std.StringArrayHashMapUnmanaged(void); pub const ValueMap = std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false); @@ -1136,7 +1135,6 @@ pub const Union = struct { fully_resolved, }, requires_comptime: PropertyBoolean = .unknown, - has_well_defined_layout: PropertyBoolean = .unknown, pub const Field = struct { /// undefined until `status` is `have_field_types` or `have_layout`. 
diff --git a/src/Sema.zig b/src/Sema.zig index ad96aea7ab..57522bcfd5 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1579,8 +1579,6 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const target = sema.mod.getTarget(); const addr_space = target_util.defaultAddressSpace(target, .local); - try sema.resolveTypeLayout(block, src, pointee_ty); - if (Air.refToIndex(ptr)) |ptr_inst| { if (sema.air_instructions.items(.tag)[ptr_inst] == .constant) { const air_datas = sema.air_instructions.items(.data); @@ -1617,6 +1615,9 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE try pointee_ty.copy(anon_decl.arena()), Value.undef, ); + if (iac.data.alignment != 0) { + try sema.resolveTypeLayout(block, src, pointee_ty); + } const ptr_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = pointee_ty, .@"align" = iac.data.alignment, @@ -1886,7 +1887,7 @@ fn zirEnumDecl( enum_obj.* = .{ .owner_decl = new_decl, - .tag_ty = Type.initTag(.@"null"), + .tag_ty = Type.@"null", .tag_ty_inferred = true, .fields = .{}, .values = .{}, @@ -17867,13 +17868,13 @@ const TypedValueAndOffset = struct { }; const ComptimePtrLoadKit = struct { - /// The Value and Type corresponding to the target of the provided pointer. + /// The Value and Type corresponding to the pointee of the provided pointer. /// If a direct dereference is not possible, this is null. - target: ?TypedValue, - /// The largest parent Value containing `target` and having a well-defined memory layout. - /// This is used for bitcasting, if direct dereferencing failed (i.e. `target` is null). + pointee: ?TypedValue, + /// The largest parent Value containing `pointee` and having a well-defined memory layout. + /// This is used for bitcasting, if direct dereferencing failed (i.e. `pointee` is null). 
parent: ?TypedValueAndOffset, - /// Whether the `target` could be mutated by further + /// Whether the `pointee` could be mutated by further /// semantic analysis and a copy must be performed. is_mutable: bool, /// If the root decl could not be used as `parent`, this is the type that @@ -17885,7 +17886,7 @@ const ComptimePtrLoadError = CompileError || error{ RuntimeLoad, }; -/// If `maybe_array_ty` is provided, it will be used to directly dereference an +/// If `maybe_array_ty` is provided, it will be used to directly dereference an /// .elem_ptr of type T to a value of [N]T, if necessary. fn beginComptimePtrLoad( sema: *Sema, @@ -17908,10 +17909,10 @@ fn beginComptimePtrLoad( const decl_tv = try decl.typedValue(); if (decl_tv.val.tag() == .variable) return error.RuntimeLoad; - const layout_defined = try sema.typeHasWellDefinedLayout(block, src, decl.ty); + const layout_defined = decl.ty.hasWellDefinedLayout(); break :blk ComptimePtrLoadKit{ .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, - .target = decl_tv, + .pointee = decl_tv, .is_mutable = is_mutable, .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, }; @@ -17923,7 +17924,7 @@ fn beginComptimePtrLoad( var deref = try beginComptimePtrLoad(sema, block, src, elem_ptr.array_ptr, null); if (elem_ptr.index != 0) { - if (try sema.typeHasWellDefinedLayout(block, src, elem_ty)) { + if (elem_ty.hasWellDefinedLayout()) { if (deref.parent) |*parent| { // Update the byte offset (in-place) const elem_size = try sema.typeAbiSize(block, src, elem_ty); @@ -17938,17 +17939,17 @@ fn beginComptimePtrLoad( // If we're loading an elem_ptr that was derived from a different type // than the true type of the underlying decl, we cannot deref directly - const ty_matches = if (deref.target != null and deref.target.?.ty.isArrayLike()) x: { - const deref_elem_ty = deref.target.?.ty.childType(); + const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayLike()) x: { + 
const deref_elem_ty = deref.pointee.?.ty.childType(); break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; } else false; if (!ty_matches) { - deref.target = null; + deref.pointee = null; break :blk deref; } - var array_tv = deref.target.?; + var array_tv = deref.pointee.?; const check_len = array_tv.ty.arrayLenIncludingSentinel(); if (elem_ptr.index >= check_len) { // TODO have the deref include the decl so we can say "declared here" @@ -17959,10 +17960,10 @@ fn beginComptimePtrLoad( if (maybe_array_ty) |load_ty| { // It's possible that we're loading a [N]T, in which case we'd like to slice - // the target array directly from our parent array. + // the pointee array directly from our parent array. if (load_ty.isArrayLike() and load_ty.childType().eql(elem_ty)) { const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel()); - deref.target = if (elem_ptr.index + N <= check_len) TypedValue{ + deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ .ty = try Type.array(sema.arena, N, null, elem_ty), .val = try array_tv.val.sliceArray(sema.arena, elem_ptr.index, elem_ptr.index + N), } else null; @@ -17970,7 +17971,7 @@ fn beginComptimePtrLoad( } } - deref.target = .{ + deref.pointee = .{ .ty = elem_ty, .val = try array_tv.val.elemValue(sema.arena, elem_ptr.index), }; @@ -17983,7 +17984,7 @@ fn beginComptimePtrLoad( const field_ty = field_ptr.container_ty.structFieldType(field_index); var deref = try beginComptimePtrLoad(sema, block, src, field_ptr.container_ptr, field_ptr.container_ty); - if (try sema.typeHasWellDefinedLayout(block, src, field_ptr.container_ty)) { + if (field_ptr.container_ty.hasWellDefinedLayout()) { if (deref.parent) |*parent| { // Update the byte offset (in-place) try sema.resolveTypeLayout(block, src, field_ptr.container_ty); @@ -17995,19 +17996,19 @@ fn 
beginComptimePtrLoad( deref.ty_without_well_defined_layout = field_ptr.container_ty; } - if (deref.target) |*tv| { + if (deref.pointee) |*tv| { const coerce_in_mem_ok = (try sema.coerceInMemoryAllowed(block, field_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or (try sema.coerceInMemoryAllowed(block, tv.ty, field_ptr.container_ty, false, target, src, src)) == .ok; if (coerce_in_mem_ok) { - deref.target = TypedValue{ + deref.pointee = TypedValue{ .ty = field_ty, .val = try tv.val.fieldValue(sema.arena, field_index), }; break :blk deref; } } - deref.target = null; + deref.pointee = null; break :blk deref; }, @@ -18028,7 +18029,7 @@ fn beginComptimePtrLoad( deref.ty_without_well_defined_layout = payload_ptr.container_ty; } - if (deref.target) |*tv| { + if (deref.pointee) |*tv| { const coerce_in_mem_ok = (try sema.coerceInMemoryAllowed(block, payload_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or (try sema.coerceInMemoryAllowed(block, tv.ty, payload_ptr.container_ty, false, target, src, src)) == .ok; @@ -18042,7 +18043,7 @@ fn beginComptimePtrLoad( break :blk deref; } } - deref.target = null; + deref.pointee = null; break :blk deref; }, @@ -18060,7 +18061,7 @@ fn beginComptimePtrLoad( else => unreachable, }; - if (deref.target) |tv| { + if (deref.pointee) |tv| { if (deref.parent == null and tv.ty.hasWellDefinedLayout()) { deref.parent = .{ .tv = tv, .byte_offset = 0 }; } @@ -21157,7 +21158,7 @@ fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr else => |e| return e, }; - if (deref.target) |tv| { + if (deref.pointee) |tv| { const coerce_in_mem_ok = (try sema.coerceInMemoryAllowed(block, load_ty, tv.ty, false, target, src, src)) == .ok or (try sema.coerceInMemoryAllowed(block, tv.ty, load_ty, false, target, src, src)) == .ok; @@ -21176,13 +21177,13 @@ fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr // The type is not in-memory coercible or the direct dereference failed, so it must // 
be bitcast according to the pointer type we are performing the load through. - if (!(try sema.typeHasWellDefinedLayout(block, src, load_ty))) + if (!load_ty.hasWellDefinedLayout()) return sema.fail(block, src, "comptime dereference requires {} to have a well-defined layout, but it does not.", .{load_ty}); const load_sz = try sema.typeAbiSize(block, src, load_ty); // Try the smaller bit-cast first, since that's more efficient than using the larger `parent` - if (deref.target) |tv| if (load_sz <= try sema.typeAbiSize(block, src, tv.ty)) + if (deref.pointee) |tv| if (load_sz <= try sema.typeAbiSize(block, src, tv.ty)) return try sema.bitCastVal(block, src, tv.val, tv.ty, load_ty, 0); // If that fails, try to bit-cast from the largest parent value with a well-defined layout @@ -21271,182 +21272,6 @@ fn typePtrOrOptionalPtrTy( } } -fn typeHasWellDefinedLayout(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool { - return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .usize, - .isize, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, - .bool, - .void, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .anyerror_void_error_union, - .empty_struct_literal, - .empty_struct, - .array_u8, - .array_u8_sentinel_0, - .int_signed, - .int_unsigned, - .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer_to_comptime_int, - .enum_numbered, - => true, - - .anyopaque, - .anyerror, - .noreturn, - .@"null", - .@"anyframe", - .@"undefined", - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_options, - .prefetch_options, - .export_options, - .extern_options, - .error_set, - .error_set_single, - 
.error_set_inferred, - .error_set_merged, - .@"opaque", - .generic_poison, - .type, - .comptime_int, - .comptime_float, - .enum_literal, - .type_info, - // These are function bodies, not function pointers. - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .mut_slice, - .enum_simple, - .error_union, - .anyframe_T, - .tuple, - .anon_struct, - => false, - - .enum_full, - .enum_nonexhaustive, - => !ty.cast(Type.Payload.EnumFull).?.data.tag_ty_inferred, - - .var_args_param => unreachable, - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, - .bound_fn => unreachable, - - .array, - .array_sentinel, - .vector, - => sema.typeHasWellDefinedLayout(block, src, ty.childType()), - - .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, - => blk: { - var buf: Type.Payload.ElemType = undefined; - break :blk sema.typeHasWellDefinedLayout(block, src, ty.optionalChild(&buf)); - }, - - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - if (struct_obj.layout == .Auto) { - struct_obj.has_well_defined_layout = .no; - return false; - } - switch (struct_obj.has_well_defined_layout) { - .no => return false, - .yes, .wip => return true, - .unknown => { - if (struct_obj.status == .field_types_wip) - return true; - - try sema.resolveTypeFieldsStruct(block, src, ty, struct_obj); - - struct_obj.has_well_defined_layout = .wip; - for (struct_obj.fields.values()) |field| { - if (!(try sema.typeHasWellDefinedLayout(block, src, field.ty))) { - struct_obj.has_well_defined_layout = .no; - return false; - } - } - struct_obj.has_well_defined_layout = .yes; - return true; - }, - } - }, - - .@"union", .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - if (union_obj.layout == .Auto) { - union_obj.has_well_defined_layout = .no; - return false; - } - switch 
(union_obj.has_well_defined_layout) { - .no => return false, - .yes, .wip => return true, - .unknown => { - if (union_obj.status == .field_types_wip) - return true; - - try sema.resolveTypeFieldsUnion(block, src, ty, union_obj); - - union_obj.has_well_defined_layout = .wip; - for (union_obj.fields.values()) |field| { - if (!(try sema.typeHasWellDefinedLayout(block, src, field.ty))) { - union_obj.has_well_defined_layout = .no; - return false; - } - } - union_obj.has_well_defined_layout = .yes; - return true; - }, - } - }, - }; -} - /// `generic_poison` will return false. /// This function returns false negatives when structs and unions are having their /// field types resolved. diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 08cfe4a258..94dacf61e8 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2829,7 +2829,10 @@ pub const DeclGen = struct { // (void) payload is the same. break :blk parent_llvm_ptr; } - const llvm_pl_index = if (layout.tag_size == 0) 0 else @boolToInt(layout.tag_align >= layout.payload_align); + const llvm_pl_index = if (layout.tag_size == 0) + 0 + else + @boolToInt(layout.tag_align >= layout.payload_align); const indices: [2]*const llvm.Value = .{ llvm_u32.constInt(0, .False), llvm_u32.constInt(llvm_pl_index, .False), diff --git a/src/type.zig b/src/type.zig index 3f6e3ef282..2df7cc83d8 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2210,9 +2210,6 @@ pub const Type = extern union { .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, - .anyerror_void_error_union, - .empty_struct_literal, - .empty_struct, .array_u8, .array_u8_sentinel_0, .int_signed, @@ -2226,6 +2223,9 @@ pub const Type = extern union { .c_mut_pointer, .single_const_pointer_to_comptime_int, .enum_numbered, + .vector, + .optional_single_mut_pointer, + .optional_single_const_pointer, => true, .anyopaque, @@ -2267,9 +2267,12 @@ pub const Type = extern union { .mut_slice, .enum_simple, .error_union, + .anyerror_void_error_union, .anyframe_T, 
.tuple, .anon_struct, + .empty_struct_literal, + .empty_struct, => false, .enum_full, @@ -2283,36 +2286,12 @@ pub const Type = extern union { .array, .array_sentinel, - .vector, => ty.childType().hasWellDefinedLayout(), - .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, - => { - var buf: Type.Payload.ElemType = undefined; - return ty.optionalChild(&buf).hasWellDefinedLayout(); - }, - - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - if (struct_obj.layout == .Auto) return false; - switch (struct_obj.has_well_defined_layout) { - .wip, .unknown => unreachable, // This function asserts types already resolved. - .no => return false, - .yes => return true, - } - }, - - .@"union", .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - if (union_obj.layout == .Auto) return false; - switch (union_obj.has_well_defined_layout) { - .wip, .unknown => unreachable, // This function asserts types already resolved. - .no => return false, - .yes => return true, - } - }, + .optional => ty.isPtrLikeOptional(), + .@"struct" => ty.castTag(.@"struct").?.data.layout != .Auto, + .@"union" => ty.castTag(.@"union").?.data.layout != .Auto, + .union_tagged => false, }; } @@ -3299,13 +3278,12 @@ pub const Type = extern union { => return true, .optional => { - var buf: Payload.ElemType = undefined; - const child_type = self.optionalChild(&buf); + const child_ty = self.castTag(.optional).?.data; // optionals of zero sized types behave like bools, not pointers - if (!child_type.hasRuntimeBits()) return false; - if (child_type.zigTypeTag() != .Pointer) return false; + if (!child_ty.hasRuntimeBits()) return false; + if (child_ty.zigTypeTag() != .Pointer) return false; - const info = child_type.ptrInfo().data; + const info = child_ty.ptrInfo().data; switch (info.size) { .Slice, .C => return false, .Many, .One => return !info.@"allowzero", @@ -5496,6 +5474,7 @@ pub const Type = extern union { pub const @"type" = initTag(.type); 
pub const @"anyerror" = initTag(.anyerror); pub const @"anyopaque" = initTag(.anyopaque); + pub const @"null" = initTag(.@"null"); pub fn ptr(arena: Allocator, target: Target, data: Payload.Pointer.Data) !Type { var d = data; diff --git a/src/value.zig b/src/value.zig index af5ee75737..24cec0396e 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2417,7 +2417,7 @@ pub const Value = extern union { return switch (val.tag()) { .empty_array_sentinel => if (start == 0 and end == 1) val else Value.initTag(.empty_array), .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]), - .array => Tag.array.create(arena, val.castTag(.array).?.data[start..end]), + .aggregate => Tag.aggregate.create(arena, val.castTag(.aggregate).?.data[start..end]), .slice => sliceArray(val.castTag(.slice).?.data.ptr, arena, start, end), .decl_ref => sliceArray(val.castTag(.decl_ref).?.data.val, arena, start, end), @@ -2466,7 +2466,7 @@ pub const Value = extern union { pub fn elemPtr(val: Value, ty: Type, arena: Allocator, index: usize) Allocator.Error!Value { const elem_ty = ty.elemType2(); const ptr_val = switch (val.tag()) { - .slice => val.slicePtr(), + .slice => val.castTag(.slice).?.data.ptr, else => val, }; diff --git a/test/behavior/bugs/11139.zig b/test/behavior/bugs/11139.zig index a3714ec5ba..9ceaaeb0e4 100644 --- a/test/behavior/bugs/11139.zig +++ b/test/behavior/bugs/11139.zig @@ -3,9 +3,9 @@ const builtin = @import("builtin"); const expect = std.testing.expect; test "store array of array of structs at comptime" { - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO try 
expect(storeArrayOfArrayOfStructs() == 15); comptime try expect(storeArrayOfArrayOfStructs() == 15); diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 417ec06bd7..764e3b77b4 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -871,7 +871,7 @@ test "peer cast [N:x]T to [N]T" { } test "peer cast *[N:x]T to *[N]T" { - if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -887,9 +887,9 @@ test "peer cast *[N:x]T to *[N]T" { } test "peer cast [*:x]T to [*]T" { - if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig index 211945d5b4..81bf479363 100644 --- a/test/behavior/ptrcast.zig +++ b/test/behavior/ptrcast.zig @@ -23,9 +23,9 @@ fn testReinterpretBytesAsInteger() !void { test "reinterpret an array over multiple elements, with no well-defined layout" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == 
.stage2_aarch64) return error.SkipZigTest; // TODO try testReinterpretWithOffsetAndNoWellDefinedLayout(); comptime try testReinterpretWithOffsetAndNoWellDefinedLayout(); @@ -40,9 +40,9 @@ fn testReinterpretWithOffsetAndNoWellDefinedLayout() !void { } test "reinterpret bytes inside auto-layout struct as integer with nonzero offset" { - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO try testReinterpretStructWrappedBytesAsInteger(); comptime try testReinterpretStructWrappedBytesAsInteger(); @@ -59,9 +59,9 @@ fn testReinterpretStructWrappedBytesAsInteger() !void { } test "reinterpret bytes of an array into an extern struct" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO try testReinterpretBytesAsExternStruct(); comptime try testReinterpretBytesAsExternStruct(); @@ -83,8 +83,8 @@ fn testReinterpretBytesAsExternStruct() !void { test "reinterpret bytes of an extern struct into another" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return 
error.SkipZigTest; // TODO try testReinterpretExternStructAsExternStruct(); comptime try testReinterpretExternStructAsExternStruct(); @@ -109,11 +109,11 @@ fn testReinterpretExternStructAsExternStruct() !void { test "lower reinterpreted comptime field ptr" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO // Test lowering a field ptr comptime var bytes align(2) = [_]u8{ 1, 2, 3, 4, 5, 6 };