From 32692569656d9a178abb24f8fb7893395700cb62 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 29 May 2023 00:10:36 -0400 Subject: [PATCH] behavior: fix more compiler crashes --- src/InternPool.zig | 11 +- src/Module.zig | 29 +---- src/Sema.zig | 208 ++++++++++++++++++++++--------- src/TypedValue.zig | 4 +- src/codegen/c.zig | 4 +- src/codegen/llvm.zig | 2 +- src/type.zig | 4 +- src/value.zig | 226 +++++++++++++++++++--------------- tools/lldb_pretty_printers.py | 26 ++-- 9 files changed, 309 insertions(+), 205 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 55ab58c391..90e0e2bd35 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1416,7 +1416,12 @@ pub const Index = enum(u32) { only_possible_value: DataIsIndex, union_value: struct { data: *Key.Union }, bytes: struct { data: *Bytes }, - aggregate: struct { data: *Aggregate }, + aggregate: struct { + const @"data.ty.data.len orelse data.ty.data.fields_len" = opaque {}; + data: *Aggregate, + @"trailing.element_values.len": *@"data.ty.data.len orelse data.ty.data.fields_len", + trailing: struct { element_values: []Index }, + }, repeated: struct { data: *Repeated }, memoized_decl: struct { data: *Key.MemoizedDecl }, @@ -4437,7 +4442,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .Slice => try ip.get(gpa, .{ .undef = .usize_type }), }, } }), - else => try ip.getCoerced(gpa, opt.val, new_ty), + else => |payload| try ip.getCoerced(gpa, payload, new_ty), }, .err => |err| if (ip.isErrorSetType(new_ty)) return ip.get(gpa, .{ .err = .{ @@ -4622,7 +4627,7 @@ pub fn isErrorUnionType(ip: InternPool, ty: Index) bool { pub fn isAggregateType(ip: InternPool, ty: Index) bool { return switch (ip.indexToKey(ty)) { - .array_wype, .vector_type, .anon_struct_type, .struct_type => true, + .array_type, .vector_type, .anon_struct_type, .struct_type => true, else => false, }; } diff --git a/src/Module.zig b/src/Module.zig index d11a11cf08..36037bb49c 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6678,14 +6678,14 @@ pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error! pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type { var canon_info = info; + const have_elem_layout = info.elem_type.toType().layoutIsResolved(mod); // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee // type, we change it to 0 here. If this causes an assertion trip because the // pointee type needs to be resolved more, that needs to be done before calling // this ptr() function. if (info.alignment.toByteUnitsOptional()) |info_align| { - const elem_align = info.elem_type.toType().abiAlignment(mod); - if (info.elem_type.toType().layoutIsResolved(mod) and info_align == elem_align) { + if (have_elem_layout and info_align == info.elem_type.toType().abiAlignment(mod)) { canon_info.alignment = .none; } } @@ -6694,7 +6694,7 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type // Canonicalize host_size. If it matches the bit size of the pointee type, // we change it to 0 here. If this causes an assertion trip, the pointee type // needs to be resolved before calling this ptr() function. 
- .none => if (info.host_size != 0) { + .none => if (have_elem_layout and info.host_size != 0) { const elem_bit_size = info.elem_type.toType().bitSize(mod); assert(info.bit_offset + elem_bit_size <= info.host_size * 8); if (info.host_size * 8 == elem_bit_size) { @@ -6782,21 +6782,7 @@ pub fn errorSetFromUnsortedNames( /// Supports optionals in addition to pointers. pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { - if (ty.isPtrLikeOptional(mod)) { - const i = try intern(mod, .{ .opt = .{ - .ty = ty.toIntern(), - .val = try intern(mod, .{ .ptr = .{ - .ty = ty.childType(mod).toIntern(), - .addr = .{ .int = try intern(mod, .{ .int = .{ - .ty = .usize_type, - .storage = .{ .u64 = x }, - } }) }, - } }), - } }); - return i.toValue(); - } else { - return ptrIntValue_ptronly(mod, ty, x); - } + return mod.getCoerced(try mod.intValue_u64(Type.usize, x), ty); } /// Supports only pointers. See `ptrIntValue` for pointer-like optional support. @@ -6804,10 +6790,7 @@ pub fn ptrIntValue_ptronly(mod: *Module, ty: Type, x: u64) Allocator.Error!Value assert(ty.zigTypeTag(mod) == .Pointer); const i = try intern(mod, .{ .ptr = .{ .ty = ty.toIntern(), - .addr = .{ .int = try intern(mod, .{ .int = .{ - .ty = .usize_type, - .storage = .{ .u64 = x }, - } }) }, + .addr = .{ .int = (try mod.intValue_u64(Type.usize, x)).toIntern() }, } }); return i.toValue(); } @@ -6954,7 +6937,7 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { const key = mod.intern_pool.indexToKey(val.toIntern()); switch (key.int.storage) { .i64 => |x| { - if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted); + if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted) + @boolToInt(sign); assert(sign); // Protect against overflow in the following negation. 
if (x == std.math.minInt(i64)) return 64; diff --git a/src/Sema.zig b/src/Sema.zig index 7ad7b1a8a3..87d66aad1f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -9510,7 +9510,10 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}); } if (try sema.resolveMaybeUndefValIntable(ptr)) |ptr_val| { - return sema.addConstant(Type.usize, try mod.getCoerced(ptr_val, Type.usize)); + return sema.addConstant( + Type.usize, + try mod.intValue(Type.usize, (try ptr_val.getUnsignedIntAdvanced(mod, sema)).?), + ); } try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src); return block.addUnOp(.ptrtoint, ptr); @@ -27879,7 +27882,7 @@ fn beginComptimePtrMutation( .undef => try mod.intern(.{ .undef = payload_ty.toIntern() }), .opt => |opt| switch (opt.val) { .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), - else => opt.val, + else => |payload| payload, }, else => unreachable, }; @@ -28438,7 +28441,7 @@ fn beginComptimePtrLoad( }, .opt => |opt| switch (opt.val) { .none => return sema.fail(block, src, "attempt to use null value", .{}), - else => opt.val, + else => |payload| payload, }, else => unreachable, }.toValue(), @@ -28591,7 +28594,7 @@ fn beginComptimePtrLoad( }, .opt => |opt| switch (opt.val) { .none => return sema.fail(block, src, "attempt to use null value", .{}), - else => try sema.beginComptimePtrLoad(block, src, opt.val.toValue(), null), + else => |payload| try sema.beginComptimePtrLoad(block, src, payload.toValue(), null), }, else => unreachable, }; @@ -28931,35 +28934,53 @@ fn coerceAnonStructToUnion( ) !Air.Inst.Ref { const mod = sema.mod; const inst_ty = sema.typeOf(inst); - const field_count = inst_ty.structFieldCount(mod); - if (field_count != 1) { - const msg = msg: { - const msg = if (field_count > 1) try sema.errMsg( - block, - inst_src, - "cannot initialize multiple union fields at once; unions can only have one active field", - .{}, - ) else try sema.errMsg( - block, - inst_src, - "union initializer must initialize one field", - .{}, - ); - errdefer msg.destroy(sema.gpa); + const field_info: union(enum) { + name: []const u8, + count: usize, + } = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 1) + .{ .name = mod.intern_pool.stringToSlice(anon_struct_type.names[0]) } + else + .{ .count = anon_struct_type.names.len }, + .struct_type => |struct_type| name: { + const field_names = mod.structPtrUnwrap(struct_type.index).?.fields.keys(); + break :name if (field_names.len == 1) + .{ .name = field_names[0] } + else + .{ .count = field_names.len }; + }, + else => unreachable, + }; + switch (field_info) { + .name => |field_name| { + const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty); + return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src); + }, + .count => |field_count| { + assert(field_count != 1); + const msg = msg: { + const msg = if (field_count > 1) try sema.errMsg( + block, + inst_src, + "cannot initialize multiple union fields at once; unions can only have one active field", + .{}, + ) else try sema.errMsg( + block, + inst_src, + "union initializer must initialize one field", + .{}, + ); + errdefer msg.destroy(sema.gpa); - // TODO add notes for where the anon struct was created to point out - // the extra fields. 
+ // TODO add notes for where the anon struct was created to point out + // the extra fields. - try sema.addDeclaredHereNote(msg, union_ty); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); + try sema.addDeclaredHereNote(msg, union_ty); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(msg); + }, } - - const anon_struct = mod.intern_pool.indexToKey(inst_ty.toIntern()).anon_struct_type; - const field_name = mod.intern_pool.stringToSlice(anon_struct.names[0]); - const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty); - return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src); } fn coerceAnonStructToUnionPtrs( @@ -29193,16 +29214,27 @@ fn coerceTupleToStruct( @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const anon_struct = mod.intern_pool.indexToKey(inst_ty.toIntern()).anon_struct_type; var runtime_src: ?LazySrcLoc = null; - for (0..anon_struct.types.len) |field_index_usize| { + const field_count = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| + struct_obj.fields.count() + else + 0, + else => unreachable, + }; + for (0..field_count) |field_index_usize| { const field_i = @intCast(u32, field_index_usize); const field_src = inst_src; // TODO better source location - const field_name = if (anon_struct.names.len != 0) - // https://github.com/ziglang/zig/issues/15709 - @as([]const u8, mod.intern_pool.stringToSlice(anon_struct.names[field_i])) - else - try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}); + // https://github.com/ziglang/zig/issues/15709 + const field_name: []const u8 = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0) + mod.intern_pool.stringToSlice(anon_struct_type.names[field_i]) + else + try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}), + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i], + else => unreachable, + }; const field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src); const field = fields.values()[field_index]; const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); @@ -29281,40 +29313,72 @@ fn coerceTupleToTuple( inst_src: LazySrcLoc, ) !Air.Inst.Ref { const mod = sema.mod; - const dest_tuple = mod.intern_pool.indexToKey(tuple_ty.toIntern()).anon_struct_type; - const field_vals = try sema.arena.alloc(InternPool.Index, dest_tuple.types.len); + const dest_field_count = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| + struct_obj.fields.count() + else + 0, + else => unreachable, + }; + const field_vals = try sema.arena.alloc(InternPool.Index, dest_field_count); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const src_tuple = mod.intern_pool.indexToKey(inst_ty.toIntern()).anon_struct_type; - if (src_tuple.types.len > dest_tuple.types.len) return error.NotCoercible; + const src_field_count = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .struct_type => |struct_type| if 
(mod.structPtrUnwrap(struct_type.index)) |struct_obj| + struct_obj.fields.count() + else + 0, + else => unreachable, + }; + if (src_field_count > dest_field_count) return error.NotCoercible; var runtime_src: ?LazySrcLoc = null; - for (dest_tuple.types, dest_tuple.values, 0..) |field_ty, default_val, field_index_usize| { + for (0..dest_field_count) |field_index_usize| { const field_i = @intCast(u32, field_index_usize); const field_src = inst_src; // TODO better source location - const field_name = if (src_tuple.names.len != 0) - // https://github.com/ziglang/zig/issues/15709 - @as([]const u8, mod.intern_pool.stringToSlice(src_tuple.names[field_i])) - else - try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}); + // https://github.com/ziglang/zig/issues/15709 + const field_name: []const u8 = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0) + mod.intern_pool.stringToSlice(anon_struct_type.names[field_i]) + else + try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}), + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i], + else => unreachable, + }; if (mem.eql(u8, field_name, "len")) { return sema.fail(block, field_src, "cannot assign to 'len' field of tuple", .{}); } + const field_ty = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types[field_index_usize].toType(), + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].ty, + else => unreachable, + }; + const default_val = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.values[field_index_usize], + .struct_type => |struct_type| switch (mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].default_val.toIntern()) { + .unreachable_value => .none, + else => |default_val| default_val, + }, + else => unreachable, + }; + const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src); const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); - const coerced = try sema.coerce(block, field_ty.toType(), elem_ref, field_src); + const coerced = try sema.coerce(block, field_ty, elem_ref, field_src); field_refs[field_index] = coerced; if (default_val != .none) { const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse { return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; - if (!init_val.eql(default_val.toValue(), field_ty.toType(), sema.mod)) { + if (!init_val.eql(default_val.toValue(), field_ty, sema.mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i); } } @@ -29331,14 +29395,18 @@ fn coerceTupleToTuple( var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); - for ( - dest_tuple.types, - dest_tuple.values, - field_refs, - 0.., - ) |field_ty, default_val, *field_ref, i| { + for (field_refs, 0..) 
|*field_ref, i| { if (field_ref.* != .none) continue; + const default_val = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.values[i], + .struct_type => |struct_type| switch (mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].default_val.toIntern()) { + .unreachable_value => .none, + else => |default_val| default_val, + }, + else => unreachable, + }; + const field_src = inst_src; // TODO better source location if (default_val == .none) { if (tuple_ty.isTuple(mod)) { @@ -29362,7 +29430,12 @@ fn coerceTupleToTuple( if (runtime_src == null) { field_vals[i] = default_val; } else { - field_ref.* = try sema.addConstant(field_ty.toType(), default_val.toValue()); + const field_ty = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types[i].toType(), + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].ty, + else => unreachable, + }; + field_ref.* = try sema.addConstant(field_ty, default_val.toValue()); } } @@ -33959,11 +34032,20 @@ fn anonStructFieldIndex( field_src: LazySrcLoc, ) !u32 { const mod = sema.mod; - const anon_struct = mod.intern_pool.indexToKey(struct_ty.toIntern()).anon_struct_type; - for (anon_struct.names, 0..) |name, i| { - if (mem.eql(u8, mod.intern_pool.stringToSlice(name), field_name)) { - return @intCast(u32, i); - } + switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| for (anon_struct_type.names, 0..) |name, i| { + if (mem.eql(u8, mod.intern_pool.stringToSlice(name), field_name)) { + return @intCast(u32, i); + } + }, + .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { + for (struct_obj.fields.keys(), 0..) 
|name, i| { + if (mem.eql(u8, name, field_name)) { + return @intCast(u32, i); + } + } + }, + else => unreachable, } return sema.fail(block, field_src, "no field named '{s}' in anonymous struct '{}'", .{ field_name, struct_ty.fmt(sema.mod), @@ -34006,6 +34088,10 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.add(lhs_bigint, rhs_bigint); + if (scalar_ty.toIntern() != .comptime_int_type) { + const int_info = scalar_ty.intInfo(mod); + result_bigint.truncate(result_bigint.toConst(), int_info.signedness, int_info.bits); + } return mod.intValue_big(scalar_ty, result_bigint.toConst()); } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index a46e6ebe1f..0128a3cbfb 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -254,8 +254,8 @@ pub fn print( .ptr => return writer.writeAll("(ptr)"), .opt => |opt| switch (opt.val) { .none => return writer.writeAll("null"), - else => { - val = opt.val.toValue(); + else => |payload| { + val = payload.toValue(); ty = ty.optionalChild(mod); }, }, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index e6ce72f48e..2f65513dcd 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1313,7 +1313,7 @@ pub const DeclGen = struct { if (ty.optionalReprIsPayload(mod)) switch (opt.val) { .none => return writer.writeByte('0'), - else => return dg.renderValue(writer, payload_ty, opt.val.toValue(), location), + else => |payload| return dg.renderValue(writer, payload_ty, payload.toValue(), location), }; if (!location.isInitializer()) { @@ -1325,7 +1325,7 @@ pub const DeclGen = struct { try writer.writeAll("{ .payload = "); try dg.renderValue(writer, payload_ty, switch (opt.val) { .none => try mod.intern(.{ .undef = payload_ty.ip_index }), - else => opt.val, + else => |payload| payload, }.toValue(), initializer_type); try writer.writeAll(", .is_null = "); try dg.renderValue(writer, Type.bool, is_null_val, initializer_type); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 956924eff8..606c57b187 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3430,7 +3430,7 @@ pub const DeclGen = struct { const llvm_ty = try dg.lowerType(tv.ty); if (tv.ty.optionalReprIsPayload(mod)) return switch (opt.val) { .none => llvm_ty.constNull(), - else => dg.lowerValue(.{ .ty = payload_ty, .val = opt.val.toValue() }), + else => |payload| dg.lowerValue(.{ .ty = payload_ty, .val = payload.toValue() }), }; assert(payload_ty.zigTypeTag(mod) != .Fn); diff --git a/src/type.zig b/src/type.zig index 27c7756a68..0ce242b616 100644 --- a/src/type.zig +++ b/src/type.zig @@ -630,7 +630,6 @@ pub const Type = struct { pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type, - .ptr_type, .vector_type, => true, @@ -646,6 +645,7 @@ pub const Type = struct { .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod), .opt_type => ty.isPtrLikeOptional(mod), + .ptr_type => |ptr_type| ptr_type.size != .Slice, .simple_type => |t| switch (t) { .f16, @@ -1578,7 +1578,7 @@ pub const Type = struct { .int_type => |int_type| return int_type.bits, .ptr_type => |ptr_type| switch (ptr_type.size) { .Slice => return target.ptrBitWidth() * 2, - else => return target.ptrBitWidth() * 2, + else => return target.ptrBitWidth(), }, .anyframe_type => return target.ptrBitWidth(), diff --git a/src/value.zig b/src/value.zig index f02c31ca84..0da5626937 
100644 --- a/src/value.zig +++ b/src/value.zig @@ -363,7 +363,7 @@ pub const Value = struct { }, .slice => { const pl = val.castTag(.slice).?.data; - const ptr = try pl.ptr.intern(ty.optionalChild(mod), mod); + const ptr = try pl.ptr.intern(ty.slicePtrFieldType(mod), mod); var ptr_key = mod.intern_pool.indexToKey(ptr).ptr; assert(ptr_key.len == .none); ptr_key.ty = ty.toIntern(); @@ -547,7 +547,6 @@ pub const Value = struct { return switch (val.toIntern()) { .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(), .bool_true => BigIntMutable.init(&space.limbs, 1).toConst(), - .undef => unreachable, .null_value => BigIntMutable.init(&space.limbs, 0).toConst(), else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .runtime_value => |runtime_value| runtime_value.val.toValue().toBigIntAdvanced(space, mod, opt_sema), @@ -564,19 +563,10 @@ pub const Value = struct { }, }, .enum_tag => |enum_tag| enum_tag.int.toValue().toBigIntAdvanced(space, mod, opt_sema), - .ptr => |ptr| switch (ptr.len) { - .none => switch (ptr.addr) { - .int => |int| int.toValue().toBigIntAdvanced(space, mod, opt_sema), - .elem => |elem| { - const base_addr = (try elem.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)).?; - const elem_size = ptr.ty.toType().elemType2(mod).abiSize(mod); - const new_addr = base_addr + elem.index * elem_size; - return BigIntMutable.init(&space.limbs, new_addr).toConst(); - }, - else => unreachable, - }, - else => unreachable, - }, + .opt, .ptr => BigIntMutable.init( + &space.limbs, + (try val.getUnsignedIntAdvanced(mod, opt_sema)).?, + ).toConst(), else => unreachable, }, }; @@ -614,10 +604,11 @@ pub const Value = struct { /// Asserts not undefined. pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { return switch (val.toIntern()) { + .undef => unreachable, .bool_false => 0, .bool_true => 1, - .undef => unreachable, else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => unreachable, .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(u64) catch null, .u64 => |x| x, @@ -631,6 +622,26 @@ pub const Value = struct { else ty.toType().abiSize(mod), }, + .ptr => |ptr| switch (ptr.addr) { + .int => |int| int.toValue().getUnsignedIntAdvanced(mod, opt_sema), + .elem => |elem| { + const base_addr = (try elem.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; + const elem_size = ptr.ty.toType().elemType2(mod).abiSize(mod); + return base_addr + elem.index * elem_size; + }, + .field => |field| { + const struct_ty = ptr.ty.toType().childType(mod); + if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty); + const base_addr = (try field.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; + const field_offset = ptr.ty.toType().childType(mod).structFieldOffset(field.index, mod); + return base_addr + field_offset; + }, + else => null, + }, + .opt => |opt| switch (opt.val) { + .none => 0, + else => |payload| payload.toValue().getUnsignedIntAdvanced(mod, opt_sema), + }, else => null, }, }; @@ -646,7 +657,6 @@ pub const Value = struct { return switch (val.toIntern()) { .bool_false => 0, .bool_true => 1, - .undef => unreachable, else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(i64) catch unreachable, @@ -830,24 +840,14 @@ pub const Value = struct { } }, .Int, .Enum => { + if (buffer.len == 0) return; const bits = ty.intInfo(mod).bits; - const abi_size = @intCast(usize, ty.abiSize(mod)); + if (bits == 0) 
return; - const int_val = try val.enumToInt(ty, mod); - - if (abi_size == 0) return; - if (abi_size <= @sizeOf(u64)) { - const ip_key = mod.intern_pool.indexToKey(int_val.toIntern()); - const int: u64 = switch (ip_key.int.storage) { - .u64 => |x| x, - .i64 => |x| @bitCast(u64, x), - else => unreachable, - }; - std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian); - } else { - var bigint_buffer: BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_buffer, mod); - bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian); + switch (mod.intern_pool.indexToKey((try val.enumToInt(ty, mod)).toIntern()).int.storage) { + inline .u64, .i64 => |int| std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian), + .big_int => |bigint| bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian), + else => unreachable, } }, .Float => switch (ty.floatBits(target)) { @@ -1075,25 +1075,40 @@ pub const Value = struct { return Value.true; } }, - .Int, .Enum => { + .Int, .Enum => |ty_tag| { if (buffer.len == 0) return mod.intValue(ty, 0); const int_info = ty.intInfo(mod); - const abi_size = @intCast(usize, ty.abiSize(mod)); - const bits = int_info.bits; if (bits == 0) return mod.intValue(ty, 0); - if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64 - .signed => return mod.intValue(ty, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)), - .unsigned => return mod.intValue(ty, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)), - } else { // Slow path, we have to construct a big-int - const Limb = std.math.big.Limb; - const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb); - const limbs_buffer = try arena.alloc(Limb, limb_count); - var bigint = BigIntMutable.init(limbs_buffer, 0); - bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness); - return mod.intValue_big(ty, bigint.toConst()); + // Fast path for integers <= u64 + if (bits <= 64) { + const int_ty = switch (ty_tag) { + .Int => ty, + .Enum => ty.intTagType(mod), + else => unreachable, + }; + return mod.getCoerced(switch (int_info.signedness) { + .signed => try mod.intValue( + int_ty, + std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed), + ), + .unsigned => try mod.intValue( + int_ty, + std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned), + ), + }, ty); } + + // Slow path, we have to construct a big-int + const abi_size = @intCast(usize, ty.abiSize(mod)); + const Limb = std.math.big.Limb; + const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb); + const limbs_buffer = try arena.alloc(Limb, limb_count); + + var bigint = BigIntMutable.init(limbs_buffer, 0); + bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness); + return mod.intValue_big(ty, bigint.toConst()); }, .Float => return (try mod.intern(.{ .float = .{ .ty = ty.toIntern(), @@ -1764,7 +1779,7 @@ pub const Value = struct { }, .opt => |opt| switch (opt.val) { .none => false, - else => opt.val.toValue().canMutateComptimeVarState(mod), + else => |payload| payload.toValue().canMutateComptimeVarState(mod), }, .aggregate => |aggregate| for (aggregate.storage.values()) |elem| { if (elem.toValue().canMutateComptimeVarState(mod)) break true; @@ -1949,43 +1964,51 @@ pub const Value = struct { start: usize, end: usize, ) error{OutOfMemory}!Value { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .ptr => |ptr| switch (ptr.addr) { - .decl => 
|decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), - .mut_decl => |mut_decl| (try mod.declPtr(mut_decl.decl).internValue(mod)).toValue() - .sliceArray(mod, arena, start, end), - .comptime_field => |comptime_field| comptime_field.toValue() - .sliceArray(mod, arena, start, end), - .elem => |elem| elem.base.toValue() - .sliceArray(mod, arena, start + elem.index, end + elem.index), + return switch (val.ip_index) { + .none => switch (val.tag()) { + .slice => val.castTag(.slice).?.data.ptr.sliceArray(mod, arena, start, end), + .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]), + .repeated => val, + .aggregate => Tag.aggregate.create(arena, val.castTag(.aggregate).?.data[start..end]), else => unreachable, }, - .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ - .ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) { - .array_type => |array_type| try mod.arrayType(.{ - .len = @intCast(u32, end - start), - .child = array_type.child, - .sentinel = if (end == array_type.len) array_type.sentinel else .none, - }), - .vector_type => |vector_type| try mod.vectorType(.{ - .len = @intCast(u32, end - start), - .child = vector_type.child, - }), + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), + .mut_decl => |mut_decl| (try mod.declPtr(mut_decl.decl).internValue(mod)).toValue() + .sliceArray(mod, arena, start, end), + .comptime_field => |comptime_field| comptime_field.toValue() + .sliceArray(mod, arena, start, end), + .elem => |elem| elem.base.toValue() + .sliceArray(mod, arena, start + elem.index, end + elem.index), else => unreachable, - }.toIntern(), - .storage = switch (aggregate.storage) { - .bytes => |bytes| .{ .bytes = bytes[start..end] }, - .elems => |elems| .{ .elems = elems[start..end] }, - .repeated_elem => |elem| .{ .repeated_elem = elem }, }, - } })).toValue(), - else => unreachable, + .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ + .ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) { + .array_type => |array_type| try mod.arrayType(.{ + .len = @intCast(u32, end - start), + .child = array_type.child, + .sentinel = if (end == array_type.len) array_type.sentinel else .none, + }), + .vector_type => |vector_type| try mod.vectorType(.{ + .len = @intCast(u32, end - start), + .child = vector_type.child, + }), + else => unreachable, + }.toIntern(), + .storage = switch (aggregate.storage) { + .bytes => |bytes| .{ .bytes = bytes[start..end] }, + .elems => |elems| .{ .elems = elems[start..end] }, + .repeated_elem => |elem| .{ .repeated_elem = elem }, + }, + } })).toValue(), + else => unreachable, + }, }; } pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value { return switch (val.ip_index) { - .undef => Value.undef, .none => switch (val.tag()) { .aggregate => { const field_values = val.castTag(.aggregate).?.data; @@ -1999,6 +2022,9 @@ pub const Value = struct { else => unreachable, }, else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => |ty| (try mod.intern(.{ + .undef = ty.toType().structFieldType(index, mod).toIntern(), + })).toValue(), .aggregate => |aggregate| switch (aggregate.storage) { .bytes => |bytes| try mod.intern(.{ .int = .{ .ty = .u8_type, @@ -2108,6 +2134,7 @@ pub const Value = struct { .null_value => true, else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => unreachable, .int => { 
var buf: BigIntSpace = undefined; return val.toBigInt(&buf, mod).eqZero(); @@ -2141,9 +2168,13 @@ pub const Value = struct { /// Value of the optional, null if optional has no payload. pub fn optionalValue(val: Value, mod: *const Module) ?Value { - return switch (mod.intern_pool.indexToKey(val.toIntern()).opt.val) { - .none => null, - else => |index| index.toValue(), + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .opt => |opt| switch (opt.val) { + .none => null, + else => |payload| payload.toValue(), + }, + .ptr => val, + else => unreachable, }; } @@ -2152,6 +2183,7 @@ pub const Value = struct { return switch (self.toIntern()) { .undef => unreachable, else => switch (mod.intern_pool.indexToKey(self.toIntern())) { + .undef => unreachable, .float => true, else => false, }, @@ -2182,28 +2214,26 @@ pub const Value = struct { } pub fn intToFloatScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { - return switch (val.toIntern()) { - .undef => val, - else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .int => |int| switch (int.storage) { - .big_int => |big_int| { - const float = bigIntToFloat(big_int.limbs, big_int.positive); - return mod.floatValue(float_ty, float); - }, - inline .u64, .i64 => |x| intToFloatInner(x, float_ty, mod), - .lazy_align => |ty| if (opt_sema) |sema| { - return intToFloatInner((try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); - } else { - return intToFloatInner(ty.toType().abiAlignment(mod), float_ty, mod); - }, - .lazy_size => |ty| if (opt_sema) |sema| { - return intToFloatInner((try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); - } else { - return intToFloatInner(ty.toType().abiSize(mod), float_ty, mod); - }, + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => (try mod.intern(.{ .undef = float_ty.toIntern() })).toValue(), + .int => |int| switch (int.storage) { + .big_int => |big_int| { + const float = bigIntToFloat(big_int.limbs, big_int.positive); + return mod.floatValue(float_ty, float); + }, + inline .u64, .i64 => |x| intToFloatInner(x, float_ty, mod), + .lazy_align => |ty| if (opt_sema) |sema| { + return intToFloatInner((try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); + } else { + return intToFloatInner(ty.toType().abiAlignment(mod), float_ty, mod); + }, + .lazy_size => |ty| if (opt_sema) |sema| { + return intToFloatInner((try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); + } else { + return intToFloatInner(ty.toType().abiSize(mod), float_ty, mod); }, - else => unreachable, }, + else => unreachable, }; } diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py index 0bba97dcaa..3d57adee70 100644 --- a/tools/lldb_pretty_printers.py +++ b/tools/lldb_pretty_printers.py @@ -455,21 +455,21 @@ class InternPool_Index_SynthProvider: elif encoding_field.name == 'trailing': trailing_data = lldb.SBData() for trailing_field in encoding_field.type.fields: - if trailing_field.type.IsAggregateType(): - trailing_data.Append(extra.GetChildAtIndex(extra_index).address_of.data) - len = dynamic_values['trailing.%s.len' % trailing_field.name].unsigned - trailing_data.Append(lldb.SBData.CreateDataFromInt(len, trailing_data.GetAddressByteSize())) - extra_index += len - else: - pass + trailing_data.Append(extra.GetChildAtIndex(extra_index).address_of.data) + trailing_len = dynamic_values['trailing.%s.len' % trailing_field.name].unsigned + 
trailing_data.Append(lldb.SBData.CreateDataFromInt(trailing_len, trailing_data.GetAddressByteSize())) + extra_index += trailing_len self.trailing = self.data.CreateValueFromData('trailing', trailing_data, encoding_field.type) else: - path = encoding_field.type.GetPointeeType().name.removeprefix('%s::' % encoding_type.name).removeprefix('%s.' % encoding_type.name).partition('__')[0].split('.') - if path[0] == 'data': - dynamic_value = self.data - for name in path[1:]: - dynamic_value = dynamic_value.GetChildMemberWithName(name) - dynamic_values[encoding_field.name] = dynamic_value + for path in encoding_field.type.GetPointeeType().name.removeprefix('%s::' % encoding_type.name).removeprefix('%s.' % encoding_type.name).partition('__')[0].split(' orelse '): + if path.startswith('data.'): + root = self.data + path = path[len('data'):] + else: return + dynamic_value = root.GetValueForExpressionPath(path) + if dynamic_value: + dynamic_values[encoding_field.name] = dynamic_value + break except: pass def has_children(self): return True def num_children(self): return 2 + (self.trailing is not None)
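
Reviewer note (not part of the patch): the reworked writeToPackedMemory/readFromPackedMemory fast paths lean on std.mem.writeVarPackedInt and std.mem.readVarPackedInt to move integer and enum-tag storage in and out of a buffer at an arbitrary bit offset, and both hunks now bail out early when there is nothing to encode (buffer.len == 0 or bits == 0). The sketch below is a standalone illustration of that round trip; it is not code from this commit, and it assumes the 0.11-dev std API the patch targets (the `.Little` endianness spelling).

const std = @import("std");

test "packed int round-trip at a bit offset" {
    var buffer = [_]u8{0} ** 4;

    // Store a 5-bit signed value starting at bit 3, the same call shape the new
    // writeToPackedMemory fast path uses for .u64/.i64 storage.
    const original: i64 = -7;
    std.mem.writeVarPackedInt(&buffer, 3, 5, original, .Little);

    // Read it back with matching signedness, as readFromPackedMemory does before
    // coercing the integer result back to the requested (possibly enum) type.
    const restored = std.mem.readVarPackedInt(i64, &buffer, 3, 5, .Little, .signed);
    try std.testing.expectEqual(original, restored);
}

The big-int branches (writePackedTwosComplement/readPackedTwosComplement) handle the same layout for integers wider than 64 bits, which is why the slow path in readFromPackedMemory sizes its limb buffer from the type's ABI size.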