diff --git a/src/InternPool.zig b/src/InternPool.zig index c208fcf18a..4da4790e2f 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -4568,6 +4568,7 @@ pub fn sliceLen(ip: *const InternPool, i: Index) Index { /// * int <=> int /// * int <=> enum /// * enum_literal => enum +/// * float <=> float /// * ptr <=> ptr /// * opt ptr <=> ptr /// * opt ptr <=> opt ptr @@ -4579,6 +4580,7 @@ pub fn sliceLen(ip: *const InternPool, i: Index) Index { /// * error set => error union /// * payload => error union /// * fn <=> fn +/// * aggregate <=> aggregate (where children can also be coerced) pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { const old_ty = ip.typeOf(val); if (old_ty == new_ty) return val; @@ -4623,6 +4625,23 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al else => if (ip.isIntegerType(new_ty)) return getCoercedInts(ip, gpa, int, new_ty), }, + .float => |float| switch (ip.indexToKey(new_ty)) { + .simple_type => |simple| switch (simple) { + .f16, + .f32, + .f64, + .f80, + .f128, + .c_longdouble, + .comptime_float, + => return ip.get(gpa, .{ .float = .{ + .ty = new_ty, + .storage = float.storage, + } }), + else => {}, + }, + else => {}, + }, .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty)) return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty), .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) { @@ -4688,6 +4707,80 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .ty = new_ty, .val = error_union.val, } }), + .aggregate => |aggregate| { + const new_len = @intCast(usize, ip.aggregateTypeLen(new_ty)); + direct: { + const old_ty_child = switch (ip.indexToKey(old_ty)) { + inline .array_type, .vector_type => |seq_type| seq_type.child, + .anon_struct_type, .struct_type => break :direct, + else => unreachable, + }; + const new_ty_child = switch (ip.indexToKey(new_ty)) { + inline .array_type, .vector_type => |seq_type| seq_type.child, + .anon_struct_type, .struct_type => break :direct, + else => unreachable, + }; + if (old_ty_child != new_ty_child) break :direct; + // TODO: write something like getCoercedInts to avoid needing to dupe here + switch (aggregate.storage) { + .bytes => |bytes| { + const bytes_copy = try gpa.dupe(u8, bytes[0..new_len]); + defer gpa.free(bytes_copy); + return ip.get(gpa, .{ .aggregate = .{ + .ty = new_ty, + .storage = .{ .bytes = bytes_copy }, + } }); + }, + .elems => |elems| { + const elems_copy = try gpa.dupe(InternPool.Index, elems[0..new_len]); + defer gpa.free(elems_copy); + return ip.get(gpa, .{ .aggregate = .{ + .ty = new_ty, + .storage = .{ .elems = elems_copy }, + } }); + }, + .repeated_elem => |elem| { + return ip.get(gpa, .{ .aggregate = .{ + .ty = new_ty, + .storage = .{ .repeated_elem = elem }, + } }); + }, + } + } + // Direct approach failed - we must recursively coerce elems + const agg_elems = try gpa.alloc(InternPool.Index, new_len); + defer gpa.free(agg_elems); + // First, fill the vector with the uncoerced elements. We do this to avoid key + // lifetime issues, since it'll allow us to avoid referencing `aggregate` after we + // begin interning elems. + switch (aggregate.storage) { + .bytes => { + // We have to intern each value here, so unfortunately we can't easily avoid + // the repeated indexToKey calls. + for (agg_elems, 0..) 
|*elem, i| { + const x = ip.indexToKey(val).aggregate.storage.bytes[i]; + elem.* = try ip.get(gpa, .{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = x }, + } }); + } + }, + .elems => |elems| @memcpy(agg_elems, elems[0..new_len]), + .repeated_elem => |elem| @memset(agg_elems, elem), + } + // Now, coerce each element to its new type. + for (agg_elems, 0..) |*elem, i| { + const new_elem_ty = switch (ip.indexToKey(new_ty)) { + inline .array_type, .vector_type => |seq_type| seq_type.child, + .anon_struct_type => |anon_struct_type| anon_struct_type.types[i], + .struct_type => |struct_type| ip.structPtr(struct_type.index.unwrap().?) + .fields.values()[i].ty.toIntern(), + else => unreachable, + }; + elem.* = try ip.getCoerced(gpa, elem.*, new_elem_ty); + } + return ip.get(gpa, .{ .aggregate = .{ .ty = new_ty, .storage = .{ .elems = agg_elems } } }); + }, else => {}, }, } diff --git a/src/Module.zig b/src/Module.zig index d60f3919a5..e8cea97b8f 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6978,7 +6978,7 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { assert(sign); // Protect against overflow in the following negation. if (x == std.math.minInt(i64)) return 64; - return Type.smallestUnsignedBits(@intCast(u64, -x - 1)) + 1; + return Type.smallestUnsignedBits(@intCast(u64, -(x + 1))) + 1; }, .u64 => |x| { return Type.smallestUnsignedBits(x) + @boolToInt(sign); diff --git a/src/Sema.zig b/src/Sema.zig index 32cd218798..f703c6154c 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -23069,7 +23069,7 @@ fn analyzeMinMax( if (std.debug.runtime_safety) { assert(try sema.intFitsInType(val, refined_ty, null)); } - cur_minmax = try sema.coerceInMemory(block, val, orig_ty, refined_ty, src); + cur_minmax = try sema.coerceInMemory(val, refined_ty); } break :refined refined_ty; @@ -26610,7 +26610,7 @@ fn coerceExtra( var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src); if (in_memory_result == .ok) { if (maybe_inst_val) |val| { - return sema.coerceInMemory(block, val, inst_ty, dest_ty, dest_ty_src); + return sema.coerceInMemory(val, dest_ty); } try sema.requireRuntimeBlock(block, inst_src, null); return block.addBitCast(dest_ty, inst); @@ -27278,82 +27278,12 @@ fn coerceExtra( return sema.failWithOwnedErrorMsg(msg); } -fn coerceValueInMemory( - sema: *Sema, - block: *Block, - val: Value, - src_ty: Type, - dst_ty: Type, - dst_ty_src: LazySrcLoc, -) CompileError!Value { - const mod = sema.mod; - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .aggregate => |aggregate| { - const dst_ty_key = mod.intern_pool.indexToKey(dst_ty.toIntern()); - const dest_len = try sema.usizeCast( - block, - dst_ty_src, - mod.intern_pool.aggregateTypeLen(dst_ty.toIntern()), - ); - direct: { - const src_ty_child = switch (mod.intern_pool.indexToKey(src_ty.toIntern())) { - inline .array_type, .vector_type => |seq_type| seq_type.child, - .anon_struct_type, .struct_type => break :direct, - else => unreachable, - }; - const dst_ty_child = switch (dst_ty_key) { - inline .array_type, .vector_type => |seq_type| seq_type.child, - .anon_struct_type, .struct_type => break :direct, - else => unreachable, - }; - if (src_ty_child != dst_ty_child) break :direct; - // TODO: write something like getCoercedInts to avoid needing to dupe - return (try mod.intern(.{ .aggregate = .{ - .ty = dst_ty.toIntern(), - .storage = switch (aggregate.storage) { - .bytes => |bytes| .{ .bytes = try sema.arena.dupe(u8, bytes[0..dest_len]) }, - .elems => |elems| .{ 
.elems = try sema.arena.dupe(InternPool.Index, elems[0..dest_len]) }, - .repeated_elem => |elem| .{ .repeated_elem = elem }, - }, - } })).toValue(); - } - const dest_elems = try sema.arena.alloc(InternPool.Index, dest_len); - for (dest_elems, 0..) |*dest_elem, i| { - const elem_ty = switch (dst_ty_key) { - inline .array_type, .vector_type => |seq_type| seq_type.child, - .anon_struct_type => |anon_struct_type| anon_struct_type.types[i], - .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).? - .fields.values()[i].ty.toIntern(), - else => unreachable, - }; - dest_elem.* = try mod.intern_pool.getCoerced(mod.gpa, switch (aggregate.storage) { - .bytes => |bytes| (try mod.intValue(Type.u8, bytes[i])).toIntern(), - .elems => |elems| elems[i], - .repeated_elem => |elem| elem, - }, elem_ty); - } - return (try mod.intern(.{ .aggregate = .{ - .ty = dst_ty.toIntern(), - .storage = .{ .elems = dest_elems }, - } })).toValue(); - }, - .float => |float| (try mod.intern(.{ .float = .{ - .ty = dst_ty.toIntern(), - .storage = float.storage, - } })).toValue(), - else => try mod.getCoerced(val, dst_ty), - }; -} - fn coerceInMemory( sema: *Sema, - block: *Block, val: Value, - src_ty: Type, dst_ty: Type, - dst_ty_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - return sema.addConstant(dst_ty, try sema.coerceValueInMemory(block, val, src_ty, dst_ty, dst_ty_src)); + return sema.addConstant(dst_ty, try sema.mod.getCoerced(val, dst_ty)); } const InMemoryCoercionResult = union(enum) { @@ -27891,6 +27821,22 @@ fn coerceInMemoryAllowed( return .ok; } + // Tuples (with in-memory-coercible fields) + if (dest_ty.isTuple(mod) and src_ty.isTuple(mod)) tuple: { + if (dest_ty.containerLayout(mod) != src_ty.containerLayout(mod)) break :tuple; + if (dest_ty.structFieldCount(mod) != src_ty.structFieldCount(mod)) break :tuple; + const field_count = dest_ty.structFieldCount(mod); + for (0..field_count) |field_idx| { + if (dest_ty.structFieldIsComptime(field_idx, mod) != src_ty.structFieldIsComptime(field_idx, mod)) break :tuple; + if (dest_ty.structFieldAlign(field_idx, mod) != src_ty.structFieldAlign(field_idx, mod)) break :tuple; + const dest_field_ty = dest_ty.structFieldType(field_idx, mod); + const src_field_ty = src_ty.structFieldType(field_idx, mod); + const field = try sema.coerceInMemoryAllowed(block, dest_field_ty, src_field_ty, dest_is_mut, target, dest_src, src_src); + if (field != .ok) break :tuple; + } + return .ok; + } + return InMemoryCoercionResult{ .no_match = .{ .actual = dest_ty, .wanted = src_ty, @@ -27959,9 +27905,8 @@ fn coerceInMemoryAllowedErrorSets( switch (src_ty.toIntern()) { .anyerror_type => switch (ip.indexToKey(dest_ty.toIntern())) { - .inferred_error_set_type => unreachable, // Caught by dest_ty.isAnyError(mod) above. .simple_type => unreachable, // filtered out above - .error_set_type => return .from_anyerror, + .error_set_type, .inferred_error_set_type => return .from_anyerror, else => unreachable, }, @@ -28008,8 +27953,6 @@ fn coerceInMemoryAllowedErrorSets( else => unreachable, }, } - - unreachable; } fn coerceInMemoryAllowedFns( @@ -29800,7 +29743,7 @@ fn coerceArrayLike( if (in_memory_result == .ok) { if (try sema.resolveMaybeUndefVal(inst)) |inst_val| { // These types share the same comptime value representation. 
- return sema.coerceInMemory(block, inst_val, inst_ty, dest_ty, dest_ty_src); + return sema.coerceInMemory(inst_val, dest_ty); } try sema.requireRuntimeBlock(block, inst_src, null); return block.addBitCast(dest_ty, inst); @@ -31586,6 +31529,290 @@ fn unionToTag( return block.addTyOp(.get_union_tag, enum_ty, un); } +const PeerResolveStrategy = enum { + /// The type is not known. + /// If refined no further, this is equivalent to `exact`. + unknown, + /// The type may be an error set or error union. + /// If refined no further, it is an error set. + error_set, + /// The type must be some error union. + error_union, + /// The type may be @TypeOf(null), an optional or a C pointer. + /// If refined no further, it is @TypeOf(null). + nullable, + /// The type must be some optional or a C pointer. + /// If refined no further, it is an optional. + optional, + /// The type must be either an array or a vector. + /// If refined no further, it is an array. + array, + /// The type must be a vector. + vector, + /// The type must be a C pointer. + c_ptr, + /// The type must be a pointer (C or not). + /// If refined no further, it is a non-C pointer. + ptr, + /// The type must be a function or a pointer to a function. + /// If refined no further, it is a function. + func, + /// The type must be an enum literal, or some specific enum or union. Which one is decided + /// afterwards based on the types in question. + enum_or_union, + /// The type must be some integer or float type. + /// If refined no further, it is `comptime_int`. + comptime_int, + /// The type must be some float type. + /// If refined no further, it is `comptime_float`. + comptime_float, + /// The type must be some float or fixed-width integer type. + /// If refined no further, it is some fixed-width integer type. + fixed_int, + /// The type must be some fixed-width float type. + fixed_float, + /// The type must be a struct literal or tuple type. + coercible_struct, + /// The peers must all be of the same type. + exact, + + /// Given two strategies, find a strategy that satisfies both, if one exists. If no such + /// strategy exists, any strategy may be returned; an error will be emitted when the caller + /// attempts to use the strategy to resolve the type. + /// Strategy `a` comes from the peer in `reason_peer`, while strategy `b` comes from the peer at + /// index `b_peer_idx`. `reason_peer` is updated to reflect the reason for the new strategy. + fn merge(a: PeerResolveStrategy, b: PeerResolveStrategy, reason_peer: *usize, b_peer_idx: usize) PeerResolveStrategy { + // Our merging should be order-independent. Thus, even though the union order is arbitrary, + // by sorting the tags and switching first on the smaller, we have half as many cases to + // worry about (since we avoid the duplicates). 
+ const s0_is_a = @enumToInt(a) <= @enumToInt(b); + const s0 = if (s0_is_a) a else b; + const s1 = if (s0_is_a) b else a; + + const ReasonMethod = enum { + all_s0, + all_s1, + either, + }; + + const res: struct { ReasonMethod, PeerResolveStrategy } = switch (s0) { + .unknown => .{ .all_s1, s1 }, + .error_set => switch (s1) { + .error_set => .{ .either, .error_set }, + else => .{ .all_s0, .error_union }, + }, + .error_union => switch (s1) { + .error_union => .{ .either, .error_union }, + else => .{ .all_s0, .error_union }, + }, + .nullable => switch (s1) { + .nullable => .{ .either, .nullable }, + .c_ptr => .{ .all_s1, .c_ptr }, + else => .{ .all_s0, .optional }, + }, + .optional => switch (s1) { + .optional => .{ .either, .optional }, + .c_ptr => .{ .all_s1, .c_ptr }, + else => .{ .all_s0, .optional }, + }, + .array => switch (s1) { + .array => .{ .either, .array }, + .vector => .{ .all_s1, .vector }, + else => .{ .all_s0, .array }, + }, + .vector => switch (s1) { + .vector => .{ .either, .vector }, + else => .{ .all_s0, .vector }, + }, + .c_ptr => switch (s1) { + .c_ptr => .{ .either, .c_ptr }, + else => .{ .all_s0, .c_ptr }, + }, + .ptr => switch (s1) { + .ptr => .{ .either, .ptr }, + else => .{ .all_s0, .ptr }, + }, + .func => switch (s1) { + .func => .{ .either, .func }, + else => .{ .all_s1, s1 }, // doesn't override anything later + }, + .enum_or_union => switch (s1) { + .enum_or_union => .{ .either, .enum_or_union }, + else => .{ .all_s0, .enum_or_union }, + }, + .comptime_int => switch (s1) { + .comptime_int => .{ .either, .comptime_int }, + else => .{ .all_s1, s1 }, // doesn't override anything later + }, + .comptime_float => switch (s1) { + .comptime_float => .{ .either, .comptime_float }, + else => .{ .all_s1, s1 }, // doesn't override anything later + }, + .fixed_int => switch (s1) { + .fixed_int => .{ .either, .fixed_int }, + else => .{ .all_s1, s1 }, // doesn't override anything later + }, + .fixed_float => switch (s1) { + .fixed_float => .{ .either, .fixed_float }, + else => .{ .all_s1, s1 }, // doesn't override anything later + }, + .coercible_struct => switch (s1) { + .exact => .{ .all_s1, .exact }, + else => .{ .all_s0, .coercible_struct }, + }, + .exact => .{ .all_s0, .exact }, + }; + + switch (res[0]) { + .all_s0 => { + if (!s0_is_a) { + reason_peer.* = b_peer_idx; + } + }, + .all_s1 => { + if (s0_is_a) { + reason_peer.* = b_peer_idx; + } + }, + .either => { + // Prefer the earliest peer + reason_peer.* = @min(reason_peer.*, b_peer_idx); + }, + } + + return res[1]; + } + + fn select(ty: Type, mod: *Module) PeerResolveStrategy { + return switch (ty.zigTypeTag(mod)) { + .Type, .Void, .Bool, .Opaque, .Frame, .AnyFrame => .exact, + .NoReturn, .Undefined => .unknown, + .Null => .nullable, + .ComptimeInt => .comptime_int, + .Int => .fixed_int, + .ComptimeFloat => .comptime_float, + .Float => .fixed_float, + .Pointer => if (ty.ptrInfo(mod).size == .C) .c_ptr else .ptr, + .Array => .array, + .Vector => .vector, + .Optional => .optional, + .ErrorSet => .error_set, + .ErrorUnion => .error_union, + .EnumLiteral, .Enum, .Union => .enum_or_union, + .Struct => if (ty.isTupleOrAnonStruct(mod)) .coercible_struct else .exact, + .Fn => .func, + }; + } +}; + +const PeerResolveResult = union(enum) { + /// The peer type resolution was successful, and resulted in the given type. + success: Type, + /// There was some generic conflict between two peers. 
+ conflict: struct { + peer_idx_a: usize, + peer_idx_b: usize, + }, + /// There was an error when resolving the type of a struct or tuple field. + field_error: struct { + /// The name of the field which caused the failure. + field_name: []const u8, + /// The type of this field in each peer. + field_types: []Type, + /// The error from resolving the field type. Guaranteed not to be `success`. + sub_result: *PeerResolveResult, + }, + + fn report( + result: PeerResolveResult, + sema: *Sema, + block: *Block, + src: LazySrcLoc, + instructions: []const Air.Inst.Ref, + candidate_srcs: Module.PeerTypeCandidateSrc, + ) !*Module.ErrorMsg { + const mod = sema.mod; + const decl_ptr = mod.declPtr(block.src_decl); + + var opt_msg: ?*Module.ErrorMsg = null; + errdefer if (opt_msg) |msg| msg.destroy(sema.gpa); + + // If we mention fields we'll want to include field types, so put peer types in a buffer + var peer_tys = try sema.arena.alloc(Type, instructions.len); + for (peer_tys, instructions) |*ty, inst| { + ty.* = sema.typeOf(inst); + } + + var cur = result; + while (true) { + var conflict_idx: [2]usize = undefined; + + switch (cur) { + .success => unreachable, + .conflict => |conflict| { + // Fall through to two-peer conflict handling below + conflict_idx = .{ + conflict.peer_idx_a, + conflict.peer_idx_b, + }; + }, + .field_error => |field_error| { + const fmt = "struct field '{s}' has conflicting types"; + const args = .{field_error.field_name}; + if (opt_msg) |msg| { + try sema.errNote(block, src, msg, fmt, args); + } else { + opt_msg = try sema.errMsg(block, src, fmt, args); + } + + // Continue on to child error + cur = field_error.sub_result.*; + peer_tys = field_error.field_types; + continue; + }, + } + + // This is the path for reporting a generic conflict between two peers. 
+ + if (conflict_idx[1] < conflict_idx[0]) { + // b comes first in source, so it's better if it comes first in the error + std.mem.swap(usize, &conflict_idx[0], &conflict_idx[1]); + } + + const conflict_tys: [2]Type = .{ + peer_tys[conflict_idx[0]], + peer_tys[conflict_idx[1]], + }; + const conflict_srcs: [2]?LazySrcLoc = .{ + candidate_srcs.resolve(mod, decl_ptr, conflict_idx[0]), + candidate_srcs.resolve(mod, decl_ptr, conflict_idx[1]), + }; + + const fmt = "incompatible types: '{}' and '{}'"; + const args = .{ + conflict_tys[0].fmt(mod), + conflict_tys[1].fmt(mod), + }; + const msg = if (opt_msg) |msg| msg: { + try sema.errNote(block, src, msg, fmt, args); + break :msg msg; + } else msg: { + const msg = try sema.errMsg(block, src, fmt, args); + opt_msg = msg; + break :msg msg; + }; + + if (conflict_srcs[0]) |src_loc| try sema.errNote(block, src_loc, msg, "type '{}' here", .{conflict_tys[0].fmt(mod)}); + if (conflict_srcs[1]) |src_loc| try sema.errNote(block, src_loc, msg, "type '{}' here", .{conflict_tys[1].fmt(mod)}); + + // No child error + break; + } + + return opt_msg.?; + } +}; + fn resolvePeerTypes( sema: *Sema, block: *Block, @@ -31593,594 +31820,1144 @@ fn resolvePeerTypes( instructions: []const Air.Inst.Ref, candidate_srcs: Module.PeerTypeCandidateSrc, ) !Type { - const mod = sema.mod; switch (instructions.len) { 0 => return Type.noreturn, 1 => return sema.typeOf(instructions[0]), else => {}, } + var peer_tys = try sema.arena.alloc(?Type, instructions.len); + var peer_vals = try sema.arena.alloc(?Value, instructions.len); + + for (instructions, peer_tys, peer_vals) |inst, *ty, *val| { + ty.* = sema.typeOf(inst); + val.* = try sema.resolveMaybeUndefVal(inst); + } + + switch (try sema.resolvePeerTypesInner(block, src, peer_tys, peer_vals)) { + .success => |ty| return ty, + else => |result| { + const msg = try result.report(sema, block, src, instructions, candidate_srcs); + return sema.failWithOwnedErrorMsg(msg); + }, + } +} + +fn resolvePeerTypesInner( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + peer_tys: []?Type, + peer_vals: []?Value, +) !PeerResolveResult { + const mod = sema.mod; + + var strat_reason: usize = 0; + var s: PeerResolveStrategy = .unknown; + for (peer_tys, 0..) |opt_ty, i| { + const ty = opt_ty orelse continue; + s = s.merge(PeerResolveStrategy.select(ty, mod), &strat_reason, i); + } + + if (s == .unknown) { + // The whole thing was noreturn or undefined - try to do an exact match + s = .exact; + } else { + // There was something other than noreturn and undefined, so we can ignore those peers + for (peer_tys) |*ty_ptr| { + const ty = ty_ptr.* orelse continue; + switch (ty.zigTypeTag(mod)) { + .NoReturn, .Undefined => ty_ptr.* = null, + else => {}, + } + } + } + const target = mod.getTarget(); - var chosen = instructions[0]; - // If this is non-null then it does the following thing, depending on the chosen zigTypeTag(mod). - // * ErrorSet: this is an override - // * ErrorUnion: this is an override of the error set only - // * other: at the end we make an ErrorUnion with the other thing and this - var err_set_ty: ?Type = null; - var any_are_null = false; - var seen_const = false; - var convert_to_slice = false; - var chosen_i: usize = 0; - for (instructions[1..], 0..) 
|candidate, candidate_i| { - const candidate_ty = sema.typeOf(candidate); - const chosen_ty = sema.typeOf(chosen); + switch (s) { + .unknown => unreachable, - const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison(mod); - const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison(mod); + .error_set => { + var final_set: ?Type = null; + for (peer_tys, 0..) |opt_ty, i| { + const ty = opt_ty orelse continue; + if (ty.zigTypeTag(mod) != .ErrorSet) return .{ .conflict = .{ + .peer_idx_a = strat_reason, + .peer_idx_b = i, + } }; + if (final_set) |cur_set| { + final_set = try sema.maybeMergeErrorSets(block, src, cur_set, ty); + } else { + final_set = ty; + } + } + return .{ .success = final_set.? }; + }, - // If the candidate can coerce into our chosen type, we're done. - // If the chosen type can coerce into the candidate, use that. - if ((try sema.coerceInMemoryAllowed(block, chosen_ty, candidate_ty, false, target, src, src)) == .ok) { - continue; - } - if ((try sema.coerceInMemoryAllowed(block, candidate_ty, chosen_ty, false, target, src, src)) == .ok) { - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - } - - switch (candidate_ty_tag) { - .NoReturn, .Undefined => continue, - - .Null => { - any_are_null = true; - continue; - }, - - .Int => switch (chosen_ty_tag) { - .ComptimeInt => { - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - }, - .Int => { - const chosen_info = chosen_ty.intInfo(mod); - const candidate_info = candidate_ty.intInfo(mod); - - if (chosen_info.bits < candidate_info.bits) { - chosen = candidate; - chosen_i = candidate_i + 1; - } - continue; - }, - .Pointer => if (chosen_ty.ptrSize(mod) == .C) continue, - else => {}, - }, - .ComptimeInt => switch (chosen_ty_tag) { - .Int, .Float, .ComptimeFloat => continue, - .Pointer => if (chosen_ty.ptrSize(mod) == .C) continue, - else => {}, - }, - .Float => switch (chosen_ty_tag) { - .Float => { - if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) { - chosen = candidate; - chosen_i = candidate_i + 1; - } - continue; - }, - .ComptimeFloat, .ComptimeInt => { - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - }, - else => {}, - }, - .ComptimeFloat => switch (chosen_ty_tag) { - .Float => continue, - .ComptimeInt => { - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - }, - else => {}, - }, - .Enum => switch (chosen_ty_tag) { - .EnumLiteral => { - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - }, - .Union => continue, - else => {}, - }, - .EnumLiteral => switch (chosen_ty_tag) { - .Enum, .Union => continue, - else => {}, - }, - .Union => switch (chosen_ty_tag) { - .Enum, .EnumLiteral => { - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - }, - else => {}, - }, - .ErrorSet => switch (chosen_ty_tag) { - .ErrorSet => { - // If chosen is superset of candidate, keep it. - // If candidate is superset of chosen, switch it. - // If neither is a superset, merge errors. 
- const chosen_set_ty = err_set_ty orelse chosen_ty; - - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_ty, src, src)) { - continue; - } - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_ty, chosen_set_ty, src, src)) { - err_set_ty = null; - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - } - - err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty); - continue; - }, - .ErrorUnion => { - const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod); - - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_ty, src, src)) { - continue; - } - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_ty, chosen_set_ty, src, src)) { - err_set_ty = candidate_ty; - continue; - } - - err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty); - continue; - }, - else => { - if (err_set_ty) |chosen_set_ty| { - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_ty, src, src)) { - continue; - } - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_ty, chosen_set_ty, src, src)) { - err_set_ty = candidate_ty; - continue; - } - - err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty); - continue; - } else { - err_set_ty = candidate_ty; - continue; - } - }, - }, - .ErrorUnion => switch (chosen_ty_tag) { - .ErrorSet => { - const chosen_set_ty = err_set_ty orelse chosen_ty; - const candidate_set_ty = candidate_ty.errorUnionSet(mod); - - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) { - err_set_ty = chosen_set_ty; - } else if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) { - err_set_ty = null; - } else { - err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty); - } - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - }, - - .ErrorUnion => { - const chosen_payload_ty = chosen_ty.errorUnionPayload(mod); - const candidate_payload_ty = candidate_ty.errorUnionPayload(mod); - - const coerce_chosen = (try sema.coerceInMemoryAllowed(block, chosen_payload_ty, candidate_payload_ty, false, target, src, src)) == .ok; - const coerce_candidate = (try sema.coerceInMemoryAllowed(block, candidate_payload_ty, chosen_payload_ty, false, target, src, src)) == .ok; - - if (coerce_chosen or coerce_candidate) { - // If we can coerce to the candidate, we switch to that - // type. This is the same logic as the bare (non-union) - // coercion check we do at the top of this func. 
- if (coerce_candidate) { - chosen = candidate; - chosen_i = candidate_i + 1; - } - - const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod); - const candidate_set_ty = candidate_ty.errorUnionSet(mod); - - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) { - err_set_ty = chosen_set_ty; - } else if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) { - err_set_ty = candidate_set_ty; - } else { - err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty); - } - continue; - } - }, - - else => { - if (err_set_ty) |chosen_set_ty| { - const candidate_set_ty = candidate_ty.errorUnionSet(mod); - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) { - err_set_ty = chosen_set_ty; - } else if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) { - err_set_ty = null; - } else { - err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty); - } - } - seen_const = seen_const or chosen_ty.isConstPtr(mod); - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - }, - }, - .Pointer => { - const cand_info = candidate_ty.ptrInfo(mod); - switch (chosen_ty_tag) { - .Pointer => { - const chosen_info = chosen_ty.ptrInfo(mod); - - seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; - - // *[N]T to [*]T - // *[N]T to []T - if ((cand_info.size == .Many or cand_info.size == .Slice) and - chosen_info.size == .One and - chosen_info.pointee_type.zigTypeTag(mod) == .Array) - { - // In case we see i.e.: `*[1]T`, `*[2]T`, `[*]T` - convert_to_slice = false; - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - } - if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag(mod) == .Array and - (chosen_info.size == .Many or chosen_info.size == .Slice)) - { - // In case we see i.e.: `*[1]T`, `*[2]T`, `[*]T` - convert_to_slice = false; - continue; - } - - // *[N]T and *[M]T - // Verify both are single-pointers to arrays. - // Keep the one whose element type can be coerced into. - if (chosen_info.size == .One and - cand_info.size == .One and - chosen_info.pointee_type.zigTypeTag(mod) == .Array and - cand_info.pointee_type.zigTypeTag(mod) == .Array) - { - const chosen_elem_ty = chosen_info.pointee_type.childType(mod); - const cand_elem_ty = cand_info.pointee_type.childType(mod); - - const chosen_ok = .ok == try sema.coerceInMemoryAllowed(block, chosen_elem_ty, cand_elem_ty, chosen_info.mutable, target, src, src); - if (chosen_ok) { - convert_to_slice = true; - continue; - } - - const cand_ok = .ok == try sema.coerceInMemoryAllowed(block, cand_elem_ty, chosen_elem_ty, cand_info.mutable, target, src, src); - if (cand_ok) { - convert_to_slice = true; - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - } - - // They're both bad. Report error. - // In the future we probably want to use the - // coerceInMemoryAllowed error reporting mechanism, - // however, for now we just fall through for the - // "incompatible types" error below. - } - - // [*c]T and any other pointer size - // Whichever element type can coerce to the other one, is - // the one we will keep. If they're both OK then we keep the - // C pointer since it matches both single and many pointers. 
- if (cand_info.size == .C or chosen_info.size == .C) { - const cand_ok = .ok == try sema.coerceInMemoryAllowed(block, cand_info.pointee_type, chosen_info.pointee_type, cand_info.mutable, target, src, src); - const chosen_ok = .ok == try sema.coerceInMemoryAllowed(block, chosen_info.pointee_type, cand_info.pointee_type, chosen_info.mutable, target, src, src); - - if (cand_ok) { - if (chosen_ok) { - if (chosen_info.size == .C) { - continue; - } else { - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - } - } else { - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - } - } else { - if (chosen_ok) { - continue; - } else { - // They're both bad. Report error. - // In the future we probably want to use the - // coerceInMemoryAllowed error reporting mechanism, - // however, for now we just fall through for the - // "incompatible types" error below. - } - } - } + .error_union => { + var final_set: ?Type = null; + for (peer_tys, peer_vals) |*ty_ptr, *val_ptr| { + const ty = ty_ptr.* orelse continue; + const set_ty = switch (ty.zigTypeTag(mod)) { + .ErrorSet => blk: { + ty_ptr.* = null; // no payload to decide on + val_ptr.* = null; + break :blk ty; }, - .Int, .ComptimeInt => { - if (cand_info.size == .C) { - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - } + .ErrorUnion => blk: { + const set_ty = ty.errorUnionSet(mod); + ty_ptr.* = ty.errorUnionPayload(mod); + if (val_ptr.*) |eu_val| switch (mod.intern_pool.indexToKey(eu_val.toIntern())) { + .error_union => |eu| switch (eu.val) { + .payload => |payload_ip| val_ptr.* = payload_ip.toValue(), + .err_name => val_ptr.* = null, + }, + .undef => val_ptr.* = (try sema.mod.intern(.{ .undef = ty_ptr.*.?.toIntern() })).toValue(), + else => unreachable, + }; + break :blk set_ty; + }, + else => continue, // whole type is the payload + }; + if (final_set) |cur_set| { + final_set = try sema.maybeMergeErrorSets(block, src, cur_set, set_ty); + } else { + final_set = set_ty; + } + } + assert(final_set != null); + const final_payload = switch (try sema.resolvePeerTypesInner( + block, + src, + peer_tys, + peer_vals, + )) { + .success => |ty| ty, + else => |result| return result, + }; + return .{ .success = try mod.errorUnionType(final_set.?, final_payload) }; + }, + + .nullable => { + for (peer_tys, 0..) 
|opt_ty, i| { + const ty = opt_ty orelse continue; + if (!ty.eql(Type.null, mod)) return .{ .conflict = .{ + .peer_idx_a = strat_reason, + .peer_idx_b = i, + } }; + } + return .{ .success = Type.null }; + }, + + .optional => { + for (peer_tys, peer_vals) |*ty_ptr, *val_ptr| { + const ty = ty_ptr.* orelse continue; + switch (ty.zigTypeTag(mod)) { + .Null => { + ty_ptr.* = null; + val_ptr.* = null; }, .Optional => { - const chosen_ptr_ty = chosen_ty.optionalChild(mod); - if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { - const chosen_info = chosen_ptr_ty.ptrInfo(mod); - - seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; - - // *[N]T to ?![*]T - // *[N]T to ?![]T - if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag(mod) == .Array and - (chosen_info.size == .Many or chosen_info.size == .Slice)) - { - continue; - } - } - }, - .ErrorUnion => { - const chosen_ptr_ty = chosen_ty.errorUnionPayload(mod); - if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { - const chosen_info = chosen_ptr_ty.ptrInfo(mod); - - seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; - - // *[N]T to E![*]T - // *[N]T to E![]T - if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag(mod) == .Array and - (chosen_info.size == .Many or chosen_info.size == .Slice)) - { - continue; - } - } - }, - .Fn => { - if (!cand_info.mutable and cand_info.pointee_type.zigTypeTag(mod) == .Fn and .ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty, cand_info.pointee_type, target, src, src)) { - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - } + ty_ptr.* = ty.optionalChild(mod); + if (val_ptr.*) |opt_val| val_ptr.* = if (!opt_val.isUndef(mod)) opt_val.optionalValue(mod) else null; }, else => {}, } - }, - .Optional => { - const opt_child_ty = candidate_ty.optionalChild(mod); - if ((try sema.coerceInMemoryAllowed(block, chosen_ty, opt_child_ty, false, target, src, src)) == .ok) { - seen_const = seen_const or opt_child_ty.isConstPtr(mod); - any_are_null = true; - continue; - } - - seen_const = seen_const or chosen_ty.isConstPtr(mod); - any_are_null = false; - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - }, - .Vector => switch (chosen_ty_tag) { - .Vector => { - const chosen_len = chosen_ty.vectorLen(mod); - const candidate_len = candidate_ty.vectorLen(mod); - if (chosen_len != candidate_len) - continue; - - const chosen_child_ty = chosen_ty.childType(mod); - const candidate_child_ty = candidate_ty.childType(mod); - if (chosen_child_ty.zigTypeTag(mod) == .Int and candidate_child_ty.zigTypeTag(mod) == .Int) { - const chosen_info = chosen_child_ty.intInfo(mod); - const candidate_info = candidate_child_ty.intInfo(mod); - if (chosen_info.bits < candidate_info.bits) { - chosen = candidate; - chosen_i = candidate_i + 1; - } - continue; - } - if (chosen_child_ty.zigTypeTag(mod) == .Float and candidate_child_ty.zigTypeTag(mod) == .Float) { - if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) { - chosen = candidate; - chosen_i = candidate_i + 1; - } - continue; - } - }, - .Array => { - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - }, - else => {}, - }, - .Array => switch (chosen_ty_tag) { - .Vector => continue, - else => {}, - }, - .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr(mod) and chosen_ty.childType(mod).zigTypeTag(mod) == .Fn) { - if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(mod), candidate_ty, target, src, src)) { - continue; - } - }, - else => {}, - } - - switch 
(chosen_ty_tag) { - .NoReturn, .Undefined => { - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - }, - .Null => { - any_are_null = true; - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - }, - .Optional => { - const opt_child_ty = chosen_ty.optionalChild(mod); - if ((try sema.coerceInMemoryAllowed(block, opt_child_ty, candidate_ty, false, target, src, src)) == .ok) { - continue; - } - if ((try sema.coerceInMemoryAllowed(block, candidate_ty, opt_child_ty, false, target, src, src)) == .ok) { - any_are_null = true; - chosen = candidate; - chosen_i = candidate_i + 1; - continue; - } - }, - .ErrorUnion => { - const payload_ty = chosen_ty.errorUnionPayload(mod); - if ((try sema.coerceInMemoryAllowed(block, payload_ty, candidate_ty, false, target, src, src)) == .ok) { - continue; - } - }, - .ErrorSet => { - chosen = candidate; - chosen_i = candidate_i + 1; - if (err_set_ty) |chosen_set_ty| { - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, chosen_ty, src, src)) { - continue; - } - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_ty, chosen_set_ty, src, src)) { - err_set_ty = chosen_ty; - continue; - } - - err_set_ty = try sema.errorSetMerge(chosen_set_ty, chosen_ty); - continue; - } else { - err_set_ty = chosen_ty; - continue; - } - }, - else => {}, - } - - // At this point, we hit a compile error. We need to recover - // the source locations. - const chosen_src = candidate_srcs.resolve( - mod, - mod.declPtr(block.src_decl), - chosen_i, - ); - const candidate_src = candidate_srcs.resolve( - mod, - mod.declPtr(block.src_decl), - candidate_i + 1, - ); - - const msg = msg: { - const msg = try sema.errMsg(block, src, "incompatible types: '{}' and '{}'", .{ - chosen_ty.fmt(mod), - candidate_ty.fmt(mod), - }); - errdefer msg.destroy(sema.gpa); - - if (chosen_src) |src_loc| - try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty.fmt(mod)}); - - if (candidate_src) |src_loc| - try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty.fmt(mod)}); - - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); - } - - const chosen_ty = sema.typeOf(chosen); - - if (convert_to_slice) { - // turn *[N]T => []T - const chosen_child_ty = chosen_ty.childType(mod); - var info = chosen_ty.ptrInfo(mod); - info.sentinel = chosen_child_ty.sentinel(mod); - info.size = .Slice; - info.mutable = !(seen_const or chosen_child_ty.isConstPtr(mod)); - info.pointee_type = chosen_child_ty.elemType2(mod); - - const new_ptr_ty = try Type.ptr(sema.arena, mod, info); - const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty, mod) - else - new_ptr_ty; - const set_ty = err_set_ty orelse return opt_ptr_ty; - return try mod.errorUnionType(set_ty, opt_ptr_ty); - } - - if (seen_const) { - // turn []T => []const T - switch (chosen_ty.zigTypeTag(mod)) { - .ErrorUnion => { - const ptr_ty = chosen_ty.errorUnionPayload(mod); - var info = ptr_ty.ptrInfo(mod); - info.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, mod, info); - const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty, mod) - else - new_ptr_ty; - const set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod); - return try mod.errorUnionType(set_ty, opt_ptr_ty); - }, - .Pointer => { - var info = chosen_ty.ptrInfo(mod); - info.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, mod, info); - const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty, mod) - else - new_ptr_ty; - const set_ty 
= err_set_ty orelse return opt_ptr_ty; - return try mod.errorUnionType(set_ty, opt_ptr_ty); - }, - else => return chosen_ty, - } - } - - if (any_are_null) { - const opt_ty = switch (chosen_ty.zigTypeTag(mod)) { - .Null, .Optional => chosen_ty, - else => try Type.optional(sema.arena, chosen_ty, mod), - }; - const set_ty = err_set_ty orelse return opt_ty; - return try mod.errorUnionType(set_ty, opt_ty); - } - - if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag(mod)) { - .ErrorSet => return ty, - .ErrorUnion => { - const payload_ty = chosen_ty.errorUnionPayload(mod); - return try mod.errorUnionType(ty, payload_ty); + } + const child_ty = switch (try sema.resolvePeerTypesInner( + block, + src, + peer_tys, + peer_vals, + )) { + .success => |ty| ty, + else => |result| return result, + }; + return .{ .success = try mod.optionalType(child_ty.toIntern()) }; }, - else => return try mod.errorUnionType(ty, chosen_ty), - }; - return chosen_ty; + .array => { + // Index of the first non-null peer + var opt_first_idx: ?usize = null; + // Index of the first array or vector peer (i.e. not a tuple) + var opt_first_arr_idx: ?usize = null; + // Set to non-null once we see any peer, even a tuple + var len: u64 = undefined; + var sentinel: ?Value = undefined; + // Only set once we see a non-tuple peer + var elem_ty: Type = undefined; + + for (peer_tys, 0..) |*ty_ptr, i| { + const ty = ty_ptr.* orelse continue; + + if (!ty.isArrayOrVector(mod)) { + // We allow tuples of the correct length. We won't validate their elem type, since the elements can be coerced. + const arr_like = sema.typeIsArrayLike(ty) orelse return .{ .conflict = .{ + .peer_idx_a = strat_reason, + .peer_idx_b = i, + } }; + + if (opt_first_idx) |first_idx| { + if (arr_like.len != len) return .{ .conflict = .{ + .peer_idx_a = first_idx, + .peer_idx_b = i, + } }; + } else { + opt_first_idx = i; + len = arr_like.len; + } + + sentinel = null; + + continue; + } + + const first_arr_idx = opt_first_arr_idx orelse { + if (opt_first_idx == null) { + opt_first_idx = i; + len = ty.arrayLen(mod); + sentinel = ty.sentinel(mod); + } + opt_first_arr_idx = i; + elem_ty = ty.childType(mod); + continue; + }; + + if (ty.arrayLen(mod) != len) return .{ .conflict = .{ + .peer_idx_a = first_arr_idx, + .peer_idx_b = i, + } }; + + if (!ty.childType(mod).eql(elem_ty, mod)) { + return .{ .conflict = .{ + .peer_idx_a = first_arr_idx, + .peer_idx_b = i, + } }; + } + + if (sentinel) |cur_sent| { + if (ty.sentinel(mod)) |peer_sent| { + if (!peer_sent.eql(cur_sent, elem_ty, mod)) sentinel = null; + } else { + sentinel = null; + } + } + } + + // There should always be at least one array or vector peer + assert(opt_first_arr_idx != null); + + return .{ .success = try mod.arrayType(.{ + .len = len, + .child = elem_ty.toIntern(), + .sentinel = if (sentinel) |sent_val| sent_val.toIntern() else .none, + }) }; + }, + + .vector => { + var len: ?u64 = null; + var first_idx: usize = undefined; + for (peer_tys, peer_vals, 0..) |*ty_ptr, *val_ptr, i| { + const ty = ty_ptr.* orelse continue; + + if (!ty.isArrayOrVector(mod)) { + // Allow tuples of the correct length + const arr_like = sema.typeIsArrayLike(ty) orelse return .{ .conflict = .{ + .peer_idx_a = strat_reason, + .peer_idx_b = i, + } }; + + if (len) |expect_len| { + if (arr_like.len != expect_len) return .{ .conflict = .{ + .peer_idx_a = first_idx, + .peer_idx_b = i, + } }; + } else { + len = arr_like.len; + first_idx = i; + } + + // Tuples won't participate in the child type resolution. 
We'll resolve without + // them, and if the tuples have a bad type, we'll get a coercion error later. + ty_ptr.* = null; + val_ptr.* = null; + + continue; + } + + if (len) |expect_len| { + if (ty.arrayLen(mod) != expect_len) return .{ .conflict = .{ + .peer_idx_a = first_idx, + .peer_idx_b = i, + } }; + } else { + len = ty.arrayLen(mod); + first_idx = i; + } + + ty_ptr.* = ty.childType(mod); + val_ptr.* = null; // multiple child vals, so we can't easily use them in PTR + } + + const child_ty = switch (try sema.resolvePeerTypesInner( + block, + src, + peer_tys, + peer_vals, + )) { + .success => |ty| ty, + else => |result| return result, + }; + + return .{ .success = try mod.vectorType(.{ + .len = @intCast(u32, len.?), + .child = child_ty.toIntern(), + }) }; + }, + + .c_ptr => { + var opt_ptr_info: ?Type.Payload.Pointer.Data = null; + var first_idx: usize = undefined; + for (peer_tys, peer_vals, 0..) |opt_ty, opt_val, i| { + const ty = opt_ty orelse continue; + switch (ty.zigTypeTag(mod)) { + .ComptimeInt => continue, // comptime-known integers can always coerce to C pointers + .Int => { + if (opt_val != null) { + // Always allow the coercion for comptime-known ints + continue; + } else { + // Runtime-known, so check if the type is no bigger than a usize + const ptr_bits = target.ptrBitWidth(); + const bits = ty.intInfo(mod).bits; + if (bits <= ptr_bits) continue; + } + }, + .Null => continue, + else => {}, + } + + if (!ty.isPtrAtRuntime(mod)) return .{ .conflict = .{ + .peer_idx_a = strat_reason, + .peer_idx_b = i, + } }; + + // Goes through optionals + const peer_info = ty.ptrInfo(mod); + + var ptr_info = opt_ptr_info orelse { + opt_ptr_info = peer_info; + opt_ptr_info.?.size = .C; + first_idx = i; + continue; + }; + + // Try peer -> cur, then cur -> peer + ptr_info.pointee_type = (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.pointee_type, peer_info.pointee_type)) orelse { + return .{ .conflict = .{ + .peer_idx_a = first_idx, + .peer_idx_b = i, + } }; + }; + + if (ptr_info.sentinel != null and peer_info.sentinel != null) { + const peer_sent = try mod.getCoerced(ptr_info.sentinel.?, ptr_info.pointee_type); + const ptr_sent = try mod.getCoerced(peer_info.sentinel.?, ptr_info.pointee_type); + if (ptr_sent.eql(peer_sent, ptr_info.pointee_type, mod)) { + ptr_info.sentinel = ptr_sent; + } else { + ptr_info.sentinel = null; + } + } else { + ptr_info.sentinel = null; + } + + // Note that the align can be always non-zero; Type.ptr will canonicalize it + ptr_info.@"align" = @min(ptr_info.alignment(mod), peer_info.alignment(mod)); + if (ptr_info.@"addrspace" != peer_info.@"addrspace") { + return .{ .conflict = .{ + .peer_idx_a = first_idx, + .peer_idx_b = i, + } }; + } + + if (ptr_info.bit_offset != peer_info.bit_offset or + ptr_info.host_size != peer_info.host_size) + { + return .{ .conflict = .{ + .peer_idx_a = first_idx, + .peer_idx_b = i, + } }; + } + + ptr_info.mutable = ptr_info.mutable and peer_info.mutable; + ptr_info.@"volatile" = ptr_info.@"volatile" or peer_info.@"volatile"; + + opt_ptr_info = ptr_info; + } + return .{ .success = try Type.ptr(sema.arena, mod, opt_ptr_info.?) }; + }, + + .ptr => { + // If we've resolved to a `[]T` but then see a `[*]T`, we can resolve to a `[*]T` only + // if there were no actual slices. Else, we want the slice index to report a conflict. 
+ var opt_slice_idx: ?usize = null; + + var opt_ptr_info: ?Type.Payload.Pointer.Data = null; + var first_idx: usize = undefined; + var other_idx: usize = undefined; // We sometimes need a second peer index to report a generic error + + for (peer_tys, 0..) |opt_ty, i| { + const ty = opt_ty orelse continue; + const peer_info: Type.Payload.Pointer.Data = switch (ty.zigTypeTag(mod)) { + .Pointer => ty.ptrInfo(mod), + .Fn => .{ + .pointee_type = ty, + .@"addrspace" = target_util.defaultAddressSpace(target, .global_constant), + }, + else => return .{ .conflict = .{ + .peer_idx_a = strat_reason, + .peer_idx_b = i, + } }, + }; + + switch (peer_info.size) { + .One, .Many => {}, + .Slice => opt_slice_idx = i, + .C => return .{ .conflict = .{ + .peer_idx_a = strat_reason, + .peer_idx_b = i, + } }, + } + + var ptr_info = opt_ptr_info orelse { + opt_ptr_info = peer_info; + first_idx = i; + continue; + }; + + other_idx = i; + + // We want to return this in a lot of cases, so alias it here for convenience + const generic_err: PeerResolveResult = .{ .conflict = .{ + .peer_idx_a = first_idx, + .peer_idx_b = i, + } }; + + // Note that the align can be always non-zero; Type.ptr will canonicalize it + ptr_info.@"align" = @min(ptr_info.alignment(mod), peer_info.alignment(mod)); + + if (ptr_info.@"addrspace" != peer_info.@"addrspace") { + return generic_err; + } + + if (ptr_info.bit_offset != peer_info.bit_offset or + ptr_info.host_size != peer_info.host_size) + { + return generic_err; + } + + ptr_info.mutable = ptr_info.mutable and peer_info.mutable; + ptr_info.@"volatile" = ptr_info.@"volatile" or peer_info.@"volatile"; + + const peer_sentinel: ?Value = switch (peer_info.size) { + .One => switch (peer_info.pointee_type.zigTypeTag(mod)) { + .Array => peer_info.pointee_type.sentinel(mod), + else => null, + }, + .Many, .Slice => peer_info.sentinel, + .C => unreachable, + }; + + const cur_sentinel: ?Value = switch (ptr_info.size) { + .One => switch (ptr_info.pointee_type.zigTypeTag(mod)) { + .Array => ptr_info.pointee_type.sentinel(mod), + else => null, + }, + .Many, .Slice => ptr_info.sentinel, + .C => unreachable, + }; + + // We abstract array handling slightly so that tuple pointers can work like array pointers + const peer_pointee_array = sema.typeIsArrayLike(peer_info.pointee_type); + const cur_pointee_array = sema.typeIsArrayLike(ptr_info.pointee_type); + + // This switch is just responsible for deciding the size and pointee (not including + // single-pointer array sentinel). 
+ good: { + switch (peer_info.size) { + .One => switch (ptr_info.size) { + .One => { + if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.pointee_type, peer_info.pointee_type)) |pointee| { + ptr_info.pointee_type = pointee; + break :good; + } + + const cur_arr = cur_pointee_array orelse return generic_err; + const peer_arr = peer_pointee_array orelse return generic_err; + + if (try sema.resolvePairInMemoryCoercible(block, src, cur_arr.elem_ty, peer_arr.elem_ty)) |elem_ty| { + // *[n:x]T + *[n:y]T = *[n]T + if (cur_arr.len == peer_arr.len) { + ptr_info.pointee_type = try mod.arrayType(.{ + .len = cur_arr.len, + .child = elem_ty.toIntern(), + }); + break :good; + } + // *[a]T + *[b]T = []T + ptr_info.size = .Slice; + ptr_info.pointee_type = elem_ty; + break :good; + } + + if (peer_arr.elem_ty.toIntern() == .noreturn_type) { + // *struct{} + *[a]T = []T + ptr_info.size = .Slice; + ptr_info.pointee_type = cur_arr.elem_ty; + break :good; + } + + if (cur_arr.elem_ty.toIntern() == .noreturn_type) { + // *[a]T + *struct{} = []T + ptr_info.size = .Slice; + ptr_info.pointee_type = peer_arr.elem_ty; + break :good; + } + + return generic_err; + }, + .Many => { + // Only works for *[n]T + [*]T -> [*]T + const arr = peer_pointee_array orelse return generic_err; + if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.pointee_type, arr.elem_ty)) |pointee| { + ptr_info.pointee_type = pointee; + break :good; + } + if (arr.elem_ty.toIntern() == .noreturn_type) { + // *struct{} + [*]T -> [*]T + break :good; + } + return generic_err; + }, + .Slice => { + // Only works for *[n]T + []T -> []T + const arr = peer_pointee_array orelse return generic_err; + if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.pointee_type, arr.elem_ty)) |pointee| { + ptr_info.pointee_type = pointee; + break :good; + } + if (arr.elem_ty.toIntern() == .noreturn_type) { + // *struct{} + []T -> []T + break :good; + } + return generic_err; + }, + .C => unreachable, + }, + .Many => switch (ptr_info.size) { + .One => { + // Only works for [*]T + *[n]T -> [*]T + const arr = cur_pointee_array orelse return generic_err; + if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, peer_info.pointee_type)) |pointee| { + ptr_info.size = .Many; + ptr_info.pointee_type = pointee; + break :good; + } + if (arr.elem_ty.toIntern() == .noreturn_type) { + // [*]T + *struct{} -> [*]T + ptr_info.size = .Many; + ptr_info.pointee_type = peer_info.pointee_type; + break :good; + } + return generic_err; + }, + .Many => { + if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.pointee_type, peer_info.pointee_type)) |pointee| { + ptr_info.pointee_type = pointee; + break :good; + } + return generic_err; + }, + .Slice => { + // Only works if no peers are actually slices + if (opt_slice_idx) |slice_idx| { + return .{ .conflict = .{ + .peer_idx_a = slice_idx, + .peer_idx_b = i, + } }; + } + // Okay, then works for [*]T + "[]T" -> [*]T + if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.pointee_type, peer_info.pointee_type)) |pointee| { + ptr_info.size = .Many; + ptr_info.pointee_type = pointee; + break :good; + } + return generic_err; + }, + .C => unreachable, + }, + .Slice => switch (ptr_info.size) { + .One => { + // Only works for []T + *[n]T -> []T + const arr = cur_pointee_array orelse return generic_err; + if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, peer_info.pointee_type)) |pointee| { + ptr_info.size = .Slice; + ptr_info.pointee_type = pointee; + break :good; + } + if 
(arr.elem_ty.toIntern() == .noreturn_type) { + // []T + *struct{} -> []T + ptr_info.size = .Slice; + ptr_info.pointee_type = peer_info.pointee_type; + break :good; + } + return generic_err; + }, + .Many => { + // Impossible! (current peer is an actual slice) + return generic_err; + }, + .Slice => { + if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.pointee_type, peer_info.pointee_type)) |pointee| { + ptr_info.pointee_type = pointee; + break :good; + } + return generic_err; + }, + .C => unreachable, + }, + .C => unreachable, + } + } + + const sentinel_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Array) blk: { + break :blk ptr_info.pointee_type.childType(mod); + } else ptr_info.pointee_type; + + // TODO: once InternPool is in, we need to cast the sentinels to sentinel_ty + + sentinel: { + no_sentinel: { + if (peer_sentinel == null) break :no_sentinel; + if (cur_sentinel == null) break :no_sentinel; + const peer_sent_coerced = try mod.getCoerced(peer_sentinel.?, sentinel_ty); + const cur_sent_coerced = try mod.getCoerced(cur_sentinel.?, sentinel_ty); + if (!peer_sent_coerced.eql(cur_sent_coerced, sentinel_ty, mod)) break :no_sentinel; + // Sentinels match + if (ptr_info.size == .One) { + assert(ptr_info.pointee_type.zigTypeTag(mod) == .Array); + ptr_info.pointee_type = try mod.arrayType(.{ + .len = ptr_info.pointee_type.arrayLen(mod), + .child = ptr_info.pointee_type.childType(mod).toIntern(), + .sentinel = cur_sent_coerced.toIntern(), + }); + } else { + ptr_info.sentinel = cur_sent_coerced; + } + break :sentinel; + } + // Clear existing sentinel + ptr_info.sentinel = null; + if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { + ptr_info.pointee_type = try mod.arrayType(.{ + .len = ptr_info.pointee_type.arrayLen(mod), + .child = ptr_info.pointee_type.childType(mod).toIntern(), + .sentinel = .none, + }); + } + } + + opt_ptr_info = ptr_info; + } + + // Before we succeed, check the pointee type. If we tried to apply PTR to (for instance) + // &.{} and &.{}, we'll currently have a pointer type of `*[0]noreturn` - we wanted to + // coerce the empty struct to a specific type, but no peer provided one. We need to + // detect this case and emit an error. + const pointee = opt_ptr_info.?.pointee_type; + if (pointee.toIntern() == .noreturn_type or + (pointee.zigTypeTag(mod) == .Array and pointee.childType(mod).toIntern() == .noreturn_type)) + { + return .{ .conflict = .{ + .peer_idx_a = first_idx, + .peer_idx_b = other_idx, + } }; + } + + return .{ .success = try Type.ptr(sema.arena, mod, opt_ptr_info.?) }; + }, + + .func => { + var opt_cur_ty: ?Type = null; + var first_idx: usize = undefined; + for (peer_tys, 0..) |opt_ty, i| { + const ty = opt_ty orelse continue; + const cur_ty = opt_cur_ty orelse { + opt_cur_ty = ty; + first_idx = i; + continue; + }; + if (ty.zigTypeTag(mod) != .Fn) return .{ .conflict = .{ + .peer_idx_a = strat_reason, + .peer_idx_b = i, + } }; + // ty -> cur_ty + if (.ok == try sema.coerceInMemoryAllowedFns(block, cur_ty, ty, target, src, src)) { + continue; + } + // cur_ty -> ty + if (.ok == try sema.coerceInMemoryAllowedFns(block, ty, cur_ty, target, src, src)) { + opt_cur_ty = ty; + continue; + } + return .{ .conflict = .{ + .peer_idx_a = first_idx, + .peer_idx_b = i, + } }; + } + return .{ .success = opt_cur_ty.? }; + }, + + .enum_or_union => { + var opt_cur_ty: ?Type = null; + // The peer index which gave the current type + var cur_ty_idx: usize = undefined; + + for (peer_tys, 0..) 
|opt_ty, i| { + const ty = opt_ty orelse continue; + switch (ty.zigTypeTag(mod)) { + .EnumLiteral, .Enum, .Union => {}, + else => return .{ .conflict = .{ + .peer_idx_a = strat_reason, + .peer_idx_b = i, + } }, + } + const cur_ty = opt_cur_ty orelse { + opt_cur_ty = ty; + cur_ty_idx = i; + continue; + }; + + // We want to return this in a lot of cases, so alias it here for convenience + const generic_err: PeerResolveResult = .{ .conflict = .{ + .peer_idx_a = cur_ty_idx, + .peer_idx_b = i, + } }; + + switch (cur_ty.zigTypeTag(mod)) { + .EnumLiteral => { + opt_cur_ty = ty; + cur_ty_idx = i; + }, + .Enum => switch (ty.zigTypeTag(mod)) { + .EnumLiteral => {}, + .Enum => { + if (!ty.eql(cur_ty, mod)) return generic_err; + }, + .Union => { + const tag_ty = ty.unionTagTypeHypothetical(mod); + if (!tag_ty.eql(cur_ty, mod)) return generic_err; + opt_cur_ty = ty; + cur_ty_idx = i; + }, + else => unreachable, + }, + .Union => switch (ty.zigTypeTag(mod)) { + .EnumLiteral => {}, + .Enum => { + const cur_tag_ty = cur_ty.unionTagTypeHypothetical(mod); + if (!ty.eql(cur_tag_ty, mod)) return generic_err; + }, + .Union => { + if (!ty.eql(cur_ty, mod)) return generic_err; + }, + else => unreachable, + }, + else => unreachable, + } + } + return .{ .success = opt_cur_ty.? }; + }, + + .comptime_int => { + for (peer_tys, 0..) |opt_ty, i| { + const ty = opt_ty orelse continue; + switch (ty.zigTypeTag(mod)) { + .ComptimeInt => {}, + else => return .{ .conflict = .{ + .peer_idx_a = strat_reason, + .peer_idx_b = i, + } }, + } + } + return .{ .success = Type.comptime_int }; + }, + + .comptime_float => { + for (peer_tys, 0..) |opt_ty, i| { + const ty = opt_ty orelse continue; + switch (ty.zigTypeTag(mod)) { + .ComptimeInt, .ComptimeFloat => {}, + else => return .{ .conflict = .{ + .peer_idx_a = strat_reason, + .peer_idx_b = i, + } }, + } + } + return .{ .success = Type.comptime_float }; + }, + + .fixed_int => { + var idx_unsigned: ?usize = null; + var idx_signed: ?usize = null; + + // TODO: this is for compatibility with legacy behavior. See beneath the loop. + var any_comptime_known = false; + + for (peer_tys, peer_vals, 0..) |opt_ty, *ptr_opt_val, i| { + const ty = opt_ty orelse continue; + const opt_val = ptr_opt_val.*; + + const peer_tag = ty.zigTypeTag(mod); + switch (peer_tag) { + .ComptimeInt => { + // If the value is undefined, we can't refine to a fixed-width int + if (opt_val == null or opt_val.?.isUndef(mod)) return .{ .conflict = .{ + .peer_idx_a = strat_reason, + .peer_idx_b = i, + } }; + any_comptime_known = true; + ptr_opt_val.* = try sema.resolveLazyValue(opt_val.?); + continue; + }, + .Int => {}, + else => return .{ .conflict = .{ + .peer_idx_a = strat_reason, + .peer_idx_b = i, + } }, + } + + if (opt_val != null) any_comptime_known = true; + + const info = ty.intInfo(mod); + + const idx_ptr = switch (info.signedness) { + .unsigned => &idx_unsigned, + .signed => &idx_signed, + }; + + const largest_idx = idx_ptr.* orelse { + idx_ptr.* = i; + continue; + }; + + const cur_info = peer_tys[largest_idx].?.intInfo(mod); + if (info.bits > cur_info.bits) { + idx_ptr.* = i; + } + } + + if (idx_signed == null) { + return .{ .success = peer_tys[idx_unsigned.?].? }; + } + + if (idx_unsigned == null) { + return .{ .success = peer_tys[idx_signed.?].? }; + } + + const unsigned_info = peer_tys[idx_unsigned.?].?.intInfo(mod); + const signed_info = peer_tys[idx_signed.?].?.intInfo(mod); + if (signed_info.bits > unsigned_info.bits) { + return .{ .success = peer_tys[idx_signed.?].? 
}; + } + + // TODO: this is for compatibility with legacy behavior. Before this version of PTR was + // implemented, the algorithm very often returned false positives, with the expectation + // that you'd just hit a coercion error later. One of these was that for integers, the + // largest type would always be returned, even if it couldn't fit everything. This had + // an unintentional consequence to semantics, which is that if values were known at + // comptime, they would be coerced down to the smallest type where possible. This + // behavior is unintuitive and order-dependent, so in my opinion should be eliminated, + // but for now we'll retain compatibility. + if (any_comptime_known) { + if (unsigned_info.bits > signed_info.bits) { + return .{ .success = peer_tys[idx_unsigned.?].? }; + } + const idx = @min(idx_unsigned.?, idx_signed.?); + return .{ .success = peer_tys[idx].? }; + } + + return .{ .conflict = .{ + .peer_idx_a = idx_unsigned.?, + .peer_idx_b = idx_signed.?, + } }; + }, + + .fixed_float => { + var opt_cur_ty: ?Type = null; + + for (peer_tys, peer_vals, 0..) |opt_ty, opt_val, i| { + const ty = opt_ty orelse continue; + switch (ty.zigTypeTag(mod)) { + .ComptimeFloat, .ComptimeInt => {}, + .Int => { + if (opt_val == null) return .{ .conflict = .{ + .peer_idx_a = strat_reason, + .peer_idx_b = i, + } }; + }, + .Float => { + if (opt_cur_ty) |cur_ty| { + if (cur_ty.eql(ty, mod)) continue; + // Recreate the type so we eliminate any c_longdouble + const bits = @max(cur_ty.floatBits(target), ty.floatBits(target)); + opt_cur_ty = switch (bits) { + 16 => Type.f16, + 32 => Type.f32, + 64 => Type.f64, + 80 => Type.f80, + 128 => Type.f128, + else => unreachable, + }; + } else { + opt_cur_ty = ty; + } + }, + else => return .{ .conflict = .{ + .peer_idx_a = strat_reason, + .peer_idx_b = i, + } }, + } + } + + // Note that fixed_float is only chosen if there is at least one fixed-width float peer, + // so opt_cur_ty must be non-null. + return .{ .success = opt_cur_ty.? }; + }, + + .coercible_struct => { + // First, check that every peer has the same approximate structure (field count and names) + + var opt_first_idx: ?usize = null; + var is_tuple: bool = undefined; + var field_count: usize = undefined; + // Only defined for non-tuples. + var field_names: []InternPool.NullTerminatedString = undefined; + + for (peer_tys, 0..) |opt_ty, i| { + const ty = opt_ty orelse continue; + + if (!ty.isTupleOrAnonStruct(mod)) { + return .{ .conflict = .{ + .peer_idx_a = strat_reason, + .peer_idx_b = i, + } }; + } + + const first_idx = opt_first_idx orelse { + opt_first_idx = i; + is_tuple = ty.isTuple(mod); + field_count = ty.structFieldCount(mod); + if (!is_tuple) { + const names = mod.intern_pool.indexToKey(ty.toIntern()).anon_struct_type.names; + field_names = try sema.arena.dupe(InternPool.NullTerminatedString, names); + } + continue; + }; + + if (ty.isTuple(mod) != is_tuple or ty.structFieldCount(mod) != field_count) { + return .{ .conflict = .{ + .peer_idx_a = first_idx, + .peer_idx_b = i, + } }; + } + + if (!is_tuple) { + for (field_names, 0..) 
|expected, field_idx| { + const actual = ty.structFieldName(field_idx, mod); + if (actual == expected) continue; + return .{ .conflict = .{ + .peer_idx_a = first_idx, + .peer_idx_b = i, + } }; + } + } + } + + assert(opt_first_idx != null); + + // Now, we'll recursively resolve the field types + const field_types = try sema.arena.alloc(InternPool.Index, field_count); + // Values for `comptime` fields - `.none` used for non-comptime fields + const field_vals = try sema.arena.alloc(InternPool.Index, field_count); + const sub_peer_tys = try sema.arena.alloc(?Type, peer_tys.len); + const sub_peer_vals = try sema.arena.alloc(?Value, peer_vals.len); + + for (field_types, field_vals, 0..) |*field_ty, *field_val, field_idx| { + // Fill buffers with types and values of the field + for (peer_tys, peer_vals, sub_peer_tys, sub_peer_vals) |opt_ty, opt_val, *peer_field_ty, *peer_field_val| { + const ty = opt_ty orelse { + peer_field_ty.* = null; + peer_field_val.* = null; + continue; + }; + peer_field_ty.* = ty.structFieldType(field_idx, mod); + peer_field_val.* = if (opt_val) |val| try val.fieldValue(mod, field_idx) else null; + } + + // Resolve field type recursively + field_ty.* = switch (try sema.resolvePeerTypesInner(block, src, sub_peer_tys, sub_peer_vals)) { + .success => |ty| ty.toIntern(), + else => |result| { + const result_buf = try sema.arena.create(PeerResolveResult); + result_buf.* = result; + const field_name = if (is_tuple) name: { + break :name try std.fmt.allocPrint(sema.arena, "{d}", .{field_idx}); + } else try sema.arena.dupe(u8, mod.intern_pool.stringToSlice(field_names[field_idx])); + + // The error info needs the field types, but we can't reuse sub_peer_tys + // since the recursive call may have clobbered it. + const peer_field_tys = try sema.arena.alloc(Type, peer_tys.len); + for (peer_tys, peer_field_tys) |opt_ty, *peer_field_ty| { + // Already-resolved types won't be referenced by the error so it's fine + // to leave them undefined. + const ty = opt_ty orelse continue; + peer_field_ty.* = ty.structFieldType(field_idx, mod); + } + + return .{ .field_error = .{ + .field_name = field_name, + .field_types = peer_field_tys, + .sub_result = result_buf, + } }; + }, + }; + + // Decide if this is a comptime field. If it is comptime in all peers, and the + // coerced comptime values are all the same, we say it is comptime, else not. + + var comptime_val: ?Value = null; + for (peer_tys) |opt_ty| { + const struct_ty = opt_ty orelse continue; + const uncoerced_field_val = try struct_ty.structFieldValueComptime(mod, field_idx) orelse { + comptime_val = null; + break; + }; + const uncoerced_field_ty = struct_ty.structFieldType(field_idx, mod); + const uncoerced_field = try sema.addConstant(uncoerced_field_ty, uncoerced_field_val); + const coerced_inst = sema.coerceExtra(block, field_ty.toType(), uncoerced_field, src, .{ .report_err = false }) catch |err| switch (err) { + // It's possible for PTR to give false positives. 
Just give up on making this a comptime field, we'll get an error later anyway + error.NotCoercible => { + comptime_val = null; + break; + }, + else => |e| return e, + }; + const coerced_val = (try sema.resolveMaybeUndefVal(coerced_inst)) orelse continue; + const existing = comptime_val orelse { + comptime_val = coerced_val; + continue; + }; + if (!coerced_val.eql(existing, field_ty.toType(), mod)) { + comptime_val = null; + break; + } + } + + field_val.* = if (comptime_val) |v| v.toIntern() else .none; + } + + const final_ty = try mod.intern(.{ .anon_struct_type = .{ + .types = field_types, + .names = if (is_tuple) &.{} else field_names, + .values = field_vals, + } }); + + return .{ .success = final_ty.toType() }; + }, + + .exact => { + var expect_ty: ?Type = null; + var first_idx: usize = undefined; + for (peer_tys, 0..) |opt_ty, i| { + const ty = opt_ty orelse continue; + if (expect_ty) |expect| { + if (!ty.eql(expect, mod)) return .{ .conflict = .{ + .peer_idx_a = first_idx, + .peer_idx_b = i, + } }; + } else { + expect_ty = ty; + first_idx = i; + } + } + return .{ .success = expect_ty.? }; + }, + } +} + +fn maybeMergeErrorSets(sema: *Sema, block: *Block, src: LazySrcLoc, e0: Type, e1: Type) !Type { + // e0 -> e1 + if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, e1, e0, src, src)) { + return e1; + } + + // e1 -> e0 + if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, e0, e1, src, src)) { + return e0; + } + + return sema.errorSetMerge(e0, e1); +} + +fn resolvePairInMemoryCoercible(sema: *Sema, block: *Block, src: LazySrcLoc, ty_a: Type, ty_b: Type) !?Type { + // ty_b -> ty_a + if (.ok == try sema.coerceInMemoryAllowed(block, ty_a, ty_b, true, sema.mod.getTarget(), src, src)) { + return ty_a; + } + + // ty_a -> ty_b + if (.ok == try sema.coerceInMemoryAllowed(block, ty_b, ty_a, true, sema.mod.getTarget(), src, src)) { + return ty_b; + } + + return null; +} + +const ArrayLike = struct { + len: u64, + /// `noreturn` indicates that this type is `struct{}` so can coerce to anything + elem_ty: Type, +}; +fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike { + const mod = sema.mod; + return switch (ty.zigTypeTag(mod)) { + .Array => .{ + .len = ty.arrayLen(mod), + .elem_ty = ty.childType(mod), + }, + .Struct => { + const field_count = ty.structFieldCount(mod); + if (field_count == 0) return .{ + .len = 0, + .elem_ty = Type.noreturn, + }; + if (!ty.isTuple(mod)) return null; + const elem_ty = ty.structFieldType(0, mod); + for (1..field_count) |i| { + if (!ty.structFieldType(i, mod).eql(elem_ty, mod)) { + return null; + } + } + return .{ + .len = field_count, + .elem_ty = elem_ty, + }; + }, + else => null, + }; } pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void { @@ -34596,7 +35373,7 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value // Move mutable decl values to the InternPool and assert other decls are already in // the InternPool. 
const uncoerced_val = if (deref.is_mutable) try tv.val.intern(tv.ty, mod) else tv.val.toIntern(); - const coerced_val = try sema.coerceValueInMemory(block, uncoerced_val.toValue(), tv.ty, load_ty, src); + const coerced_val = try mod.getCoerced(uncoerced_val.toValue(), load_ty); return .{ .val = coerced_val }; } } diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 9c7d8412f6..5123e190b6 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -1,6 +1,9 @@ const builtin = @import("builtin"); const std = @import("std"); +const assert = std.debug.assert; const expect = std.testing.expect; +const expectEqual = std.testing.expectEqual; +const expectEqualSlices = std.testing.expectEqualSlices; const mem = std.mem; const maxInt = std.math.maxInt; const native_endian = builtin.target.cpu.arch.endian(); @@ -1609,3 +1612,610 @@ test "coercion from single-item pointer to @as to slice" { try expect(t[0] == 1); } + +test "peer type resolution: const sentinel slice and mutable non-sentinel slice" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + const S = struct { + fn doTheTest(comptime T: type, comptime s: T) !void { + var a: [:s]const T = @intToPtr(*const [2:s]T, 0x1000); + var b: []T = @intToPtr(*[3]T, 0x2000); + comptime assert(@TypeOf(a, b) == []const T); + comptime assert(@TypeOf(b, a) == []const T); + + var t = true; + const r1 = if (t) a else b; + const r2 = if (t) b else a; + + const R = @TypeOf(r1); + + try expectEqual(@as(R, @intToPtr(*const [2:s]T, 0x1000)), r1); + try expectEqual(@as(R, @intToPtr(*const [3]T, 0x2000)), r2); + } + }; + + try S.doTheTest(u8, 0); + try S.doTheTest(?*anyopaque, null); +} + +test "peer type resolution: float and comptime-known fixed-width integer" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + const i: u8 = 100; + var f: f32 = 1.234; + comptime assert(@TypeOf(i, f) == f32); + comptime assert(@TypeOf(f, i) == f32); + + var t = true; + const r1 = if (t) i else f; + const r2 = if (t) f else i; + + const T = @TypeOf(r1); + + try expectEqual(@as(T, 100.0), r1); + try expectEqual(@as(T, 1.234), r2); +} + +test "peer type resolution: same array type with sentinel" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + var a: [2:0]u32 = .{ 0, 1 }; + var b: [2:0]u32 = .{ 2, 3 }; + 
comptime assert(@TypeOf(a, b) == [2:0]u32); + comptime assert(@TypeOf(b, a) == [2:0]u32); + + var t = true; + const r1 = if (t) a else b; + const r2 = if (t) b else a; + + const T = @TypeOf(r1); + + try expectEqual(T{ 0, 1 }, r1); + try expectEqual(T{ 2, 3 }, r2); +} + +test "peer type resolution: array with sentinel and array without sentinel" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + var a: [2:0]u32 = .{ 0, 1 }; + var b: [2]u32 = .{ 2, 3 }; + comptime assert(@TypeOf(a, b) == [2]u32); + comptime assert(@TypeOf(b, a) == [2]u32); + + var t = true; + const r1 = if (t) a else b; + const r2 = if (t) b else a; + + const T = @TypeOf(r1); + + try expectEqual(T{ 0, 1 }, r1); + try expectEqual(T{ 2, 3 }, r2); +} + +test "peer type resolution: array and vector with same child type" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + var arr: [2]u32 = .{ 0, 1 }; + var vec: @Vector(2, u32) = .{ 2, 3 }; + comptime assert(@TypeOf(arr, vec) == @Vector(2, u32)); + comptime assert(@TypeOf(vec, arr) == @Vector(2, u32)); + + var t = true; + const r1 = if (t) arr else vec; + const r2 = if (t) vec else arr; + + const T = @TypeOf(r1); + + try expectEqual(T{ 0, 1 }, r1); + try expectEqual(T{ 2, 3 }, r2); +} + +test "peer type resolution: array with smaller child type and vector with larger child type" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + var arr: [2]u8 = .{ 0, 1 }; + var vec: @Vector(2, u64) = .{ 2, 3 }; + comptime assert(@TypeOf(arr, vec) == @Vector(2, u64)); + comptime assert(@TypeOf(vec, arr) == @Vector(2, u64)); + + var t = true; + const r1 = if (t) arr else vec; + const r2 = if (t) vec else arr; + + const T = @TypeOf(r1); + + try expectEqual(T{ 0, 1 }, r1); + try expectEqual(T{ 2, 3 }, r2); +} + +test "peer type resolution: error union and optional of same type" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + 
+ const E = error{Foo}; + var a: E!*u8 = error.Foo; + var b: ?*u8 = null; + comptime assert(@TypeOf(a, b) == E!?*u8); + comptime assert(@TypeOf(b, a) == E!?*u8); + + var t = true; + const r1 = if (t) a else b; + const r2 = if (t) b else a; + + const T = @TypeOf(r1); + + try expectEqual(@as(T, error.Foo), r1); + try expectEqual(@as(T, null), r2); +} + +test "peer type resolution: C pointer and @TypeOf(null)" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + var a: [*c]c_int = 0x1000; + const b = null; + comptime assert(@TypeOf(a, b) == [*c]c_int); + comptime assert(@TypeOf(b, a) == [*c]c_int); + + var t = true; + const r1 = if (t) a else b; + const r2 = if (t) b else a; + + const T = @TypeOf(r1); + + try expectEqual(@as(T, 0x1000), r1); + try expectEqual(@as(T, null), r2); +} + +test "peer type resolution: three-way resolution combines error set and optional" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + const E = error{Foo}; + var a: E = error.Foo; + var b: *const [5:0]u8 = @intToPtr(*const [5:0]u8, 0x1000); + var c: ?[*:0]u8 = null; + comptime assert(@TypeOf(a, b, c) == E!?[*:0]const u8); + comptime assert(@TypeOf(a, c, b) == E!?[*:0]const u8); + comptime assert(@TypeOf(b, a, c) == E!?[*:0]const u8); + comptime assert(@TypeOf(b, c, a) == E!?[*:0]const u8); + comptime assert(@TypeOf(c, a, b) == E!?[*:0]const u8); + comptime assert(@TypeOf(c, b, a) == E!?[*:0]const u8); + + var x: u8 = 0; + const r1 = switch (x) { + 0 => a, + 1 => b, + else => c, + }; + const r2 = switch (x) { + 0 => b, + 1 => a, + else => c, + }; + const r3 = switch (x) { + 0 => c, + 1 => a, + else => b, + }; + + const T = @TypeOf(r1); + + try expectEqual(@as(T, error.Foo), r1); + try expectEqual(@as(T, @intToPtr([*:0]u8, 0x1000)), r2); + try expectEqual(@as(T, null), r3); +} + +test "peer type resolution: vector and optional vector" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + var a: ?@Vector(3, u32) = .{ 0, 1, 2 }; + var b: @Vector(3, u32) = .{ 3, 4, 5 }; + comptime assert(@TypeOf(a, b) == ?@Vector(3, u32)); + comptime assert(@TypeOf(b, a) == ?@Vector(3, u32)); + + var t = true; + const r1 = if (t) a else b; + const r2 = if (t) b else a; + + const T = @TypeOf(r1); + + try expectEqual(@as(T, .{ 0, 1, 2 }), r1); + try expectEqual(@as(T, .{ 3, 4, 5 }), 
r2); +} + +test "peer type resolution: optional fixed-width int and comptime_int" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + var a: ?i32 = 42; + const b: comptime_int = 50; + comptime assert(@TypeOf(a, b) == ?i32); + comptime assert(@TypeOf(b, a) == ?i32); + + var t = true; + const r1 = if (t) a else b; + const r2 = if (t) b else a; + + const T = @TypeOf(r1); + + try expectEqual(@as(T, 42), r1); + try expectEqual(@as(T, 50), r2); +} + +test "peer type resolution: array and tuple" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + var arr: [3]i32 = .{ 1, 2, 3 }; + const tup = .{ 4, 5, 6 }; + + comptime assert(@TypeOf(arr, tup) == [3]i32); + comptime assert(@TypeOf(tup, arr) == [3]i32); + + var t = true; + const r1 = if (t) arr else tup; + const r2 = if (t) tup else arr; + + const T = @TypeOf(r1); + + try expectEqual(T{ 1, 2, 3 }, r1); + try expectEqual(T{ 4, 5, 6 }, r2); +} + +test "peer type resolution: vector and tuple" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + var vec: @Vector(3, i32) = .{ 1, 2, 3 }; + const tup = .{ 4, 5, 6 }; + + comptime assert(@TypeOf(vec, tup) == @Vector(3, i32)); + comptime assert(@TypeOf(tup, vec) == @Vector(3, i32)); + + var t = true; + const r1 = if (t) vec else tup; + const r2 = if (t) tup else vec; + + const T = @TypeOf(r1); + + try expectEqual(T{ 1, 2, 3 }, r1); + try expectEqual(T{ 4, 5, 6 }, r2); +} + +test "peer type resolution: vector and array and tuple" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + var vec: @Vector(2, i8) = .{ 10, 20 }; + var arr: [2]i8 = .{ 30, 40 }; + const tup = .{ 50, 60 }; + + comptime assert(@TypeOf(vec, arr, tup) == @Vector(2, i8)); + comptime assert(@TypeOf(vec, tup, arr) == @Vector(2, i8)); + comptime assert(@TypeOf(arr, vec, tup) == @Vector(2, i8)); + comptime assert(@TypeOf(arr, tup, vec) == @Vector(2, i8)); + comptime 
assert(@TypeOf(tup, vec, arr) == @Vector(2, i8)); + comptime assert(@TypeOf(tup, arr, vec) == @Vector(2, i8)); + + var x: u8 = 0; + const r1 = switch (x) { + 0 => vec, + 1 => arr, + else => tup, + }; + const r2 = switch (x) { + 0 => arr, + 1 => vec, + else => tup, + }; + const r3 = switch (x) { + 0 => tup, + 1 => vec, + else => arr, + }; + + const T = @TypeOf(r1); + + try expectEqual(T{ 10, 20 }, r1); + try expectEqual(T{ 30, 40 }, r2); + try expectEqual(T{ 50, 60 }, r3); +} + +test "peer type resolution: empty tuple pointer and slice" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + var a: [:0]const u8 = "Hello"; + var b = &.{}; + + comptime assert(@TypeOf(a, b) == []const u8); + comptime assert(@TypeOf(b, a) == []const u8); + + var t = true; + const r1 = if (t) a else b; + const r2 = if (t) b else a; + + try expectEqualSlices(u8, "Hello", r1); + try expectEqualSlices(u8, "", r2); +} + +test "peer type resolution: tuple pointer and slice" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + var a: [:0]const u8 = "Hello"; + var b = &.{ @as(u8, 'x'), @as(u8, 'y'), @as(u8, 'z') }; + + comptime assert(@TypeOf(a, b) == []const u8); + comptime assert(@TypeOf(b, a) == []const u8); + + var t = true; + const r1 = if (t) a else b; + const r2 = if (t) b else a; + + try expectEqualSlices(u8, "Hello", r1); + try expectEqualSlices(u8, "xyz", r2); +} + +test "peer type resolution: tuple pointer and optional slice" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + var a: ?[:0]const u8 = null; + var b = &.{ @as(u8, 'x'), @as(u8, 'y'), @as(u8, 'z') }; + + comptime assert(@TypeOf(a, b) == ?[]const u8); + comptime assert(@TypeOf(b, a) == ?[]const u8); + + var t = true; + const r1 = if (t) a else b; + const r2 = if (t) b else a; + + try expectEqual(@as(?[]const u8, null), r1); + try expectEqualSlices(u8, "xyz", r2 orelse ""); +} + +test "peer type resolution: many compatible pointers" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return 
error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + var buf = "foo-3".*; + + var vals = .{ + @as([*]const u8, "foo-0"), + @as([*:0]const u8, "foo-1"), + @as([*:0]const u8, "foo-2"), + @as([*]u8, &buf), + @as(*const [5]u8, "foo-4"), + }; + + // Check every possible permutation of types in @TypeOf + @setEvalBranchQuota(5000); + comptime var perms = 0; // check the loop is hitting every permutation + inline for (0..5) |i_0| { + inline for (0..5) |i_1| { + if (i_1 == i_0) continue; + inline for (0..5) |i_2| { + if (i_2 == i_0 or i_2 == i_1) continue; + inline for (0..5) |i_3| { + if (i_3 == i_0 or i_3 == i_1 or i_3 == i_2) continue; + inline for (0..5) |i_4| { + if (i_4 == i_0 or i_4 == i_1 or i_4 == i_2 or i_4 == i_3) continue; + perms += 1; + comptime assert(@TypeOf( + vals[i_0], + vals[i_1], + vals[i_2], + vals[i_3], + vals[i_4], + ) == [*]const u8); + } + } + } + } + } + comptime assert(perms == 5 * 4 * 3 * 2 * 1); + + var x: u8 = 0; + inline for (0..5) |i| { + const r = switch (x) { + 0 => vals[i], + 1 => vals[0], + 2 => vals[1], + 3 => vals[2], + 4 => vals[3], + else => vals[4], + }; + const expected = switch (i) { + 0 => "foo-0", + 1 => "foo-1", + 2 => "foo-2", + 3 => "foo-3", + 4 => "foo-4", + else => unreachable, + }; + try expectEqualSlices(u8, expected, std.mem.span(@ptrCast([*:0]const u8, r))); + } +} + +test "peer type resolution: tuples with comptime fields" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + const a = .{ 1, 2 }; + const b = .{ @as(u32, 3), @as(i16, 4) }; + + // TODO: tuple type equality doesn't work properly yet + const ti1 = @typeInfo(@TypeOf(a, b)); + const ti2 = @typeInfo(@TypeOf(b, a)); + inline for (.{ ti1, ti2 }) |ti| { + const s = ti.Struct; + comptime assert(s.is_tuple); + comptime assert(s.fields.len == 2); + comptime assert(s.fields[0].type == u32); + comptime assert(s.fields[1].type == i16); + } + + var t = true; + const r1 = if (t) a else b; + const r2 = if (t) b else a; + + try expectEqual(@as(u32, 1), r1[0]); + try expectEqual(@as(i16, 2), r1[1]); + + try expectEqual(@as(u32, 3), r2[0]); + try expectEqual(@as(i16, 4), r2[1]); +} + +test "peer type resolution: C pointer and many pointer" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + var buf = "hello".*; + + var a: [*c]u8 = &buf; + var b: [*:0]const u8 = "world"; + + comptime assert(@TypeOf(a, b) == [*c]const u8); + comptime assert(@TypeOf(b, a) == [*c]const u8); + + var t = true; + const r1 = if (t) a else b; + const r2 = if (t) b else a; + + try expectEqual(r1, a); + try expectEqual(r2, b); +} + +test "peer type resolution: pointer attributes are combined correctly" { + if (builtin.zig_backend == 
.stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + var buf_a align(4) = "foo".*; + var buf_b align(4) = "bar".*; + var buf_c align(4) = "baz".*; + + var a: [*:0]align(4) const u8 = &buf_a; + var b: *align(2) volatile [3:0]u8 = &buf_b; + var c: [*:0]align(4) u8 = &buf_c; + + comptime assert(@TypeOf(a, b, c) == [*:0]align(2) const volatile u8); + comptime assert(@TypeOf(a, c, b) == [*:0]align(2) const volatile u8); + comptime assert(@TypeOf(b, a, c) == [*:0]align(2) const volatile u8); + comptime assert(@TypeOf(b, c, a) == [*:0]align(2) const volatile u8); + comptime assert(@TypeOf(c, a, b) == [*:0]align(2) const volatile u8); + comptime assert(@TypeOf(c, b, a) == [*:0]align(2) const volatile u8); + + var x: u8 = 0; + const r1 = switch (x) { + 0 => a, + 1 => b, + else => c, + }; + const r2 = switch (x) { + 0 => b, + 1 => a, + else => c, + }; + const r3 = switch (x) { + 0 => c, + 1 => a, + else => b, + }; + + try expectEqualSlices(u8, std.mem.span(@volatileCast(r1)), "foo"); + try expectEqualSlices(u8, std.mem.span(@volatileCast(r2)), "bar"); + try expectEqualSlices(u8, std.mem.span(@volatileCast(r3)), "baz"); +} diff --git a/test/cases/compile_errors/compare_optional_to_non-optional_with_invalid_types.zig b/test/cases/compile_errors/compare_optional_to_non-optional_with_invalid_types.zig deleted file mode 100644 index 0339ac034e..0000000000 --- a/test/cases/compile_errors/compare_optional_to_non-optional_with_invalid_types.zig +++ /dev/null @@ -1,37 +0,0 @@ -export fn inconsistentChildType() void { - var x: ?i32 = undefined; - const y: comptime_int = 10; - _ = (x == y); -} -export fn optionalToOptional() void { - var x: ?i32 = undefined; - var y: ?i32 = undefined; - _ = (x == y); -} -export fn optionalVector() void { - var x: ?@Vector(10, i32) = undefined; - var y: @Vector(10, i32) = undefined; - _ = (x == y); -} -export fn optionalVector2() void { - var x: ?@Vector(10, i32) = undefined; - var y: @Vector(11, i32) = undefined; - _ = (x == y); -} -export fn invalidChildType() void { - var x: ?[3]i32 = undefined; - var y: [3]i32 = undefined; - _ = (x == y); -} - -// error -// backend=llvm -// target=native -// -// :4:12: error: incompatible types: '?i32' and 'comptime_int' -// :4:10: note: type '?i32' here -// :4:15: note: type 'comptime_int' here -// :19:12: error: incompatible types: '?@Vector(10, i32)' and '@Vector(11, i32)' -// :19:10: note: type '?@Vector(10, i32)' here -// :19:15: note: type '@Vector(11, i32)' here -// :24:12: error: operator == not allowed for type '?[3]i32' diff --git a/test/cases/compile_errors/compare_optional_to_non_optional_with_incomparable_type.zig b/test/cases/compile_errors/compare_optional_to_non_optional_with_incomparable_type.zig new file mode 100644 index 0000000000..653b7ffcfb --- /dev/null +++ b/test/cases/compile_errors/compare_optional_to_non_optional_with_incomparable_type.zig @@ -0,0 +1,11 @@ +export fn entry() void { + var x: ?[3]i32 = undefined; + var y: [3]i32 = undefined; + _ = (x == y); +} + +// error +// backend=llvm +// target=native +// +// :4:12: error: operator == not allowed for type '?[3]i32' diff --git 
a/test/cases/compile_errors/invalid_peer_type_resolution.zig b/test/cases/compile_errors/invalid_peer_type_resolution.zig new file mode 100644 index 0000000000..d82986a60a --- /dev/null +++ b/test/cases/compile_errors/invalid_peer_type_resolution.zig @@ -0,0 +1,50 @@ +export fn optionalVector() void { + var x: ?@Vector(10, i32) = undefined; + var y: @Vector(11, i32) = undefined; + _ = @TypeOf(x, y); +} +export fn badTupleField() void { + var x = .{ @as(u8, 0), @as(u32, 1) }; + var y = .{ @as(u8, 1), "hello" }; + _ = @TypeOf(x, y); +} +export fn badNestedField() void { + const x = .{ .foo = "hi", .bar = .{ 0, 1 } }; + const y = .{ .foo = "hello", .bar = .{ 2, "hi" } }; + _ = @TypeOf(x, y); +} +export fn incompatiblePointers() void { + const x: []const u8 = "foo"; + const y: [*:0]const u8 = "bar"; + _ = @TypeOf(x, y); +} +export fn incompatiblePointers4() void { + const a: *const [5]u8 = "hello"; + const b: *const [3:0]u8 = "foo"; + const c: []const u8 = "baz"; // The conflict must be reported against this element! + const d: [*]const u8 = "bar"; + _ = @TypeOf(a, b, c, d); +} + +// error +// backend=llvm +// target=native +// +// :4:9: error: incompatible types: '?@Vector(10, i32)' and '@Vector(11, i32)' +// :4:17: note: type '?@Vector(10, i32)' here +// :4:20: note: type '@Vector(11, i32)' here +// :9:9: error: struct field '1' has conflicting types +// :9:9: note: incompatible types: 'u32' and '*const [5:0]u8' +// :9:17: note: type 'u32' here +// :9:20: note: type '*const [5:0]u8' here +// :14:9: error: struct field 'bar' has conflicting types +// :14:9: note: struct field '1' has conflicting types +// :14:9: note: incompatible types: 'comptime_int' and '*const [2:0]u8' +// :14:17: note: type 'comptime_int' here +// :14:20: note: type '*const [2:0]u8' here +// :19:9: error: incompatible types: '[]const u8' and '[*:0]const u8' +// :19:17: note: type '[]const u8' here +// :19:20: note: type '[*:0]const u8' here +// :26:9: error: incompatible types: '[]const u8' and '[*]const u8' +// :26:23: note: type '[]const u8' here +// :26:26: note: type '[*]const u8' here
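For illustration, a minimal sketch (not part of the patch) of the `.fixed_int` and `.fixed_float` rules implemented in `resolvePeerTypesInner` above. The test name and variable names here are invented for the example; the middle assertion exercises the legacy-compatibility branch described in the in-code comment (an unsigned peer wider than the signed peer, accepted only because one value is comptime-known), so it reflects this patch's behavior and may not hold under a compiler with different peer-resolution rules.

const std = @import("std");
const assert = std.debug.assert;

test "sketch: fixed-width int and float peer resolution" {
    // A wider signed peer can represent every value of a narrower unsigned
    // peer, so the signed type wins regardless of comptime-known values.
    var small: u8 = 1;
    var wide: i32 = 2;
    comptime assert(@TypeOf(small, wide) == i32);

    // Unsigned peer wider than the signed peer: resolved only because the
    // signed value is comptime-known (legacy-compatibility branch); the
    // unsigned type is returned even though it cannot hold -1, so actually
    // merging the two values would still hit a coercion error later.
    var runtime_unsigned: u32 = 3;
    const comptime_signed: i16 = -1;
    comptime assert(@TypeOf(runtime_unsigned, comptime_signed) == u32);

    // Fixed-width floats resolve to the widest peer type.
    var f_small: f32 = 1.5;
    var f_wide: f64 = 2.5;
    comptime assert(@TypeOf(f_small, f_wide) == f64);
}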