From 6917a8c25824d12f00327171b583d6cd9a830c29 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 29 Jul 2023 06:03:51 +0100 Subject: [PATCH 1/6] AstGen: handle `ty` result location for struct and array init correctly Well, this was a journey! The original issue I was trying to fix is covered by the new behavior test in array.zig: in essence, `ty` and `coerced_ty` result locations were not correctly propagated. While fixing this, I noticed a similar bug in struct inits: the type was propagated to *fields* fine, but the actual struct init was unnecessarily anonymous, which could lead to unnecessary copies. Note that the behavior test added in struct.zig was already passing - the bug here didn't change any easy-to-test behavior - but I figured I'd add it anyway. This is a little harder than it seems, because the result type may not itself be an array/struct type: it could be an optional / error union wrapper. A new ZIR instruction is introduced to unwrap these. This is also made a little tricky by the fact that it's possible for result types to be unknown at the time of semantic analysis (due to `anytype` parameters), leading to generic poison. In these cases, we must essentially downgrade to an anonymous initialization. Fixing these issues exposed *another* bug, related to type resolution in Sema. That issue is now tracked by #16603. As a temporary workaround for this bug, a few result locations for builtin function operands have been disabled in AstGen. This is technically a breaking change, but it's very minor: I doubt it'll cause any breakage in the wild. --- src/AstGen.zig | 43 +++++++++--- src/Sema.zig | 140 +++++++++++++++++++++++++++++++++------ src/Zir.zig | 9 +++ src/print_zir.zig | 1 + src/type.zig | 1 + test/behavior/array.zig | 14 ++++ test/behavior/struct.zig | 14 ++++ 7 files changed, 192 insertions(+), 30 deletions(-) diff --git a/src/AstGen.zig b/src/AstGen.zig index 667137bd61..858b94622d 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -1509,9 +1509,11 @@ fn arrayInitExpr( const tag: Zir.Inst.Tag = if (types.array != .none) .array_init else .array_init_anon; return arrayInitExprInner(gz, scope, node, array_init.ast.elements, types.array, types.elem, tag); }, - .ty, .coerced_ty => { - const tag: Zir.Inst.Tag = if (types.array != .none) .array_init else .array_init_anon; - const result = try arrayInitExprInner(gz, scope, node, array_init.ast.elements, types.array, types.elem, tag); + .ty, .coerced_ty => |ty_inst| { + const arr_ty = if (types.array != .none) types.array else blk: { + break :blk try gz.addUnNode(.opt_eu_base_ty, ty_inst, node); + }; + const result = try arrayInitExprInner(gz, scope, node, array_init.ast.elements, arr_ty, types.elem, .array_init); return rvalue(gz, ri, result, node); }, .ptr => |ptr_res| { @@ -1748,7 +1750,9 @@ fn structInitExpr( }, .ty, .coerced_ty => |ty_inst| { if (struct_init.ast.type_expr == 0) { - const result = try structInitExprRlNone(gz, scope, node, struct_init, ty_inst, .struct_init_anon); + const struct_ty_inst = try gz.addUnNode(.opt_eu_base_ty, ty_inst, node); + _ = try gz.addUnNode(.validate_struct_init_ty, struct_ty_inst, node); + const result = try structInitExprRlTy(gz, scope, node, struct_init, struct_ty_inst, .struct_init); return rvalue(gz, ri, result, node); } const inner_ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); @@ -2743,6 +2747,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As .for_len, .@"try", .try_ptr, + .opt_eu_base_ty, => break :b false, .extended => switch 
(gz.astgen.instructions.items(.data)[inst].extended.opcode) { @@ -8314,7 +8319,10 @@ fn builtinCall( local_val.used = ident_token; _ = try gz.addPlNode(.export_value, node, Zir.Inst.ExportValue{ .operand = local_val.inst, - .options = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .export_options_type } }, params[1]), + // TODO: the result location here should be `.{ .coerced_ty = .export_options_type }`, but + // that currently hits assertions in Sema due to type resolution issues. + // See #16603 + .options = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]), }); return rvalue(gz, ri, .void_value, node); } @@ -8329,7 +8337,10 @@ fn builtinCall( const loaded = try gz.addUnNode(.load, local_ptr.ptr, node); _ = try gz.addPlNode(.export_value, node, Zir.Inst.ExportValue{ .operand = loaded, - .options = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .export_options_type } }, params[1]), + // TODO: the result location here should be `.{ .coerced_ty = .export_options_type }`, but + // that currently hits assertions in Sema due to type resolution issues. + // See #16603 + .options = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]), }); return rvalue(gz, ri, .void_value, node); } @@ -8363,7 +8374,10 @@ fn builtinCall( }, else => return astgen.failNode(params[0], "symbol to export must identify a declaration", .{}), } - const options = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .export_options_type } }, params[1]); + // TODO: the result location here should be `.{ .coerced_ty = .export_options_type }`, but + // that currently hits assertions in Sema due to type resolution issues. + // See #16603 + const options = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]); _ = try gz.addPlNode(.@"export", node, Zir.Inst.Export{ .namespace = namespace, .decl_name = decl_name, @@ -8373,7 +8387,10 @@ fn builtinCall( }, .@"extern" => { const type_inst = try typeExpr(gz, scope, params[0]); - const options = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .extern_options_type } }, params[1]); + // TODO: the result location here should be `.{ .coerced_ty = .extern_options_type }`, but + // that currently hits assertions in Sema due to type resolution issues. + // See #16603 + const options = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]); const result = try gz.addExtendedPayload(.builtin_extern, Zir.Inst.BinNode{ .node = gz.nodeIndexToRelative(node), .lhs = type_inst, @@ -8477,7 +8494,10 @@ fn builtinCall( // zig fmt: on .Type => { - const operand = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .type_info_type } }, params[0]); + // TODO: the result location here should be `.{ .coerced_ty = .type_info_type }`, but + // that currently hits assertions in Sema due to type resolution issues. + // See #16603 + const operand = try expr(gz, scope, .{ .rl = .none }, params[0]); const gpa = gz.astgen.gpa; @@ -8755,7 +8775,10 @@ fn builtinCall( }, .prefetch => { const ptr = try expr(gz, scope, .{ .rl = .none }, params[0]); - const options = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .prefetch_options_type } }, params[1]); + // TODO: the result location here should be `.{ .coerced_ty = .prefetch_options_type }`, but + // that currently hits assertions in Sema due to type resolution issues.
+ // See #16603 + const options = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]); _ = try gz.addExtendedPayload(.prefetch, Zir.Inst.BinNode{ .node = gz.nodeIndexToRelative(node), .lhs = ptr, diff --git a/src/Sema.zig b/src/Sema.zig index ac9886fa62..ab01d618e8 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1125,6 +1125,7 @@ fn analyzeBodyInner( .array_base_ptr => try sema.zirArrayBasePtr(block, inst), .field_base_ptr => try sema.zirFieldBasePtr(block, inst), .for_len => try sema.zirForLen(block, inst), + .opt_eu_base_ty => try sema.zirOptEuBaseTy(block, inst), .clz => try sema.zirBitCount(block, inst, .clz, Value.clz), .ctz => try sema.zirBitCount(block, inst, .ctz, Value.ctz), @@ -1359,12 +1360,12 @@ fn analyzeBodyInner( continue; }, .validate_array_init_ty => { - try sema.validateArrayInitTy(block, inst); + try sema.zirValidateArrayInitTy(block, inst); i += 1; continue; }, .validate_struct_init_ty => { - try sema.validateStructInitTy(block, inst); + try sema.zirValidateStructInitTy(block, inst); i += 1; continue; }, @@ -4312,7 +4313,31 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. return len; } -fn validateArrayInitTy( +fn zirOptEuBaseTy( + sema: *Sema, + block: *Block, + inst: Zir.Inst.Index, +) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const inst_data = sema.code.instructions.items(.data)[inst].un_node; + var ty = sema.resolveType(block, .unneeded, inst_data.operand) catch |err| switch (err) { + // Since this is a ZIR instruction that returns a type, encountering + // generic poison should not result in a failed compilation, but the + // generic poison type. This prevents unnecessary failures when + // constructing types at compile-time. + error.GenericPoison => return .generic_poison_type, + else => |e| return e, + }; + while (true) { + switch (ty.zigTypeTag(mod)) { + .Optional => ty = ty.optionalChild(mod), + .ErrorUnion => ty = ty.errorUnionPayload(mod), + else => return sema.addType(ty), + } + } +} + +fn zirValidateArrayInitTy( sema: *Sema, block: *Block, inst: Zir.Inst.Index, @@ -4322,7 +4347,11 @@ fn validateArrayInitTy( const src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_init_ty = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.ArrayInit, inst_data.payload_index).data; - const ty = try sema.resolveType(block, ty_src, extra.ty); + const ty = sema.resolveType(block, ty_src, extra.ty) catch |err| switch (err) { + // It's okay for the type to be unknown: this will result in an anonymous array init. + error.GenericPoison => return, + else => |e| return e, + }; switch (ty.zigTypeTag(mod)) { .Array => { @@ -4358,7 +4387,7 @@ fn validateArrayInitTy( return sema.failWithArrayInitNotSupported(block, ty_src, ty); } -fn validateStructInitTy( +fn zirValidateStructInitTy( sema: *Sema, block: *Block, inst: Zir.Inst.Index, @@ -4366,7 +4395,11 @@ fn validateStructInitTy( const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const ty = try sema.resolveType(block, src, inst_data.operand); + const ty = sema.resolveType(block, src, inst_data.operand) catch |err| switch (err) { + // It's okay for the type to be unknown: this will result in an anonymous struct init. 
+ error.GenericPoison => return, + else => |e| return e, + }; switch (ty.zigTypeTag(mod)) { .Struct, .Union => return, @@ -7744,7 +7777,15 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const bin = sema.code.instructions.items(.data)[inst].bin; - const indexable_ty = try sema.resolveType(block, .unneeded, bin.lhs); + const operand = sema.resolveType(block, .unneeded, bin.lhs) catch |err| switch (err) { + // Since this is a ZIR instruction that returns a type, encountering + // generic poison should not result in a failed compilation, but the + // generic poison type. This prevents unnecessary failures when + // constructing types at compile-time. + error.GenericPoison => return .generic_poison_type, + else => |e| return e, + }; + const indexable_ty = try sema.resolveTypeFields(operand); assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction if (indexable_ty.zigTypeTag(mod) == .Struct) { const elem_type = indexable_ty.structFieldType(@intFromEnum(bin.rhs), mod); @@ -18794,7 +18835,13 @@ fn zirStructInit( const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data; const first_field_type_data = zir_datas[first_item.field_type].pl_node; const first_field_type_extra = sema.code.extraData(Zir.Inst.FieldType, first_field_type_data.payload_index).data; - const resolved_ty = try sema.resolveType(block, src, first_field_type_extra.container_type); + const resolved_ty = sema.resolveType(block, src, first_field_type_extra.container_type) catch |err| switch (err) { + error.GenericPoison => { + // The type wasn't actually known, so treat this as an anon struct init. + return sema.structInitAnon(block, src, .typed_init, extra.data, extra.end, is_ref); + }, + else => |e| return e, + }; try sema.resolveTypeLayout(resolved_ty); if (resolved_ty.zigTypeTag(mod) == .Struct) { @@ -19037,26 +19084,57 @@ fn zirStructInitAnon( inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; - const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index); - const types = try sema.arena.alloc(InternPool.Index, extra.data.fields_len); + return sema.structInitAnon(block, src, .anon_init, extra.data, extra.end, is_ref); +} + +fn structInitAnon( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + /// It is possible for a typed struct_init to be downgraded to an anonymous init due to a + /// generic poison type. In this case, we need to know to interpret the extra data differently. + comptime kind: enum { anon_init, typed_init }, + extra_data: switch (kind) { + .anon_init => Zir.Inst.StructInitAnon, + .typed_init => Zir.Inst.StructInit, + }, + extra_end: usize, + is_ref: bool, +) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const gpa = sema.gpa; + const zir_datas = sema.code.instructions.items(.data); + + const types = try sema.arena.alloc(InternPool.Index, extra_data.fields_len); const values = try sema.arena.alloc(InternPool.Index, types.len); + var fields = std.AutoArrayHashMap(InternPool.NullTerminatedString, u32).init(sema.arena); try fields.ensureUnusedCapacity(types.len); // Find which field forces the expression to be runtime, if any. 
const opt_runtime_index = rs: { var runtime_index: ?usize = null; - var extra_index = extra.end; + var extra_index = extra_end; for (types, 0..) |*field_ty, i_usize| { - const i = @as(u32, @intCast(i_usize)); - const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index); + const i: u32 = @intCast(i_usize); + const item = switch (kind) { + .anon_init => sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index), + .typed_init => sema.code.extraData(Zir.Inst.StructInit.Item, extra_index), + }; extra_index = item.end; - const name = sema.code.nullTerminatedString(item.data.field_name); + const name = switch (kind) { + .anon_init => sema.code.nullTerminatedString(item.data.field_name), + .typed_init => name: { + // `item.data.field_type` references a `field_type` instruction + const field_type_data = zir_datas[item.data.field_type].pl_node; + const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index); + break :name sema.code.nullTerminatedString(field_type_extra.data.name_start); + }, + }; const name_ip = try mod.intern_pool.getOrPutString(gpa, name); const gop = fields.getOrPutAssumeCapacity(name_ip); if (gop.found_existing) { @@ -19129,10 +19207,13 @@ fn zirStructInitAnon( .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const alloc = try block.addTy(.alloc, alloc_ty); - var extra_index = extra.end; + var extra_index = extra_end; for (types, 0..) |field_ty, i_usize| { const i = @as(u32, @intCast(i_usize)); - const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index); + const item = switch (kind) { + .anon_init => sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index), + .typed_init => sema.code.extraData(Zir.Inst.StructInit.Item, extra_index), + }; extra_index = item.end; const field_ptr_ty = try mod.ptrType(.{ @@ -19150,9 +19231,12 @@ fn zirStructInitAnon( } const element_refs = try sema.arena.alloc(Air.Inst.Ref, types.len); - var extra_index = extra.end; + var extra_index = extra_end; for (types, 0..) |_, i| { - const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index); + const item = switch (kind) { + .anon_init => sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index), + .typed_init => sema.code.extraData(Zir.Inst.StructInit.Item, extra_index), + }; extra_index = item.end; element_refs[i] = try sema.resolveInst(item.data.init); } @@ -19175,7 +19259,13 @@ fn zirArrayInit( const args = sema.code.refSlice(extra.end, extra.data.operands_len); assert(args.len >= 2); // array_ty + at least one element - const array_ty = try sema.resolveType(block, src, args[0]); + const array_ty = sema.resolveType(block, src, args[0]) catch |err| switch (err) { + error.GenericPoison => { + // The type wasn't actually known, so treat this as an anon array init. 
+ return sema.arrayInitAnon(block, src, args[1..], is_ref); + }, + else => |e| return e, + }; const sentinel_val = array_ty.sentinel(mod); const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @intFromBool(sentinel_val != null)); @@ -19283,6 +19373,16 @@ fn zirArrayInitAnon( const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); const operands = sema.code.refSlice(extra.end, extra.data.operands_len); + return sema.arrayInitAnon(block, src, operands, is_ref); +} + +fn arrayInitAnon( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + operands: []const Zir.Inst.Ref, + is_ref: bool, +) CompileError!Air.Inst.Ref { const mod = sema.mod; const types = try sema.arena.alloc(InternPool.Index, operands.len); diff --git a/src/Zir.zig b/src/Zir.zig index 572471c863..9f7450fe5c 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -700,10 +700,16 @@ pub const Inst = struct { /// *?S returns *S /// Uses the `un_node` field. field_base_ptr, + /// Given a type, strips all optional and error union types wrapping it. + /// e.g. `E!?u32` becomes `u32`, `[]u8` becomes `[]u8`. + /// Uses the `un_node` field. + opt_eu_base_ty, /// Checks that the type supports array init syntax. + /// Returns the underlying indexable type (since the given type may be e.g. an optional). /// Uses the `un_node` field. validate_array_init_ty, /// Checks that the type supports struct init syntax. + /// Returns the underlying struct type (since the given type may be e.g. an optional). /// Uses the `un_node` field. validate_struct_init_ty, /// Given a set of `field_ptr` instructions, assumes they are all part of a struct @@ -1234,6 +1240,7 @@ pub const Inst = struct { .save_err_ret_index, .restore_err_ret_index, .for_len, + .opt_eu_base_ty, => false, .@"break", @@ -1522,6 +1529,7 @@ pub const Inst = struct { .for_len, .@"try", .try_ptr, + .opt_eu_base_ty, => false, .extended => switch (data.extended.opcode) { @@ -1676,6 +1684,7 @@ pub const Inst = struct { .switch_block_ref = .pl_node, .array_base_ptr = .un_node, .field_base_ptr = .un_node, + .opt_eu_base_ty = .un_node, .validate_array_init_ty = .pl_node, .validate_struct_init_ty = .un_node, .validate_struct_init = .pl_node, diff --git a/src/print_zir.zig b/src/print_zir.zig index 42a9abf401..7ed233a0c6 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -229,6 +229,7 @@ const Writer = struct { .make_ptr_const, .validate_deref, .check_comptime_control_flow, + .opt_eu_base_ty, => try self.writeUnNode(stream, inst), .ref, diff --git a/src/type.zig b/src/type.zig index d1d182714f..3db11ac42e 100644 --- a/src/type.zig +++ b/src/type.zig @@ -3039,6 +3039,7 @@ pub const Type = struct { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + assert(struct_obj.haveFieldTypes()); return struct_obj.fields.values()[index].ty; }, .union_type => |union_type| { diff --git a/test/behavior/array.zig b/test/behavior/array.zig index 5d475c9b25..a4cac26569 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -761,3 +761,17 @@ test "slicing array of zero-sized values" { for (arr[0..]) |zero| try expect(zero == 0); } + +test "array init with no result pointer sets field result types" { + const S = struct { + // A function parameter has a result type, but no result pointer. 
+ fn f(arr: [1]u32) u32 { + return arr[0]; + } + }; + + const x: u64 = 123; + const y = S.f(.{@intCast(x)}); + + try expect(y == x); +} diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 6017d4e63c..1c08e7b5fa 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -1724,3 +1724,17 @@ test "packed struct field in anonymous struct" { fn countFields(v: anytype) usize { return @typeInfo(@TypeOf(v)).Struct.fields.len; } + +test "struct init with no result pointer sets field result types" { + const S = struct { + // A function parameter has a result type, but no result pointer. + fn f(s: struct { x: u32 }) u32 { + return s.x; + } + }; + + const x: u64 = 123; + const y = S.f(.{ .x = @intCast(x) }); + + try expect(y == x); +} From 93e53d1e00793d769d4ee39b3cbfd0c88257687d Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 29 Jul 2023 06:22:29 +0100 Subject: [PATCH 2/6] compiler: fix crash on invalid result type for `@splat` This introduces a new ZIR instruction, `vec_elem_type`. Co-Authored-By: Ali Chraghi Resolves: #16567 --- src/AstGen.zig | 9 ++------- src/Sema.zig | 18 ++++++++++++++++++ src/Zir.zig | 6 ++++++ src/print_zir.zig | 1 + .../splat_result_type_non_vector.zig | 9 +++++++++ 5 files changed, 36 insertions(+), 7 deletions(-) create mode 100644 test/cases/compile_errors/splat_result_type_non_vector.zig diff --git a/src/AstGen.zig b/src/AstGen.zig index 858b94622d..95287c0a3d 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -2569,6 +2569,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As .array_type_sentinel, .elem_type_index, .elem_type, + .vector_elem_type, .vector_type, .indexable_ptr_len, .anyframe_type, @@ -8624,13 +8625,7 @@ fn builtinCall( .splat => { const result_type = try ri.rl.resultType(gz, node, "@splat"); - const elem_type = try gz.add(.{ - .tag = .elem_type_index, - .data = .{ .bin = .{ - .lhs = result_type, - .rhs = @as(Zir.Inst.Ref, @enumFromInt(0)), - } }, - }); + const elem_type = try gz.addUnNode(.vector_elem_type, result_type, node); const scalar = try expr(gz, scope, .{ .rl = .{ .ty = elem_type } }, params[0]); const result = try gz.addPlNode(.splat, node, Zir.Inst.Bin{ .lhs = result_type, diff --git a/src/Sema.zig b/src/Sema.zig index ab01d618e8..49dac7177e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1022,6 +1022,7 @@ fn analyzeBodyInner( .elem_val_node => try sema.zirElemValNode(block, inst), .elem_type_index => try sema.zirElemTypeIndex(block, inst), .elem_type => try sema.zirElemType(block, inst), + .vector_elem_type => try sema.zirVectorElemType(block, inst), .enum_literal => try sema.zirEnumLiteral(block, inst), .int_from_enum => try sema.zirIntFromEnum(block, inst), .enum_from_int => try sema.zirEnumFromInt(block, inst), @@ -7804,6 +7805,23 @@ fn zirElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addType(ptr_ty.childType(mod)); } +fn zirVectorElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const un_node = sema.code.instructions.items(.data)[inst].un_node; + const vec_ty = sema.resolveType(block, .unneeded, un_node.operand) catch |err| switch (err) { + // Since this is a ZIR instruction that returns a type, encountering + // generic poison should not result in a failed compilation, but the + // generic poison type. This prevents unnecessary failures when + // constructing types at compile-time. 
+ error.GenericPoison => return .generic_poison_type, + else => |e| return e, + }; + if (!vec_ty.isVector(mod)) { + return sema.fail(block, un_node.src(), "expected vector type, found '{}'", .{vec_ty.fmt(mod)}); + } + return sema.addType(vec_ty.childType(mod)); +} + fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; diff --git a/src/Zir.zig b/src/Zir.zig index 9f7450fe5c..aa1945b236 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -248,6 +248,9 @@ pub const Inst = struct { /// Given a pointer type, returns its element type. /// Uses the `un_node` field. elem_type, + /// Given a vector type, returns its element type. + /// Uses the `un_node` field. + vector_elem_type, /// Given a pointer to an indexable object, returns the len property. This is /// used by for loops. This instruction also emits a for-loop specific compile /// error if the indexable object is not indexable. @@ -1029,6 +1032,7 @@ pub const Inst = struct { .vector_type, .elem_type_index, .elem_type, + .vector_elem_type, .indexable_ptr_len, .anyframe_type, .as, @@ -1334,6 +1338,7 @@ pub const Inst = struct { .vector_type, .elem_type_index, .elem_type, + .vector_elem_type, .indexable_ptr_len, .anyframe_type, .as, @@ -1565,6 +1570,7 @@ pub const Inst = struct { .vector_type = .pl_node, .elem_type_index = .bin, .elem_type = .un_node, + .vector_elem_type = .un_node, .indexable_ptr_len = .un_node, .anyframe_type = .un_node, .as = .bin, diff --git a/src/print_zir.zig b/src/print_zir.zig index 7ed233a0c6..3a158375ea 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -155,6 +155,7 @@ const Writer = struct { .alloc_mut, .alloc_comptime_mut, .elem_type, + .vector_elem_type, .indexable_ptr_len, .anyframe_type, .bit_not, diff --git a/test/cases/compile_errors/splat_result_type_non_vector.zig b/test/cases/compile_errors/splat_result_type_non_vector.zig new file mode 100644 index 0000000000..dbff8dc041 --- /dev/null +++ b/test/cases/compile_errors/splat_result_type_non_vector.zig @@ -0,0 +1,9 @@ +export fn f() void { + _ = @as(u32, @splat(5)); +} + +// error +// backend=stage2 +// target=native +// +// :2:18: error: expected vector type, found 'u32' From f2c8fa769a92eb61c13f2cc0f75c526c8fd729a9 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 1 Aug 2023 22:42:01 +0100 Subject: [PATCH 3/6] Sema: refactor generic calls to interleave argument analysis and parameter type resolution AstGen provides all function call arguments with a result location, referenced through the call instruction index. The idea is that this should be the parameter type, but for `anytype` parameters, we use generic poison, which is required to be handled correctly. Previously, generic instantiations and inline calls worked by evaluating all args in advance, before resolving generic parameter types. This means any generic parameter (not just `anytype` ones) had generic poison result types. This caused missing result locations in some cases. Additionally, the generic instantiation logic caused `zirParam` to analyze the argument types a second time before coercion. This meant that for nominal types (struct/enum/etc), a *new* type was created, distinct to the result type which was previously forwarded to the argument expression. This commit fixes both of these issues. 
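To illustrate the first issue with a minimal sketch (hypothetical code, not taken from this patch or its tests), consider a call where the argument's result type only becomes known once the generic parameter is resolved:

    fn f(comptime T: type, x: T) T {
        return x;
    }

    test "generic parameter supplies a result type" {
        const wide: u64 = 123;
        // Before this change, `x: T` still had a generic poison result type
        // while this argument was analyzed, so the @intCast below had no known
        // result type; with interleaved resolution it coerces to u32.
        _ = f(u32, @intCast(wide));
    }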
Generic parameter type resolution is now interleaved with argument analysis, so that we don't have unnecessary generic poison types, and generic instantiation logic now handles parameters itself rather than falling through to the standard zirParam logic, so avoids duplicating the types. Resolves: #16566 Resolves: #16258 Resolves: #16753 --- src/Air.zig | 6 +- src/Module.zig | 29 -- src/Sema.zig | 959 +++++++++++++++++++++++------------------ test/behavior/call.zig | 61 +++ 4 files changed, 606 insertions(+), 449 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index ae43a493a9..2126b473a8 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1528,11 +1528,13 @@ pub fn refToInterned(ref: Inst.Ref) ?InternPool.Index { } pub fn internedToRef(ip_index: InternPool.Index) Inst.Ref { - assert(@intFromEnum(ip_index) >> 31 == 0); return switch (ip_index) { .var_args_param_type => .var_args_param_type, .none => .none, - else => @enumFromInt(@as(u31, @intCast(@intFromEnum(ip_index)))), + else => { + assert(@intFromEnum(ip_index) >> 31 == 0); + return @enumFromInt(@as(u31, @intCast(@intFromEnum(ip_index)))); + }, }; } diff --git a/src/Module.zig b/src/Module.zig index 010855069c..4a62fd07ca 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5938,35 +5938,6 @@ pub fn paramSrc( unreachable; } -pub fn argSrc( - mod: *Module, - call_node_offset: i32, - decl: *Decl, - start_arg_i: usize, - bound_arg_src: ?LazySrcLoc, -) LazySrcLoc { - @setCold(true); - const gpa = mod.gpa; - if (start_arg_i == 0 and bound_arg_src != null) return bound_arg_src.?; - const arg_i = start_arg_i - @intFromBool(bound_arg_src != null); - const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { - // In this case we emit a warning + a less precise source location. - log.warn("unable to load {s}: {s}", .{ - decl.getFileScope(mod).sub_file_path, @errorName(err), - }); - return LazySrcLoc.nodeOffset(0); - }; - const node = decl.relativeToNodeIndex(call_node_offset); - var args: [1]Ast.Node.Index = undefined; - const call_full = tree.fullCall(&args, node) orelse { - assert(tree.nodes.items(.tag)[node] == .builtin_call); - const call_args_node = tree.extra_data[tree.nodes.items(.data)[node].rhs - 1]; - const call_args_offset = decl.nodeIndexToRelative(call_args_node); - return mod.initSrc(call_args_offset, decl, arg_i); - }; - return LazySrcLoc.nodeOffset(decl.nodeIndexToRelative(call_full.ast.params[arg_i])); -} - pub fn initSrc( mod: *Module, init_node_offset: i32, diff --git a/src/Sema.zig b/src/Sema.zig index 49dac7177e..5aeb55ddc3 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -70,7 +70,6 @@ generic_owner: InternPool.Index = .none, /// instantiation can point back to the instantiation site in addition to the /// declaration site. generic_call_src: LazySrcLoc = .unneeded, -generic_bound_arg_src: ?LazySrcLoc = null, /// Corresponds to `generic_call_src`. 
generic_call_decl: Decl.OptionalIndex = .none, /// The key is types that must be fully resolved prior to machine code @@ -1401,22 +1400,22 @@ fn analyzeBodyInner( continue; }, .param => { - try sema.zirParam(block, inst, i, false); + try sema.zirParam(block, inst, false); i += 1; continue; }, .param_comptime => { - try sema.zirParam(block, inst, i, true); + try sema.zirParam(block, inst, true); i += 1; continue; }, .param_anytype => { - try sema.zirParamAnytype(block, inst, i, false); + try sema.zirParamAnytype(block, inst, false); i += 1; continue; }, .param_anytype_comptime => { - try sema.zirParamAnytype(block, inst, i, true); + try sema.zirParamAnytype(block, inst, true); i += 1; continue; }, @@ -6536,7 +6535,6 @@ fn zirCall( defer tracy.end(); const mod = sema.mod; - const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const callee_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node }; const call_src = inst_data.src(); @@ -6560,96 +6558,62 @@ fn zirCall( break :blk try sema.fieldCallBind(block, callee_src, object_ptr, field_name, field_name_src); }, }; - var resolved_args: []Air.Inst.Ref = undefined; - var bound_arg_src: ?LazySrcLoc = null; - var func: Air.Inst.Ref = undefined; - var arg_index: u32 = 0; - switch (callee) { - .direct => |func_inst| { - resolved_args = try sema.arena.alloc(Air.Inst.Ref, args_len); - func = func_inst; - }, - .method => |method| { - resolved_args = try sema.arena.alloc(Air.Inst.Ref, args_len + 1); - func = method.func_inst; - resolved_args[0] = method.arg0_inst; - arg_index += 1; - bound_arg_src = callee_src; - }, - } + const func: Air.Inst.Ref = switch (callee) { + .direct => |func_inst| func_inst, + .method => |method| method.func_inst, + }; const callee_ty = sema.typeOf(func); - const total_args = args_len + @intFromBool(bound_arg_src != null); - const func_ty = try sema.checkCallArgumentCount(block, func, callee_src, callee_ty, total_args, bound_arg_src != null); + const total_args = args_len + @intFromBool(callee == .method); + const func_ty = try sema.checkCallArgumentCount(block, func, callee_src, callee_ty, total_args, callee == .method); - const args_body = sema.code.extra[extra.end..]; - - var input_is_error = false; + // The block index before the call, so we can potentially insert an error trace save here later. const block_index: Air.Inst.Index = @intCast(block.instructions.items.len); - const func_ty_info = mod.typeToFunc(func_ty).?; - const fn_params_len = func_ty_info.param_types.len; - const parent_comptime = block.is_comptime; - // `extra_index` and `arg_index` are separate since the bound function is passed as the first argument. - var extra_index: usize = 0; - var arg_start: u32 = args_len; - while (extra_index < args_len) : ({ - extra_index += 1; - arg_index += 1; - }) { - const arg_end = sema.code.extra[extra.end + extra_index]; - defer arg_start = arg_end; + // This will be set by `analyzeCall` to indicate whether any parameter was an error (making the + // error trace potentially dirty). + var input_is_error = false; - // Generate args to comptime params in comptime block. 
- defer block.is_comptime = parent_comptime; - if (arg_index < @min(fn_params_len, 32) and func_ty_info.paramIsComptime(@intCast(arg_index))) { - block.is_comptime = true; - // TODO set comptime_reason - } - - sema.inst_map.putAssumeCapacity(inst, inst: { - if (arg_index >= fn_params_len) - break :inst Air.Inst.Ref.var_args_param_type; - - if (func_ty_info.param_types.get(ip)[arg_index] == .generic_poison_type) - break :inst Air.Inst.Ref.generic_poison_type; - - break :inst try sema.addType(func_ty_info.param_types.get(ip)[arg_index].toType()); - }); - - const resolved = try sema.resolveBody(block, args_body[arg_start..arg_end], inst); - const resolved_ty = sema.typeOf(resolved); - if (resolved_ty.zigTypeTag(mod) == .NoReturn) { - return resolved; - } - if (resolved_ty.isError(mod)) { - input_is_error = true; - } - resolved_args[arg_index] = resolved; - } - if (sema.owner_func_index == .none or - !ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn) - { - input_is_error = false; // input was an error type, but no errorable fn's were actually called - } + const args_info: CallArgsInfo = .{ .zir_call = .{ + .bound_arg = switch (callee) { + .direct => .none, + .method => |method| method.arg0_inst, + }, + .bound_arg_src = callee_src, + .call_inst = inst, + .call_node_offset = inst_data.src_node, + .num_args = args_len, + .args_body = sema.code.extra[extra.end..], + .any_arg_is_error = &input_is_error, + } }; // AstGen ensures that a call instruction is always preceded by a dbg_stmt instruction. const call_dbg_node = inst - 1; + const call_inst = try sema.analyzeCall(block, func, func_ty, callee_src, call_src, modifier, ensure_result_used, args_info, call_dbg_node, .call); + + if (sema.owner_func_index == .none or + !mod.intern_pool.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn) + { + // No errorable fn actually called; we have no error return trace + input_is_error = false; + } if (mod.backendSupportsFeature(.error_return_trace) and mod.comp.bin_file.options.error_return_tracing and !block.is_comptime and !block.is_typeof and (input_is_error or pop_error_return_trace)) { - const call_inst: Air.Inst.Ref = if (modifier == .always_tail) undefined else b: { - break :b try sema.analyzeCall(block, func, func_ty, callee_src, call_src, modifier, ensure_result_used, resolved_args, bound_arg_src, call_dbg_node, .call); - }; - const return_ty = sema.typeOf(call_inst); if (modifier != .always_tail and return_ty.isNoReturn(mod)) return call_inst; // call to "fn(...) noreturn", don't pop + // TODO: we don't fix up the error trace for always_tail correctly, we should be doing it + // *before* the recursive call. This will be a bit tricky to do and probably requires + // moving this logic into analyzeCall. But that's probably a good idea anyway. + if (modifier == .always_tail) + return call_inst; + // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only // need to clean-up our own trace if we were passed to a non-error-handling expression. 
- if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError(mod))) { + if (input_is_error or (pop_error_return_trace and return_ty.isError(mod))) { const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index"); @@ -6669,12 +6633,9 @@ fn zirCall( try sema.popErrorReturnTrace(block, call_src, operand, save_inst); } - if (modifier == .always_tail) // Perform the call *after* the restore, so that a tail call is possible. - return sema.analyzeCall(block, func, func_ty, callee_src, call_src, modifier, ensure_result_used, resolved_args, bound_arg_src, call_dbg_node, .call); - return call_inst; } else { - return sema.analyzeCall(block, func, func_ty, callee_src, call_src, modifier, ensure_result_used, resolved_args, bound_arg_src, call_dbg_node, .call); + return call_inst; } } @@ -6781,7 +6742,19 @@ fn callBuiltin( if (args.len != fn_params_len or (func_ty_info.is_var_args and args.len < fn_params_len)) { std.debug.panic("parameter count mismatch calling builtin fn, expected {d}, found {d}", .{ fn_params_len, args.len }); } - _ = try sema.analyzeCall(block, builtin_fn, func_ty, call_src, call_src, modifier, false, args, null, null, operation); + + _ = try sema.analyzeCall( + block, + builtin_fn, + func_ty, + call_src, + call_src, + modifier, + false, + .{ .resolved = .{ .src = call_src, .args = args } }, + null, + operation, + ); } const CallOperation = enum { @@ -6792,6 +6765,251 @@ const CallOperation = enum { @"error return", }; +const CallArgsInfo = union(enum) { + /// The full list of resolved (but uncoerced) arguments is known ahead of time. + resolved: struct { + src: LazySrcLoc, + args: []const Air.Inst.Ref, + }, + + /// The list of resolved (but uncoerced) arguments is known ahead of time, but + /// originated from a usage of the @call builtin at the given node offset. + call_builtin: struct { + call_node_offset: i32, + args: []const Air.Inst.Ref, + }, + + /// This call corresponds to a ZIR call instruction. The arguments have not yet been + /// resolved. They must be resolved by `analyzeCall` so that argument resolution and + /// generic instantiation may be interleaved. This is required for RLS to work on + /// generic parameters. + zir_call: struct { + /// This may be `none`, in which case it is ignored. Otherwise, it is the + /// already-resolved value of the first argument, from method call syntax. + bound_arg: Air.Inst.Ref, + /// The source location of `bound_arg` if it is not `null`. Otherwise `undefined`. + bound_arg_src: LazySrcLoc, + /// The ZIR call instruction. The parameter type is placed at this index while + /// analyzing arguments. + call_inst: Zir.Inst.Index, + /// The node offset of `call_inst`. + call_node_offset: i32, + /// The number of arguments to this call, not including `bound_arg`. + num_args: u32, + /// The ZIR corresponding to all function arguments (other than `bound_arg`, if it + /// is not `none`). Format is precisely the same as trailing data of ZIR `call`. + args_body: []const Zir.Inst.Index, + /// This bool will be set to true if any argument evaluated turns out to have an error set or error union type. + /// This is used by the caller to restore the error return trace when necessary. 
+ any_arg_is_error: *bool, + }, + + fn count(cai: CallArgsInfo) usize { + return switch (cai) { + inline .resolved, .call_builtin => |resolved| resolved.args.len, + .zir_call => |zir_call| zir_call.num_args + @intFromBool(zir_call.bound_arg != .none), + }; + } + + fn argSrc(cai: CallArgsInfo, block: *Block, arg_index: usize) LazySrcLoc { + return switch (cai) { + .resolved => |resolved| resolved.src, + .call_builtin => |call_builtin| .{ .call_arg = .{ + .decl = block.src_decl, + .call_node_offset = call_builtin.call_node_offset, + .arg_index = @intCast(arg_index), + } }, + .zir_call => |zir_call| if (arg_index == 0 and zir_call.bound_arg != .none) { + return zir_call.bound_arg_src; + } else .{ .call_arg = .{ + .decl = block.src_decl, + .call_node_offset = zir_call.call_node_offset, + .arg_index = @intCast(arg_index - @intFromBool(zir_call.bound_arg != .none)), + } }, + }; + } + + /// Analyzes the arg at `arg_index` and coerces it to `param_ty`. + /// `param_ty` may be `generic_poison` or `var_args_param`. + /// `func_ty_info` may be the type before instantiation, even if a generic + /// instantiation has been partially completed. + fn analyzeArg( + cai: CallArgsInfo, + sema: *Sema, + block: *Block, + arg_index: usize, + param_ty: Type, + func_ty_info: InternPool.Key.FuncType, + func_inst: Air.Inst.Ref, + ) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const param_count = func_ty_info.param_types.len; + switch (param_ty.toIntern()) { + .generic_poison_type, .var_args_param_type => {}, + else => try sema.queueFullTypeResolution(param_ty), + } + const uncoerced_arg: Air.Inst.Ref = switch (cai) { + inline .resolved, .call_builtin => |resolved| resolved.args[arg_index], + .zir_call => |zir_call| arg_val: { + const has_bound_arg = zir_call.bound_arg != .none; + if (arg_index == 0 and has_bound_arg) { + break :arg_val zir_call.bound_arg; + } + const real_arg_idx = arg_index - @intFromBool(has_bound_arg); + + const arg_body = if (real_arg_idx == 0) blk: { + const start = zir_call.num_args; + const end = zir_call.args_body[0]; + break :blk zir_call.args_body[start..end]; + } else blk: { + const start = zir_call.args_body[real_arg_idx - 1]; + const end = zir_call.args_body[real_arg_idx]; + break :blk zir_call.args_body[start..end]; + }; + + // Generate args to comptime params in comptime block + const parent_comptime = block.is_comptime; + defer block.is_comptime = parent_comptime; + // Note that we are indexing into parameters, not arguments, so use `arg_index` instead of `real_arg_idx` + if (arg_index < @min(param_count, 32) and func_ty_info.paramIsComptime(@intCast(arg_index))) { + block.is_comptime = true; + // TODO set comptime_reason + } + // Give the arg its result type + sema.inst_map.putAssumeCapacity(zir_call.call_inst, try sema.addType(param_ty)); + // Resolve the arg! + const uncoerced_arg = try sema.resolveBody(block, arg_body, zir_call.call_inst); + + if (sema.typeOf(uncoerced_arg).zigTypeTag(mod) == .NoReturn) { + // This terminates resolution of arguments. The caller should + // propagate this. 
+ return uncoerced_arg; + } + + if (sema.typeOf(uncoerced_arg).isError(mod)) { + zir_call.any_arg_is_error.* = true; + } + + break :arg_val uncoerced_arg; + }, + }; + switch (param_ty.toIntern()) { + .generic_poison_type => return uncoerced_arg, + .var_args_param_type => return sema.coerceVarArgParam(block, uncoerced_arg, cai.argSrc(block, arg_index)), + else => return sema.coerceExtra( + block, + param_ty, + uncoerced_arg, + cai.argSrc(block, arg_index), + .{ .param_src = .{ + .func_inst = func_inst, + .param_i = @intCast(arg_index), + } }, + ) catch |err| switch (err) { + error.NotCoercible => unreachable, + else => |e| return e, + }, + } + } +}; + +/// While performing an inline call, we need to switch between two Sema states a few times: the +/// state for the caller (with the callee's `code`, `fn_ret_ty`, etc), and the state for the callee. +/// These cannot be two separate Sema instances as they must share AIR. +/// Therefore, this struct acts as a helper to switch between the two. +/// This switching is required during argument evaluation, where function argument analysis must be +/// interleaved with resolving generic parameter types. +const InlineCallSema = struct { + sema: *Sema, + cur: enum { + caller, + callee, + }, + + other_code: Zir, + other_func_index: InternPool.Index, + other_fn_ret_ty: Type, + other_fn_ret_ty_ies: ?*InferredErrorSet, + other_inst_map: InstMap, + other_error_return_trace_index_on_fn_entry: Air.Inst.Ref, + other_generic_owner: InternPool.Index, + other_generic_call_src: LazySrcLoc, + other_generic_call_decl: Decl.OptionalIndex, + + /// Sema should currently be set up for the caller (i.e. unchanged yet). This init will not + /// change that. The other parameters contain data for the callee Sema. The other modified + /// Sema fields are all initialized to default values for the callee. + /// Must call deinit on the result. + fn init( + sema: *Sema, + callee_code: Zir, + callee_func_index: InternPool.Index, + callee_error_return_trace_index_on_fn_entry: Air.Inst.Ref, + ) InlineCallSema { + return .{ + .sema = sema, + .cur = .caller, + .other_code = callee_code, + .other_func_index = callee_func_index, + .other_fn_ret_ty = Type.void, + .other_fn_ret_ty_ies = null, + .other_inst_map = .{}, + .other_error_return_trace_index_on_fn_entry = callee_error_return_trace_index_on_fn_entry, + .other_generic_owner = .none, + .other_generic_call_src = .unneeded, + .other_generic_call_decl = .none, + }; + } + + /// Switch back to the caller Sema if necessary and free all temporary state of the callee Sema. + fn deinit(ics: *InlineCallSema) void { + switch (ics.cur) { + .caller => {}, + .callee => ics.swap(), + } + // Callee Sema owns the inst_map memory + ics.other_inst_map.deinit(ics.sema.gpa); + ics.* = undefined; + } + + /// Returns a Sema instance suitable for usage from the caller context. + fn caller(ics: *InlineCallSema) *Sema { + switch (ics.cur) { + .caller => {}, + .callee => ics.swap(), + } + return ics.sema; + } + + /// Returns a Sema instance suitable for usage from the callee context. + fn callee(ics: *InlineCallSema) *Sema { + switch (ics.cur) { + .caller => ics.swap(), + .callee => {}, + } + return ics.sema; + } + + /// Internal use only. Swaps to the other Sema state. 
+ fn swap(ics: *InlineCallSema) void { + ics.cur = switch (ics.cur) { + .caller => .callee, + .callee => .caller, + }; + // zig fmt: off + std.mem.swap(Zir, &ics.sema.code, &ics.other_code); + std.mem.swap(InternPool.Index, &ics.sema.func_index, &ics.other_func_index); + std.mem.swap(Type, &ics.sema.fn_ret_ty, &ics.other_fn_ret_ty); + std.mem.swap(?*InferredErrorSet, &ics.sema.fn_ret_ty_ies, &ics.other_fn_ret_ty_ies); + std.mem.swap(InstMap, &ics.sema.inst_map, &ics.other_inst_map); + std.mem.swap(InternPool.Index, &ics.sema.generic_owner, &ics.other_generic_owner); + std.mem.swap(LazySrcLoc, &ics.sema.generic_call_src, &ics.other_generic_call_src); + std.mem.swap(Decl.OptionalIndex, &ics.sema.generic_call_decl, &ics.other_generic_call_decl); + std.mem.swap(Air.Inst.Ref, &ics.sema.error_return_trace_index_on_fn_entry, &ics.other_error_return_trace_index_on_fn_entry); + // zig fmt: on + } +}; + fn analyzeCall( sema: *Sema, block: *Block, @@ -6801,8 +7019,7 @@ fn analyzeCall( call_src: LazySrcLoc, modifier: std.builtin.CallModifier, ensure_result_used: bool, - uncasted_args: []const Air.Inst.Ref, - bound_arg_src: ?LazySrcLoc, + args_info: CallArgsInfo, call_dbg_node: ?Zir.Inst.Index, operation: CallOperation, ) CompileError!Air.Inst.Ref { @@ -6811,7 +7028,6 @@ fn analyzeCall( const callee_ty = sema.typeOf(func); const func_ty_info = mod.typeToFunc(func_ty).?; - const fn_params_len = func_ty_info.param_types.len; const cc = func_ty_info.cc; if (cc == .Naked) { const maybe_decl = try sema.funcDeclSrc(func); @@ -6896,9 +7112,8 @@ fn analyzeCall( func_src, call_src, ensure_result_used, - uncasted_args, + args_info, call_tag, - bound_arg_src, call_dbg_node, )) |some| { return some; @@ -6973,31 +7188,23 @@ fn analyzeCall( .block_inst = block_inst, }, }; - // In order to save a bit of stack space, directly modify Sema rather - // than create a child one. - const parent_zir = sema.code; + const module_fn = mod.funcInfo(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); - sema.code = fn_owner_decl.getFileScope(mod).zir; - defer sema.code = parent_zir; - try mod.declareDeclDependencyType(sema.owner_decl_index, module_fn.owner_decl, .function_body); + // We effectively want a child Sema here, but can't literally do that, because we need AIR + // to be shared. InlineCallSema is a wrapper which handles this for us. While `ics` is in + // scope, we should use its `caller`/`callee` methods rather than using `sema` directly + // whenever performing an operation where the difference matters. + var ics = InlineCallSema.init( + sema, + fn_owner_decl.getFileScope(mod).zir, + module_fn_index, + block.error_return_trace_index, + ); + defer ics.deinit(); - const parent_inst_map = sema.inst_map; - sema.inst_map = .{}; - defer { - sema.src = call_src; - sema.inst_map.deinit(gpa); - sema.inst_map = parent_inst_map; - } - - const parent_func_index = sema.func_index; - sema.func_index = module_fn_index; - defer sema.func_index = parent_func_index; - - const parent_err_ret_index = sema.error_return_trace_index_on_fn_entry; - sema.error_return_trace_index_on_fn_entry = block.error_return_trace_index; - defer sema.error_return_trace_index_on_fn_entry = parent_err_ret_index; + try mod.declareDeclDependencyType(ics.callee().owner_decl_index, module_fn.owner_decl, .function_body); var wip_captures = try WipCaptureScope.init(gpa, fn_owner_decl.src_scope); defer wip_captures.deinit(); @@ -7053,37 +7260,37 @@ fn analyzeCall( // the AIR instructions of the callsite. 
The callee could be a generic function // which means its parameter type expressions must be resolved in order and used // to successively coerce the arguments. - const fn_info = sema.code.getFnInfo(module_fn.zir_body_inst); - try sema.inst_map.ensureSpaceForInstructions(sema.gpa, fn_info.param_body); + const fn_info = ics.callee().code.getFnInfo(module_fn.zir_body_inst); + try ics.callee().inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body); var has_comptime_args = false; var arg_i: u32 = 0; for (fn_info.param_body) |inst| { - const arg_src: LazySrcLoc = if (arg_i == 0 and bound_arg_src != null) - bound_arg_src.? - else - .{ .call_arg = .{ - .decl = block.src_decl, - .call_node_offset = call_src.node_offset.x, - .arg_index = arg_i - @intFromBool(bound_arg_src != null), - } }; - try sema.analyzeInlineCallArg( + const opt_noreturn_ref = try analyzeInlineCallArg( + &ics, block, &child_block, - arg_src, inst, new_fn_info.param_types, &arg_i, - uncasted_args, + args_info, is_comptime_call, &should_memoize, memoized_arg_values, - func_ty_info.param_types, + func_ty_info, func, &has_comptime_args, ); + if (opt_noreturn_ref) |ref| { + // Analyzing this argument gave a ref of a noreturn type. Terminate argument analysis here. + return ref; + } } + // From here, we only really need to use the callee Sema. Make it the active one, then we + // can just use `sema` directly. + _ = ics.callee(); + if (!has_comptime_args and module_fn.analysis(ip).state == .sema_failure) return error.AnalysisFail; @@ -7107,26 +7314,7 @@ fn analyzeCall( else try sema.resolveInst(fn_info.ret_ty_ref); const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; - const bare_return_type = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst); - const parent_fn_ret_ty = sema.fn_ret_ty; - const parent_fn_ret_ty_ies = sema.fn_ret_ty_ies; - const parent_generic_owner = sema.generic_owner; - const parent_generic_call_src = sema.generic_call_src; - const parent_generic_bound_arg_src = sema.generic_bound_arg_src; - const parent_generic_call_decl = sema.generic_call_decl; - sema.fn_ret_ty = bare_return_type; - sema.fn_ret_ty_ies = null; - sema.generic_owner = .none; - sema.generic_call_src = .unneeded; - sema.generic_bound_arg_src = null; - sema.generic_call_decl = .none; - defer sema.fn_ret_ty = parent_fn_ret_ty; - defer sema.fn_ret_ty_ies = parent_fn_ret_ty_ies; - defer sema.generic_owner = parent_generic_owner; - defer sema.generic_call_src = parent_generic_call_src; - defer sema.generic_bound_arg_src = parent_generic_bound_arg_src; - defer sema.generic_call_decl = parent_generic_call_decl; - + sema.fn_ret_ty = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst); if (module_fn.analysis(ip).inferred_error_set) { // Create a fresh inferred error set type for inline/comptime calls. 
const ies = try sema.arena.create(InferredErrorSet); @@ -7134,7 +7322,7 @@ fn analyzeCall( sema.fn_ret_ty_ies = ies; sema.fn_ret_ty = (try ip.get(gpa, .{ .error_union_type = .{ .error_set_type = .adhoc_inferred_error_set_type, - .payload_type = bare_return_type.toIntern(), + .payload_type = sema.fn_ret_ty.toIntern(), } })).toType(); } @@ -7157,7 +7345,7 @@ fn analyzeCall( new_fn_info.return_type = sema.fn_ret_ty.toIntern(); const new_func_resolved_ty = try mod.funcType(new_fn_info); if (!is_comptime_call and !block.is_typeof) { - try sema.emitDbgInline(block, parent_func_index, module_fn_index, new_func_resolved_ty, .dbg_inline_begin); + try sema.emitDbgInline(block, sema.func_index, module_fn_index, new_func_resolved_ty, .dbg_inline_begin); const zir_tags = sema.code.instructions.items(.tag); for (fn_info.param_body) |param| switch (zir_tags[param]) { @@ -7191,7 +7379,7 @@ fn analyzeCall( const err_msg = sema.err orelse return err; if (mem.eql(u8, err_msg.msg, recursive_msg)) return err; try sema.errNote(block, call_src, err_msg, "called from here", .{}); - err_msg.clearTrace(sema.gpa); + err_msg.clearTrace(gpa); return err; }, else => |e| return e, @@ -7205,8 +7393,8 @@ fn analyzeCall( try sema.emitDbgInline( block, module_fn_index, - parent_func_index, - mod.funcOwnerDeclPtr(parent_func_index).ty, + sema.func_index, + mod.funcOwnerDeclPtr(sema.func_index).ty, .dbg_inline_end, ); } @@ -7251,47 +7439,16 @@ fn analyzeCall( } else res: { assert(!func_ty_info.is_generic); - const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len); - for (uncasted_args, 0..) |uncasted_arg, i| { - if (i < fn_params_len) { - const opts: CoerceOpts = .{ .param_src = .{ - .func_inst = func, - .param_i = @intCast(i), - } }; - const param_ty = func_ty_info.param_types.get(ip)[i].toType(); - args[i] = sema.analyzeCallArg( - block, - .unneeded, - param_ty, - uncasted_arg, - opts, - ) catch |err| switch (err) { - error.NeededSourceLocation => { - const decl = mod.declPtr(block.src_decl); - _ = try sema.analyzeCallArg( - block, - mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src), - param_ty, - uncasted_arg, - opts, - ); - unreachable; - }, - else => |e| return e, - }; - } else { - args[i] = sema.coerceVarArgParam(block, uncasted_arg, .unneeded) catch |err| switch (err) { - error.NeededSourceLocation => { - const decl = mod.declPtr(block.src_decl); - _ = try sema.coerceVarArgParam( - block, - uncasted_arg, - mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src), - ); - unreachable; - }, - else => |e| return e, - }; + const args = try sema.arena.alloc(Air.Inst.Ref, args_info.count()); + for (args, 0..) |*arg_out, arg_idx| { + // Non-generic, so param types are already resolved + const param_ty = if (arg_idx < func_ty_info.param_types.len) ty: { + break :ty func_ty_info.param_types.get(ip)[arg_idx].toType(); + } else InternPool.Index.var_args_param_type.toType(); + assert(!param_ty.isGenericPoison()); + arg_out.* = try args_info.analyzeArg(sema, block, arg_idx, param_ty, func_ty_info, func); + if (sema.typeOf(arg_out.*).zigTypeTag(mod) == .NoReturn) { + return arg_out.*; } } @@ -7375,25 +7532,25 @@ fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Typ return Air.Inst.Ref.unreachable_value; } +/// Usually, returns null. If an argument was noreturn, returns that ref (which should become the call result). 
fn analyzeInlineCallArg( - sema: *Sema, + ics: *InlineCallSema, arg_block: *Block, param_block: *Block, - arg_src: LazySrcLoc, inst: Zir.Inst.Index, new_param_types: []InternPool.Index, arg_i: *u32, - uncasted_args: []const Air.Inst.Ref, + args_info: CallArgsInfo, is_comptime_call: bool, should_memoize: *bool, memoized_arg_values: []InternPool.Index, - raw_param_types: InternPool.Index.Slice, + func_ty_info: InternPool.Key.FuncType, func_inst: Air.Inst.Ref, has_comptime_args: *bool, -) !void { - const mod = sema.mod; +) !?Air.Inst.Ref { + const mod = ics.sema.mod; const ip = &mod.intern_pool; - const zir_tags = sema.code.instructions.items(.tag); + const zir_tags = ics.callee().code.instructions.items(.tag); switch (zir_tags[inst]) { .param_comptime, .param_anytype_comptime => has_comptime_args.* = true, else => {}, @@ -7402,39 +7559,36 @@ fn analyzeInlineCallArg( .param, .param_comptime => { // Evaluate the parameter type expression now that previous ones have // been mapped, and coerce the corresponding argument to it. - const pl_tok = sema.code.instructions.items(.data)[inst].pl_tok; + const pl_tok = ics.callee().code.instructions.items(.data)[inst].pl_tok; const param_src = pl_tok.src(); - const extra = sema.code.extraData(Zir.Inst.Param, pl_tok.payload_index); - const param_body = sema.code.extra[extra.end..][0..extra.data.body_len]; + const extra = ics.callee().code.extraData(Zir.Inst.Param, pl_tok.payload_index); + const param_body = ics.callee().code.extra[extra.end..][0..extra.data.body_len]; const param_ty = param_ty: { - const raw_param_ty = raw_param_types.get(ip)[arg_i.*]; + const raw_param_ty = func_ty_info.param_types.get(ip)[arg_i.*]; if (raw_param_ty != .generic_poison_type) break :param_ty raw_param_ty; - const param_ty_inst = try sema.resolveBody(param_block, param_body, inst); - const param_ty = try sema.analyzeAsType(param_block, param_src, param_ty_inst); + const param_ty_inst = try ics.callee().resolveBody(param_block, param_body, inst); + const param_ty = try ics.callee().analyzeAsType(param_block, param_src, param_ty_inst); break :param_ty param_ty.toIntern(); }; new_param_types[arg_i.*] = param_ty; - const uncasted_arg = uncasted_args[arg_i.*]; - if (try sema.typeRequiresComptime(param_ty.toType())) { - _ = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to parameter with comptime-only type must be comptime-known") catch |err| { - if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); + const casted_arg = try args_info.analyzeArg(ics.caller(), arg_block, arg_i.*, param_ty.toType(), func_ty_info, func_inst); + if (ics.caller().typeOf(casted_arg).zigTypeTag(mod) == .NoReturn) { + return casted_arg; + } + const arg_src = args_info.argSrc(arg_block, arg_i.*); + if (try ics.callee().typeRequiresComptime(param_ty.toType())) { + _ = ics.caller().resolveConstMaybeUndefVal(arg_block, arg_src, casted_arg, "argument to parameter with comptime-only type must be comptime-known") catch |err| { + if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(ics.caller(), ics.caller().err); return err; }; } else if (!is_comptime_call and zir_tags[inst] == .param_comptime) { - _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime"); + _ = try ics.caller().resolveConstMaybeUndefVal(arg_block, arg_src, casted_arg, "parameter is comptime"); } - const casted_arg = sema.coerceExtra(arg_block, 
param_ty.toType(), uncasted_arg, arg_src, .{ .param_src = .{ - .func_inst = func_inst, - .param_i = @intCast(arg_i.*), - } }) catch |err| switch (err) { - error.NotCoercible => unreachable, - else => |e| return e, - }; if (is_comptime_call) { - sema.inst_map.putAssumeCapacityNoClobber(inst, casted_arg); - const arg_val = sema.resolveConstMaybeUndefVal(arg_block, arg_src, casted_arg, "argument to function being called at comptime must be comptime-known") catch |err| { - if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); + ics.callee().inst_map.putAssumeCapacityNoClobber(inst, casted_arg); + const arg_val = ics.caller().resolveConstMaybeUndefVal(arg_block, arg_src, casted_arg, "argument to function being called at comptime must be comptime-known") catch |err| { + if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(ics.caller(), ics.caller().err); return err; }; switch (arg_val.toIntern()) { @@ -7448,14 +7602,14 @@ fn analyzeInlineCallArg( // Needed so that lazy values do not trigger // assertion due to type not being resolved // when the hash function is called. - const resolved_arg_val = try sema.resolveLazyValue(arg_val); + const resolved_arg_val = try ics.caller().resolveLazyValue(arg_val); should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod); memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(param_ty.toType(), mod); } else { - sema.inst_map.putAssumeCapacityNoClobber(inst, casted_arg); + ics.callee().inst_map.putAssumeCapacityNoClobber(inst, casted_arg); } - if (try sema.resolveMaybeUndefVal(casted_arg)) |_| { + if (try ics.caller().resolveMaybeUndefVal(casted_arg)) |_| { has_comptime_args.* = true; } @@ -7463,13 +7617,17 @@ fn analyzeInlineCallArg( }, .param_anytype, .param_anytype_comptime => { // No coercion needed. - const uncasted_arg = uncasted_args[arg_i.*]; - new_param_types[arg_i.*] = sema.typeOf(uncasted_arg).toIntern(); + const uncasted_arg = try args_info.analyzeArg(ics.caller(), arg_block, arg_i.*, Type.generic_poison, func_ty_info, func_inst); + if (ics.caller().typeOf(uncasted_arg).zigTypeTag(mod) == .NoReturn) { + return uncasted_arg; + } + const arg_src = args_info.argSrc(arg_block, arg_i.*); + new_param_types[arg_i.*] = ics.caller().typeOf(uncasted_arg).toIntern(); if (is_comptime_call) { - sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg); - const arg_val = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to function being called at comptime must be comptime-known") catch |err| { - if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); + ics.callee().inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg); + const arg_val = ics.caller().resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to function being called at comptime must be comptime-known") catch |err| { + if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(ics.caller(), ics.caller().err); return err; }; switch (arg_val.toIntern()) { @@ -7483,17 +7641,17 @@ fn analyzeInlineCallArg( // Needed so that lazy values do not trigger // assertion due to type not being resolved // when the hash function is called. 
- const resolved_arg_val = try sema.resolveLazyValue(arg_val); + const resolved_arg_val = try ics.caller().resolveLazyValue(arg_val); should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod); - memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(sema.typeOf(uncasted_arg), mod); + memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(ics.caller().typeOf(uncasted_arg), mod); } else { if (zir_tags[inst] == .param_anytype_comptime) { - _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime"); + _ = try ics.caller().resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime"); } - sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg); + ics.callee().inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg); } - if (try sema.resolveMaybeUndefVal(uncasted_arg)) |_| { + if (try ics.caller().resolveMaybeUndefVal(uncasted_arg)) |_| { has_comptime_args.* = true; } @@ -7501,6 +7659,8 @@ fn analyzeInlineCallArg( }, else => {}, } + + return null; } fn analyzeCallArg( @@ -7525,9 +7685,8 @@ fn instantiateGenericCall( func_src: LazySrcLoc, call_src: LazySrcLoc, ensure_result_used: bool, - uncasted_args: []const Air.Inst.Ref, + args_info: CallArgsInfo, call_tag: Air.Inst.Tag, - bound_arg_src: ?LazySrcLoc, call_dbg_node: ?Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const mod = sema.mod; @@ -7541,6 +7700,7 @@ fn instantiateGenericCall( else => unreachable, }; const generic_owner_func = mod.intern_pool.indexToKey(generic_owner).func; + const generic_owner_ty_info = mod.typeToFunc(generic_owner_func.ty.toType()).?; // Even though there may already be a generic instantiation corresponding // to this callsite, we must evaluate the expressions of the generic @@ -7556,9 +7716,13 @@ fn instantiateGenericCall( const fn_zir = namespace.file_scope.zir; const fn_info = fn_zir.getFnInfo(generic_owner_func.zir_body_inst); - const comptime_args = try sema.arena.alloc(InternPool.Index, uncasted_args.len); + const comptime_args = try sema.arena.alloc(InternPool.Index, args_info.count()); @memset(comptime_args, .none); + // We may overestimate the number of runtime args, but this will definitely be sufficient. + const max_runtime_args = args_info.count() - @popCount(generic_owner_ty_info.comptime_bits); + var runtime_args = try std.ArrayListUnmanaged(Air.Inst.Ref).initCapacity(sema.arena, max_runtime_args); + // Re-run the block that creates the function, with the comptime parameters // pre-populated inside `inst_map`. This causes `param_comptime` and // `param_anytype_comptime` ZIR instructions to be ignored, resulting in a @@ -7583,7 +7747,6 @@ fn instantiateGenericCall( .comptime_args = comptime_args, .generic_owner = generic_owner, .generic_call_src = call_src, - .generic_bound_arg_src = bound_arg_src, .generic_call_decl = block.src_decl.toOptional(), .branch_quota = sema.branch_quota, .branch_count = sema.branch_count, @@ -7608,25 +7771,138 @@ fn instantiateGenericCall( try child_sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body); - for (fn_info.param_body[0..uncasted_args.len], uncasted_args, 0..) |inst, arg, i| { - // `child_sema` will use a different `inst_map` which means we have to - // convert from parent-relative `Air.Inst.Ref` to child-relative here. - // Constants are simple; runtime-known values need a new instruction. 
- child_sema.inst_map.putAssumeCapacityNoClobber(inst, if (try sema.resolveMaybeUndefVal(arg)) |val| - Air.internedToRef(val.toIntern()) - else - // We insert into the map an instruction which is runtime-known - // but has the type of the argument. - try child_block.addInst(.{ + for (fn_info.param_body[0..args_info.count()], 0..) |param_inst, arg_index| { + const param_tag = fn_zir.instructions.items(.tag)[param_inst]; + + const param_ty = switch (generic_owner_ty_info.param_types.get(ip)[arg_index]) { + else => |ty| ty.toType(), // parameter is not generic, so type is already resolved + .generic_poison_type => param_ty: { + // We have every parameter before this one, so can resolve this parameter's type now. + // However, first check the param type, since it may be anytype. + switch (param_tag) { + .param_anytype, .param_anytype_comptime => { + // The parameter doesn't have a type. + break :param_ty Type.generic_poison; + }, + .param, .param_comptime => { + // We now know every prior parameter, so can resolve this + // parameter's type. The child sema has these types. + const param_data = fn_zir.instructions.items(.data)[param_inst].pl_tok; + const param_extra = fn_zir.extraData(Zir.Inst.Param, param_data.payload_index); + const param_ty_body = fn_zir.extra[param_extra.end..][0..param_extra.data.body_len]; + + // Make sure any nested instructions don't clobber our work. + const prev_params = child_block.params; + const prev_no_partial_func_ty = child_sema.no_partial_func_ty; + const prev_generic_owner = child_sema.generic_owner; + const prev_generic_call_src = child_sema.generic_call_src; + const prev_generic_call_decl = child_sema.generic_call_decl; + child_block.params = .{}; + child_sema.no_partial_func_ty = true; + child_sema.generic_owner = .none; + child_sema.generic_call_src = .unneeded; + child_sema.generic_call_decl = .none; + defer { + child_block.params = prev_params; + child_sema.no_partial_func_ty = prev_no_partial_func_ty; + child_sema.generic_owner = prev_generic_owner; + child_sema.generic_call_src = prev_generic_call_src; + child_sema.generic_call_decl = prev_generic_call_decl; + } + + const param_ty_inst = try child_sema.resolveBody(&child_block, param_ty_body, param_inst); + break :param_ty try child_sema.analyzeAsType(&child_block, param_data.src(), param_ty_inst); + }, + else => unreachable, + } + }, + }; + const arg_ref = try args_info.analyzeArg(sema, block, arg_index, param_ty, generic_owner_ty_info, func); + const arg_ty = sema.typeOf(arg_ref); + if (arg_ty.zigTypeTag(mod) == .NoReturn) { + // This terminates argument analysis. 
+ return arg_ref; + } + + const arg_is_comptime = switch (param_tag) { + .param_comptime, .param_anytype_comptime => true, + .param, .param_anytype => try sema.typeRequiresComptime(arg_ty), + else => unreachable, + }; + + if (arg_is_comptime) { + if (try sema.resolveMaybeUndefVal(arg_ref)) |arg_val| { + comptime_args[arg_index] = arg_val.toIntern(); + child_sema.inst_map.putAssumeCapacityNoClobber( + param_inst, + Air.internedToRef(arg_val.toIntern()), + ); + } else switch (param_tag) { + .param_comptime, + .param_anytype_comptime, + => return sema.failWithOwnedErrorMsg(msg: { + const arg_src = args_info.argSrc(block, arg_index); + const msg = try sema.errMsg(block, arg_src, "runtime-known argument passed to comptime parameter", .{}); + errdefer msg.destroy(sema.gpa); + const param_src = switch (param_tag) { + .param_comptime => fn_zir.instructions.items(.data)[param_inst].pl_tok.src(), + .param_anytype_comptime => fn_zir.instructions.items(.data)[param_inst].str_tok.src(), + else => unreachable, + }; + try child_sema.errNote(&child_block, param_src, msg, "declared comptime here", .{}); + break :msg msg; + }), + + .param, + .param_anytype, + => return sema.failWithOwnedErrorMsg(msg: { + const arg_src = args_info.argSrc(block, arg_index); + const msg = try sema.errMsg(block, arg_src, "runtime-known argument passed to parameter of comptime-only type", .{}); + errdefer msg.destroy(sema.gpa); + const param_src = switch (param_tag) { + .param => fn_zir.instructions.items(.data)[param_inst].pl_tok.src(), + .param_anytype => fn_zir.instructions.items(.data)[param_inst].str_tok.src(), + else => unreachable, + }; + try child_sema.errNote(&child_block, param_src, msg, "declared here", .{}); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsComptime(msg, arg_src.toSrcLoc(src_decl, mod), arg_ty); + break :msg msg; + }), + + else => unreachable, + } + } else { + // The parameter is runtime-known. + try sema.queueFullTypeResolution(arg_ty); + child_sema.inst_map.putAssumeCapacityNoClobber(param_inst, try child_block.addInst(.{ .tag = .arg, .data = .{ .arg = .{ - .ty = Air.internedToRef(sema.typeOf(arg).toIntern()), - .src_index = @intCast(i), + .ty = Air.internedToRef(arg_ty.toIntern()), + .src_index = @intCast(arg_index), } }, })); + const param_name: Zir.NullTerminatedString = switch (param_tag) { + .param_anytype => @enumFromInt(fn_zir.instructions.items(.data)[param_inst].str_tok.start), + .param => name: { + const inst_data = fn_zir.instructions.items(.data)[param_inst].pl_tok; + const extra = fn_zir.extraData(Zir.Inst.Param, inst_data.payload_index); + break :name @enumFromInt(extra.data.name); + }, + else => unreachable, + }; + try child_block.params.append(sema.arena, .{ + .ty = arg_ty.toIntern(), // This is the type after coercion + .is_comptime = false, // We're adding only runtime args to the instantiation + .name = param_name, + }); + runtime_args.appendAssumeCapacity(arg_ref); + } } - const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst); + // We've already handled parameters, so don't resolve the whole body. Instead, just + // do the instructions after the params (i.e. the func itself). 
+ const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body[args_info.count()..], fn_info.param_body_inst); const callee_index = (child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable).toIntern(); const callee = mod.funcInfo(callee_index); @@ -7649,33 +7925,7 @@ fn instantiateGenericCall( return error.GenericPoison; } - const runtime_args_len: u32 = func_ty_info.param_types.len; - const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len); - { - var runtime_i: u32 = 0; - for (uncasted_args, 0..) |uncasted_arg, total_i| { - // In the case of a function call generated by the language, the LazySrcLoc - // provided for `call_src` may not point to anything interesting. - const arg_src: LazySrcLoc = if (total_i == 0 and bound_arg_src != null) - bound_arg_src.? - else if (call_src == .node_offset) .{ .call_arg = .{ - .decl = block.src_decl, - .call_node_offset = call_src.node_offset.x, - .arg_index = @intCast(total_i - @intFromBool(bound_arg_src != null)), - } } else .unneeded; - - const comptime_arg = callee.comptime_args.get(ip)[total_i]; - if (comptime_arg == .none) { - const param_ty = func_ty_info.param_types.get(ip)[runtime_i].toType(); - const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src); - try sema.queueFullTypeResolution(param_ty); - runtime_args[runtime_i] = casted_arg; - runtime_i += 1; - } - } - - try sema.queueFullTypeResolution(func_ty_info.return_type.toType()); - } + try sema.queueFullTypeResolution(func_ty_info.return_type.toType()); if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); @@ -7687,18 +7937,17 @@ fn instantiateGenericCall( try mod.ensureFuncBodyAnalysisQueued(callee_index); - try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + - runtime_args_len); + try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args.items.len); const result = try block.addInst(.{ .tag = call_tag, .data = .{ .pl_op = .{ .operand = Air.internedToRef(callee_index), .payload = sema.addExtraAssumeCapacity(Air.Call{ - .args_len = runtime_args_len, + .args_len = @intCast(runtime_args.items.len), }), } }, }); - sema.appendRefsAssumeCapacity(runtime_args); + sema.appendRefsAssumeCapacity(runtime_args.items); if (ensure_result_used) { try sema.ensureResultUsed(block, sema.typeOf(result), call_src); @@ -8647,20 +8896,17 @@ fn resolveGenericBody( const prev_no_partial_func_type = sema.no_partial_func_ty; const prev_generic_owner = sema.generic_owner; const prev_generic_call_src = sema.generic_call_src; - const prev_generic_bound_arg_src = sema.generic_bound_arg_src; const prev_generic_call_decl = sema.generic_call_decl; block.params = .{}; sema.no_partial_func_ty = true; sema.generic_owner = .none; sema.generic_call_src = .unneeded; - sema.generic_bound_arg_src = null; sema.generic_call_decl = .none; defer { block.params = prev_params; sema.no_partial_func_ty = prev_no_partial_func_type; sema.generic_owner = prev_generic_owner; sema.generic_call_src = prev_generic_call_src; - sema.generic_bound_arg_src = prev_generic_bound_arg_src; sema.generic_call_decl = prev_generic_call_decl; } @@ -9278,37 +9524,18 @@ fn finishFunc( return Air.internedToRef(if (opt_func_index != .none) opt_func_index else func_ty); } -fn genericArgSrcLoc(sema: *Sema, block: *Block, param_index: u32, param_src: LazySrcLoc) Module.SrcLoc { - const mod = sema.mod; - if (sema.generic_owner == .none) return 
param_src.toSrcLoc(mod.declPtr(block.src_decl), mod); - const arg_decl = sema.generic_call_decl.unwrap().?; - const arg_src: LazySrcLoc = if (param_index == 0 and sema.generic_bound_arg_src != null) - sema.generic_bound_arg_src.? - else - .{ .call_arg = .{ - .decl = arg_decl, - .call_node_offset = sema.generic_call_src.node_offset.x, - .arg_index = param_index - @intFromBool(sema.generic_bound_arg_src != null), - } }; - return arg_src.toSrcLoc(mod.declPtr(arg_decl), mod); -} - fn zirParam( sema: *Sema, block: *Block, inst: Zir.Inst.Index, - param_index: u32, comptime_syntax: bool, ) CompileError!void { - const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_tok; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index); const param_name: Zir.NullTerminatedString = @enumFromInt(extra.data.name); const body = sema.code.extra[extra.end..][0..extra.data.body_len]; - // We could be in a generic function instantiation, or we could be evaluating a generic - // function without any comptime args provided. const param_ty = param_ty: { const err = err: { // Make sure any nested param instructions don't clobber our work. @@ -9316,20 +9543,17 @@ fn zirParam( const prev_no_partial_func_type = sema.no_partial_func_ty; const prev_generic_owner = sema.generic_owner; const prev_generic_call_src = sema.generic_call_src; - const prev_generic_bound_arg_src = sema.generic_bound_arg_src; const prev_generic_call_decl = sema.generic_call_decl; block.params = .{}; sema.no_partial_func_ty = true; sema.generic_owner = .none; sema.generic_call_src = .unneeded; - sema.generic_bound_arg_src = null; sema.generic_call_decl = .none; defer { block.params = prev_params; sema.no_partial_func_ty = prev_no_partial_func_type; sema.generic_owner = prev_generic_owner; sema.generic_call_src = prev_generic_call_src; - sema.generic_bound_arg_src = prev_generic_bound_arg_src; sema.generic_call_decl = prev_generic_call_decl; } @@ -9341,11 +9565,6 @@ fn zirParam( }; switch (err) { error.GenericPoison => { - if (sema.inst_map.contains(inst)) { - // A generic function is about to evaluate to another generic function. - // Return an error instead. - return error.GenericPoison; - } // The type is not available until the generic instantiation. // We result the param instruction with a poison value and // insert an anytype parameter. @@ -9363,11 +9582,6 @@ fn zirParam( const is_comptime = sema.typeRequiresComptime(param_ty) catch |err| switch (err) { error.GenericPoison => { - if (sema.inst_map.contains(inst)) { - // A generic function is about to evaluate to another generic function. - // Return an error instead. - return error.GenericPoison; - } // The type is not available until the generic instantiation. // We result the param instruction with a poison value and // insert an anytype parameter. @@ -9382,46 +9596,6 @@ fn zirParam( else => |e| return e, } or comptime_syntax; - if (sema.inst_map.get(inst)) |arg| { - if (is_comptime and sema.generic_owner != .none) { - // We have a comptime value for this parameter so it should be elided from the - // function type of the function instruction in this block. 
- const coerced_arg = sema.coerce(block, param_ty, arg, .unneeded) catch |err| switch (err) { - error.NeededSourceLocation => { - // We are instantiating a generic function and a comptime arg - // cannot be coerced to the param type, but since we don't - // have the callee source location return `GenericPoison` - // so that the instantiation is failed and the coercion - // is handled by comptime call logic instead. - assert(sema.generic_owner != .none); - return error.GenericPoison; - }, - else => |e| return e, - }; - sema.inst_map.putAssumeCapacity(inst, coerced_arg); - if (try sema.resolveMaybeUndefVal(coerced_arg)) |val| { - sema.comptime_args[param_index] = val.toIntern(); - return; - } - const msg = msg: { - const src_loc = sema.genericArgSrcLoc(block, param_index, src); - const msg = try Module.ErrorMsg.create(gpa, src_loc, "{s}", .{ - @as([]const u8, "runtime-known argument passed to comptime parameter"), - }); - errdefer msg.destroy(gpa); - - if (sema.generic_call_decl != .none) { - try sema.errNote(block, src, msg, "{s}", .{@as([]const u8, "declared comptime here")}); - } - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); - } - // Even though a comptime argument is provided, the generic function wants to treat - // this as a runtime parameter. - assert(sema.inst_map.remove(inst)); - } - try block.params.append(sema.arena, .{ .ty = param_ty.toIntern(), .is_comptime = comptime_syntax, @@ -9447,75 +9621,10 @@ fn zirParamAnytype( sema: *Sema, block: *Block, inst: Zir.Inst.Index, - param_index: u32, comptime_syntax: bool, ) CompileError!void { - const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const param_name: Zir.NullTerminatedString = @enumFromInt(inst_data.start); - const src = inst_data.src(); - - if (sema.inst_map.get(inst)) |air_ref| { - const param_ty = sema.typeOf(air_ref); - // If we have a comptime value for this parameter, it should be elided - // from the function type of the function instruction in this block. - if (try sema.typeHasOnePossibleValue(param_ty)) |opv| { - sema.comptime_args[param_index] = opv.toIntern(); - return; - } - - if (comptime_syntax) { - if (try sema.resolveMaybeUndefVal(air_ref)) |val| { - sema.comptime_args[param_index] = val.toIntern(); - return; - } - const msg = msg: { - const src_loc = sema.genericArgSrcLoc(block, param_index, src); - const msg = try Module.ErrorMsg.create(gpa, src_loc, "{s}", .{ - @as([]const u8, "runtime-known argument passed to comptime parameter"), - }); - errdefer msg.destroy(gpa); - - if (sema.generic_call_decl != .none) { - try sema.errNote(block, src, msg, "{s}", .{@as([]const u8, "declared comptime here")}); - } - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); - } - - if (try sema.typeRequiresComptime(param_ty)) { - if (try sema.resolveMaybeUndefVal(air_ref)) |val| { - sema.comptime_args[param_index] = val.toIntern(); - return; - } - const msg = msg: { - const src_loc = sema.genericArgSrcLoc(block, param_index, src); - const msg = try Module.ErrorMsg.create(gpa, src_loc, "{s}", .{ - @as([]const u8, "runtime-known argument passed to comptime-only type parameter"), - }); - errdefer msg.destroy(gpa); - - if (sema.generic_call_decl != .none) { - try sema.errNote(block, src, msg, "{s}", .{@as([]const u8, "declared here")}); - } - - try sema.explainWhyTypeIsComptime(msg, src_loc, param_ty); - - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); - } - - // The parameter is runtime-known. 
- // The map is already populated but we do need to add a runtime parameter. - try block.params.append(sema.arena, .{ - .ty = param_ty.toIntern(), - .is_comptime = false, - .name = param_name, - }); - return; - } // We are evaluating a generic function without any comptime args provided. @@ -23152,7 +23261,21 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const callee_ty = sema.typeOf(func); const func_ty = try sema.checkCallArgumentCount(block, func, func_src, callee_ty, resolved_args.len, false); const ensure_result_used = extra.flags.ensure_result_used; - return sema.analyzeCall(block, func, func_ty, func_src, call_src, modifier, ensure_result_used, resolved_args, null, null, .@"@call"); + return sema.analyzeCall( + block, + func, + func_ty, + func_src, + call_src, + modifier, + ensure_result_used, + .{ .call_builtin = .{ + .call_node_offset = inst_data.src_node, + .args = resolved_args, + } }, + null, + .@"@call", + ); } fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { diff --git a/test/behavior/call.zig b/test/behavior/call.zig index c33c872347..d641d5d5ba 100644 --- a/test/behavior/call.zig +++ b/test/behavior/call.zig @@ -430,3 +430,64 @@ test "method call as parameter type" { try expectEqual(@as(u64, 123), S.foo(S{}, 123)); try expectEqual(@as(u64, 500), S.foo(S{}, 500)); } + +test "non-anytype generic parameters provide result type" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + const S = struct { + fn f(comptime T: type, y: T) !void { + try expectEqual(@as(T, 123), y); + } + + fn g(x: anytype, y: @TypeOf(x)) !void { + try expectEqual(@as(@TypeOf(x), 0x222), y); + } + }; + + var rt_u16: u16 = 123; + var rt_u32: u32 = 0x10000222; + + try S.f(u8, @intCast(rt_u16)); + try S.f(u8, @intCast(123)); + + try S.g(rt_u16, @truncate(rt_u32)); + try S.g(rt_u16, @truncate(0x10000222)); + + try comptime S.f(u8, @intCast(123)); + try comptime S.g(@as(u16, undefined), @truncate(0x99990222)); +} + +test "argument to generic function has correct result type" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + const S = struct { + fn foo(_: anytype, e: enum { a, b }) bool { + return e == .b; + } + + fn doTheTest() !void { + var t = true; + + // Since the enum literal passes through a runtime conditional here, these can only + // compile if RLS provides the correct result type to the argument + try expect(foo({}, if (!t) .a else .b)); + try expect(!foo("dummy", if (t) .a else .b)); + try expect(foo({}, if (t) .b else .a)); + try expect(!foo(123, if (t) .a else .a)); + try expect(foo(123, if (t) .b else .b)); + } + }; + + try S.doTheTest(); + try comptime S.doTheTest(); +} From 
7a7d0225d9825f0150e797a4ff06f6e56af08458 Mon Sep 17 00:00:00 2001
From: mlugg
Date: Sat, 5 Aug 2023 10:55:49 +0100
Subject: [PATCH 4/6] Sema: detect invalid field stores in tuple initialization

This bug was exposed by the previous commit, since array_init is now
used for tuple parameters.
---
 src/Sema.zig | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/src/Sema.zig b/src/Sema.zig
index 5aeb55ddc3..6752e19b6b 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -19393,13 +19393,14 @@ fn zirArrayInit(
         },
         else => |e| return e,
     };
+    const is_tuple = array_ty.zigTypeTag(mod) == .Struct;
     const sentinel_val = array_ty.sentinel(mod);

     const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @intFromBool(sentinel_val != null));
     defer gpa.free(resolved_args);
     for (args[1..], 0..) |arg, i| {
         const resolved_arg = try sema.resolveInst(arg);
-        const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct)
+        const elem_ty = if (is_tuple)
             array_ty.structFieldType(i, mod)
         else
             array_ty.elemType2(mod);
@@ -19412,6 +19413,18 @@ fn zirArrayInit(
             },
             else => return err,
         };
+        if (is_tuple) if (try array_ty.structFieldValueComptime(mod, i)) |field_val| {
+            const init_val = try sema.resolveMaybeUndefVal(resolved_args[i]) orelse {
+                const decl = mod.declPtr(block.src_decl);
+                const elem_src = mod.initSrc(src.node_offset.x, decl, i);
+                return sema.failWithNeededComptime(block, elem_src, "value stored in comptime field must be comptime-known");
+            };
+            if (!field_val.eql(init_val, elem_ty, mod)) {
+                const decl = mod.declPtr(block.src_decl);
+                const elem_src = mod.initSrc(src.node_offset.x, decl, i);
+                return sema.failWithInvalidComptimeFieldStore(block, elem_src, array_ty, i);
+            }
+        };
     }

     if (sentinel_val) |some| {

From 2209813baee8715e739d07b16a2910408ff9230a Mon Sep 17 00:00:00 2001
From: mlugg
Date: Sat, 5 Aug 2023 11:09:39 +0100
Subject: [PATCH 5/6] cases: modify error wording to match new errors

The changes to result locations and generic calls have caused mild
changes to some compile errors. Some are slightly better, some slightly
worse, but none of the changes are major.
--- .../cases/compile_errors/anytype_param_requires_comptime.zig | 2 +- test/cases/compile_errors/error_in_typeof_param.zig | 2 +- .../generic_method_call_with_invalid_param.zig | 2 ++ .../cases/compile_errors/invalid_store_to_comptime_field.zig | 5 +++-- test/cases/compile_errors/wrong_types_given_to_export.zig | 2 +- .../zero-bit_generic_args_are_coerced_to_param_type.zig | 1 + test/compile_errors.zig | 2 +- 7 files changed, 10 insertions(+), 6 deletions(-) diff --git a/test/cases/compile_errors/anytype_param_requires_comptime.zig b/test/cases/compile_errors/anytype_param_requires_comptime.zig index 453bd5bce5..6fd86b0de3 100644 --- a/test/cases/compile_errors/anytype_param_requires_comptime.zig +++ b/test/cases/compile_errors/anytype_param_requires_comptime.zig @@ -16,7 +16,7 @@ pub export fn entry() void { // backend=stage2 // target=native // -// :7:14: error: runtime-known argument passed to comptime-only type parameter +// :7:14: error: runtime-known argument passed to parameter of comptime-only type // :9:12: note: declared here // :4:16: note: struct requires comptime because of this field // :4:16: note: types are not available at runtime diff --git a/test/cases/compile_errors/error_in_typeof_param.zig b/test/cases/compile_errors/error_in_typeof_param.zig index 02d33e04f9..073bd29bc3 100644 --- a/test/cases/compile_errors/error_in_typeof_param.zig +++ b/test/cases/compile_errors/error_in_typeof_param.zig @@ -11,4 +11,4 @@ pub export fn entry() void { // target=native // // :6:31: error: unable to resolve comptime value -// :6:31: note: argument to parameter with comptime-only type must be comptime-known +// :6:31: note: value being casted to 'comptime_int' must be comptime-known diff --git a/test/cases/compile_errors/generic_method_call_with_invalid_param.zig b/test/cases/compile_errors/generic_method_call_with_invalid_param.zig index 0cea61301c..ccea6708e4 100644 --- a/test/cases/compile_errors/generic_method_call_with_invalid_param.zig +++ b/test/cases/compile_errors/generic_method_call_with_invalid_param.zig @@ -25,6 +25,8 @@ const S = struct { // target=native // // :3:18: error: expected type 'bool', found 'void' +// :18:43: note: parameter type declared here // :8:18: error: expected type 'void', found 'bool' +// :19:43: note: parameter type declared here // :14:26: error: runtime-known argument passed to comptime parameter // :20:57: note: declared comptime here diff --git a/test/cases/compile_errors/invalid_store_to_comptime_field.zig b/test/cases/compile_errors/invalid_store_to_comptime_field.zig index fd6fff5e17..b2605f9158 100644 --- a/test/cases/compile_errors/invalid_store_to_comptime_field.zig +++ b/test/cases/compile_errors/invalid_store_to_comptime_field.zig @@ -76,8 +76,9 @@ pub export fn entry8() void { // :19:38: error: value stored in comptime field does not match the default value of the field // :31:19: error: value stored in comptime field does not match the default value of the field // :25:29: note: default value set here -// :41:16: error: value stored in comptime field does not match the default value of the field +// :41:19: error: value stored in comptime field does not match the default value of the field +// :35:29: note: default value set here // :45:12: error: value stored in comptime field does not match the default value of the field -// :53:16: error: value stored in comptime field does not match the default value of the field +// :53:25: error: value stored in comptime field does not match the default value of the field // :66:43: error: value 
stored in comptime field does not match the default value of the field // :59:35: error: value stored in comptime field does not match the default value of the field diff --git a/test/cases/compile_errors/wrong_types_given_to_export.zig b/test/cases/compile_errors/wrong_types_given_to_export.zig index 6e688d33d6..8278deac15 100644 --- a/test/cases/compile_errors/wrong_types_given_to_export.zig +++ b/test/cases/compile_errors/wrong_types_given_to_export.zig @@ -7,5 +7,5 @@ comptime { // backend=stage2 // target=native // -// :3:51: error: expected type 'builtin.GlobalLinkage', found 'u32' +// :3:21: error: expected type 'builtin.GlobalLinkage', found 'u32' // :?:?: note: enum declared here diff --git a/test/cases/compile_errors/zero-bit_generic_args_are_coerced_to_param_type.zig b/test/cases/compile_errors/zero-bit_generic_args_are_coerced_to_param_type.zig index 0288979084..bf819220a3 100644 --- a/test/cases/compile_errors/zero-bit_generic_args_are_coerced_to_param_type.zig +++ b/test/cases/compile_errors/zero-bit_generic_args_are_coerced_to_param_type.zig @@ -8,3 +8,4 @@ pub export fn entry() void { // target=native // // :3:21: error: expected type 'u0', found '*const [4:0]u8' +// :1:23: note: parameter type declared here diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 8675eb9ed7..5cc5c36f3f 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -207,7 +207,7 @@ pub fn addCases(ctx: *Cases) !void { ":1:38: note: declared comptime here", ":8:36: error: runtime-known argument passed to comptime parameter", ":2:41: note: declared comptime here", - ":13:29: error: runtime-known argument passed to comptime-only type parameter", + ":13:29: error: runtime-known argument passed to parameter of comptime-only type", ":3:24: note: declared here", ":12:35: note: struct requires comptime because of this field", ":12:35: note: types are not available at runtime", From f32b9bc776bfffe0a1adadc013ff3fa3e5d6d34b Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 8 Aug 2023 13:55:43 +0100 Subject: [PATCH 6/6] Sema: add references to generic instantiations This makes the reference trace appear for generic calls where it previously did not. Resolves: #16725 --- src/Sema.zig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Sema.zig b/src/Sema.zig index 6752e19b6b..e89207cc00 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -7908,6 +7908,8 @@ fn instantiateGenericCall( const callee = mod.funcInfo(callee_index); callee.branchQuota(ip).* = @max(callee.branchQuota(ip).*, sema.branch_quota); + try sema.addReferencedBy(block, call_src, callee.owner_decl); + // Make a runtime call to the new function, making sure to omit the comptime args. const func_ty = callee.ty.toType(); const func_ty_info = mod.typeToFunc(func_ty).?;
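
As a hypothetical illustration of the scenario this last patch improves (the names
assertUnsigned and entry and the error wording below are made up for this sketch and
are not part of the patch or of the Zig test suite): a compile error raised while
analyzing the body of a generic instantiation should now come with a reference trace
that leads back to the call site which triggered the instantiation.

    fn assertUnsigned(comptime T: type, x: T) T {
        // Fails during analysis of the instantiation's body for signed or non-integer T.
        switch (@typeInfo(T)) {
            .Int => |info| if (info.signedness != .unsigned)
                @compileError("expected an unsigned integer, got " ++ @typeName(T)),
            else => @compileError("expected an integer type, got " ++ @typeName(T)),
        }
        return x;
    }

    pub export fn entry() void {
        // With the call site recorded via addReferencedBy, the reference trace for the
        // @compileError above should now include this line.
        _ = assertUnsigned(i32, -1);
    }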