From 2e27967a81d325047e6d82f8c0722a8a654d1ac7 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 11 Feb 2024 21:23:47 +0000 Subject: [PATCH 1/8] AstGen: avoid emitting multiple `ret_type` instructions This is a small optimization to generated ZIR. In any function where the return type is not a trivial Ref, we know it is almost certainly not `void` (unless the user aliased it or did something else weird to fool AstGen), and thus the return type is very likely to be required for return value RLS at some point. Thus, we can just emit one `ret_type` at the start of the function and use it throughout. This sees a very small improvement in overall ZIR bytes. --- src/AstGen.zig | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/src/AstGen.zig b/src/AstGen.zig index e444f836bb..0838055cdd 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -44,6 +44,9 @@ compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .{}, /// The topmost block of the current function. fn_block: ?*GenZir = null, fn_var_args: bool = false, +/// The return type of the current function. This may be a trivial `Ref`, or +/// otherwise it refers to a `ret_type` instruction. +fn_ret_ty: Zir.Inst.Ref = .none, /// Maps string table indexes to the first `@import` ZIR instruction /// that uses this string as the operand. imports: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, Ast.TokenIndex) = .{}, @@ -4284,8 +4287,19 @@ fn fnDecl( fn_gz.instructions_top = ret_gz.instructions.items.len; const prev_fn_block = astgen.fn_block; + const prev_fn_ret_ty = astgen.fn_ret_ty; astgen.fn_block = &fn_gz; - defer astgen.fn_block = prev_fn_block; + astgen.fn_ret_ty = if (is_inferred_error or ret_ref.toIndex() != null) r: { + // We're essentially guaranteed to need the return type at some point, + // since the return type is likely not `void` or `noreturn` so there + // will probably be an explicit return requiring RLS. Fetch this + // return type now so the rest of the function can use it. + break :r try fn_gz.addNode(.ret_type, decl_node); + } else ret_ref; + defer { + astgen.fn_block = prev_fn_block; + astgen.fn_ret_ty = prev_fn_ret_ty; + } const prev_var_args = astgen.fn_var_args; astgen.fn_var_args = is_var_args; @@ -4732,8 +4746,13 @@ fn testDecl( defer fn_block.unstack(); const prev_fn_block = astgen.fn_block; + const prev_fn_ret_ty = astgen.fn_ret_ty; astgen.fn_block = &fn_block; - defer astgen.fn_block = prev_fn_block; + astgen.fn_ret_ty = .anyerror_void_error_union_type; + defer { + astgen.fn_block = prev_fn_block; + astgen.fn_ret_ty = prev_fn_ret_ty; + } astgen.advanceSourceCursorToNode(body_node); const lbrace_line = astgen.source_line - decl_block.decl_line; @@ -8038,7 +8057,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref .rl = .{ .ptr = .{ .inst = try gz.addNode(.ret_ptr, node) } }, .ctx = .@"return", } else .{ - .rl = .{ .ty = try gz.addNode(.ret_type, node) }, + .rl = .{ .ty = astgen.fn_ret_ty }, .ctx = .@"return", }; const prev_anon_name_strategy = gz.anon_name_strategy; From aba29f9789aff0c1ace2a1d0031818ec2c04070c Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 11 Feb 2024 21:26:35 +0000 Subject: [PATCH 2/8] AstGen: fix elision of unnecessary `dbg_stmt` instructions AstGen has logic to elide leading `dbg_stmt` instructions when multiple are emitted consecutively; however, it only applied in some cases. A simple reshuffle here makes this logic apply universally, saving some bytes in ZIR. 
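To illustrate the rule this patch centralizes in `emitDbgStmt`: if the most recently emitted instruction is already a `dbg_stmt`, its line/column are rewritten in place instead of appending a second marker. Below is a minimal standalone sketch of that idea, using simplified types rather than the compiler's actual ZIR structures (names here are illustrative only):

    const std = @import("std");

    const Inst = union(enum) {
        dbg_stmt: struct { line: u32, column: u32 },
        other: void,
    };

    // Sketch only: collapse consecutive debug-statement markers by updating
    // the previous one in place rather than appending a redundant one.
    fn emitDbgStmt(insts: *std.ArrayList(Inst), line: u32, column: u32) !void {
        if (insts.items.len > 0) {
            const last = &insts.items[insts.items.len - 1];
            if (last.* == .dbg_stmt) {
                last.* = .{ .dbg_stmt = .{ .line = line, .column = column } };
                return;
            }
        }
        try insts.append(.{ .dbg_stmt = .{ .line = line, .column = column } });
    }

    test "consecutive dbg_stmt markers collapse" {
        var insts = std.ArrayList(Inst).init(std.testing.allocator);
        defer insts.deinit();
        try emitDbgStmt(&insts, 1, 1);
        try emitDbgStmt(&insts, 2, 5); // no runtime instruction emitted in between
        try std.testing.expectEqual(@as(usize, 1), insts.items.len);
        try std.testing.expectEqual(@as(u32, 2), insts.items[0].dbg_stmt.line);
    }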
--- src/AstGen.zig | 73 +++++++++++++++++++++++++++++--------------------- 1 file changed, 43 insertions(+), 30 deletions(-) diff --git a/src/AstGen.zig b/src/AstGen.zig index 0838055cdd..a5a5b54f69 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -3332,31 +3332,11 @@ fn emitDbgNode(gz: *GenZir, node: Ast.Node.Index) !void { // If the current block will be evaluated only during semantic analysis // then no dbg_stmt ZIR instruction is needed. if (gz.is_comptime) return; - const astgen = gz.astgen; astgen.advanceSourceCursorToNode(node); const line = astgen.source_line - gz.decl_line; const column = astgen.source_column; - - if (gz.instructions.items.len > 0) { - const last = gz.instructions.items[gz.instructions.items.len - 1]; - const zir_tags = astgen.instructions.items(.tag); - if (zir_tags[@intFromEnum(last)] == .dbg_stmt) { - const zir_datas = astgen.instructions.items(.data); - zir_datas[@intFromEnum(last)].dbg_stmt = .{ - .line = line, - .column = column, - }; - return; - } - } - - _ = try gz.add(.{ .tag = .dbg_stmt, .data = .{ - .dbg_stmt = .{ - .line = line, - .column = column, - }, - } }); + try emitDbgStmt(gz, .{ line, column }); } fn assign(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!void { @@ -7143,7 +7123,7 @@ fn switchExprErrUnion( block_scope.setBreakResultInfo(block_ri); // Sema expects a dbg_stmt immediately before switch_block_err_union - try emitDbgStmt(parent_gz, operand_lc); + try emitDbgStmtForceCurrentIndex(parent_gz, operand_lc); // This gets added to the parent block later, after the item expressions. const switch_block = try parent_gz.makeBlockInst(.switch_block_err_union, switch_node); @@ -7723,7 +7703,7 @@ fn switchExpr( block_scope.setBreakResultInfo(block_ri); // Sema expects a dbg_stmt immediately before switch_block(_ref) - try emitDbgStmt(parent_gz, operand_lc); + try emitDbgStmtForceCurrentIndex(parent_gz, operand_lc); // This gets added to the parent block later, after the item expressions. const switch_tag: Zir.Inst.Tag = if (any_payload_is_ref) .switch_block_ref else .switch_block; const switch_block = try parent_gz.makeBlockInst(switch_tag, switch_node); @@ -9847,13 +9827,8 @@ fn callExpr( astgen.advanceSourceCursor(astgen.tree.tokens.items(.start)[call.ast.lparen]); const line = astgen.source_line - gz.decl_line; const column = astgen.source_column; - - _ = try gz.add(.{ .tag = .dbg_stmt, .data = .{ - .dbg_stmt = .{ - .line = line, - .column = column, - }, - } }); + // Sema expects a dbg_stmt immediately before call, + try emitDbgStmtForceCurrentIndex(gz, .{ line, column }); } switch (callee) { @@ -13536,6 +13511,44 @@ fn countBodyLenAfterFixups(astgen: *AstGen, body: []const Zir.Inst.Index) u32 { fn emitDbgStmt(gz: *GenZir, lc: LineColumn) !void { if (gz.is_comptime) return; + if (gz.instructions.items.len > 0) { + const astgen = gz.astgen; + const last = gz.instructions.items[gz.instructions.items.len - 1]; + if (astgen.instructions.items(.tag)[@intFromEnum(last)] == .dbg_stmt) { + astgen.instructions.items(.data)[@intFromEnum(last)].dbg_stmt = .{ + .line = lc[0], + .column = lc[1], + }; + return; + } + } + + _ = try gz.add(.{ .tag = .dbg_stmt, .data = .{ + .dbg_stmt = .{ + .line = lc[0], + .column = lc[1], + }, + } }); +} + +/// In some cases, Sema expects us to generate a `dbg_stmt` at the instruction +/// *index* directly preceding the next instruction (e.g. if a call is %10, it +/// expects a dbg_stmt at %9). TODO: this logic may allow redundant dbg_stmt +/// instructions; fix up Sema so we don't need it! 
+fn emitDbgStmtForceCurrentIndex(gz: *GenZir, lc: LineColumn) !void { + const astgen = gz.astgen; + if (gz.instructions.items.len > 0 and + @intFromEnum(gz.instructions.items[gz.instructions.items.len - 1]) == astgen.instructions.len - 1) + { + const last = astgen.instructions.len - 1; + if (astgen.instructions.items(.tag)[last] == .dbg_stmt) { + astgen.instructions.items(.data)[last].dbg_stmt = .{ + .line = lc[0], + .column = lc[1], + }; + return; + } + } _ = try gz.add(.{ .tag = .dbg_stmt, .data = .{ .dbg_stmt = .{ From 434537213e406191959818f573b24ca45c1b0e45 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 13 Feb 2024 01:54:57 +0000 Subject: [PATCH 3/8] Sema: eliminate `src` field `sema.src` is a failed experiment. It introduces complexity, and makes often unwarranted assumptions about the existence of instructions providing source locations, requiring an unreasonable amount of caution in AstGen for correctness. Eliminating it simplifies the whole frontend. This required adding source locations to a few instructions, but the cost in ZIR bytes should be counteracted by the other work on this branch. --- src/AstGen.zig | 140 ++++++++++++++++++++++------------------ src/Autodoc.zig | 6 +- src/Sema.zig | 159 +++++++++++++++++++++------------------------- src/Zir.zig | 85 +++++++++++++++---------- src/print_zir.zig | 37 +++++------ 5 files changed, 222 insertions(+), 205 deletions(-) diff --git a/src/AstGen.zig b/src/AstGen.zig index a5a5b54f69..0117f0ac26 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -2122,7 +2122,7 @@ fn restoreErrRetIndex( else => .none, // always restore/pop }, }; - _ = try gz.addRestoreErrRetIndex(bt, .{ .if_non_error = op }); + _ = try gz.addRestoreErrRetIndex(bt, .{ .if_non_error = op }, node); } fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { @@ -2179,7 +2179,7 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn // As our last action before the break, "pop" the error trace if needed if (!block_gz.is_comptime) - _ = try parent_gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always); + _ = try parent_gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always, node); _ = try parent_gz.addBreak(break_tag, block_inst, .void_value); return Zir.Inst.Ref.unreachable_value; @@ -2271,7 +2271,7 @@ fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) // As our last action before the continue, "pop" the error trace if needed if (!gen_zir.is_comptime) - _ = try parent_gz.addRestoreErrRetIndex(.{ .block = continue_block }, .always); + _ = try parent_gz.addRestoreErrRetIndex(.{ .block = continue_block }, .always, node); _ = try parent_gz.addBreak(break_tag, continue_block, .void_value); return Zir.Inst.Ref.unreachable_value; @@ -2331,7 +2331,7 @@ fn blockExpr( if (!block_scope.endsWithNoReturn()) { // As our last action before the break, "pop" the error trace if needed - _ = try gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always); + _ = try gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always, block_node); _ = try block_scope.addBreak(.@"break", block_inst, .void_value); } @@ -2426,7 +2426,7 @@ fn labeledBlockExpr( try blockExprStmts(&block_scope, &block_scope.base, statements); if (!block_scope.endsWithNoReturn()) { // As our last action before the return, "pop" the error trace if needed - _ = try gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always); + _ = try gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always, block_node); _ = try 
block_scope.addBreak(.@"break", block_inst, .void_value); } @@ -2818,7 +2818,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As .export_value, .set_eval_branch_quota, .atomic_store, - .store, .store_node, .store_to_inferred_ptr, .resolve_inferred_alloc, @@ -2829,7 +2828,8 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As .validate_deref, .validate_destructure, .save_err_ret_index, - .restore_err_ret_index, + .restore_err_ret_index_unconditional, + .restore_err_ret_index_fn_entry, .validate_struct_init_ty, .validate_struct_init_result_ty, .validate_ptr_struct_init, @@ -3692,7 +3692,10 @@ fn assignOp( .lhs = lhs, .rhs = rhs, }); - _ = try gz.addBin(.store, lhs_ptr, result); + _ = try gz.addPlNode(.store_node, infix_node, Zir.Inst.Bin{ + .lhs = lhs_ptr, + .rhs = result, + }); } fn assignShift( @@ -3715,7 +3718,10 @@ fn assignShift( .lhs = lhs, .rhs = rhs, }); - _ = try gz.addBin(.store, lhs_ptr, result); + _ = try gz.addPlNode(.store_node, infix_node, Zir.Inst.Bin{ + .lhs = lhs_ptr, + .rhs = result, + }); } fn assignShiftSat(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!void { @@ -3733,7 +3739,10 @@ fn assignShiftSat(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerE .lhs = lhs, .rhs = rhs, }); - _ = try gz.addBin(.store, lhs_ptr, result); + _ = try gz.addPlNode(.store_node, infix_node, Zir.Inst.Bin{ + .lhs = lhs_ptr, + .rhs = result, + }); } fn ptrType( @@ -4294,7 +4303,7 @@ fn fnDecl( if (!fn_gz.endsWithNoReturn()) { // As our last action before the return, "pop" the error trace if needed - _ = try gz.addRestoreErrRetIndex(.ret, .always); + _ = try gz.addRestoreErrRetIndex(.ret, .always, decl_node); // Add implicit return at end of function. _ = try fn_gz.addUnTok(.ret_implicit, .void_value, tree.lastToken(body_node)); @@ -4742,7 +4751,7 @@ fn testDecl( if (fn_block.isEmpty() or !fn_block.refIsNoReturn(block_result)) { // As our last action before the return, "pop" the error trace if needed - _ = try gz.addRestoreErrRetIndex(.ret, .always); + _ = try gz.addRestoreErrRetIndex(.ret, .always, node); // Add implicit return at end of function. 
_ = try fn_block.addUnTok(.ret_implicit, .void_value, tree.lastToken(body_node)); @@ -6149,7 +6158,7 @@ fn boolBinOp( const node_datas = tree.nodes.items(.data); const lhs = try expr(gz, scope, bool_ri, node_datas[node].lhs); - const bool_br = try gz.addBoolBr(zir_tag, lhs); + const bool_br = (try gz.addPlNodePayloadIndex(zir_tag, node, undefined)).toIndex().?; var rhs_scope = gz.makeSubBlock(scope); defer rhs_scope.unstack(); @@ -6157,7 +6166,7 @@ fn boolBinOp( if (!gz.refIsNoReturn(rhs)) { _ = try rhs_scope.addBreakWithSrcNode(.break_inline, bool_br, rhs, node_datas[node].rhs); } - try rhs_scope.setBoolBrBody(bool_br); + try rhs_scope.setBoolBrBody(bool_br, lhs); const block_ref = bool_br.toRef(); return rvalue(gz, ri, block_ref, node); @@ -6725,7 +6734,10 @@ fn forExpr( const alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime_mut else .alloc; const index_ptr = try parent_gz.addUnNode(alloc_tag, .usize_type, node); // initialize to zero - _ = try parent_gz.addBin(.store, index_ptr, .zero_usize); + _ = try parent_gz.addPlNode(.store_node, node, Zir.Inst.Bin{ + .lhs = index_ptr, + .rhs = .zero_usize, + }); break :blk index_ptr; }; @@ -6955,7 +6967,10 @@ fn forExpr( .lhs = index, .rhs = .one_usize, }); - _ = try loop_scope.addBin(.store, index_ptr, index_plus_one); + _ = try loop_scope.addPlNode(.store_node, node, Zir.Inst.Bin{ + .lhs = index_ptr, + .rhs = index_plus_one, + }); const repeat_tag: Zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat; _ = try loop_scope.addNode(repeat_tag, node); @@ -8008,7 +8023,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref try genDefers(gz, defer_outer, scope, .normal_only); // As our last action before the return, "pop" the error trace if needed - _ = try gz.addRestoreErrRetIndex(.ret, .always); + _ = try gz.addRestoreErrRetIndex(.ret, .always, node); _ = try gz.addUnNode(.ret_node, .void_value, node); return Zir.Inst.Ref.unreachable_value; @@ -8051,7 +8066,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref try genDefers(gz, defer_outer, scope, .normal_only); // As our last action before the return, "pop" the error trace if needed - _ = try gz.addRestoreErrRetIndex(.ret, .always); + _ = try gz.addRestoreErrRetIndex(.ret, .always, node); try emitDbgStmt(gz, ret_lc); try gz.addRet(ri, operand, node); @@ -8074,7 +8089,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref // As our last action before the return, "pop" the error trace if needed const result = if (ri.rl == .ptr) try gz.addUnNode(.load, ri.rl.ptr.inst, node) else operand; - _ = try gz.addRestoreErrRetIndex(.ret, .{ .if_non_error = result }); + _ = try gz.addRestoreErrRetIndex(.ret, .{ .if_non_error = result }, node); try gz.addRet(ri, operand, node); return Zir.Inst.Ref.unreachable_value; @@ -8091,7 +8106,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref try genDefers(&then_scope, defer_outer, scope, .normal_only); // As our last action before the return, "pop" the error trace if needed - _ = try then_scope.addRestoreErrRetIndex(.ret, .always); + _ = try then_scope.addRestoreErrRetIndex(.ret, .always, node); try emitDbgStmt(&then_scope, ret_lc); try then_scope.addRet(ri, operand, node); @@ -10979,7 +10994,10 @@ fn rvalueInner( return .void_value; }, .inferred_ptr => |alloc| { - _ = try gz.addBin(.store_to_inferred_ptr, alloc, result); + _ = try gz.addPlNode(.store_to_inferred_ptr, src_node, Zir.Inst.Bin{ + .lhs = alloc, + .rhs = result, + }); 
return .void_value; }, .destructure => |destructure| { @@ -11006,7 +11024,10 @@ fn rvalueInner( }); }, .inferred_ptr => |ptr_inst| { - _ = try gz.addBin(.store_to_inferred_ptr, ptr_inst, elem_val); + _ = try gz.addPlNode(.store_to_inferred_ptr, src_node, Zir.Inst.Bin{ + .lhs = ptr_inst, + .rhs = elem_val, + }); }, .discard => unreachable, } @@ -11828,19 +11849,20 @@ const GenZir = struct { } /// Assumes nothing stacked on `gz`. Unstacks `gz`. - fn setBoolBrBody(gz: *GenZir, inst: Zir.Inst.Index) !void { + fn setBoolBrBody(gz: *GenZir, bool_br: Zir.Inst.Index, bool_br_lhs: Zir.Inst.Ref) !void { const astgen = gz.astgen; const gpa = astgen.gpa; const body = gz.instructionsSlice(); const body_len = astgen.countBodyLenAfterFixups(body); try astgen.extra.ensureUnusedCapacity( gpa, - @typeInfo(Zir.Inst.Block).Struct.fields.len + body_len, + @typeInfo(Zir.Inst.BoolBr).Struct.fields.len + body_len, ); const zir_datas = astgen.instructions.items(.data); - zir_datas[@intFromEnum(inst)].bool_br.payload_index = astgen.addExtraAssumeCapacity( - Zir.Inst.Block{ .body_len = body_len }, - ); + zir_datas[@intFromEnum(bool_br)].pl_node.payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.BoolBr{ + .lhs = bool_br_lhs, + .body_len = body_len, + }); astgen.appendBodyWithFixups(body); gz.unstack(); } @@ -12225,30 +12247,6 @@ const GenZir = struct { return new_index.toRef(); } - /// Note that this returns a `Zir.Inst.Index` not a ref. - /// Leaves the `payload_index` field undefined. - fn addBoolBr( - gz: *GenZir, - tag: Zir.Inst.Tag, - lhs: Zir.Inst.Ref, - ) !Zir.Inst.Index { - assert(lhs != .none); - const gpa = gz.astgen.gpa; - try gz.instructions.ensureUnusedCapacity(gpa, 1); - try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); - - const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); - gz.astgen.instructions.appendAssumeCapacity(.{ - .tag = tag, - .data = .{ .bool_br = .{ - .lhs = lhs, - .payload_index = undefined, - } }, - }); - gz.instructions.appendAssumeCapacity(new_index); - return new_index; - } - fn addInt(gz: *GenZir, integer: u64) !Zir.Inst.Ref { return gz.add(.{ .tag = .int, @@ -12569,17 +12567,37 @@ const GenZir = struct { always: void, if_non_error: Zir.Inst.Ref, }, + src_node: Ast.Node.Index, ) !Zir.Inst.Index { - return gz.addAsIndex(.{ - .tag = .restore_err_ret_index, - .data = .{ .restore_err_ret_index = .{ - .block = switch (bt) { - .ret => .none, - .block => |b| b.toRef(), - }, - .operand = if (cond == .if_non_error) cond.if_non_error else .none, - } }, - }); + switch (cond) { + .always => return gz.addAsIndex(.{ + .tag = .restore_err_ret_index_unconditional, + .data = .{ .un_node = .{ + .operand = switch (bt) { + .ret => .none, + .block => |b| b.toRef(), + }, + .src_node = gz.nodeIndexToRelative(src_node), + } }, + }), + .if_non_error => |operand| switch (bt) { + .ret => return gz.addAsIndex(.{ + .tag = .restore_err_ret_index_fn_entry, + .data = .{ .un_node = .{ + .operand = operand, + .src_node = gz.nodeIndexToRelative(src_node), + } }, + }), + .block => |block| return (try gz.addExtendedPayload( + .restore_err_ret_index, + Zir.Inst.RestoreErrRetIndex{ + .src_node = gz.nodeIndexToRelative(src_node), + .block = block.toRef(), + .operand = operand, + }, + )).toIndex().?, + }, + } } fn addBreak( diff --git a/src/Autodoc.zig b/src/Autodoc.zig index e5f0993bcb..6822d3a048 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -1799,7 +1799,8 @@ fn walkInstruction( }; }, .bool_br_and, .bool_br_or => { - const bool_br = data[@intFromEnum(inst)].bool_br; + const 
pl_node = data[@intFromEnum(inst)].pl_node; + const extra = file.zir.extraData(Zir.Inst.BoolBr, pl_node.payload_index); const bin_index = self.exprs.items.len; try self.exprs.append(self.arena, .{ .binOp = .{ .lhs = 0, .rhs = 0 } }); @@ -1808,14 +1809,13 @@ fn walkInstruction( file, parent_scope, parent_src, - bool_br.lhs, + extra.data.lhs, false, call_ctx, ); const lhs_index = self.exprs.items.len; try self.exprs.append(self.arena, lhs.expr); - const extra = file.zir.extraData(Zir.Inst.Block, bool_br.payload_index); const rhs = try self.walkInstruction( file, parent_scope, diff --git a/src/Sema.zig b/src/Sema.zig index 874c83ae0c..e0ff94ec83 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -50,11 +50,6 @@ branch_count: u32 = 0, /// Populated when returning `error.ComptimeBreak`. Used to communicate the /// break instruction up the stack to find the corresponding Block. comptime_break_inst: Zir.Inst.Index = undefined, -/// This field is updated when a new source location becomes active, so that -/// instructions which do not have explicitly mapped source locations still have -/// access to the source location set by the previous instruction which did -/// contain a mapped source location. -src: LazySrcLoc = .{ .token_offset = 0 }, decl_val_table: std.AutoHashMapUnmanaged(InternPool.DeclIndex, Air.Inst.Ref) = .{}, /// When doing a generic function instantiation, this array collects a value /// for each parameter of the generic owner. `none` for non-comptime parameters. @@ -1006,10 +1001,10 @@ fn analyzeBodyInner( const air_inst: Air.Inst.Ref = switch (tags[@intFromEnum(inst)]) { // zig fmt: off .alloc => try sema.zirAlloc(block, inst), - .alloc_inferred => try sema.zirAllocInferred(block, inst, true), - .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, false), - .alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, true), - .alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, false), + .alloc_inferred => try sema.zirAllocInferred(block, true), + .alloc_inferred_mut => try sema.zirAllocInferred(block, false), + .alloc_inferred_comptime => try sema.zirAllocInferredComptime(true), + .alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(false), .alloc_mut => try sema.zirAllocMut(block, inst), .alloc_comptime_mut => try sema.zirAllocComptime(block, inst), .make_ptr_const => try sema.zirMakePtrConst(block, inst), @@ -1308,6 +1303,11 @@ fn analyzeBodyInner( i += 1; continue; }, + .restore_err_ret_index => { + try sema.zirRestoreErrRetIndex(block, extended); + i += 1; + continue; + }, .value_placeholder => unreachable, // never appears in a body }; }, @@ -1369,11 +1369,6 @@ fn analyzeBodyInner( i += 1; continue; }, - .store => { - try sema.zirStore(block, inst); - i += 1; - continue; - }, .store_node => { try sema.zirStoreNode(block, inst); i += 1; @@ -1518,8 +1513,15 @@ fn analyzeBodyInner( i += 1; continue; }, - .restore_err_ret_index => { - try sema.zirRestoreErrRetIndex(block, inst); + .restore_err_ret_index_unconditional => { + const un_node = datas[@intFromEnum(inst)].un_node; + try sema.restoreErrRetIndex(block, un_node.src(), un_node.operand, .none); + i += 1; + continue; + }, + .restore_err_ret_index_fn_entry => { + const un_node = datas[@intFromEnum(inst)].un_node; + try sema.restoreErrRetIndex(block, un_node.src(), .none, un_node.operand); i += 1; continue; }, @@ -2779,7 +2781,7 @@ fn zirStructDecl( const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset: i32 = @bitCast(sema.code.extra[extended.operand + 
@typeInfo(Zir.Inst.StructDecl).Struct.fields.len]); break :blk LazySrcLoc.nodeOffset(node_offset); - } else sema.src; + } else unreachable; // MLUGG TODO // Because these three things each reference each other, `undefined` // placeholders are used before being set after the struct type gains an @@ -2945,7 +2947,7 @@ fn zirEnumDecl( const node_offset: i32 = @bitCast(sema.code.extra[extra_index]); extra_index += 1; break :blk LazySrcLoc.nodeOffset(node_offset); - } else sema.src; + } else unreachable; // MLUGG TODO const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x }; const tag_type_ref = if (small.has_tag_type) blk: { @@ -3218,7 +3220,7 @@ fn zirUnionDecl( const node_offset: i32 = @bitCast(sema.code.extra[extra_index]); extra_index += 1; break :blk LazySrcLoc.nodeOffset(node_offset); - } else sema.src; + } else unreachable; // MLUGG TODO extra_index += @intFromBool(small.has_tag_type); extra_index += @intFromBool(small.has_body_len); @@ -3327,7 +3329,7 @@ fn zirOpaqueDecl( const node_offset: i32 = @bitCast(sema.code.extra[extra_index]); extra_index += 1; break :blk LazySrcLoc.nodeOffset(node_offset); - } else sema.src; + } else unreachable; // MLUGG TODO const decls_len = if (small.has_decls_len) blk: { const decls_len = sema.code.extra[extra_index]; @@ -3977,13 +3979,9 @@ fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Ai fn zirAllocInferredComptime( sema: *Sema, - inst: Zir.Inst.Index, is_const: bool, ) CompileError!Air.Inst.Ref { const gpa = sema.gpa; - const src_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].node; - const src = LazySrcLoc.nodeOffset(src_node); - sema.src = src; try sema.air_instructions.append(gpa, .{ .tag = .inferred_alloc_comptime, @@ -4042,16 +4040,12 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai fn zirAllocInferred( sema: *Sema, block: *Block, - inst: Zir.Inst.Index, is_const: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const gpa = sema.gpa; - const src_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].node; - const src = LazySrcLoc.nodeOffset(src_node); - sema.src = src; if (block.is_comptime) { try sema.air_instructions.append(gpa, .{ @@ -5428,10 +5422,11 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi const tracy = trace(@src()); defer tracy.end(); - const src: LazySrcLoc = sema.src; - const bin_inst = sema.code.instructions.items(.data)[@intFromEnum(inst)].bin; - const ptr = try sema.resolveInst(bin_inst.lhs); - const operand = try sema.resolveInst(bin_inst.rhs); + const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; + const src = pl_node.src(); + const bin = sema.code.extraData(Zir.Inst.Bin, pl_node.payload_index).data; + const ptr = try sema.resolveInst(bin.lhs); + const operand = try sema.resolveInst(bin.rhs); const ptr_inst = ptr.toIndex().?; const air_datas = sema.air_instructions.items(.data); @@ -5496,16 +5491,6 @@ fn zirSetEvalBranchQuota(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi sema.branch_quota = @max(sema.branch_quota, quota); } -fn zirStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { - const tracy = trace(@src()); - defer tracy.end(); - - const bin_inst = sema.code.instructions.items(.data)[@intFromEnum(inst)].bin; - const ptr = try sema.resolveInst(bin_inst.lhs); - const value = try sema.resolveInst(bin_inst.rhs); - return sema.storePtr(block, sema.src, ptr, value); -} - fn 
zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -5709,7 +5694,6 @@ fn zirPanic(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.I fn zirTrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const src_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].node; const src = LazySrcLoc.nodeOffset(src_node); - sema.src = src; if (block.is_comptime) return sema.fail(block, src, "encountered @trap at comptime", .{}); _ = try block.addNoOp(.trap); @@ -6384,10 +6368,6 @@ fn zirBreak(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) CompileError } fn zirDbgStmt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { - // We do not set sema.src here because dbg_stmt instructions are only emitted for - // ZIR code that possibly will need to generate runtime code. So error messages - // and other source locations must not rely on sema.src being set from dbg_stmt - // instructions. if (block.is_comptime or block.ownerModule().strip) return; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt; @@ -6632,7 +6612,6 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; - const src = sema.src; if (block.is_comptime or block.is_typeof) { const index_val = try mod.intValue_u64(Type.usize, sema.comptime_err_ret_trace.items.len); @@ -6650,9 +6629,10 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref else => |e| return e, }; const field_name = try mod.intern_pool.getOrPutString(gpa, "index"); - const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, src) catch |err| switch (err) { - error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, - else => |e| return e, + const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, .unneeded) catch |err| switch (err) { + error.AnalysisFail, error.NeededSourceLocation => @panic("std.builtin.StackTrace is corrupt"), + error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, + error.OutOfMemory => |e| return e, }; return try block.addInst(.{ @@ -9900,7 +9880,6 @@ fn zirAsNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.As, inst_data.payload_index).data; - sema.src = src; return sema.analyzeAs(block, src, extra.dest_type, extra.operand, false); } @@ -10855,7 +10834,7 @@ const SwitchProngAnalysis = struct { .address_space = operand_ptr_ty.ptrAddressSpace(mod), }, }); - if (try sema.resolveDefinedValue(block, sema.src, spa.operand_ptr)) |union_ptr| { + if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |union_ptr| { return Air.internedToRef((try mod.intern(.{ .ptr = .{ .ty = ptr_field_ty.toIntern(), .addr = .{ .field = .{ @@ -10866,7 +10845,7 @@ const SwitchProngAnalysis = struct { } return block.addStructFieldPtr(spa.operand_ptr, field_index, ptr_field_ty); } else { - if (try sema.resolveDefinedValue(block, sema.src, spa.operand)) |union_val| { + if (try sema.resolveDefinedValue(block, operand_src, spa.operand)) |union_val| { const tag_and_val = ip.indexToKey(union_val.toIntern()).un; return Air.internedToRef(tag_and_val.val); } @@ -13191,6 +13170,7 @@ fn validateErrSetSwitch( // else => |e| return e, // even if all the possible errors were already handled. const tags = sema.code.instructions.items(.tag); + const datas = sema.code.instructions.items(.data); for (else_case.body) |else_inst| switch (tags[@intFromEnum(else_inst)]) { .dbg_block_begin, .dbg_block_end, @@ -13205,11 +13185,16 @@ fn validateErrSetSwitch( .err_union_code, .ret_err_value_code, .save_err_ret_index, - .restore_err_ret_index, + .restore_err_ret_index_unconditional, + .restore_err_ret_index_fn_entry, .is_non_err, .ret_is_non_err, .condbr, => {}, + .extended => switch (datas[@intFromEnum(else_inst)].extended.opcode) { + .restore_err_ret_index => {}, + else => break, + }, else => break, } else break :else_validation; @@ -13707,7 +13692,6 @@ fn zirShl( const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = inst_data.src(); - sema.src = src; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -13878,7 +13862,6 @@ fn zirShr( const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = inst_data.src(); - sema.src = src; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -14014,7 +13997,6 @@ fn zirBitwise( const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; - sema.src = src; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -14795,21 +14777,20 @@ fn zirArithmetic( defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; - sema.src = .{ .node_offset_bin_op = inst_data.src_node }; + const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = 
.{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); - return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, sema.src, lhs_src, rhs_src, safety); + return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, src, lhs_src, rhs_src, safety); } fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; - sema.src = src; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -14975,7 +14956,6 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; - sema.src = src; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -15141,7 +15121,6 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; - sema.src = src; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -15252,7 +15231,6 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; - sema.src = src; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -15494,7 +15472,6 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; - sema.src = src; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -15679,7 +15656,6 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; - sema.src = src; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -15775,7 +15751,6 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; - sema.src = src; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -18741,14 +18716,17 @@ fn zirBoolBr( defer tracy.end(); const mod = sema.mod; - const datas = sema.code.instructions.items(.data); - const inst_data = datas[@intFromEnum(inst)].bool_br; - const lhs = try sema.resolveInst(inst_data.lhs); - const lhs_src = sema.src; - const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); - const body = sema.code.bodySlice(extra.end, extra.data.body_len); const gpa = sema.gpa; + const datas = sema.code.instructions.items(.data); + const inst_data = datas[@intFromEnum(inst)].pl_node; + const extra = sema.code.extraData(Zir.Inst.BoolBr, inst_data.payload_index); + + const lhs = try sema.resolveInst(extra.data.lhs); + const body = sema.code.bodySlice(extra.end, extra.data.body_len); + const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; + const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; + if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| { if (is_bool_or and lhs_val.toBool()) { return .bool_true; @@ -18795,7 +18773,7 @@ fn zirBoolBr( const result = sema.finishCondBr(parent_block, &child_block, &then_block, &else_block, lhs, block_inst); if (!sema.typeOf(rhs_result).isNoReturn(mod)) { - if (try sema.resolveDefinedValue(rhs_block, sema.src, rhs_result)) |rhs_val| { + if (try sema.resolveDefinedValue(rhs_block, rhs_src, rhs_result)) |rhs_val| { if (is_bool_or and rhs_val.toBool()) { return .bool_true; } else if (!is_bool_or and !rhs_val.toBool()) { @@ -19375,17 +19353,21 @@ fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE block.error_return_trace_index = try sema.analyzeSaveErrRetIndex(block); } -fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) CompileError!void { - const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].restore_err_ret_index; - const src = sema.src; // TODO - - const mod = sema.mod; - const ip = &mod.intern_pool; +fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, extended: Zir.Inst.Extended.InstData) 
CompileError!void { + const extra = sema.code.extraData(Zir.Inst.RestoreErrRetIndex, extended.operand).data; + return sema.restoreErrRetIndex(start_block, extra.src(), extra.block, extra.operand); +} +/// If `operand` is non-error (or is `none`), restores the error return trace to +/// its state at the point `block` was reached (or, if `block` is `none`, the +/// point this function began execution). +fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_block: Zir.Inst.Ref, operand_zir: Zir.Inst.Ref) CompileError!void { const tracy = trace(@src()); defer tracy.end(); - const saved_index = if (inst_data.block.toIndexAllowNone()) |zir_block| b: { + const mod = sema.mod; + + const saved_index = if (target_block.toIndexAllowNone()) |zir_block| b: { var block = start_block; while (true) { if (block.label) |label| { @@ -19409,7 +19391,7 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) return; // No need to restore }; - const operand = try sema.resolveInstAllowNone(inst_data.operand); + const operand = try sema.resolveInstAllowNone(operand_zir); if (start_block.is_comptime or start_block.is_typeof) { const is_non_error = if (operand != .none) blk: { @@ -19427,7 +19409,7 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) return; } - if (!ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn) return; + if (!mod.intern_pool.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn) return; if (!start_block.ownerModule().error_tracing) return; assert(saved_index != .none); // The .error_return_trace_index field was dropped somewhere @@ -23161,7 +23143,6 @@ fn zirOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u64 { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; - sema.src = src; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -26416,12 +26397,16 @@ fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !InternP try sema.prepareSimplePanic(block); const panic_messages_ty = try sema.getBuiltinType("panic_messages"); - const msg_decl_index = (try sema.namespaceLookup( + const msg_decl_index = (sema.namespaceLookup( block, - sema.src, + .unneeded, panic_messages_ty.getNamespaceIndex(mod).unwrap().?, try mod.intern_pool.getOrPutString(gpa, @tagName(panic_id)), - )).?; + ) catch |err| switch (err) { + error.AnalysisFail, error.NeededSourceLocation => @panic("std.builtin.panic_messages is corrupt"), + error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, + error.OutOfMemory => |e| return e, + }).?; try sema.ensureDeclAnalyzed(msg_decl_index); mod.panic_messages[@intFromEnum(panic_id)] = msg_decl_index.toOptional(); return msg_decl_index; diff --git a/src/Zir.zig b/src/Zir.zig index c313ab8563..c76c1f2f62 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -303,11 +303,11 @@ pub const Inst = struct { bool_not, /// Short-circuiting boolean `and`. `lhs` is a boolean `Ref` and the other operand /// is a block, which is evaluated if `lhs` is `true`. - /// Uses the `bool_br` union field. + /// Uses the `pl_node` union field. Payload is `BoolBr`. 
bool_br_and, /// Short-circuiting boolean `or`. `lhs` is a boolean `Ref` and the other operand /// is a block, which is evaluated if `lhs` is `false`. - /// Uses the `bool_br` union field. + /// Uses the `pl_node` union field. Payload is `BoolBr`. bool_br_or, /// Return a value from a block. /// Uses the `break` union field. @@ -592,16 +592,12 @@ pub const Inst = struct { /// Returns a pointer to the subslice. /// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceLength`. slice_length, - /// Write a value to a pointer. For loading, see `load`. - /// Source location is assumed to be same as previous instruction. - /// Uses the `bin` union field. - store, /// Same as `store` except provides a source location. /// Uses the `pl_node` union field. Payload is `Bin`. store_node, - /// Same as `store` but the type of the value being stored will be used to infer - /// the pointer type. - /// Uses the `bin` union field. + /// Same as `store_node` but the type of the value being stored will be + /// used to infer the pointer type of an `alloc_inferred`. + /// Uses the `pl_node` union field. Payload is `Bin`. store_to_inferred_ptr, /// String Literal. Makes an anonymous Decl and then takes a pointer to it. /// Uses the `str` union field. @@ -1036,10 +1032,18 @@ pub const Inst = struct { /// block, if the operand is .none or of an error/error-union type. /// Uses the `save_err_ret_index` field. save_err_ret_index, - /// Sets error return trace to zero if no operand is given, - /// otherwise sets the value to the given amount. - /// Uses the `restore_err_ret_index` union field. - restore_err_ret_index, + /// Specialized form of `Extended.restore_err_ret_index`. + /// Unconditionally restores the error return index to its last saved state + /// in the block referred to by `operand`. If `operand` is `none`, restores + /// to the point of function entry. + /// Uses the `un_node` field. + restore_err_ret_index_unconditional, + /// Specialized form of `Extended.restore_err_ret_index`. + /// Restores the error return index to its state at the entry of + /// the current function conditional on `operand` being a non-error. + /// If `operand` is `none`, restores unconditionally. + /// Uses the `un_node` field. + restore_err_ret_index_fn_entry, /// The ZIR instruction tag is one of the `Extended` ones. /// Uses the `extended` union field. 
@@ -1145,7 +1149,6 @@ pub const Inst = struct { .shl, .shl_sat, .shr, - .store, .store_node, .store_to_inferred_ptr, .str, @@ -1265,7 +1268,6 @@ pub const Inst = struct { .@"defer", .defer_err_code, .save_err_ret_index, - .restore_err_ret_index, .for_len, .opt_eu_base_ptr_init, .coerce_ptr_elem_ty, @@ -1290,6 +1292,8 @@ pub const Inst = struct { .array_init_elem_type, .array_init_elem_ptr, .validate_ref_ty, + .restore_err_ret_index_unconditional, + .restore_err_ret_index_fn_entry, => false, .@"break", @@ -1338,7 +1342,6 @@ pub const Inst = struct { .ensure_err_union_payload_void, .set_eval_branch_quota, .atomic_store, - .store, .store_node, .store_to_inferred_ptr, .resolve_inferred_alloc, @@ -1352,8 +1355,9 @@ pub const Inst = struct { .check_comptime_control_flow, .@"defer", .defer_err_code, - .restore_err_ret_index, .save_err_ret_index, + .restore_err_ret_index_unconditional, + .restore_err_ret_index_fn_entry, .validate_struct_init_ty, .validate_struct_init_result_ty, .validate_ptr_struct_init, @@ -1635,8 +1639,8 @@ pub const Inst = struct { .declaration = .pl_node, .suspend_block = .pl_node, .bool_not = .un_node, - .bool_br_and = .bool_br, - .bool_br_or = .bool_br, + .bool_br_and = .pl_node, + .bool_br_or = .pl_node, .@"break" = .@"break", .break_inline = .@"break", .check_comptime_control_flow = .un_node, @@ -1713,9 +1717,8 @@ pub const Inst = struct { .slice_end = .pl_node, .slice_sentinel = .pl_node, .slice_length = .pl_node, - .store = .bin, .store_node = .pl_node, - .store_to_inferred_ptr = .bin, + .store_to_inferred_ptr = .pl_node, .str = .str, .negate = .un_node, .negate_wrap = .un_node, @@ -1845,7 +1848,8 @@ pub const Inst = struct { .defer_err_code = .defer_err_code, .save_err_ret_index = .save_err_ret_index, - .restore_err_ret_index = .restore_err_ret_index, + .restore_err_ret_index_unconditional = .un_node, + .restore_err_ret_index_fn_entry = .un_node, .struct_init_empty = .un_node, .struct_init_empty_result = .un_node, @@ -2075,6 +2079,13 @@ pub const Inst = struct { /// Implements the `@inComptime` builtin. /// `operand` is `src_node: i32`. in_comptime, + /// Restores the error return index to its last saved state in a given + /// block. If the block is `.none`, restores to the state from the point + /// of function entry. If the operand is not `.none`, the restore is + /// conditional on the operand value not being an error. + /// `operand` is payload index to `RestoreErrRetIndex`. + /// `small` is undefined. + restore_err_ret_index, /// Used as a placeholder instruction which is just a dummy index for Sema to replace /// with a specific value. For instance, this is used for the capture of an `errdefer`. /// This should never appear in a body. @@ -2345,11 +2356,6 @@ pub const Inst = struct { return LazySrcLoc.nodeOffset(self.src_node); } }, - bool_br: struct { - lhs: Ref, - /// Points to a `Block`. - payload_index: u32, - }, @"unreachable": struct { /// Offset from Decl AST node index. /// `Tag` determines which kind of AST node this points to. @@ -2396,10 +2402,6 @@ pub const Inst = struct { save_err_ret_index: struct { operand: Ref, // If error type (or .none), save new trace index }, - restore_err_ret_index: struct { - block: Ref, // If restored, the index is from this block's entrypoint - operand: Ref, // If non-error (or .none), then restore the index - }, elem_val_imm: struct { /// The indexable value being accessed. 
operand: Ref, @@ -2435,7 +2437,6 @@ pub const Inst = struct { float, ptr_type, int_type, - bool_br, @"unreachable", @"break", dbg_stmt, @@ -2444,7 +2445,6 @@ pub const Inst = struct { @"defer", defer_err_code, save_err_ret_index, - restore_err_ret_index, elem_val_imm, }; }; @@ -2630,6 +2630,13 @@ pub const Inst = struct { body_len: u32, }; + /// Trailing: + /// * inst: Index // for each `body_len` + pub const BoolBr = struct { + lhs: Ref, + body_len: u32, + }; + /// Trailing: /// 0. doc_comment: u32 // if `has_doc_comment`; null-terminated string index /// 1. align_body_len: u32 // if `has_align_linksection_addrspace`; 0 means no `align` @@ -3439,6 +3446,18 @@ pub const Inst = struct { /// The RHS of the array multiplication. rhs: Ref, }; + + pub const RestoreErrRetIndex = struct { + src_node: i32, + /// If `.none`, restore the trace to its state upon function entry. + block: Ref, + /// If `.none`, restore unconditionally. + operand: Ref, + + pub fn src(self: RestoreErrRetIndex) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + }; }; pub const SpecialProng = enum { none, @"else", under }; diff --git a/src/print_zir.zig b/src/print_zir.zig index f33d00c989..8e5fbc1788 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -199,10 +199,6 @@ const Writer = struct { const tag = tags[@intFromEnum(inst)]; try stream.print("= {s}(", .{@tagName(tags[@intFromEnum(inst)])}); switch (tag) { - .store, - .store_to_inferred_ptr, - => try self.writeBin(stream, inst), - .alloc, .alloc_mut, .alloc_comptime_mut, @@ -280,6 +276,8 @@ const Writer = struct { .validate_deref, .check_comptime_control_flow, .opt_eu_base_ptr_init, + .restore_err_ret_index_unconditional, + .restore_err_ret_index_fn_entry, => try self.writeUnNode(stream, inst), .ref, @@ -303,7 +301,6 @@ const Writer = struct { .int_type => try self.writeIntType(stream, inst), .save_err_ret_index => try self.writeSaveErrRetIndex(stream, inst), - .restore_err_ret_index => try self.writeRestoreErrRetIndex(stream, inst), .@"break", .break_inline, @@ -392,6 +389,7 @@ const Writer = struct { .shr_exact, .xor, .store_node, + .store_to_inferred_ptr, .error_union_type, .merge_error_sets, .bit_and, @@ -615,6 +613,8 @@ const Writer = struct { .cmpxchg => try self.writeCmpxchg(stream, extended), .ptr_cast_full => try self.writePtrCastFull(stream, extended), .ptr_cast_no_dest => try self.writePtrCastNoDest(stream, extended), + + .restore_err_ret_index => try self.writeRestoreErrRetIndex(stream, extended), } } @@ -624,14 +624,6 @@ const Writer = struct { try self.writeSrc(stream, src); } - fn writeBin(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { - const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].bin; - try self.writeInstRef(stream, inst_data.lhs); - try stream.writeAll(", "); - try self.writeInstRef(stream, inst_data.rhs); - try stream.writeByte(')'); - } - fn writeArrayInitElemType(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].bin; try self.writeInstRef(stream, inst_data.lhs); @@ -2505,12 +2497,14 @@ const Writer = struct { } fn writeBoolBr(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { - const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].bool_br; - const extra = self.code.extraData(Zir.Inst.Block, inst_data.payload_index); + const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; + const extra = self.code.extraData(Zir.Inst.BoolBr, 
inst_data.payload_index); const body = self.code.bodySlice(extra.end, extra.data.body_len); - try self.writeInstRef(stream, inst_data.lhs); + try self.writeInstRef(stream, extra.data.lhs); try stream.writeAll(", "); try self.writeBracedBody(stream, body); + try stream.writeAll(") "); + try self.writeSrc(stream, inst_data.src()); } fn writeIntType(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { @@ -2531,13 +2525,14 @@ const Writer = struct { try stream.writeAll(")"); } - fn writeRestoreErrRetIndex(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { - const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].restore_err_ret_index; + fn writeRestoreErrRetIndex(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { + const extra = self.code.extraData(Zir.Inst.RestoreErrRetIndex, extended.operand).data; - try self.writeInstRef(stream, inst_data.block); - try self.writeInstRef(stream, inst_data.operand); + try self.writeInstRef(stream, extra.block); + try self.writeInstRef(stream, extra.operand); - try stream.writeAll(")"); + try stream.writeAll(") "); + try self.writeSrc(stream, extra.src()); } fn writeBreak(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { From 10784c7fc8d419fff943994f1aa5a454d4e43391 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 13 Feb 2024 02:36:13 +0000 Subject: [PATCH 4/8] AstGen: migrate `ty` result locations to `coerced_ty` In most cases where AstGen is coercing to a fixed type (such as `u29`, `type`, `std.builtin.CallingConvention) we do not necessarily require an explicit coercion instruction. Instead, Sema knows the type that is required, and can perform the coercion after the fact. This means we can use the `coerced_ty` result location kind, saving unnecessary coercion instructions and therefore ZIR bytes. This required a few enhancements to Sema to introduce missing coercions. 
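For instance, the operand of `align(...)` has the fixed result type `u29` (this patch switches it from `align_ri` to `coerced_align_ri`). A small illustrative example; the declarations themselves are made up for this note:

    // Illustrative source only. The `align` operand below previously went
    // through a `ty`-style result location, which forced an explicit coercion
    // to `u29` in the generated ZIR; with `coerced_ty`, Sema performs that
    // coercion itself, since it already knows the destination type.
    const alignment = 16;
    var buffer: [64]u8 align(alignment) = undefined;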
--- src/AstGen.zig | 117 +++++++++++++++++++++++-------------------------- src/Sema.zig | 96 ++++++++++++++++++++++++++-------------- 2 files changed, 120 insertions(+), 93 deletions(-) diff --git a/src/AstGen.zig b/src/AstGen.zig index 0117f0ac26..4b9efa0408 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -360,16 +360,11 @@ const ResultInfo = struct { }; }; -/// TODO: modify Sema to remove in favour of `coerced_align_ri` -const align_ri: ResultInfo = .{ .rl = .{ .ty = .u29_type } }; const coerced_align_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .u29_type } }; -/// TODO: modify Sema to remove in favour of `coerced_addrspace_ri` -const addrspace_ri: ResultInfo = .{ .rl = .{ .ty = .address_space_type } }; const coerced_addrspace_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .address_space_type } }; const coerced_linksection_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }; -const bool_ri: ResultInfo = .{ .rl = .{ .ty = .bool_type } }; -const type_ri: ResultInfo = .{ .rl = .{ .ty = .type_type } }; const coerced_type_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .type_type } }; +const coerced_bool_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .bool_type } }; fn typeExpr(gz: *GenZir, scope: *Scope, type_node: Ast.Node.Index) InnerError!Zir.Inst.Ref { return comptimeExpr(gz, scope, coerced_type_ri, type_node); @@ -786,7 +781,7 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE .bool_and => return boolBinOp(gz, scope, ri, node, .bool_br_and), .bool_or => return boolBinOp(gz, scope, ri, node, .bool_br_or), - .bool_not => return simpleUnOp(gz, scope, ri, node, bool_ri, node_datas[node].lhs, .bool_not), + .bool_not => return simpleUnOp(gz, scope, ri, node, coerced_bool_ri, node_datas[node].lhs, .bool_not), .bit_not => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, node_datas[node].lhs, .bit_not), .negation => return negation(gz, scope, ri, node), @@ -1372,7 +1367,7 @@ fn fnProtoExpr( }; const align_ref: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: { - break :inst try expr(&block_scope, scope, align_ri, fn_proto.ast.align_expr); + break :inst try expr(&block_scope, scope, coerced_align_ri, fn_proto.ast.align_expr); }; if (fn_proto.ast.addrspace_expr != 0) { @@ -1387,7 +1382,7 @@ fn fnProtoExpr( try expr( &block_scope, scope, - .{ .rl = .{ .ty = .calling_convention_type } }, + .{ .rl = .{ .coerced_ty = .calling_convention_type } }, fn_proto.ast.callconv_expr, ) else @@ -3136,7 +3131,7 @@ fn varDecl( } const align_inst: Zir.Inst.Ref = if (var_decl.ast.align_node != 0) - try expr(gz, scope, align_ri, var_decl.ast.align_node) + try expr(gz, scope, coerced_align_ri, var_decl.ast.align_node) else .none; @@ -3505,7 +3500,7 @@ fn assignDestructureMaybeDecls( const this_lhs_comptime = is_comptime or (is_const and rhs_is_comptime); const align_inst: Zir.Inst.Ref = if (full.ast.align_node != 0) - try expr(gz, scope, align_ri, full.ast.align_node) + try expr(gz, scope, coerced_align_ri, full.ast.align_node) else .none; @@ -3783,7 +3778,7 @@ fn ptrType( gz.astgen.source_line = source_line; gz.astgen.source_column = source_column; - addrspace_ref = try expr(gz, scope, addrspace_ri, ptr_info.ast.addrspace_node); + addrspace_ref = try expr(gz, scope, coerced_addrspace_ri, ptr_info.ast.addrspace_node); trailing_count += 1; } if (ptr_info.ast.align_node != 0) { @@ -4176,7 +4171,7 @@ fn fnDecl( var addrspace_gz = decl_gz.makeSubBlock(params_scope); defer addrspace_gz.unstack(); const addrspace_ref: Zir.Inst.Ref = if 
(fn_proto.ast.addrspace_expr == 0) .none else inst: { - const inst = try expr(&decl_gz, params_scope, addrspace_ri, fn_proto.ast.addrspace_expr); + const inst = try expr(&decl_gz, params_scope, coerced_addrspace_ri, fn_proto.ast.addrspace_expr); if (addrspace_gz.instructionsSlice().len == 0) { // In this case we will send a len=0 body which can be encoded more efficiently. break :inst inst; @@ -4431,7 +4426,7 @@ fn globalVarDecl( try expr( &block_scope, &block_scope.base, - .{ .rl = .{ .ty = .type_type } }, + coerced_type_ri, var_decl.ast.type_node, ) else @@ -5254,7 +5249,7 @@ fn unionDeclInner( return astgen.failNode(member_node, "union field missing type", .{}); } if (have_align) { - const align_inst = try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .u32_type } }, member.ast.align_expr); + const align_inst = try expr(&block_scope, &block_scope.base, coerced_align_ri, member.ast.align_expr); wip_members.appendToField(@intFromEnum(align_inst)); any_aligned_fields = true; } @@ -5522,7 +5517,7 @@ fn containerDecl( namespace.base.tag = .enum_namespace; const arg_inst: Zir.Inst.Ref = if (container_decl.ast.arg != 0) - try comptimeExpr(&block_scope, &namespace.base, .{ .rl = .{ .ty = .type_type } }, container_decl.ast.arg) + try comptimeExpr(&block_scope, &namespace.base, coerced_type_ri, container_decl.ast.arg) else .none; @@ -6079,7 +6074,7 @@ fn arrayAccess( const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - const rhs = try expr(gz, scope, .{ .rl = .{ .ty = .usize_type } }, node_datas[node].rhs); + const rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[node].rhs); try emitDbgStmt(gz, cursor); return gz.addPlNode(.elem_ptr_node, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }); @@ -6089,7 +6084,7 @@ fn arrayAccess( const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - const rhs = try expr(gz, scope, .{ .rl = .{ .ty = .usize_type } }, node_datas[node].rhs); + const rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[node].rhs); try emitDbgStmt(gz, cursor); return rvalue(gz, ri, try gz.addPlNode(.elem_val_node, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }), node); @@ -6157,12 +6152,12 @@ fn boolBinOp( const tree = astgen.tree; const node_datas = tree.nodes.items(.data); - const lhs = try expr(gz, scope, bool_ri, node_datas[node].lhs); + const lhs = try expr(gz, scope, coerced_bool_ri, node_datas[node].lhs); const bool_br = (try gz.addPlNodePayloadIndex(zir_tag, node, undefined)).toIndex().?; var rhs_scope = gz.makeSubBlock(scope); defer rhs_scope.unstack(); - const rhs = try expr(&rhs_scope, &rhs_scope.base, bool_ri, node_datas[node].rhs); + const rhs = try expr(&rhs_scope, &rhs_scope.base, coerced_bool_ri, node_datas[node].rhs); if (!gz.refIsNoReturn(rhs)) { _ = try rhs_scope.addBreakWithSrcNode(.break_inline, bool_br, rhs, node_datas[node].rhs); } @@ -6230,7 +6225,7 @@ fn ifExpr( .bool_bit = try block_scope.addUnNode(tag, optional, if_full.ast.cond_expr), }; } else { - const cond = try expr(&block_scope, &block_scope.base, bool_ri, if_full.ast.cond_expr); + const cond = try expr(&block_scope, &block_scope.base, coerced_bool_ri, if_full.ast.cond_expr); break :c .{ .inst = cond, .bool_bit = cond, @@ -6476,7 +6471,7 @@ fn whileExpr( .bool_bit = try cond_scope.addUnNode(tag, optional, while_full.ast.cond_expr), }; } else { - const cond = try expr(&cond_scope, &cond_scope.base, bool_ri, while_full.ast.cond_expr); + const cond = try expr(&cond_scope, &cond_scope.base, coerced_bool_ri, 
while_full.ast.cond_expr); break :c .{ .inst = cond, .bool_bit = cond, @@ -8052,7 +8047,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref .rl = .{ .ptr = .{ .inst = try gz.addNode(.ret_ptr, node) } }, .ctx = .@"return", } else .{ - .rl = .{ .ty = astgen.fn_ret_ty }, + .rl = .{ .coerced_ty = astgen.fn_ret_ty }, .ctx = .@"return", }; const prev_anon_name_strategy = gz.anon_name_strategy; @@ -8688,7 +8683,7 @@ fn unionInit( params: []const Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const union_type = try typeExpr(gz, scope, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]); const field_type = try gz.addPlNode(.field_type_ref, node, Zir.Inst.FieldTypeRef{ .container_type = union_type, .field_name = field_name, @@ -9001,12 +8996,12 @@ fn builtinCall( if (ri.rl == .ref or ri.rl == .ref_coerced_ty) { return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{ .lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]), - .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]), }); } const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{ .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]), - .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]), }); return rvalue(gz, ri, result, node); }, @@ -9133,7 +9128,7 @@ fn builtinCall( return rvalue(gz, ri, .void_value, node); }, .set_align_stack => { - const order = try expr(gz, scope, align_ri, params[0]); + const order = try expr(gz, scope, coerced_align_ri, params[0]); _ = try gz.addExtendedPayload(.set_align_stack, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = order, @@ -9175,32 +9170,32 @@ fn builtinCall( .bit_size_of => return simpleUnOpType(gz, scope, ri, node, params[0], .bit_size_of), .align_of => return simpleUnOpType(gz, scope, ri, node, params[0], .align_of), - .int_from_ptr => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_ptr), - .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .compile_error), - .set_eval_branch_quota => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0], .set_eval_branch_quota), - .int_from_enum => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_enum), - .int_from_bool => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_bool), - .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .embed_file), - .error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .anyerror_type } }, params[0], .error_name), - .set_runtime_safety => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_runtime_safety), - .sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt), - .sin => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sin), - .cos => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .cos), - .tan => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none 
}, params[0], .tan), - .exp => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .exp), - .exp2 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .exp2), - .log => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log), - .log2 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log2), - .log10 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log10), - .abs => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .abs), - .floor => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .floor), - .ceil => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .ceil), - .trunc => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .trunc), - .round => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .round), - .tag_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .tag_name), - .type_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .type_name), - .Frame => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_type), - .frame_size => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_size), + .int_from_ptr => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_ptr), + .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0], .compile_error), + .set_eval_branch_quota => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0], .set_eval_branch_quota), + .int_from_enum => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_enum), + .int_from_bool => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_bool), + .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0], .embed_file), + .error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .anyerror_type } }, params[0], .error_name), + .set_runtime_safety => return simpleUnOp(gz, scope, ri, node, coerced_bool_ri, params[0], .set_runtime_safety), + .sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt), + .sin => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sin), + .cos => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .cos), + .tan => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .tan), + .exp => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .exp), + .exp2 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .exp2), + .log => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log), + .log2 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log2), + .log10 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log10), + .abs => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .abs), + .floor => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .floor), + .ceil => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .ceil), + .trunc => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .trunc), + .round => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .round), + .tag_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], 
.tag_name), + .type_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .type_name), + .Frame => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_type), + .frame_size => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_size), .int_from_float => return typeCast(gz, scope, ri, node, params[0], .int_from_float, builtin_name), .float_from_int => return typeCast(gz, scope, ri, node, params[0], .float_from_int, builtin_name), @@ -9238,7 +9233,7 @@ fn builtinCall( }, .panic => { try emitDbgNode(gz, node); - return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .panic); + return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0], .panic); }, .trap => { try emitDbgNode(gz, node); @@ -9327,7 +9322,7 @@ fn builtinCall( }, .c_define => { if (!gz.c_import) return gz.astgen.failNode(node, "C define valid only inside C import block", .{}); - const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0]); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0]); const value = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]); const result = try gz.addExtendedPayload(.c_define, Zir.Inst.BinNode{ .node = gz.nodeIndexToRelative(node), @@ -9348,7 +9343,7 @@ fn builtinCall( return rvalue(gz, ri, result, node); }, .reduce => { - const op = try expr(gz, scope, .{ .rl = .{ .ty = .reduce_op_type } }, params[0]); + const op = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .reduce_op_type } }, params[0]); const scalar = try expr(gz, scope, .{ .rl = .none }, params[1]); const result = try gz.addPlNode(.reduce, node, Zir.Inst.Bin{ .lhs = op, @@ -9424,7 +9419,7 @@ fn builtinCall( }, .field_parent_ptr => { const parent_type = try typeExpr(gz, scope, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]); const result = try gz.addPlNode(.field_parent_ptr, node, Zir.Inst.FieldParentPtr{ .parent_type = parent_type, .field_name = field_name, @@ -9561,7 +9556,7 @@ fn hasDeclOrField( tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const container_type = try typeExpr(gz, scope, lhs_node); - const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = container_type, .rhs = name, @@ -9711,7 +9706,7 @@ fn simpleCBuiltin( ) InnerError!Zir.Inst.Ref { const name: []const u8 = if (tag == .c_undef) "C undef" else "C include"; if (!gz.c_import) return gz.astgen.failNode(node, "{s} valid only inside C import block", .{name}); - const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, operand_node); + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, operand_node); _ = try gz.addExtendedPayload(tag, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = operand, @@ -9729,7 +9724,7 @@ fn offsetOf( tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const type_inst = try typeExpr(gz, scope, lhs_node); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node); + const field_name = try comptimeExpr(gz, 
scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = type_inst, .rhs = field_name, @@ -13148,7 +13143,7 @@ const GenZir = struct { fn addRet(gz: *GenZir, ri: ResultInfo, operand: Zir.Inst.Ref, node: Ast.Node.Index) !void { switch (ri.rl) { .ptr => |ptr_res| _ = try gz.addUnNode(.ret_load, ptr_res.inst, node), - .ty => _ = try gz.addUnNode(.ret_node, operand, node), + .coerced_ty => _ = try gz.addUnNode(.ret_node, operand, node), else => unreachable, } } diff --git a/src/Sema.zig b/src/Sema.zig index e0ff94ec83..3465f923a8 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -10487,7 +10487,8 @@ fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array = try sema.resolveInst(extra.lhs); - const elem_index = try sema.resolveInst(extra.rhs); + const uncoerced_elem_index = try sema.resolveInst(extra.rhs); + const elem_index = try sema.coerce(block, Type.usize, uncoerced_elem_index, elem_index_src); return sema.elemVal(block, src, array, elem_index, elem_index_src, true); } @@ -10539,7 +10540,8 @@ fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array_ptr = try sema.resolveInst(extra.lhs); - const elem_index = try sema.resolveInst(extra.rhs); + const uncoerced_elem_index = try sema.resolveInst(extra.rhs); + const elem_index = try sema.coerce(block, Type.usize, uncoerced_elem_index, elem_index_src); return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src, false, true); } @@ -18722,11 +18724,13 @@ fn zirBoolBr( const inst_data = datas[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.BoolBr, inst_data.payload_index); - const lhs = try sema.resolveInst(extra.data.lhs); + const uncoerced_lhs = try sema.resolveInst(extra.data.lhs); const body = sema.code.bodySlice(extra.end, extra.data.body_len); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; + const lhs = try sema.coerce(parent_block, Type.bool, uncoerced_lhs, lhs_src); + if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| { if (is_bool_or and lhs_val.toBool()) { return .bool_true; @@ -18736,7 +18740,11 @@ fn zirBoolBr( // comptime-known left-hand side. No need for a block here; the result // is simply the rhs expression. Here we rely on there only being 1 // break instruction (`break_inline`). 
- return sema.resolveBody(parent_block, body, inst); + const rhs_result = try sema.resolveBody(parent_block, body, inst); + if (sema.typeOf(rhs_result).isNoReturn(mod)) { + return rhs_result; + } + return sema.coerce(parent_block, Type.bool, rhs_result, rhs_src); } const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len); @@ -18767,13 +18775,16 @@ fn zirBoolBr( _ = try lhs_block.addBr(block_inst, lhs_result); const rhs_result = try sema.resolveBody(rhs_block, body, inst); - if (!sema.typeOf(rhs_result).isNoReturn(mod)) { - _ = try rhs_block.addBr(block_inst, rhs_result); - } + const rhs_noret = sema.typeOf(rhs_result).isNoReturn(mod); + const coerced_rhs_result = if (!rhs_noret) rhs: { + const coerced_result = try sema.coerce(rhs_block, Type.bool, rhs_result, rhs_src); + _ = try rhs_block.addBr(block_inst, coerced_result); + break :rhs coerced_result; + } else rhs_result; const result = sema.finishCondBr(parent_block, &child_block, &then_block, &else_block, lhs, block_inst); - if (!sema.typeOf(rhs_result).isNoReturn(mod)) { - if (try sema.resolveDefinedValue(rhs_block, rhs_src, rhs_result)) |rhs_val| { + if (!rhs_noret) { + if (try sema.resolveDefinedValue(rhs_block, rhs_src, coerced_rhs_result)) |rhs_val| { if (is_bool_or and rhs_val.toBool()) { return .bool_true; } else if (!is_bool_or and !rhs_val.toBool()) { @@ -20750,8 +20761,9 @@ fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; - const operand = try sema.resolveInst(inst_data.operand); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const uncoerced_operand = try sema.resolveInst(inst_data.operand); + const operand = try sema.coerce(block, Type.anyerror, uncoerced_operand, operand_src); if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { const err_name = sema.mod.intern_pool.indexToKey(val.toIntern()).err.name; @@ -25375,15 +25387,21 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } else if (extra.data.bits.has_align_ref) blk: { const align_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; - const align_tv = sema.resolveInstConst(block, align_src, align_ref, .{ - .needed_comptime_reason = "alignment must be comptime-known", - }) catch |err| switch (err) { - error.GenericPoison => { - break :blk null; - }, + const uncoerced_align = sema.resolveInst(align_ref) catch |err| switch (err) { + error.GenericPoison => break :blk null, else => |e| return e, }; - const alignment = try sema.validateAlignAllowZero(block, align_src, try align_tv.val.toUnsignedIntAdvanced(sema)); + const coerced_align = sema.coerce(block, Type.u29, uncoerced_align, align_src) catch |err| switch (err) { + error.GenericPoison => break :blk null, + else => |e| return e, + }; + const align_val = sema.resolveConstDefinedValue(block, align_src, coerced_align, .{ + .needed_comptime_reason = "alignment must be comptime-known", + }) catch |err| switch (err) { + error.GenericPoison => break :blk null, + else => |e| return e, + }; + const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntAdvanced(sema)); const default = target_util.defaultFunctionAlignment(target); break :blk if (alignment == default) .none else alignment; } else .none; @@ -25394,7 +25412,7 @@ fn zirFuncFancy(sema: 
*Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const body = sema.code.bodySlice(extra_index, body_len); extra_index += body.len; - const addrspace_ty = try sema.getBuiltinType("AddressSpace"); + const addrspace_ty = Type.fromInterned(.address_space_type); const val = try sema.resolveGenericBody(block, addrspace_src, body, inst, addrspace_ty, .{ .needed_comptime_reason = "addrspace must be comptime-known", }); @@ -25405,15 +25423,22 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } else if (extra.data.bits.has_addrspace_ref) blk: { const addrspace_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; - const addrspace_tv = sema.resolveInstConst(block, addrspace_src, addrspace_ref, .{ - .needed_comptime_reason = "addrspace must be comptime-known", - }) catch |err| switch (err) { - error.GenericPoison => { - break :blk null; - }, + const addrspace_ty = Type.fromInterned(.address_space_type); + const uncoerced_addrspace = sema.resolveInst(addrspace_ref) catch |err| switch (err) { + error.GenericPoison => break :blk null, else => |e| return e, }; - break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val); + const coerced_addrspace = sema.coerce(block, addrspace_ty, uncoerced_addrspace, addrspace_src) catch |err| switch (err) { + error.GenericPoison => break :blk null, + else => |e| return e, + }; + const addrspace_val = sema.resolveConstDefinedValue(block, addrspace_src, coerced_addrspace, .{ + .needed_comptime_reason = "addrspace must be comptime-known", + }) catch |err| switch (err) { + error.GenericPoison => break :blk null, + else => |e| return e, + }; + break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_val); } else target_util.defaultAddressSpace(target, .function); const section: Section = if (extra.data.bits.has_section_body) blk: { @@ -25461,15 +25486,22 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } else if (extra.data.bits.has_cc_ref) blk: { const cc_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; - const cc_tv = sema.resolveInstConst(block, cc_src, cc_ref, .{ - .needed_comptime_reason = "calling convention must be comptime-known", - }) catch |err| switch (err) { - error.GenericPoison => { - break :blk null; - }, + const cc_ty = Type.fromInterned(.calling_convention_type); + const uncoerced_cc = sema.resolveInst(cc_ref) catch |err| switch (err) { + error.GenericPoison => break :blk null, else => |e| return e, }; - break :blk mod.toEnum(std.builtin.CallingConvention, cc_tv.val); + const coerced_cc = sema.coerce(block, cc_ty, uncoerced_cc, cc_src) catch |err| switch (err) { + error.GenericPoison => break :blk null, + else => |e| return e, + }; + const cc_val = sema.resolveConstDefinedValue(block, cc_src, coerced_cc, .{ + .needed_comptime_reason = "calling convention must be comptime-known", + }) catch |err| switch (err) { + error.GenericPoison => break :blk null, + else => |e| return e, + }; + break :blk mod.toEnum(std.builtin.CallingConvention, cc_val); } else if (sema.owner_decl.is_exported and has_body) .C else From 260c84535546c81028cf42f1eb6ec9f17275db0f Mon Sep 17 00:00:00 2001 From: mlugg Date: Wed, 14 Feb 2024 22:01:18 +0000 Subject: [PATCH 5/8] Zir: make src_node of type declarations non-optional Previously, the `src_node` field of `struct_decl`, `union_decl`, `enum_decl`, and `opaque_decl` was optional, included in trailing data only if a flag in `Small` was set. 
However, this was unnecessary logic: AstGen always provided the source node. We can simplify a few bits of logic by making this field non-optional, moving it into non-trailing data. There was one place where the field was actually omitted before: the root struct of a file was at source node 0, so the node was coincidentally elided. Therefore, this commit has a fixed cost of 4 bytes of ZIR per file. --- src/AstGen.zig | 44 +++++++++----------- src/Autodoc.zig | 57 ++++++-------------------- src/Sema.zig | 37 +++++------------ src/Zir.zig | 100 ++++++++++++++++++++++++++-------------------- src/print_zir.zig | 51 +++++++---------------- 5 files changed, 111 insertions(+), 178 deletions(-) diff --git a/src/AstGen.zig b/src/AstGen.zig index 4b9efa0408..bb2cab5c70 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -12918,20 +12918,20 @@ const GenZir = struct { const astgen = gz.astgen; const gpa = astgen.gpa; + // Node 0 is valid for the root `struct_decl` of a file! + assert(args.src_node != 0 or gz.parent.tag == .top); + const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash); - try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.StructDecl).Struct.fields.len + 6); + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.StructDecl).Struct.fields.len + 4); const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.StructDecl{ .fields_hash_0 = fields_hash_arr[0], .fields_hash_1 = fields_hash_arr[1], .fields_hash_2 = fields_hash_arr[2], .fields_hash_3 = fields_hash_arr[3], + .src_node = gz.nodeIndexToRelative(args.src_node), }); - if (args.src_node != 0) { - const node_offset = gz.nodeIndexToRelative(args.src_node); - astgen.extra.appendAssumeCapacity(@bitCast(node_offset)); - } if (args.fields_len != 0) { astgen.extra.appendAssumeCapacity(args.fields_len); } @@ -12949,7 +12949,6 @@ const GenZir = struct { .data = .{ .extended = .{ .opcode = .struct_decl, .small = @bitCast(Zir.Inst.StructDecl.Small{ - .has_src_node = args.src_node != 0, .has_fields_len = args.fields_len != 0, .has_decls_len = args.decls_len != 0, .has_backing_int = args.backing_int_ref != .none, @@ -12981,20 +12980,19 @@ const GenZir = struct { const astgen = gz.astgen; const gpa = astgen.gpa; + assert(args.src_node != 0); + const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash); - try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len + 5); + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len + 4); const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.UnionDecl{ .fields_hash_0 = fields_hash_arr[0], .fields_hash_1 = fields_hash_arr[1], .fields_hash_2 = fields_hash_arr[2], .fields_hash_3 = fields_hash_arr[3], + .src_node = gz.nodeIndexToRelative(args.src_node), }); - if (args.src_node != 0) { - const node_offset = gz.nodeIndexToRelative(args.src_node); - astgen.extra.appendAssumeCapacity(@bitCast(node_offset)); - } if (args.tag_type != .none) { astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type)); } @@ -13012,7 +13010,6 @@ const GenZir = struct { .data = .{ .extended = .{ .opcode = .union_decl, .small = @bitCast(Zir.Inst.UnionDecl.Small{ - .has_src_node = args.src_node != 0, .has_tag_type = args.tag_type != .none, .has_body_len = args.body_len != 0, .has_fields_len = args.fields_len != 0, @@ -13039,20 +13036,19 @@ const GenZir = struct { const astgen = gz.astgen; const gpa = astgen.gpa; + assert(args.src_node != 0); + const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash); - try 
astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len + 5); + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len + 4); const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.EnumDecl{ .fields_hash_0 = fields_hash_arr[0], .fields_hash_1 = fields_hash_arr[1], .fields_hash_2 = fields_hash_arr[2], .fields_hash_3 = fields_hash_arr[3], + .src_node = gz.nodeIndexToRelative(args.src_node), }); - if (args.src_node != 0) { - const node_offset = gz.nodeIndexToRelative(args.src_node); - astgen.extra.appendAssumeCapacity(@bitCast(node_offset)); - } if (args.tag_type != .none) { astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type)); } @@ -13070,7 +13066,6 @@ const GenZir = struct { .data = .{ .extended = .{ .opcode = .enum_decl, .small = @bitCast(Zir.Inst.EnumDecl.Small{ - .has_src_node = args.src_node != 0, .has_tag_type = args.tag_type != .none, .has_body_len = args.body_len != 0, .has_fields_len = args.fields_len != 0, @@ -13090,13 +13085,13 @@ const GenZir = struct { const astgen = gz.astgen; const gpa = astgen.gpa; - try astgen.extra.ensureUnusedCapacity(gpa, 2); - const payload_index: u32 = @intCast(astgen.extra.items.len); + assert(args.src_node != 0); + + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.OpaqueDecl).Struct.fields.len + 1); + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.OpaqueDecl{ + .src_node = gz.nodeIndexToRelative(args.src_node), + }); - if (args.src_node != 0) { - const node_offset = gz.nodeIndexToRelative(args.src_node); - astgen.extra.appendAssumeCapacity(@bitCast(node_offset)); - } if (args.decls_len != 0) { astgen.extra.appendAssumeCapacity(args.decls_len); } @@ -13105,7 +13100,6 @@ const GenZir = struct { .data = .{ .extended = .{ .opcode = .opaque_decl, .small = @bitCast(Zir.Inst.OpaqueDecl.Small{ - .has_src_node = args.src_node != 0, .has_decls_len = args.decls_len != 0, .name_strategy = gz.anon_name_strategy, }), diff --git a/src/Autodoc.zig b/src/Autodoc.zig index 6822d3a048..6ede3637f8 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -3395,19 +3395,10 @@ fn walkInstruction( .enclosing_type = type_slot_index, }; - const small = @as(Zir.Inst.OpaqueDecl.Small, @bitCast(extended.small)); - var extra_index: usize = extended.operand; + const extra = file.zir.extraData(Zir.Inst.OpaqueDecl, extended.operand); + var extra_index: usize = extra.end; - const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @as(i32, @bitCast(file.zir.extra[extra_index])); - extra_index += 1; - break :blk src_node; - } else null; - - const src_info = if (src_node) |sn| - try self.srcLocInfo(file, sn, parent_src) - else - parent_src; + const src_info = try self.srcLocInfo(file, extra.data.src_node, parent_src); var decl_indexes: std.ArrayListUnmanaged(usize) = .{}; var priv_decl_indexes: std.ArrayListUnmanaged(usize) = .{}; @@ -3498,18 +3489,10 @@ fn walkInstruction( }; const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small)); - var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len; + const extra = file.zir.extraData(Zir.Inst.UnionDecl, extended.operand); + var extra_index: usize = extra.end; - const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @as(i32, @bitCast(file.zir.extra[extra_index])); - extra_index += 1; - break :blk src_node; - } else null; - - const src_info = if (src_node) |sn| - try self.srcLocInfo(file, sn, parent_src) - else - parent_src; + const src_info = try 
self.srcLocInfo(file, extra.data.src_node, parent_src); // We delay analysis because union tags can refer to // decls defined inside the union itself. @@ -3628,18 +3611,10 @@ fn walkInstruction( }; const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small)); - var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len; + const extra = file.zir.extraData(Zir.Inst.EnumDecl, extended.operand); + var extra_index: usize = extra.end; - const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @as(i32, @bitCast(file.zir.extra[extra_index])); - extra_index += 1; - break :blk src_node; - } else null; - - const src_info = if (src_node) |sn| - try self.srcLocInfo(file, sn, parent_src) - else - parent_src; + const src_info = try self.srcLocInfo(file, extra.data.src_node, parent_src); const tag_type: ?DocData.Expr = if (small.has_tag_type) blk: { const tag_type = file.zir.extra[extra_index]; @@ -3779,18 +3754,10 @@ fn walkInstruction( }; const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); - var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; + const extra = file.zir.extraData(Zir.Inst.StructDecl, extended.operand); + var extra_index: usize = extra.end; - const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @as(i32, @bitCast(file.zir.extra[extra_index])); - extra_index += 1; - break :blk src_node; - } else null; - - const src_info = if (src_node) |sn| - try self.srcLocInfo(file, sn, parent_src) - else - parent_src; + const src_info = try self.srcLocInfo(file, extra.data.src_node, parent_src); const fields_len = if (small.has_fields_len) blk: { const fields_len = file.zir.extra[extra_index]; diff --git a/src/Sema.zig b/src/Sema.zig index 3465f923a8..45f0edcb56 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2725,7 +2725,6 @@ pub fn getStructType( const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; - extra_index += @intFromBool(small.has_src_node); const fields_len = if (small.has_fields_len) blk: { const fields_len = sema.code.extra[extra_index]; extra_index += 1; @@ -2778,10 +2777,7 @@ fn zirStructDecl( const mod = sema.mod; const ip = &mod.intern_pool; const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); - const src: LazySrcLoc = if (small.has_src_node) blk: { - const node_offset: i32 = @bitCast(sema.code.extra[extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len]); - break :blk LazySrcLoc.nodeOffset(node_offset); - } else unreachable; // MLUGG TODO + const src = sema.code.extraData(Zir.Inst.StructDecl, extended.operand).data.src(); // Because these three things each reference each other, `undefined` // placeholders are used before being set after the struct type gains an @@ -2941,13 +2937,10 @@ fn zirEnumDecl( const mod = sema.mod; const gpa = sema.gpa; const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small); - var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len; + const extra = sema.code.extraData(Zir.Inst.EnumDecl, extended.operand); + var extra_index: usize = extra.end; - const src: LazySrcLoc = if (small.has_src_node) blk: { - const node_offset: i32 = @bitCast(sema.code.extra[extra_index]); - extra_index += 1; - break :blk LazySrcLoc.nodeOffset(node_offset); - } else unreachable; // MLUGG TODO + const src = extra.data.src(); const tag_ty_src: LazySrcLoc = .{ 
.node_offset_container_tag = src.node_offset.x }; const tag_type_ref = if (small.has_tag_type) blk: { @@ -3214,13 +3207,10 @@ fn zirUnionDecl( const mod = sema.mod; const gpa = sema.gpa; const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small); - var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len; + const extra = sema.code.extraData(Zir.Inst.UnionDecl, extended.operand); + var extra_index: usize = extra.end; - const src: LazySrcLoc = if (small.has_src_node) blk: { - const node_offset: i32 = @bitCast(sema.code.extra[extra_index]); - extra_index += 1; - break :blk LazySrcLoc.nodeOffset(node_offset); - } else unreachable; // MLUGG TODO + const src = extra.data.src(); extra_index += @intFromBool(small.has_tag_type); extra_index += @intFromBool(small.has_body_len); @@ -3323,13 +3313,10 @@ fn zirOpaqueDecl( const mod = sema.mod; const small: Zir.Inst.OpaqueDecl.Small = @bitCast(extended.small); - var extra_index: usize = extended.operand; + const extra = sema.code.extraData(Zir.Inst.OpaqueDecl, extended.operand); + var extra_index: usize = extra.end; - const src: LazySrcLoc = if (small.has_src_node) blk: { - const node_offset: i32 = @bitCast(sema.code.extra[extra_index]); - extra_index += 1; - break :blk LazySrcLoc.nodeOffset(node_offset); - } else unreachable; // MLUGG TODO + const src = extra.data.src(); const decls_len = if (small.has_decls_len) blk: { const decls_len = sema.code.extra[extra_index]; @@ -35662,7 +35649,6 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) Comp if (small.has_backing_int) { var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; - extra_index += @intFromBool(small.has_src_node); extra_index += @intFromBool(small.has_fields_len); extra_index += @intFromBool(small.has_decls_len); @@ -36374,8 +36360,6 @@ fn structZirInfo(zir: Zir, zir_index: Zir.Inst.Index) struct { const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; - extra_index += @intFromBool(small.has_src_node); - const fields_len = if (small.has_fields_len) blk: { const fields_len = zir.extra[extra_index]; extra_index += 1; @@ -36843,7 +36827,6 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len; const src = LazySrcLoc.nodeOffset(0); - extra_index += @intFromBool(small.has_src_node); const tag_type_ref: Zir.Inst.Ref = if (small.has_tag_type) blk: { const ty_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]); diff --git a/src/Zir.zig b/src/Zir.zig index c76c1f2f62..61d1301167 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -3022,20 +3022,19 @@ pub const Inst = struct { }; /// Trailing: - /// 0. src_node: i32, // if has_src_node - /// 1. fields_len: u32, // if has_fields_len - /// 2. decls_len: u32, // if has_decls_len - /// 3. backing_int_body_len: u32, // if has_backing_int - /// 4. backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0 - /// 5. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0 - /// 6. decl: Index, // for every decls_len; points to a `declaration` instruction - /// 7. flags: u32 // for every 8 fields + /// 0. fields_len: u32, // if has_fields_len + /// 1. decls_len: u32, // if has_decls_len + /// 2. backing_int_body_len: u32, // if has_backing_int + /// 3. 
backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0 + /// 4. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0 + /// 5. decl: Index, // for every decls_len; points to a `declaration` instruction + /// 6. flags: u32 // for every 8 fields /// - sets of 4 bits: /// 0b000X: whether corresponding field has an align expression /// 0b00X0: whether corresponding field has a default expression /// 0b0X00: whether corresponding field is comptime /// 0bX000: whether corresponding field has a type expression - /// 8. fields: { // for every fields_len + /// 7. fields: { // for every fields_len /// field_name: u32, // if !is_tuple /// doc_comment: NullTerminatedString, // .empty if no doc comment /// field_type: Ref, // if corresponding bit is not set. none means anytype. @@ -3043,7 +3042,7 @@ pub const Inst = struct { /// align_body_len: u32, // if corresponding bit is set /// init_body_len: u32, // if corresponding bit is set /// } - /// 10. bodies: { // for every fields_len + /// 8. bodies: { // for every fields_len /// field_type_body_inst: Inst, // for each field_type_body_len /// align_body_inst: Inst, // for each align_body_len /// init_body_inst: Inst, // for each init_body_len @@ -3055,8 +3054,13 @@ pub const Inst = struct { fields_hash_1: u32, fields_hash_2: u32, fields_hash_3: u32, + src_node: i32, + + pub fn src(self: StructDecl) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + pub const Small = packed struct { - has_src_node: bool, has_fields_len: bool, has_decls_len: bool, has_backing_int: bool, @@ -3068,7 +3072,7 @@ pub const Inst = struct { any_default_inits: bool, any_comptime_fields: bool, any_aligned_fields: bool, - _: u2 = undefined, + _: u3 = undefined, }; }; @@ -3102,16 +3106,15 @@ pub const Inst = struct { }; /// Trailing: - /// 0. src_node: i32, // if has_src_node - /// 1. tag_type: Ref, // if has_tag_type - /// 2. body_len: u32, // if has_body_len - /// 3. fields_len: u32, // if has_fields_len - /// 4. decls_len: u32, // if has_decls_len - /// 5. decl: Index, // for every decls_len; points to a `declaration` instruction - /// 6. inst: Index // for every body_len - /// 7. has_bits: u32 // for every 32 fields + /// 0. tag_type: Ref, // if has_tag_type + /// 1. body_len: u32, // if has_body_len + /// 2. fields_len: u32, // if has_fields_len + /// 3. decls_len: u32, // if has_decls_len + /// 4. decl: Index, // for every decls_len; points to a `declaration` instruction + /// 5. inst: Index // for every body_len + /// 6. has_bits: u32 // for every 32 fields /// - the bit is whether corresponding field has an value expression - /// 8. fields: { // for every fields_len + /// 7. fields: { // for every fields_len /// field_name: u32, /// doc_comment: u32, // .empty if no doc_comment /// value: Ref, // if corresponding bit is set @@ -3123,33 +3126,37 @@ pub const Inst = struct { fields_hash_1: u32, fields_hash_2: u32, fields_hash_3: u32, + src_node: i32, + + pub fn src(self: EnumDecl) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + pub const Small = packed struct { - has_src_node: bool, has_tag_type: bool, has_body_len: bool, has_fields_len: bool, has_decls_len: bool, name_strategy: NameStrategy, nonexhaustive: bool, - _: u8 = undefined, + _: u9 = undefined, }; }; /// Trailing: - /// 0. src_node: i32, // if has_src_node - /// 1. tag_type: Ref, // if has_tag_type - /// 2. body_len: u32, // if has_body_len - /// 3. fields_len: u32, // if has_fields_len - /// 4. 
decls_len: u32, // if has_decls_len - /// 5. decl: Index, // for every decls_len; points to a `declaration` instruction - /// 6. inst: Index // for every body_len - /// 7. has_bits: u32 // for every 8 fields + /// 0. tag_type: Ref, // if has_tag_type + /// 1. body_len: u32, // if has_body_len + /// 2. fields_len: u32, // if has_fields_len + /// 3. decls_len: u32, // if has_decls_len + /// 4. decl: Index, // for every decls_len; points to a `declaration` instruction + /// 5. inst: Index // for every body_len + /// 6. has_bits: u32 // for every 8 fields /// - sets of 4 bits: /// 0b000X: whether corresponding field has a type expression /// 0b00X0: whether corresponding field has a align expression /// 0b0X00: whether corresponding field has a tag value expression /// 0bX000: unused - /// 8. fields: { // for every fields_len + /// 7. fields: { // for every fields_len /// field_name: NullTerminatedString, // null terminated string index /// doc_comment: NullTerminatedString, // .empty if no doc comment /// field_type: Ref, // if corresponding bit is set @@ -3164,8 +3171,13 @@ pub const Inst = struct { fields_hash_1: u32, fields_hash_2: u32, fields_hash_3: u32, + src_node: i32, + + pub fn src(self: UnionDecl) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + pub const Small = packed struct { - has_src_node: bool, has_tag_type: bool, has_body_len: bool, has_fields_len: bool, @@ -3180,20 +3192,24 @@ pub const Inst = struct { /// true | false | union(T) { } auto_enum_tag: bool, any_aligned_fields: bool, - _: u5 = undefined, + _: u6 = undefined, }; }; /// Trailing: - /// 0. src_node: i32, // if has_src_node - /// 1. decls_len: u32, // if has_decls_len - /// 2. decl: Index, // for every decls_len; points to a `declaration` instruction + /// 0. decls_len: u32, // if has_decls_len + /// 1. 
decl: Index, // for every decls_len; points to a `declaration` instruction pub const OpaqueDecl = struct { + src_node: i32, + + pub fn src(self: OpaqueDecl) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + pub const Small = packed struct { - has_src_node: bool, has_decls_len: bool, name_strategy: NameStrategy, - _: u12 = undefined, + _: u13 = undefined, }; }; @@ -3495,7 +3511,6 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator { .struct_decl => { const small: Inst.StructDecl.Small = @bitCast(extended.small); var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.StructDecl).Struct.fields.len); - extra_index += @intFromBool(small.has_src_node); extra_index += @intFromBool(small.has_fields_len); const decls_len = if (small.has_decls_len) decls_len: { const decls_len = zir.extra[extra_index]; @@ -3522,7 +3537,6 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator { .enum_decl => { const small: Inst.EnumDecl.Small = @bitCast(extended.small); var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.EnumDecl).Struct.fields.len); - extra_index += @intFromBool(small.has_src_node); extra_index += @intFromBool(small.has_tag_type); extra_index += @intFromBool(small.has_body_len); extra_index += @intFromBool(small.has_fields_len); @@ -3541,7 +3555,6 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator { .union_decl => { const small: Inst.UnionDecl.Small = @bitCast(extended.small); var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.UnionDecl).Struct.fields.len); - extra_index += @intFromBool(small.has_src_node); extra_index += @intFromBool(small.has_tag_type); extra_index += @intFromBool(small.has_body_len); extra_index += @intFromBool(small.has_fields_len); @@ -3559,8 +3572,7 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator { }, .opaque_decl => { const small: Inst.OpaqueDecl.Small = @bitCast(extended.small); - var extra_index: u32 = extended.operand; - extra_index += @intFromBool(small.has_src_node); + var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.OpaqueDecl).Struct.fields.len); const decls_len = if (small.has_decls_len) decls_len: { const decls_len = zir.extra[extra_index]; extra_index += 1; diff --git a/src/print_zir.zig b/src/print_zir.zig index 8e5fbc1788..226b09a5c4 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -1405,12 +1405,6 @@ const Writer = struct { var extra_index: usize = extra.end; - const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @as(i32, @bitCast(self.code.extra[extra_index])); - extra_index += 1; - break :blk src_node; - } else null; - const fields_len = if (small.has_fields_len) blk: { const fields_len = self.code.extra[extra_index]; extra_index += 1; @@ -1453,7 +1447,7 @@ const Writer = struct { try stream.writeAll("{}, "); } else { const prev_parent_decl_node = self.parent_decl_node; - if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off); + self.parent_decl_node = self.relativeToNodeIndex(extra.data.src_node); defer self.parent_decl_node = prev_parent_decl_node; try stream.writeAll("{\n"); @@ -1534,7 +1528,7 @@ const Writer = struct { } const prev_parent_decl_node = self.parent_decl_node; - if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off); + self.parent_decl_node = self.relativeToNodeIndex(extra.data.src_node); try stream.writeAll("{\n"); self.indent += 2; @@ -1587,7 +1581,7 @@ const Writer = struct { try stream.writeByteNTimes(' ', 
self.indent); try stream.writeAll("})"); } - try self.writeSrcNode(stream, src_node); + try self.writeSrcNode(stream, extra.data.src_node); } fn writeUnionDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { @@ -1605,12 +1599,6 @@ const Writer = struct { var extra_index: usize = extra.end; - const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @as(i32, @bitCast(self.code.extra[extra_index])); - extra_index += 1; - break :blk src_node; - } else null; - const tag_type_ref = if (small.has_tag_type) blk: { const tag_type_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; @@ -1644,7 +1632,7 @@ const Writer = struct { try stream.writeAll("{}"); } else { const prev_parent_decl_node = self.parent_decl_node; - if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off); + self.parent_decl_node = self.relativeToNodeIndex(extra.data.src_node); defer self.parent_decl_node = prev_parent_decl_node; try stream.writeAll("{\n"); @@ -1663,7 +1651,7 @@ const Writer = struct { if (fields_len == 0) { try stream.writeAll("})"); - try self.writeSrcNode(stream, src_node); + try self.writeSrcNode(stream, extra.data.src_node); return; } try stream.writeAll(", "); @@ -1672,7 +1660,7 @@ const Writer = struct { extra_index += body.len; const prev_parent_decl_node = self.parent_decl_node; - if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off); + self.parent_decl_node = self.relativeToNodeIndex(extra.data.src_node); try self.writeBracedDecl(stream, body); try stream.writeAll(", {\n"); @@ -1740,7 +1728,7 @@ const Writer = struct { self.indent -= 2; try stream.writeByteNTimes(' ', self.indent); try stream.writeAll("})"); - try self.writeSrcNode(stream, src_node); + try self.writeSrcNode(stream, extra.data.src_node); } fn writeEnumDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { @@ -1758,12 +1746,6 @@ const Writer = struct { var extra_index: usize = extra.end; - const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @as(i32, @bitCast(self.code.extra[extra_index])); - extra_index += 1; - break :blk src_node; - } else null; - const tag_type_ref = if (small.has_tag_type) blk: { const tag_type_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; @@ -1795,7 +1777,7 @@ const Writer = struct { try stream.writeAll("{}, "); } else { const prev_parent_decl_node = self.parent_decl_node; - if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off); + self.parent_decl_node = self.relativeToNodeIndex(extra.data.src_node); defer self.parent_decl_node = prev_parent_decl_node; try stream.writeAll("{\n"); @@ -1816,7 +1798,7 @@ const Writer = struct { extra_index += body.len; const prev_parent_decl_node = self.parent_decl_node; - if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off); + self.parent_decl_node = self.relativeToNodeIndex(extra.data.src_node); try self.writeBracedDecl(stream, body); if (fields_len == 0) { try stream.writeAll(", {})"); @@ -1864,7 +1846,7 @@ const Writer = struct { try stream.writeByteNTimes(' ', self.indent); try stream.writeAll("})"); } - try self.writeSrcNode(stream, src_node); + try self.writeSrcNode(stream, extra.data.src_node); } fn writeOpaqueDecl( @@ -1873,13 +1855,8 @@ const Writer = struct { extended: Zir.Inst.Extended.InstData, ) !void { const small = @as(Zir.Inst.OpaqueDecl.Small, @bitCast(extended.small)); - var extra_index: usize = extended.operand; - - const 
src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @as(i32, @bitCast(self.code.extra[extra_index])); - extra_index += 1; - break :blk src_node; - } else null; + const extra = self.code.extraData(Zir.Inst.OpaqueDecl, extended.operand); + var extra_index: usize = extra.end; const decls_len = if (small.has_decls_len) blk: { const decls_len = self.code.extra[extra_index]; @@ -1893,7 +1870,7 @@ const Writer = struct { try stream.writeAll("{})"); } else { const prev_parent_decl_node = self.parent_decl_node; - if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off); + self.parent_decl_node = self.relativeToNodeIndex(extra.data.src_node); defer self.parent_decl_node = prev_parent_decl_node; try stream.writeAll("{\n"); @@ -1903,7 +1880,7 @@ const Writer = struct { try stream.writeByteNTimes(' ', self.indent); try stream.writeAll("})"); } - try self.writeSrcNode(stream, src_node); + try self.writeSrcNode(stream, extra.data.src_node); } fn writeErrorSetDecl( From e6cf3ce24c42d4a2dffd4f0204a22a31eef3c562 Mon Sep 17 00:00:00 2001 From: mlugg Date: Fri, 16 Feb 2024 11:25:32 +0000 Subject: [PATCH 6/8] Sema: correct source location for return value coercion errors When coercing the operand of a `ret_node` etc instruction, the source location for errors used to point to the entire `return` statement. Instead, we now point to the operand, as would be expected if there was an explicit `as_node` instruction (like there used to be). --- src/Module.zig | 15 +++++++++++++++ src/Sema.zig | 15 ++++++++------- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index d332c4db53..66d6aa3fe5 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1867,6 +1867,16 @@ pub const SrcLoc = struct { else => return nodeToSpan(tree, node), } }, + .node_offset_return_operand => |node_off| { + const tree = try src_loc.file_scope.getTree(gpa); + const node = src_loc.declRelativeToNodeIndex(node_off); + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + if (node_tags[node] == .@"return" and node_datas[node].lhs != 0) { + return nodeToSpan(tree, node_datas[node].lhs); + } + return nodeToSpan(tree, node); + }, } } @@ -2221,6 +2231,10 @@ pub const LazySrcLoc = union(enum) { /// The source location points to the RHS of an assignment. /// The Decl is determined contextually. node_offset_store_operand: i32, + /// The source location points to the operand of a `return` statement, or + /// the `return` itself if there is no explicit operand. + /// The Decl is determined contextually. + node_offset_return_operand: i32, /// The source location points to a for loop input. /// The Decl is determined contextually. 
for_input: struct { @@ -2347,6 +2361,7 @@ pub const LazySrcLoc = union(enum) { .node_offset_init_ty, .node_offset_store_ptr, .node_offset_store_operand, + .node_offset_return_operand, .for_input, .for_capture_from_input, .array_cat_lhs, diff --git a/src/Sema.zig b/src/Sema.zig index 45f0edcb56..ce231af6fd 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -19182,7 +19182,7 @@ fn zirRetErrValue( .ty = error_set_type.toIntern(), .name = err_name, } }))); - return sema.analyzeRet(block, result_inst, src); + return sema.analyzeRet(block, result_inst, src, src); } fn zirRetImplicit( @@ -19232,7 +19232,7 @@ fn zirRetImplicit( return sema.failWithOwnedErrorMsg(block, msg); } - return sema.analyzeRet(block, operand, r_brace_src); + return sema.analyzeRet(block, operand, r_brace_src, r_brace_src); } fn zirRetNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { @@ -19243,7 +19243,7 @@ fn zirRetNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir const operand = try sema.resolveInst(inst_data.operand); const src = inst_data.src(); - return sema.analyzeRet(block, operand, src); + return sema.analyzeRet(block, operand, src, .{ .node_offset_return_operand = inst_data.src_node }); } fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { @@ -19256,7 +19256,7 @@ fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir if (block.is_comptime or block.inlining != null or sema.func_is_naked) { const operand = try sema.analyzeLoad(block, src, ret_ptr, src); - return sema.analyzeRet(block, operand, src); + return sema.analyzeRet(block, operand, src, .{ .node_offset_return_operand = inst_data.src_node }); } if (sema.wantErrorReturnTracing(sema.fn_ret_ty)) { @@ -19450,6 +19450,7 @@ fn analyzeRet( block: *Block, uncasted_operand: Air.Inst.Ref, src: LazySrcLoc, + operand_src: LazySrcLoc, ) CompileError!Zir.Inst.Index { // Special case for returning an error to an inferred error set; we need to // add the error tag to the inferred error set of the in-scope function, so @@ -19458,14 +19459,14 @@ fn analyzeRet( if (sema.fn_ret_ty_ies != null and sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) { try sema.addToInferredErrorSet(uncasted_operand); } - const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, src, .{ .is_ret = true }) catch |err| switch (err) { + const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, operand_src, .{ .is_ret = true }) catch |err| switch (err) { error.NotCoercible => unreachable, else => |e| return e, }; if (block.inlining) |inlining| { if (block.is_comptime) { - const ret_val = try sema.resolveConstValue(block, src, operand, .{ + const ret_val = try sema.resolveConstValue(block, operand_src, operand, .{ .needed_comptime_reason = "value being returned at comptime must be comptime-known", }); inlining.comptime_result = operand; @@ -19500,7 +19501,7 @@ fn analyzeRet( if (sema.wantErrorReturnTracing(sema.fn_ret_ty)) { // Avoid adding a frame to the error return trace in case the value is comptime-known // to be not an error. 
- const is_non_err = try sema.analyzeIsNonErr(block, src, operand); + const is_non_err = try sema.analyzeIsNonErr(block, operand_src, operand); return sema.retWithErrTracing(block, src, is_non_err, air_tag, operand); } From 5a957a32811602b7e2035e2046e565b3981d633d Mon Sep 17 00:00:00 2001 From: mlugg Date: Fri, 16 Feb 2024 12:03:43 +0000 Subject: [PATCH 7/8] Sema: improved source location for @panic operand coercion error Similar to the previous commit, errors coercing the panic message to `[]const u8` now point at the operand to `@panic` rather than the actual builtin call. --- src/Sema.zig | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Sema.zig b/src/Sema.zig index ce231af6fd..1ee036c15c 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5671,10 +5671,14 @@ fn zirPanic(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.I const src = inst_data.src(); const msg_inst = try sema.resolveInst(inst_data.operand); + // `panicWithMsg` would perform this coercion for us, but we can get a better + // source location if we do it here. + const coerced_msg = try sema.coerce(block, Type.slice_const_u8, msg_inst, .{ .node_offset_builtin_call_arg0 = inst_data.src_node }); + if (block.is_comptime) { return sema.fail(block, src, "encountered @panic at comptime", .{}); } - try sema.panicWithMsg(block, src, msg_inst, .@"@panic"); + try sema.panicWithMsg(block, src, coerced_msg, .@"@panic"); return always_noreturn; } From b2f28a104deb96eead604aa4a1a5a703689e2b61 Mon Sep 17 00:00:00 2001 From: mlugg Date: Fri, 16 Feb 2024 12:15:39 +0000 Subject: [PATCH 8/8] cases: account for changed compile errors --- .../compile_errors/non_constant_expression_in_array_size.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/cases/compile_errors/non_constant_expression_in_array_size.zig b/test/cases/compile_errors/non_constant_expression_in_array_size.zig index 7c4594a3e3..70d1655eb7 100644 --- a/test/cases/compile_errors/non_constant_expression_in_array_size.zig +++ b/test/cases/compile_errors/non_constant_expression_in_array_size.zig @@ -14,6 +14,6 @@ export fn entry() usize { // backend=stage2 // target=native // -// :6:5: error: unable to resolve comptime value -// :6:5: note: value being returned at comptime must be comptime-known +// :6:12: error: unable to resolve comptime value +// :6:12: note: value being returned at comptime must be comptime-known // :2:12: note: called from here
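Illustrative sketch (not part of the patch series; the function and literal below are hypothetical): the kind of user code whose diagnostics move with patches 6 through 8.

    // Expected to fail compilation. With patch 6/8 the "expected type 'u32',
    // found ..." error is reported at the string literal (the return operand)
    // rather than at the whole `return` statement; patch 7/8 moves the
    // coercion error for a bad `@panic` message to the builtin's operand in
    // the same way.
    pub fn main() void {
        _ = answer();
    }

    fn answer() u32 {
        return "hello";
    }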