Merge pull request #18909 from mlugg/random-enhancements

A loose collection of random enhancements:
* Eliminate `Sema.src` field
* Optimize size of ZIR
* Slightly improve some compile errors
This commit is contained in:
Matthew Lugg 2024-02-16 19:09:57 +00:00 committed by GitHub
commit 88d4b5cb18
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 541 additions and 512 deletions

View File

@ -44,6 +44,9 @@ compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .{},
/// The topmost block of the current function.
fn_block: ?*GenZir = null,
fn_var_args: bool = false,
/// The return type of the current function. This may be a trivial `Ref`, or
/// otherwise it refers to a `ret_type` instruction.
fn_ret_ty: Zir.Inst.Ref = .none,
/// Maps string table indexes to the first `@import` ZIR instruction
/// that uses this string as the operand.
imports: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, Ast.TokenIndex) = .{},
@ -357,16 +360,11 @@ const ResultInfo = struct {
};
};
/// TODO: modify Sema to remove in favour of `coerced_align_ri`
const align_ri: ResultInfo = .{ .rl = .{ .ty = .u29_type } };
const coerced_align_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .u29_type } };
/// TODO: modify Sema to remove in favour of `coerced_addrspace_ri`
const addrspace_ri: ResultInfo = .{ .rl = .{ .ty = .address_space_type } };
const coerced_addrspace_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .address_space_type } };
const coerced_linksection_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .slice_const_u8_type } };
const bool_ri: ResultInfo = .{ .rl = .{ .ty = .bool_type } };
const type_ri: ResultInfo = .{ .rl = .{ .ty = .type_type } };
const coerced_type_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .type_type } };
const coerced_bool_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .bool_type } };
fn typeExpr(gz: *GenZir, scope: *Scope, type_node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
return comptimeExpr(gz, scope, coerced_type_ri, type_node);
@ -783,7 +781,7 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE
.bool_and => return boolBinOp(gz, scope, ri, node, .bool_br_and),
.bool_or => return boolBinOp(gz, scope, ri, node, .bool_br_or),
.bool_not => return simpleUnOp(gz, scope, ri, node, bool_ri, node_datas[node].lhs, .bool_not),
.bool_not => return simpleUnOp(gz, scope, ri, node, coerced_bool_ri, node_datas[node].lhs, .bool_not),
.bit_not => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, node_datas[node].lhs, .bit_not),
.negation => return negation(gz, scope, ri, node),
@ -1369,7 +1367,7 @@ fn fnProtoExpr(
};
const align_ref: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: {
break :inst try expr(&block_scope, scope, align_ri, fn_proto.ast.align_expr);
break :inst try expr(&block_scope, scope, coerced_align_ri, fn_proto.ast.align_expr);
};
if (fn_proto.ast.addrspace_expr != 0) {
@ -1384,7 +1382,7 @@ fn fnProtoExpr(
try expr(
&block_scope,
scope,
.{ .rl = .{ .ty = .calling_convention_type } },
.{ .rl = .{ .coerced_ty = .calling_convention_type } },
fn_proto.ast.callconv_expr,
)
else
@ -2119,7 +2117,7 @@ fn restoreErrRetIndex(
else => .none, // always restore/pop
},
};
_ = try gz.addRestoreErrRetIndex(bt, .{ .if_non_error = op });
_ = try gz.addRestoreErrRetIndex(bt, .{ .if_non_error = op }, node);
}
fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
@ -2176,7 +2174,7 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn
// As our last action before the break, "pop" the error trace if needed
if (!block_gz.is_comptime)
_ = try parent_gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always);
_ = try parent_gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always, node);
_ = try parent_gz.addBreak(break_tag, block_inst, .void_value);
return Zir.Inst.Ref.unreachable_value;
@ -2268,7 +2266,7 @@ fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index)
// As our last action before the continue, "pop" the error trace if needed
if (!gen_zir.is_comptime)
_ = try parent_gz.addRestoreErrRetIndex(.{ .block = continue_block }, .always);
_ = try parent_gz.addRestoreErrRetIndex(.{ .block = continue_block }, .always, node);
_ = try parent_gz.addBreak(break_tag, continue_block, .void_value);
return Zir.Inst.Ref.unreachable_value;
@ -2328,7 +2326,7 @@ fn blockExpr(
if (!block_scope.endsWithNoReturn()) {
// As our last action before the break, "pop" the error trace if needed
_ = try gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always);
_ = try gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always, block_node);
_ = try block_scope.addBreak(.@"break", block_inst, .void_value);
}
@ -2423,7 +2421,7 @@ fn labeledBlockExpr(
try blockExprStmts(&block_scope, &block_scope.base, statements);
if (!block_scope.endsWithNoReturn()) {
// As our last action before the return, "pop" the error trace if needed
_ = try gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always);
_ = try gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always, block_node);
_ = try block_scope.addBreak(.@"break", block_inst, .void_value);
}
@ -2815,7 +2813,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.export_value,
.set_eval_branch_quota,
.atomic_store,
.store,
.store_node,
.store_to_inferred_ptr,
.resolve_inferred_alloc,
@ -2826,7 +2823,8 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.validate_deref,
.validate_destructure,
.save_err_ret_index,
.restore_err_ret_index,
.restore_err_ret_index_unconditional,
.restore_err_ret_index_fn_entry,
.validate_struct_init_ty,
.validate_struct_init_result_ty,
.validate_ptr_struct_init,
@ -3133,7 +3131,7 @@ fn varDecl(
}
const align_inst: Zir.Inst.Ref = if (var_decl.ast.align_node != 0)
try expr(gz, scope, align_ri, var_decl.ast.align_node)
try expr(gz, scope, coerced_align_ri, var_decl.ast.align_node)
else
.none;
@ -3329,31 +3327,11 @@ fn emitDbgNode(gz: *GenZir, node: Ast.Node.Index) !void {
// If the current block will be evaluated only during semantic analysis
// then no dbg_stmt ZIR instruction is needed.
if (gz.is_comptime) return;
const astgen = gz.astgen;
astgen.advanceSourceCursorToNode(node);
const line = astgen.source_line - gz.decl_line;
const column = astgen.source_column;
if (gz.instructions.items.len > 0) {
const last = gz.instructions.items[gz.instructions.items.len - 1];
const zir_tags = astgen.instructions.items(.tag);
if (zir_tags[@intFromEnum(last)] == .dbg_stmt) {
const zir_datas = astgen.instructions.items(.data);
zir_datas[@intFromEnum(last)].dbg_stmt = .{
.line = line,
.column = column,
};
return;
}
}
_ = try gz.add(.{ .tag = .dbg_stmt, .data = .{
.dbg_stmt = .{
.line = line,
.column = column,
},
} });
try emitDbgStmt(gz, .{ line, column });
}
fn assign(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!void {
@ -3522,7 +3500,7 @@ fn assignDestructureMaybeDecls(
const this_lhs_comptime = is_comptime or (is_const and rhs_is_comptime);
const align_inst: Zir.Inst.Ref = if (full.ast.align_node != 0)
try expr(gz, scope, align_ri, full.ast.align_node)
try expr(gz, scope, coerced_align_ri, full.ast.align_node)
else
.none;
@ -3709,7 +3687,10 @@ fn assignOp(
.lhs = lhs,
.rhs = rhs,
});
_ = try gz.addBin(.store, lhs_ptr, result);
_ = try gz.addPlNode(.store_node, infix_node, Zir.Inst.Bin{
.lhs = lhs_ptr,
.rhs = result,
});
}
fn assignShift(
@ -3732,7 +3713,10 @@ fn assignShift(
.lhs = lhs,
.rhs = rhs,
});
_ = try gz.addBin(.store, lhs_ptr, result);
_ = try gz.addPlNode(.store_node, infix_node, Zir.Inst.Bin{
.lhs = lhs_ptr,
.rhs = result,
});
}
fn assignShiftSat(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!void {
@ -3750,7 +3734,10 @@ fn assignShiftSat(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerE
.lhs = lhs,
.rhs = rhs,
});
_ = try gz.addBin(.store, lhs_ptr, result);
_ = try gz.addPlNode(.store_node, infix_node, Zir.Inst.Bin{
.lhs = lhs_ptr,
.rhs = result,
});
}
fn ptrType(
@ -3791,7 +3778,7 @@ fn ptrType(
gz.astgen.source_line = source_line;
gz.astgen.source_column = source_column;
addrspace_ref = try expr(gz, scope, addrspace_ri, ptr_info.ast.addrspace_node);
addrspace_ref = try expr(gz, scope, coerced_addrspace_ri, ptr_info.ast.addrspace_node);
trailing_count += 1;
}
if (ptr_info.ast.align_node != 0) {
@ -4184,7 +4171,7 @@ fn fnDecl(
var addrspace_gz = decl_gz.makeSubBlock(params_scope);
defer addrspace_gz.unstack();
const addrspace_ref: Zir.Inst.Ref = if (fn_proto.ast.addrspace_expr == 0) .none else inst: {
const inst = try expr(&decl_gz, params_scope, addrspace_ri, fn_proto.ast.addrspace_expr);
const inst = try expr(&decl_gz, params_scope, coerced_addrspace_ri, fn_proto.ast.addrspace_expr);
if (addrspace_gz.instructionsSlice().len == 0) {
// In this case we will send a len=0 body which can be encoded more efficiently.
break :inst inst;
@ -4284,8 +4271,19 @@ fn fnDecl(
fn_gz.instructions_top = ret_gz.instructions.items.len;
const prev_fn_block = astgen.fn_block;
const prev_fn_ret_ty = astgen.fn_ret_ty;
astgen.fn_block = &fn_gz;
defer astgen.fn_block = prev_fn_block;
astgen.fn_ret_ty = if (is_inferred_error or ret_ref.toIndex() != null) r: {
// We're essentially guaranteed to need the return type at some point,
// since the return type is likely not `void` or `noreturn` so there
// will probably be an explicit return requiring RLS. Fetch this
// return type now so the rest of the function can use it.
break :r try fn_gz.addNode(.ret_type, decl_node);
} else ret_ref;
defer {
astgen.fn_block = prev_fn_block;
astgen.fn_ret_ty = prev_fn_ret_ty;
}
const prev_var_args = astgen.fn_var_args;
astgen.fn_var_args = is_var_args;
@ -4300,7 +4298,7 @@ fn fnDecl(
if (!fn_gz.endsWithNoReturn()) {
// As our last action before the return, "pop" the error trace if needed
_ = try gz.addRestoreErrRetIndex(.ret, .always);
_ = try gz.addRestoreErrRetIndex(.ret, .always, decl_node);
// Add implicit return at end of function.
_ = try fn_gz.addUnTok(.ret_implicit, .void_value, tree.lastToken(body_node));
@ -4428,7 +4426,7 @@ fn globalVarDecl(
try expr(
&block_scope,
&block_scope.base,
.{ .rl = .{ .ty = .type_type } },
coerced_type_ri,
var_decl.ast.type_node,
)
else
@ -4732,8 +4730,13 @@ fn testDecl(
defer fn_block.unstack();
const prev_fn_block = astgen.fn_block;
const prev_fn_ret_ty = astgen.fn_ret_ty;
astgen.fn_block = &fn_block;
defer astgen.fn_block = prev_fn_block;
astgen.fn_ret_ty = .anyerror_void_error_union_type;
defer {
astgen.fn_block = prev_fn_block;
astgen.fn_ret_ty = prev_fn_ret_ty;
}
astgen.advanceSourceCursorToNode(body_node);
const lbrace_line = astgen.source_line - decl_block.decl_line;
@ -4743,7 +4746,7 @@ fn testDecl(
if (fn_block.isEmpty() or !fn_block.refIsNoReturn(block_result)) {
// As our last action before the return, "pop" the error trace if needed
_ = try gz.addRestoreErrRetIndex(.ret, .always);
_ = try gz.addRestoreErrRetIndex(.ret, .always, node);
// Add implicit return at end of function.
_ = try fn_block.addUnTok(.ret_implicit, .void_value, tree.lastToken(body_node));
@ -5246,7 +5249,7 @@ fn unionDeclInner(
return astgen.failNode(member_node, "union field missing type", .{});
}
if (have_align) {
const align_inst = try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .u32_type } }, member.ast.align_expr);
const align_inst = try expr(&block_scope, &block_scope.base, coerced_align_ri, member.ast.align_expr);
wip_members.appendToField(@intFromEnum(align_inst));
any_aligned_fields = true;
}
@ -5514,7 +5517,7 @@ fn containerDecl(
namespace.base.tag = .enum_namespace;
const arg_inst: Zir.Inst.Ref = if (container_decl.ast.arg != 0)
try comptimeExpr(&block_scope, &namespace.base, .{ .rl = .{ .ty = .type_type } }, container_decl.ast.arg)
try comptimeExpr(&block_scope, &namespace.base, coerced_type_ri, container_decl.ast.arg)
else
.none;
@ -6071,7 +6074,7 @@ fn arrayAccess(
const cursor = maybeAdvanceSourceCursorToMainToken(gz, node);
const rhs = try expr(gz, scope, .{ .rl = .{ .ty = .usize_type } }, node_datas[node].rhs);
const rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[node].rhs);
try emitDbgStmt(gz, cursor);
return gz.addPlNode(.elem_ptr_node, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs });
@ -6081,7 +6084,7 @@ fn arrayAccess(
const cursor = maybeAdvanceSourceCursorToMainToken(gz, node);
const rhs = try expr(gz, scope, .{ .rl = .{ .ty = .usize_type } }, node_datas[node].rhs);
const rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[node].rhs);
try emitDbgStmt(gz, cursor);
return rvalue(gz, ri, try gz.addPlNode(.elem_val_node, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }), node);
@ -6149,16 +6152,16 @@ fn boolBinOp(
const tree = astgen.tree;
const node_datas = tree.nodes.items(.data);
const lhs = try expr(gz, scope, bool_ri, node_datas[node].lhs);
const bool_br = try gz.addBoolBr(zir_tag, lhs);
const lhs = try expr(gz, scope, coerced_bool_ri, node_datas[node].lhs);
const bool_br = (try gz.addPlNodePayloadIndex(zir_tag, node, undefined)).toIndex().?;
var rhs_scope = gz.makeSubBlock(scope);
defer rhs_scope.unstack();
const rhs = try expr(&rhs_scope, &rhs_scope.base, bool_ri, node_datas[node].rhs);
const rhs = try expr(&rhs_scope, &rhs_scope.base, coerced_bool_ri, node_datas[node].rhs);
if (!gz.refIsNoReturn(rhs)) {
_ = try rhs_scope.addBreakWithSrcNode(.break_inline, bool_br, rhs, node_datas[node].rhs);
}
try rhs_scope.setBoolBrBody(bool_br);
try rhs_scope.setBoolBrBody(bool_br, lhs);
const block_ref = bool_br.toRef();
return rvalue(gz, ri, block_ref, node);
@ -6222,7 +6225,7 @@ fn ifExpr(
.bool_bit = try block_scope.addUnNode(tag, optional, if_full.ast.cond_expr),
};
} else {
const cond = try expr(&block_scope, &block_scope.base, bool_ri, if_full.ast.cond_expr);
const cond = try expr(&block_scope, &block_scope.base, coerced_bool_ri, if_full.ast.cond_expr);
break :c .{
.inst = cond,
.bool_bit = cond,
@ -6468,7 +6471,7 @@ fn whileExpr(
.bool_bit = try cond_scope.addUnNode(tag, optional, while_full.ast.cond_expr),
};
} else {
const cond = try expr(&cond_scope, &cond_scope.base, bool_ri, while_full.ast.cond_expr);
const cond = try expr(&cond_scope, &cond_scope.base, coerced_bool_ri, while_full.ast.cond_expr);
break :c .{
.inst = cond,
.bool_bit = cond,
@ -6726,7 +6729,10 @@ fn forExpr(
const alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime_mut else .alloc;
const index_ptr = try parent_gz.addUnNode(alloc_tag, .usize_type, node);
// initialize to zero
_ = try parent_gz.addBin(.store, index_ptr, .zero_usize);
_ = try parent_gz.addPlNode(.store_node, node, Zir.Inst.Bin{
.lhs = index_ptr,
.rhs = .zero_usize,
});
break :blk index_ptr;
};
@ -6956,7 +6962,10 @@ fn forExpr(
.lhs = index,
.rhs = .one_usize,
});
_ = try loop_scope.addBin(.store, index_ptr, index_plus_one);
_ = try loop_scope.addPlNode(.store_node, node, Zir.Inst.Bin{
.lhs = index_ptr,
.rhs = index_plus_one,
});
const repeat_tag: Zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat;
_ = try loop_scope.addNode(repeat_tag, node);
@ -7124,7 +7133,7 @@ fn switchExprErrUnion(
block_scope.setBreakResultInfo(block_ri);
// Sema expects a dbg_stmt immediately before switch_block_err_union
try emitDbgStmt(parent_gz, operand_lc);
try emitDbgStmtForceCurrentIndex(parent_gz, operand_lc);
// This gets added to the parent block later, after the item expressions.
const switch_block = try parent_gz.makeBlockInst(.switch_block_err_union, switch_node);
@ -7704,7 +7713,7 @@ fn switchExpr(
block_scope.setBreakResultInfo(block_ri);
// Sema expects a dbg_stmt immediately before switch_block(_ref)
try emitDbgStmt(parent_gz, operand_lc);
try emitDbgStmtForceCurrentIndex(parent_gz, operand_lc);
// This gets added to the parent block later, after the item expressions.
const switch_tag: Zir.Inst.Tag = if (any_payload_is_ref) .switch_block_ref else .switch_block;
const switch_block = try parent_gz.makeBlockInst(switch_tag, switch_node);
@ -8009,7 +8018,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
try genDefers(gz, defer_outer, scope, .normal_only);
// As our last action before the return, "pop" the error trace if needed
_ = try gz.addRestoreErrRetIndex(.ret, .always);
_ = try gz.addRestoreErrRetIndex(.ret, .always, node);
_ = try gz.addUnNode(.ret_node, .void_value, node);
return Zir.Inst.Ref.unreachable_value;
@ -8038,7 +8047,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
.rl = .{ .ptr = .{ .inst = try gz.addNode(.ret_ptr, node) } },
.ctx = .@"return",
} else .{
.rl = .{ .ty = try gz.addNode(.ret_type, node) },
.rl = .{ .coerced_ty = astgen.fn_ret_ty },
.ctx = .@"return",
};
const prev_anon_name_strategy = gz.anon_name_strategy;
@ -8052,7 +8061,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
try genDefers(gz, defer_outer, scope, .normal_only);
// As our last action before the return, "pop" the error trace if needed
_ = try gz.addRestoreErrRetIndex(.ret, .always);
_ = try gz.addRestoreErrRetIndex(.ret, .always, node);
try emitDbgStmt(gz, ret_lc);
try gz.addRet(ri, operand, node);
@ -8075,7 +8084,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
// As our last action before the return, "pop" the error trace if needed
const result = if (ri.rl == .ptr) try gz.addUnNode(.load, ri.rl.ptr.inst, node) else operand;
_ = try gz.addRestoreErrRetIndex(.ret, .{ .if_non_error = result });
_ = try gz.addRestoreErrRetIndex(.ret, .{ .if_non_error = result }, node);
try gz.addRet(ri, operand, node);
return Zir.Inst.Ref.unreachable_value;
@ -8092,7 +8101,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
try genDefers(&then_scope, defer_outer, scope, .normal_only);
// As our last action before the return, "pop" the error trace if needed
_ = try then_scope.addRestoreErrRetIndex(.ret, .always);
_ = try then_scope.addRestoreErrRetIndex(.ret, .always, node);
try emitDbgStmt(&then_scope, ret_lc);
try then_scope.addRet(ri, operand, node);
@ -8674,7 +8683,7 @@ fn unionInit(
params: []const Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const union_type = try typeExpr(gz, scope, params[0]);
const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]);
const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]);
const field_type = try gz.addPlNode(.field_type_ref, node, Zir.Inst.FieldTypeRef{
.container_type = union_type,
.field_name = field_name,
@ -8987,12 +8996,12 @@ fn builtinCall(
if (ri.rl == .ref or ri.rl == .ref_coerced_ty) {
return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{
.lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]),
.field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]),
.field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]),
});
}
const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{
.lhs = try expr(gz, scope, .{ .rl = .none }, params[0]),
.field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]),
.field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]),
});
return rvalue(gz, ri, result, node);
},
@ -9119,7 +9128,7 @@ fn builtinCall(
return rvalue(gz, ri, .void_value, node);
},
.set_align_stack => {
const order = try expr(gz, scope, align_ri, params[0]);
const order = try expr(gz, scope, coerced_align_ri, params[0]);
_ = try gz.addExtendedPayload(.set_align_stack, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(node),
.operand = order,
@ -9161,32 +9170,32 @@ fn builtinCall(
.bit_size_of => return simpleUnOpType(gz, scope, ri, node, params[0], .bit_size_of),
.align_of => return simpleUnOpType(gz, scope, ri, node, params[0], .align_of),
.int_from_ptr => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_ptr),
.compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .compile_error),
.set_eval_branch_quota => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0], .set_eval_branch_quota),
.int_from_enum => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_enum),
.int_from_bool => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_bool),
.embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .embed_file),
.error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .anyerror_type } }, params[0], .error_name),
.set_runtime_safety => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_runtime_safety),
.sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt),
.sin => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sin),
.cos => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .cos),
.tan => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .tan),
.exp => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .exp),
.exp2 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .exp2),
.log => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log),
.log2 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log2),
.log10 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log10),
.abs => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .abs),
.floor => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .floor),
.ceil => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .ceil),
.trunc => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .trunc),
.round => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .round),
.tag_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .tag_name),
.type_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .type_name),
.Frame => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_type),
.frame_size => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_size),
.int_from_ptr => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_ptr),
.compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0], .compile_error),
.set_eval_branch_quota => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0], .set_eval_branch_quota),
.int_from_enum => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_enum),
.int_from_bool => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_bool),
.embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0], .embed_file),
.error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .anyerror_type } }, params[0], .error_name),
.set_runtime_safety => return simpleUnOp(gz, scope, ri, node, coerced_bool_ri, params[0], .set_runtime_safety),
.sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt),
.sin => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sin),
.cos => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .cos),
.tan => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .tan),
.exp => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .exp),
.exp2 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .exp2),
.log => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log),
.log2 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log2),
.log10 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log10),
.abs => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .abs),
.floor => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .floor),
.ceil => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .ceil),
.trunc => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .trunc),
.round => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .round),
.tag_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .tag_name),
.type_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .type_name),
.Frame => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_type),
.frame_size => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_size),
.int_from_float => return typeCast(gz, scope, ri, node, params[0], .int_from_float, builtin_name),
.float_from_int => return typeCast(gz, scope, ri, node, params[0], .float_from_int, builtin_name),
@ -9224,7 +9233,7 @@ fn builtinCall(
},
.panic => {
try emitDbgNode(gz, node);
return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .panic);
return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0], .panic);
},
.trap => {
try emitDbgNode(gz, node);
@ -9313,7 +9322,7 @@ fn builtinCall(
},
.c_define => {
if (!gz.c_import) return gz.astgen.failNode(node, "C define valid only inside C import block", .{});
const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0]);
const name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0]);
const value = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]);
const result = try gz.addExtendedPayload(.c_define, Zir.Inst.BinNode{
.node = gz.nodeIndexToRelative(node),
@ -9334,7 +9343,7 @@ fn builtinCall(
return rvalue(gz, ri, result, node);
},
.reduce => {
const op = try expr(gz, scope, .{ .rl = .{ .ty = .reduce_op_type } }, params[0]);
const op = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .reduce_op_type } }, params[0]);
const scalar = try expr(gz, scope, .{ .rl = .none }, params[1]);
const result = try gz.addPlNode(.reduce, node, Zir.Inst.Bin{
.lhs = op,
@ -9410,7 +9419,7 @@ fn builtinCall(
},
.field_parent_ptr => {
const parent_type = try typeExpr(gz, scope, params[0]);
const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]);
const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]);
const result = try gz.addPlNode(.field_parent_ptr, node, Zir.Inst.FieldParentPtr{
.parent_type = parent_type,
.field_name = field_name,
@ -9547,7 +9556,7 @@ fn hasDeclOrField(
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const container_type = try typeExpr(gz, scope, lhs_node);
const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node);
const name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, rhs_node);
const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{
.lhs = container_type,
.rhs = name,
@ -9697,7 +9706,7 @@ fn simpleCBuiltin(
) InnerError!Zir.Inst.Ref {
const name: []const u8 = if (tag == .c_undef) "C undef" else "C include";
if (!gz.c_import) return gz.astgen.failNode(node, "{s} valid only inside C import block", .{name});
const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, operand_node);
const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, operand_node);
_ = try gz.addExtendedPayload(tag, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(node),
.operand = operand,
@ -9715,7 +9724,7 @@ fn offsetOf(
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const type_inst = try typeExpr(gz, scope, lhs_node);
const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node);
const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, rhs_node);
const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{
.lhs = type_inst,
.rhs = field_name,
@ -9828,13 +9837,8 @@ fn callExpr(
astgen.advanceSourceCursor(astgen.tree.tokens.items(.start)[call.ast.lparen]);
const line = astgen.source_line - gz.decl_line;
const column = astgen.source_column;
_ = try gz.add(.{ .tag = .dbg_stmt, .data = .{
.dbg_stmt = .{
.line = line,
.column = column,
},
} });
// Sema expects a dbg_stmt immediately before call,
try emitDbgStmtForceCurrentIndex(gz, .{ line, column });
}
switch (callee) {
@ -10985,7 +10989,10 @@ fn rvalueInner(
return .void_value;
},
.inferred_ptr => |alloc| {
_ = try gz.addBin(.store_to_inferred_ptr, alloc, result);
_ = try gz.addPlNode(.store_to_inferred_ptr, src_node, Zir.Inst.Bin{
.lhs = alloc,
.rhs = result,
});
return .void_value;
},
.destructure => |destructure| {
@ -11012,7 +11019,10 @@ fn rvalueInner(
});
},
.inferred_ptr => |ptr_inst| {
_ = try gz.addBin(.store_to_inferred_ptr, ptr_inst, elem_val);
_ = try gz.addPlNode(.store_to_inferred_ptr, src_node, Zir.Inst.Bin{
.lhs = ptr_inst,
.rhs = elem_val,
});
},
.discard => unreachable,
}
@ -11834,19 +11844,20 @@ const GenZir = struct {
}
/// Assumes nothing stacked on `gz`. Unstacks `gz`.
fn setBoolBrBody(gz: *GenZir, inst: Zir.Inst.Index) !void {
fn setBoolBrBody(gz: *GenZir, bool_br: Zir.Inst.Index, bool_br_lhs: Zir.Inst.Ref) !void {
const astgen = gz.astgen;
const gpa = astgen.gpa;
const body = gz.instructionsSlice();
const body_len = astgen.countBodyLenAfterFixups(body);
try astgen.extra.ensureUnusedCapacity(
gpa,
@typeInfo(Zir.Inst.Block).Struct.fields.len + body_len,
@typeInfo(Zir.Inst.BoolBr).Struct.fields.len + body_len,
);
const zir_datas = astgen.instructions.items(.data);
zir_datas[@intFromEnum(inst)].bool_br.payload_index = astgen.addExtraAssumeCapacity(
Zir.Inst.Block{ .body_len = body_len },
);
zir_datas[@intFromEnum(bool_br)].pl_node.payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.BoolBr{
.lhs = bool_br_lhs,
.body_len = body_len,
});
astgen.appendBodyWithFixups(body);
gz.unstack();
}
@ -12231,30 +12242,6 @@ const GenZir = struct {
return new_index.toRef();
}
/// Note that this returns a `Zir.Inst.Index` not a ref.
/// Leaves the `payload_index` field undefined.
fn addBoolBr(
gz: *GenZir,
tag: Zir.Inst.Tag,
lhs: Zir.Inst.Ref,
) !Zir.Inst.Index {
assert(lhs != .none);
const gpa = gz.astgen.gpa;
try gz.instructions.ensureUnusedCapacity(gpa, 1);
try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len);
gz.astgen.instructions.appendAssumeCapacity(.{
.tag = tag,
.data = .{ .bool_br = .{
.lhs = lhs,
.payload_index = undefined,
} },
});
gz.instructions.appendAssumeCapacity(new_index);
return new_index;
}
fn addInt(gz: *GenZir, integer: u64) !Zir.Inst.Ref {
return gz.add(.{
.tag = .int,
@ -12575,17 +12562,37 @@ const GenZir = struct {
always: void,
if_non_error: Zir.Inst.Ref,
},
src_node: Ast.Node.Index,
) !Zir.Inst.Index {
return gz.addAsIndex(.{
.tag = .restore_err_ret_index,
.data = .{ .restore_err_ret_index = .{
.block = switch (bt) {
.ret => .none,
.block => |b| b.toRef(),
},
.operand = if (cond == .if_non_error) cond.if_non_error else .none,
} },
});
switch (cond) {
.always => return gz.addAsIndex(.{
.tag = .restore_err_ret_index_unconditional,
.data = .{ .un_node = .{
.operand = switch (bt) {
.ret => .none,
.block => |b| b.toRef(),
},
.src_node = gz.nodeIndexToRelative(src_node),
} },
}),
.if_non_error => |operand| switch (bt) {
.ret => return gz.addAsIndex(.{
.tag = .restore_err_ret_index_fn_entry,
.data = .{ .un_node = .{
.operand = operand,
.src_node = gz.nodeIndexToRelative(src_node),
} },
}),
.block => |block| return (try gz.addExtendedPayload(
.restore_err_ret_index,
Zir.Inst.RestoreErrRetIndex{
.src_node = gz.nodeIndexToRelative(src_node),
.block = block.toRef(),
.operand = operand,
},
)).toIndex().?,
},
}
}
fn addBreak(
@ -12911,20 +12918,20 @@ const GenZir = struct {
const astgen = gz.astgen;
const gpa = astgen.gpa;
// Node 0 is valid for the root `struct_decl` of a file!
assert(args.src_node != 0 or gz.parent.tag == .top);
const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.StructDecl).Struct.fields.len + 6);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.StructDecl).Struct.fields.len + 4);
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.StructDecl{
.fields_hash_0 = fields_hash_arr[0],
.fields_hash_1 = fields_hash_arr[1],
.fields_hash_2 = fields_hash_arr[2],
.fields_hash_3 = fields_hash_arr[3],
.src_node = gz.nodeIndexToRelative(args.src_node),
});
if (args.src_node != 0) {
const node_offset = gz.nodeIndexToRelative(args.src_node);
astgen.extra.appendAssumeCapacity(@bitCast(node_offset));
}
if (args.fields_len != 0) {
astgen.extra.appendAssumeCapacity(args.fields_len);
}
@ -12942,7 +12949,6 @@ const GenZir = struct {
.data = .{ .extended = .{
.opcode = .struct_decl,
.small = @bitCast(Zir.Inst.StructDecl.Small{
.has_src_node = args.src_node != 0,
.has_fields_len = args.fields_len != 0,
.has_decls_len = args.decls_len != 0,
.has_backing_int = args.backing_int_ref != .none,
@ -12974,20 +12980,19 @@ const GenZir = struct {
const astgen = gz.astgen;
const gpa = astgen.gpa;
assert(args.src_node != 0);
const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len + 5);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len + 4);
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.UnionDecl{
.fields_hash_0 = fields_hash_arr[0],
.fields_hash_1 = fields_hash_arr[1],
.fields_hash_2 = fields_hash_arr[2],
.fields_hash_3 = fields_hash_arr[3],
.src_node = gz.nodeIndexToRelative(args.src_node),
});
if (args.src_node != 0) {
const node_offset = gz.nodeIndexToRelative(args.src_node);
astgen.extra.appendAssumeCapacity(@bitCast(node_offset));
}
if (args.tag_type != .none) {
astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type));
}
@ -13005,7 +13010,6 @@ const GenZir = struct {
.data = .{ .extended = .{
.opcode = .union_decl,
.small = @bitCast(Zir.Inst.UnionDecl.Small{
.has_src_node = args.src_node != 0,
.has_tag_type = args.tag_type != .none,
.has_body_len = args.body_len != 0,
.has_fields_len = args.fields_len != 0,
@ -13032,20 +13036,19 @@ const GenZir = struct {
const astgen = gz.astgen;
const gpa = astgen.gpa;
assert(args.src_node != 0);
const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len + 5);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len + 4);
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.EnumDecl{
.fields_hash_0 = fields_hash_arr[0],
.fields_hash_1 = fields_hash_arr[1],
.fields_hash_2 = fields_hash_arr[2],
.fields_hash_3 = fields_hash_arr[3],
.src_node = gz.nodeIndexToRelative(args.src_node),
});
if (args.src_node != 0) {
const node_offset = gz.nodeIndexToRelative(args.src_node);
astgen.extra.appendAssumeCapacity(@bitCast(node_offset));
}
if (args.tag_type != .none) {
astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type));
}
@ -13063,7 +13066,6 @@ const GenZir = struct {
.data = .{ .extended = .{
.opcode = .enum_decl,
.small = @bitCast(Zir.Inst.EnumDecl.Small{
.has_src_node = args.src_node != 0,
.has_tag_type = args.tag_type != .none,
.has_body_len = args.body_len != 0,
.has_fields_len = args.fields_len != 0,
@ -13083,13 +13085,13 @@ const GenZir = struct {
const astgen = gz.astgen;
const gpa = astgen.gpa;
try astgen.extra.ensureUnusedCapacity(gpa, 2);
const payload_index: u32 = @intCast(astgen.extra.items.len);
assert(args.src_node != 0);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.OpaqueDecl).Struct.fields.len + 1);
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.OpaqueDecl{
.src_node = gz.nodeIndexToRelative(args.src_node),
});
if (args.src_node != 0) {
const node_offset = gz.nodeIndexToRelative(args.src_node);
astgen.extra.appendAssumeCapacity(@bitCast(node_offset));
}
if (args.decls_len != 0) {
astgen.extra.appendAssumeCapacity(args.decls_len);
}
@ -13098,7 +13100,6 @@ const GenZir = struct {
.data = .{ .extended = .{
.opcode = .opaque_decl,
.small = @bitCast(Zir.Inst.OpaqueDecl.Small{
.has_src_node = args.src_node != 0,
.has_decls_len = args.decls_len != 0,
.name_strategy = gz.anon_name_strategy,
}),
@ -13136,7 +13137,7 @@ const GenZir = struct {
/// Emit the ZIR instruction implementing a `return` of `operand` under the
/// given result location: a `ret_load` when returning through a result
/// pointer, otherwise a plain `ret_node`.
fn addRet(gz: *GenZir, ri: ResultInfo, operand: Zir.Inst.Ref, node: Ast.Node.Index) !void {
    switch (ri.rl) {
        .ptr => |ptr_res| _ = try gz.addUnNode(.ret_load, ptr_res.inst, node),
        .ty, .coerced_ty => _ = try gz.addUnNode(.ret_node, operand, node),
        else => unreachable,
    }
}
@ -13517,6 +13518,44 @@ fn countBodyLenAfterFixups(astgen: *AstGen, body: []const Zir.Inst.Index) u32 {
/// Record a debug statement (line/column) in the current block. No-op at
/// comptime. If the most recently emitted instruction is already a
/// `dbg_stmt`, its location is overwritten in place instead of emitting a
/// redundant instruction.
fn emitDbgStmt(gz: *GenZir, lc: LineColumn) !void {
    if (gz.is_comptime) return;

    const insts = gz.instructions.items;
    if (insts.len > 0) {
        const astgen = gz.astgen;
        const prev = insts[insts.len - 1];
        if (astgen.instructions.items(.tag)[@intFromEnum(prev)] == .dbg_stmt) {
            // Reuse the trailing dbg_stmt; only the location changes.
            astgen.instructions.items(.data)[@intFromEnum(prev)].dbg_stmt = .{
                .line = lc[0],
                .column = lc[1],
            };
            return;
        }
    }

    _ = try gz.add(.{ .tag = .dbg_stmt, .data = .{
        .dbg_stmt = .{
            .line = lc[0],
            .column = lc[1],
        },
    } });
}
/// In some cases, Sema expects us to generate a `dbg_stmt` at the instruction
/// *index* directly preceding the next instruction (e.g. if a call is %10, it
/// expects a dbg_stmt at %9). TODO: this logic may allow redundant dbg_stmt
/// instructions; fix up Sema so we don't need it!
fn emitDbgStmtForceCurrentIndex(gz: *GenZir, lc: LineColumn) !void {
const astgen = gz.astgen;
if (gz.instructions.items.len > 0 and
@intFromEnum(gz.instructions.items[gz.instructions.items.len - 1]) == astgen.instructions.len - 1)
{
const last = astgen.instructions.len - 1;
if (astgen.instructions.items(.tag)[last] == .dbg_stmt) {
astgen.instructions.items(.data)[last].dbg_stmt = .{
.line = lc[0],
.column = lc[1],
};
return;
}
}
_ = try gz.add(.{ .tag = .dbg_stmt, .data = .{
.dbg_stmt = .{

View File

@ -1799,7 +1799,8 @@ fn walkInstruction(
};
},
.bool_br_and, .bool_br_or => {
const bool_br = data[@intFromEnum(inst)].bool_br;
const pl_node = data[@intFromEnum(inst)].pl_node;
const extra = file.zir.extraData(Zir.Inst.BoolBr, pl_node.payload_index);
const bin_index = self.exprs.items.len;
try self.exprs.append(self.arena, .{ .binOp = .{ .lhs = 0, .rhs = 0 } });
@ -1808,14 +1809,13 @@ fn walkInstruction(
file,
parent_scope,
parent_src,
bool_br.lhs,
extra.data.lhs,
false,
call_ctx,
);
const lhs_index = self.exprs.items.len;
try self.exprs.append(self.arena, lhs.expr);
const extra = file.zir.extraData(Zir.Inst.Block, bool_br.payload_index);
const rhs = try self.walkInstruction(
file,
parent_scope,
@ -3395,19 +3395,10 @@ fn walkInstruction(
.enclosing_type = type_slot_index,
};
const small = @as(Zir.Inst.OpaqueDecl.Small, @bitCast(extended.small));
var extra_index: usize = extended.operand;
const extra = file.zir.extraData(Zir.Inst.OpaqueDecl, extended.operand);
var extra_index: usize = extra.end;
const src_node: ?i32 = if (small.has_src_node) blk: {
const src_node = @as(i32, @bitCast(file.zir.extra[extra_index]));
extra_index += 1;
break :blk src_node;
} else null;
const src_info = if (src_node) |sn|
try self.srcLocInfo(file, sn, parent_src)
else
parent_src;
const src_info = try self.srcLocInfo(file, extra.data.src_node, parent_src);
var decl_indexes: std.ArrayListUnmanaged(usize) = .{};
var priv_decl_indexes: std.ArrayListUnmanaged(usize) = .{};
@ -3498,18 +3489,10 @@ fn walkInstruction(
};
const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small));
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len;
const extra = file.zir.extraData(Zir.Inst.UnionDecl, extended.operand);
var extra_index: usize = extra.end;
const src_node: ?i32 = if (small.has_src_node) blk: {
const src_node = @as(i32, @bitCast(file.zir.extra[extra_index]));
extra_index += 1;
break :blk src_node;
} else null;
const src_info = if (src_node) |sn|
try self.srcLocInfo(file, sn, parent_src)
else
parent_src;
const src_info = try self.srcLocInfo(file, extra.data.src_node, parent_src);
// We delay analysis because union tags can refer to
// decls defined inside the union itself.
@ -3628,18 +3611,10 @@ fn walkInstruction(
};
const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small));
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len;
const extra = file.zir.extraData(Zir.Inst.EnumDecl, extended.operand);
var extra_index: usize = extra.end;
const src_node: ?i32 = if (small.has_src_node) blk: {
const src_node = @as(i32, @bitCast(file.zir.extra[extra_index]));
extra_index += 1;
break :blk src_node;
} else null;
const src_info = if (src_node) |sn|
try self.srcLocInfo(file, sn, parent_src)
else
parent_src;
const src_info = try self.srcLocInfo(file, extra.data.src_node, parent_src);
const tag_type: ?DocData.Expr = if (small.has_tag_type) blk: {
const tag_type = file.zir.extra[extra_index];
@ -3779,18 +3754,10 @@ fn walkInstruction(
};
const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
const extra = file.zir.extraData(Zir.Inst.StructDecl, extended.operand);
var extra_index: usize = extra.end;
const src_node: ?i32 = if (small.has_src_node) blk: {
const src_node = @as(i32, @bitCast(file.zir.extra[extra_index]));
extra_index += 1;
break :blk src_node;
} else null;
const src_info = if (src_node) |sn|
try self.srcLocInfo(file, sn, parent_src)
else
parent_src;
const src_info = try self.srcLocInfo(file, extra.data.src_node, parent_src);
const fields_len = if (small.has_fields_len) blk: {
const fields_len = file.zir.extra[extra_index];

View File

@ -1867,6 +1867,16 @@ pub const SrcLoc = struct {
else => return nodeToSpan(tree, node),
}
},
.node_offset_return_operand => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
const node = src_loc.declRelativeToNodeIndex(node_off);
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
if (node_tags[node] == .@"return" and node_datas[node].lhs != 0) {
return nodeToSpan(tree, node_datas[node].lhs);
}
return nodeToSpan(tree, node);
},
}
}
@ -2221,6 +2231,10 @@ pub const LazySrcLoc = union(enum) {
/// The source location points to the RHS of an assignment.
/// The Decl is determined contextually.
node_offset_store_operand: i32,
/// The source location points to the operand of a `return` statement, or
/// the `return` itself if there is no explicit operand.
/// The Decl is determined contextually.
node_offset_return_operand: i32,
/// The source location points to a for loop input.
/// The Decl is determined contextually.
for_input: struct {
@ -2347,6 +2361,7 @@ pub const LazySrcLoc = union(enum) {
.node_offset_init_ty,
.node_offset_store_ptr,
.node_offset_store_operand,
.node_offset_return_operand,
.for_input,
.for_capture_from_input,
.array_cat_lhs,

View File

@ -50,11 +50,6 @@ branch_count: u32 = 0,
/// Populated when returning `error.ComptimeBreak`. Used to communicate the
/// break instruction up the stack to find the corresponding Block.
comptime_break_inst: Zir.Inst.Index = undefined,
/// This field is updated when a new source location becomes active, so that
/// instructions which do not have explicitly mapped source locations still have
/// access to the source location set by the previous instruction which did
/// contain a mapped source location.
src: LazySrcLoc = .{ .token_offset = 0 },
decl_val_table: std.AutoHashMapUnmanaged(InternPool.DeclIndex, Air.Inst.Ref) = .{},
/// When doing a generic function instantiation, this array collects a value
/// for each parameter of the generic owner. `none` for non-comptime parameters.
@ -1006,10 +1001,10 @@ fn analyzeBodyInner(
const air_inst: Air.Inst.Ref = switch (tags[@intFromEnum(inst)]) {
// zig fmt: off
.alloc => try sema.zirAlloc(block, inst),
.alloc_inferred => try sema.zirAllocInferred(block, inst, true),
.alloc_inferred_mut => try sema.zirAllocInferred(block, inst, false),
.alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, true),
.alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, false),
.alloc_inferred => try sema.zirAllocInferred(block, true),
.alloc_inferred_mut => try sema.zirAllocInferred(block, false),
.alloc_inferred_comptime => try sema.zirAllocInferredComptime(true),
.alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(false),
.alloc_mut => try sema.zirAllocMut(block, inst),
.alloc_comptime_mut => try sema.zirAllocComptime(block, inst),
.make_ptr_const => try sema.zirMakePtrConst(block, inst),
@ -1308,6 +1303,11 @@ fn analyzeBodyInner(
i += 1;
continue;
},
.restore_err_ret_index => {
try sema.zirRestoreErrRetIndex(block, extended);
i += 1;
continue;
},
.value_placeholder => unreachable, // never appears in a body
};
},
@ -1369,11 +1369,6 @@ fn analyzeBodyInner(
i += 1;
continue;
},
.store => {
try sema.zirStore(block, inst);
i += 1;
continue;
},
.store_node => {
try sema.zirStoreNode(block, inst);
i += 1;
@ -1518,8 +1513,15 @@ fn analyzeBodyInner(
i += 1;
continue;
},
.restore_err_ret_index => {
try sema.zirRestoreErrRetIndex(block, inst);
.restore_err_ret_index_unconditional => {
const un_node = datas[@intFromEnum(inst)].un_node;
try sema.restoreErrRetIndex(block, un_node.src(), un_node.operand, .none);
i += 1;
continue;
},
.restore_err_ret_index_fn_entry => {
const un_node = datas[@intFromEnum(inst)].un_node;
try sema.restoreErrRetIndex(block, un_node.src(), .none, un_node.operand);
i += 1;
continue;
},
@ -2723,7 +2725,6 @@ pub fn getStructType(
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
extra_index += @intFromBool(small.has_src_node);
const fields_len = if (small.has_fields_len) blk: {
const fields_len = sema.code.extra[extra_index];
extra_index += 1;
@ -2776,10 +2777,7 @@ fn zirStructDecl(
const mod = sema.mod;
const ip = &mod.intern_pool;
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
const src: LazySrcLoc = if (small.has_src_node) blk: {
const node_offset: i32 = @bitCast(sema.code.extra[extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len]);
break :blk LazySrcLoc.nodeOffset(node_offset);
} else sema.src;
const src = sema.code.extraData(Zir.Inst.StructDecl, extended.operand).data.src();
// Because these three things each reference each other, `undefined`
// placeholders are used before being set after the struct type gains an
@ -2939,13 +2937,10 @@ fn zirEnumDecl(
const mod = sema.mod;
const gpa = sema.gpa;
const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small);
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len;
const extra = sema.code.extraData(Zir.Inst.EnumDecl, extended.operand);
var extra_index: usize = extra.end;
const src: LazySrcLoc = if (small.has_src_node) blk: {
const node_offset: i32 = @bitCast(sema.code.extra[extra_index]);
extra_index += 1;
break :blk LazySrcLoc.nodeOffset(node_offset);
} else sema.src;
const src = extra.data.src();
const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x };
const tag_type_ref = if (small.has_tag_type) blk: {
@ -3212,13 +3207,10 @@ fn zirUnionDecl(
const mod = sema.mod;
const gpa = sema.gpa;
const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len;
const extra = sema.code.extraData(Zir.Inst.UnionDecl, extended.operand);
var extra_index: usize = extra.end;
const src: LazySrcLoc = if (small.has_src_node) blk: {
const node_offset: i32 = @bitCast(sema.code.extra[extra_index]);
extra_index += 1;
break :blk LazySrcLoc.nodeOffset(node_offset);
} else sema.src;
const src = extra.data.src();
extra_index += @intFromBool(small.has_tag_type);
extra_index += @intFromBool(small.has_body_len);
@ -3321,13 +3313,10 @@ fn zirOpaqueDecl(
const mod = sema.mod;
const small: Zir.Inst.OpaqueDecl.Small = @bitCast(extended.small);
var extra_index: usize = extended.operand;
const extra = sema.code.extraData(Zir.Inst.OpaqueDecl, extended.operand);
var extra_index: usize = extra.end;
const src: LazySrcLoc = if (small.has_src_node) blk: {
const node_offset: i32 = @bitCast(sema.code.extra[extra_index]);
extra_index += 1;
break :blk LazySrcLoc.nodeOffset(node_offset);
} else sema.src;
const src = extra.data.src();
const decls_len = if (small.has_decls_len) blk: {
const decls_len = sema.code.extra[extra_index];
@ -3977,13 +3966,9 @@ fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Ai
fn zirAllocInferredComptime(
sema: *Sema,
inst: Zir.Inst.Index,
is_const: bool,
) CompileError!Air.Inst.Ref {
const gpa = sema.gpa;
const src_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].node;
const src = LazySrcLoc.nodeOffset(src_node);
sema.src = src;
try sema.air_instructions.append(gpa, .{
.tag = .inferred_alloc_comptime,
@ -4042,16 +4027,12 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
fn zirAllocInferred(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
is_const: bool,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const gpa = sema.gpa;
const src_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].node;
const src = LazySrcLoc.nodeOffset(src_node);
sema.src = src;
if (block.is_comptime) {
try sema.air_instructions.append(gpa, .{
@ -5428,10 +5409,11 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
const tracy = trace(@src());
defer tracy.end();
const src: LazySrcLoc = sema.src;
const bin_inst = sema.code.instructions.items(.data)[@intFromEnum(inst)].bin;
const ptr = try sema.resolveInst(bin_inst.lhs);
const operand = try sema.resolveInst(bin_inst.rhs);
const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = pl_node.src();
const bin = sema.code.extraData(Zir.Inst.Bin, pl_node.payload_index).data;
const ptr = try sema.resolveInst(bin.lhs);
const operand = try sema.resolveInst(bin.rhs);
const ptr_inst = ptr.toIndex().?;
const air_datas = sema.air_instructions.items(.data);
@ -5496,16 +5478,6 @@ fn zirSetEvalBranchQuota(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
sema.branch_quota = @max(sema.branch_quota, quota);
}
/// Analyze an untyped `store` ZIR instruction: resolve the destination
/// pointer and the value from the `bin` payload, then delegate to
/// `storePtr` using the currently active source location.
fn zirStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const bin = sema.code.instructions.items(.data)[@intFromEnum(inst)].bin;
    const dest_ptr = try sema.resolveInst(bin.lhs);
    const stored_value = try sema.resolveInst(bin.rhs);
    return sema.storePtr(block, sema.src, dest_ptr, stored_value);
}
fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
@ -5699,17 +5671,20 @@ fn zirPanic(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.I
const src = inst_data.src();
const msg_inst = try sema.resolveInst(inst_data.operand);
// `panicWithMsg` would perform this coercion for us, but we can get a better
// source location if we do it here.
const coerced_msg = try sema.coerce(block, Type.slice_const_u8, msg_inst, .{ .node_offset_builtin_call_arg0 = inst_data.src_node });
if (block.is_comptime) {
return sema.fail(block, src, "encountered @panic at comptime", .{});
}
try sema.panicWithMsg(block, src, msg_inst, .@"@panic");
try sema.panicWithMsg(block, src, coerced_msg, .@"@panic");
return always_noreturn;
}
fn zirTrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
const src_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].node;
const src = LazySrcLoc.nodeOffset(src_node);
sema.src = src;
if (block.is_comptime)
return sema.fail(block, src, "encountered @trap at comptime", .{});
_ = try block.addNoOp(.trap);
@ -6384,10 +6359,6 @@ fn zirBreak(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) CompileError
}
fn zirDbgStmt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
// We do not set sema.src here because dbg_stmt instructions are only emitted for
// ZIR code that possibly will need to generate runtime code. So error messages
// and other source locations must not rely on sema.src being set from dbg_stmt
// instructions.
if (block.is_comptime or block.ownerModule().strip) return;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
@ -6632,7 +6603,6 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl {
pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const src = sema.src;
if (block.is_comptime or block.is_typeof) {
const index_val = try mod.intValue_u64(Type.usize, sema.comptime_err_ret_trace.items.len);
@ -6650,9 +6620,10 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref
else => |e| return e,
};
const field_name = try mod.intern_pool.getOrPutString(gpa, "index");
const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, src) catch |err| switch (err) {
error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
else => |e| return e,
const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, .unneeded) catch |err| switch (err) {
error.AnalysisFail, error.NeededSourceLocation => @panic("std.builtin.StackTrace is corrupt"),
error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
error.OutOfMemory => |e| return e,
};
return try block.addInst(.{
@ -9900,7 +9871,6 @@ fn zirAsNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.As, inst_data.payload_index).data;
sema.src = src;
return sema.analyzeAs(block, src, extra.dest_type, extra.operand, false);
}
@ -10508,7 +10478,8 @@ fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
const uncoerced_elem_index = try sema.resolveInst(extra.rhs);
const elem_index = try sema.coerce(block, Type.usize, uncoerced_elem_index, elem_index_src);
return sema.elemVal(block, src, array, elem_index, elem_index_src, true);
}
@ -10560,7 +10531,8 @@ fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
const uncoerced_elem_index = try sema.resolveInst(extra.rhs);
const elem_index = try sema.coerce(block, Type.usize, uncoerced_elem_index, elem_index_src);
return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src, false, true);
}
@ -10855,7 +10827,7 @@ const SwitchProngAnalysis = struct {
.address_space = operand_ptr_ty.ptrAddressSpace(mod),
},
});
if (try sema.resolveDefinedValue(block, sema.src, spa.operand_ptr)) |union_ptr| {
if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |union_ptr| {
return Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = ptr_field_ty.toIntern(),
.addr = .{ .field = .{
@ -10866,7 +10838,7 @@ const SwitchProngAnalysis = struct {
}
return block.addStructFieldPtr(spa.operand_ptr, field_index, ptr_field_ty);
} else {
if (try sema.resolveDefinedValue(block, sema.src, spa.operand)) |union_val| {
if (try sema.resolveDefinedValue(block, operand_src, spa.operand)) |union_val| {
const tag_and_val = ip.indexToKey(union_val.toIntern()).un;
return Air.internedToRef(tag_and_val.val);
}
@ -13191,6 +13163,7 @@ fn validateErrSetSwitch(
// else => |e| return e,
// even if all the possible errors were already handled.
const tags = sema.code.instructions.items(.tag);
const datas = sema.code.instructions.items(.data);
for (else_case.body) |else_inst| switch (tags[@intFromEnum(else_inst)]) {
.dbg_block_begin,
.dbg_block_end,
@ -13205,11 +13178,16 @@ fn validateErrSetSwitch(
.err_union_code,
.ret_err_value_code,
.save_err_ret_index,
.restore_err_ret_index,
.restore_err_ret_index_unconditional,
.restore_err_ret_index_fn_entry,
.is_non_err,
.ret_is_non_err,
.condbr,
=> {},
.extended => switch (datas[@intFromEnum(else_inst)].extended.opcode) {
.restore_err_ret_index => {},
else => break,
},
else => break,
} else break :else_validation;
@ -13707,7 +13685,6 @@ fn zirShl(
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = inst_data.src();
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@ -13878,7 +13855,6 @@ fn zirShr(
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = inst_data.src();
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@ -14014,7 +13990,6 @@ fn zirBitwise(
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@ -14795,21 +14770,20 @@ fn zirArithmetic(
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
sema.src = .{ .node_offset_bin_op = inst_data.src_node };
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, sema.src, lhs_src, rhs_src, safety);
return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, src, lhs_src, rhs_src, safety);
}
fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@ -14975,7 +14949,6 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@ -15141,7 +15114,6 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@ -15252,7 +15224,6 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@ -15494,7 +15465,6 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@ -15679,7 +15649,6 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@ -15775,7 +15744,6 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@ -18741,14 +18709,19 @@ fn zirBoolBr(
defer tracy.end();
const mod = sema.mod;
const datas = sema.code.instructions.items(.data);
const inst_data = datas[@intFromEnum(inst)].bool_br;
const lhs = try sema.resolveInst(inst_data.lhs);
const lhs_src = sema.src;
const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
const body = sema.code.bodySlice(extra.end, extra.data.body_len);
const gpa = sema.gpa;
const datas = sema.code.instructions.items(.data);
const inst_data = datas[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.BoolBr, inst_data.payload_index);
const uncoerced_lhs = try sema.resolveInst(extra.data.lhs);
const body = sema.code.bodySlice(extra.end, extra.data.body_len);
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const lhs = try sema.coerce(parent_block, Type.bool, uncoerced_lhs, lhs_src);
if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| {
if (is_bool_or and lhs_val.toBool()) {
return .bool_true;
@ -18758,7 +18731,11 @@ fn zirBoolBr(
// comptime-known left-hand side. No need for a block here; the result
// is simply the rhs expression. Here we rely on there only being 1
// break instruction (`break_inline`).
return sema.resolveBody(parent_block, body, inst);
const rhs_result = try sema.resolveBody(parent_block, body, inst);
if (sema.typeOf(rhs_result).isNoReturn(mod)) {
return rhs_result;
}
return sema.coerce(parent_block, Type.bool, rhs_result, rhs_src);
}
const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
@ -18789,13 +18766,16 @@ fn zirBoolBr(
_ = try lhs_block.addBr(block_inst, lhs_result);
const rhs_result = try sema.resolveBody(rhs_block, body, inst);
if (!sema.typeOf(rhs_result).isNoReturn(mod)) {
_ = try rhs_block.addBr(block_inst, rhs_result);
}
const rhs_noret = sema.typeOf(rhs_result).isNoReturn(mod);
const coerced_rhs_result = if (!rhs_noret) rhs: {
const coerced_result = try sema.coerce(rhs_block, Type.bool, rhs_result, rhs_src);
_ = try rhs_block.addBr(block_inst, coerced_result);
break :rhs coerced_result;
} else rhs_result;
const result = sema.finishCondBr(parent_block, &child_block, &then_block, &else_block, lhs, block_inst);
if (!sema.typeOf(rhs_result).isNoReturn(mod)) {
if (try sema.resolveDefinedValue(rhs_block, sema.src, rhs_result)) |rhs_val| {
if (!rhs_noret) {
if (try sema.resolveDefinedValue(rhs_block, rhs_src, coerced_rhs_result)) |rhs_val| {
if (is_bool_or and rhs_val.toBool()) {
return .bool_true;
} else if (!is_bool_or and !rhs_val.toBool()) {
@ -19206,7 +19186,7 @@ fn zirRetErrValue(
.ty = error_set_type.toIntern(),
.name = err_name,
} })));
return sema.analyzeRet(block, result_inst, src);
return sema.analyzeRet(block, result_inst, src, src);
}
fn zirRetImplicit(
@ -19256,7 +19236,7 @@ fn zirRetImplicit(
return sema.failWithOwnedErrorMsg(block, msg);
}
return sema.analyzeRet(block, operand, r_brace_src);
return sema.analyzeRet(block, operand, r_brace_src, r_brace_src);
}
fn zirRetNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
@ -19267,7 +19247,7 @@ fn zirRetNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir
const operand = try sema.resolveInst(inst_data.operand);
const src = inst_data.src();
return sema.analyzeRet(block, operand, src);
return sema.analyzeRet(block, operand, src, .{ .node_offset_return_operand = inst_data.src_node });
}
fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
@ -19280,7 +19260,7 @@ fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir
if (block.is_comptime or block.inlining != null or sema.func_is_naked) {
const operand = try sema.analyzeLoad(block, src, ret_ptr, src);
return sema.analyzeRet(block, operand, src);
return sema.analyzeRet(block, operand, src, .{ .node_offset_return_operand = inst_data.src_node });
}
if (sema.wantErrorReturnTracing(sema.fn_ret_ty)) {
@ -19375,17 +19355,21 @@ fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
block.error_return_trace_index = try sema.analyzeSaveErrRetIndex(block);
}
fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) CompileError!void {
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].restore_err_ret_index;
const src = sema.src; // TODO
const mod = sema.mod;
const ip = &mod.intern_pool;
fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
const extra = sema.code.extraData(Zir.Inst.RestoreErrRetIndex, extended.operand).data;
return sema.restoreErrRetIndex(start_block, extra.src(), extra.block, extra.operand);
}
/// If `operand` is non-error (or is `none`), restores the error return trace to
/// its state at the point `block` was reached (or, if `block` is `none`, the
/// point this function began execution).
fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_block: Zir.Inst.Ref, operand_zir: Zir.Inst.Ref) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
const saved_index = if (inst_data.block.toIndexAllowNone()) |zir_block| b: {
const mod = sema.mod;
const saved_index = if (target_block.toIndexAllowNone()) |zir_block| b: {
var block = start_block;
while (true) {
if (block.label) |label| {
@ -19409,7 +19393,7 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index)
return; // No need to restore
};
const operand = try sema.resolveInstAllowNone(inst_data.operand);
const operand = try sema.resolveInstAllowNone(operand_zir);
if (start_block.is_comptime or start_block.is_typeof) {
const is_non_error = if (operand != .none) blk: {
@ -19427,7 +19411,7 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index)
return;
}
if (!ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn) return;
if (!mod.intern_pool.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn) return;
if (!start_block.ownerModule().error_tracing) return;
assert(saved_index != .none); // The .error_return_trace_index field was dropped somewhere
@ -19470,6 +19454,7 @@ fn analyzeRet(
block: *Block,
uncasted_operand: Air.Inst.Ref,
src: LazySrcLoc,
operand_src: LazySrcLoc,
) CompileError!Zir.Inst.Index {
// Special case for returning an error to an inferred error set; we need to
// add the error tag to the inferred error set of the in-scope function, so
@ -19478,14 +19463,14 @@ fn analyzeRet(
if (sema.fn_ret_ty_ies != null and sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) {
try sema.addToInferredErrorSet(uncasted_operand);
}
const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, src, .{ .is_ret = true }) catch |err| switch (err) {
const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, operand_src, .{ .is_ret = true }) catch |err| switch (err) {
error.NotCoercible => unreachable,
else => |e| return e,
};
if (block.inlining) |inlining| {
if (block.is_comptime) {
const ret_val = try sema.resolveConstValue(block, src, operand, .{
const ret_val = try sema.resolveConstValue(block, operand_src, operand, .{
.needed_comptime_reason = "value being returned at comptime must be comptime-known",
});
inlining.comptime_result = operand;
@ -19520,7 +19505,7 @@ fn analyzeRet(
if (sema.wantErrorReturnTracing(sema.fn_ret_ty)) {
// Avoid adding a frame to the error return trace in case the value is comptime-known
// to be not an error.
const is_non_err = try sema.analyzeIsNonErr(block, src, operand);
const is_non_err = try sema.analyzeIsNonErr(block, operand_src, operand);
return sema.retWithErrTracing(block, src, is_non_err, air_tag, operand);
}
@ -20768,8 +20753,9 @@ fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const operand = try sema.resolveInst(inst_data.operand);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const uncoerced_operand = try sema.resolveInst(inst_data.operand);
const operand = try sema.coerce(block, Type.anyerror, uncoerced_operand, operand_src);
if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
const err_name = sema.mod.intern_pool.indexToKey(val.toIntern()).err.name;
@ -23161,7 +23147,6 @@ fn zirOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u64 {
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@ -25394,15 +25379,21 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
} else if (extra.data.bits.has_align_ref) blk: {
const align_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
const align_tv = sema.resolveInstConst(block, align_src, align_ref, .{
.needed_comptime_reason = "alignment must be comptime-known",
}) catch |err| switch (err) {
error.GenericPoison => {
break :blk null;
},
const uncoerced_align = sema.resolveInst(align_ref) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
const alignment = try sema.validateAlignAllowZero(block, align_src, try align_tv.val.toUnsignedIntAdvanced(sema));
const coerced_align = sema.coerce(block, Type.u29, uncoerced_align, align_src) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
const align_val = sema.resolveConstDefinedValue(block, align_src, coerced_align, .{
.needed_comptime_reason = "alignment must be comptime-known",
}) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntAdvanced(sema));
const default = target_util.defaultFunctionAlignment(target);
break :blk if (alignment == default) .none else alignment;
} else .none;
@ -25413,7 +25404,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const body = sema.code.bodySlice(extra_index, body_len);
extra_index += body.len;
const addrspace_ty = try sema.getBuiltinType("AddressSpace");
const addrspace_ty = Type.fromInterned(.address_space_type);
const val = try sema.resolveGenericBody(block, addrspace_src, body, inst, addrspace_ty, .{
.needed_comptime_reason = "addrspace must be comptime-known",
});
@ -25424,15 +25415,22 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
} else if (extra.data.bits.has_addrspace_ref) blk: {
const addrspace_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
const addrspace_tv = sema.resolveInstConst(block, addrspace_src, addrspace_ref, .{
.needed_comptime_reason = "addrspace must be comptime-known",
}) catch |err| switch (err) {
error.GenericPoison => {
break :blk null;
},
const addrspace_ty = Type.fromInterned(.address_space_type);
const uncoerced_addrspace = sema.resolveInst(addrspace_ref) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val);
const coerced_addrspace = sema.coerce(block, addrspace_ty, uncoerced_addrspace, addrspace_src) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
const addrspace_val = sema.resolveConstDefinedValue(block, addrspace_src, coerced_addrspace, .{
.needed_comptime_reason = "addrspace must be comptime-known",
}) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_val);
} else target_util.defaultAddressSpace(target, .function);
const section: Section = if (extra.data.bits.has_section_body) blk: {
@ -25480,15 +25478,22 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
} else if (extra.data.bits.has_cc_ref) blk: {
const cc_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
const cc_tv = sema.resolveInstConst(block, cc_src, cc_ref, .{
.needed_comptime_reason = "calling convention must be comptime-known",
}) catch |err| switch (err) {
error.GenericPoison => {
break :blk null;
},
const cc_ty = Type.fromInterned(.calling_convention_type);
const uncoerced_cc = sema.resolveInst(cc_ref) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
break :blk mod.toEnum(std.builtin.CallingConvention, cc_tv.val);
const coerced_cc = sema.coerce(block, cc_ty, uncoerced_cc, cc_src) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
const cc_val = sema.resolveConstDefinedValue(block, cc_src, coerced_cc, .{
.needed_comptime_reason = "calling convention must be comptime-known",
}) catch |err| switch (err) {
error.GenericPoison => break :blk null,
else => |e| return e,
};
break :blk mod.toEnum(std.builtin.CallingConvention, cc_val);
} else if (sema.owner_decl.is_exported and has_body)
.C
else
@ -26416,12 +26421,16 @@ fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !InternP
try sema.prepareSimplePanic(block);
const panic_messages_ty = try sema.getBuiltinType("panic_messages");
const msg_decl_index = (try sema.namespaceLookup(
const msg_decl_index = (sema.namespaceLookup(
block,
sema.src,
.unneeded,
panic_messages_ty.getNamespaceIndex(mod).unwrap().?,
try mod.intern_pool.getOrPutString(gpa, @tagName(panic_id)),
)).?;
) catch |err| switch (err) {
error.AnalysisFail, error.NeededSourceLocation => @panic("std.builtin.panic_messages is corrupt"),
error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
error.OutOfMemory => |e| return e,
}).?;
try sema.ensureDeclAnalyzed(msg_decl_index);
mod.panic_messages[@intFromEnum(panic_id)] = msg_decl_index.toOptional();
return msg_decl_index;
@ -35645,7 +35654,6 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) Comp
if (small.has_backing_int) {
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
extra_index += @intFromBool(small.has_src_node);
extra_index += @intFromBool(small.has_fields_len);
extra_index += @intFromBool(small.has_decls_len);
@ -36357,8 +36365,6 @@ fn structZirInfo(zir: Zir, zir_index: Zir.Inst.Index) struct {
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
extra_index += @intFromBool(small.has_src_node);
const fields_len = if (small.has_fields_len) blk: {
const fields_len = zir.extra[extra_index];
extra_index += 1;
@ -36826,7 +36832,6 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len;
const src = LazySrcLoc.nodeOffset(0);
extra_index += @intFromBool(small.has_src_node);
const tag_type_ref: Zir.Inst.Ref = if (small.has_tag_type) blk: {
const ty_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]);

View File

@ -303,11 +303,11 @@ pub const Inst = struct {
bool_not,
/// Short-circuiting boolean `and`. `lhs` is a boolean `Ref` and the other operand
/// is a block, which is evaluated if `lhs` is `true`.
/// Uses the `bool_br` union field.
/// Uses the `pl_node` union field. Payload is `BoolBr`.
bool_br_and,
/// Short-circuiting boolean `or`. `lhs` is a boolean `Ref` and the other operand
/// is a block, which is evaluated if `lhs` is `false`.
/// Uses the `bool_br` union field.
/// Uses the `pl_node` union field. Payload is `BoolBr`.
bool_br_or,
/// Return a value from a block.
/// Uses the `break` union field.
@ -592,16 +592,12 @@ pub const Inst = struct {
/// Returns a pointer to the subslice.
/// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceLength`.
slice_length,
/// Write a value to a pointer. For loading, see `load`.
/// Source location is assumed to be same as previous instruction.
/// Uses the `bin` union field.
store,
/// Same as `store` except provides a source location.
/// Uses the `pl_node` union field. Payload is `Bin`.
store_node,
/// Same as `store` but the type of the value being stored will be used to infer
/// the pointer type.
/// Uses the `bin` union field.
/// Same as `store_node` but the type of the value being stored will be
/// used to infer the pointer type of an `alloc_inferred`.
/// Uses the `pl_node` union field. Payload is `Bin`.
store_to_inferred_ptr,
/// String Literal. Makes an anonymous Decl and then takes a pointer to it.
/// Uses the `str` union field.
@ -1036,10 +1032,18 @@ pub const Inst = struct {
/// block, if the operand is .none or of an error/error-union type.
/// Uses the `save_err_ret_index` field.
save_err_ret_index,
/// Sets error return trace to zero if no operand is given,
/// otherwise sets the value to the given amount.
/// Uses the `restore_err_ret_index` union field.
restore_err_ret_index,
/// Specialized form of `Extended.restore_err_ret_index`.
/// Unconditionally restores the error return index to its last saved state
/// in the block referred to by `operand`. If `operand` is `none`, restores
/// to the point of function entry.
/// Uses the `un_node` field.
restore_err_ret_index_unconditional,
/// Specialized form of `Extended.restore_err_ret_index`.
/// Restores the error return index to its state at the entry of
/// the current function conditional on `operand` being a non-error.
/// If `operand` is `none`, restores unconditionally.
/// Uses the `un_node` field.
restore_err_ret_index_fn_entry,
/// The ZIR instruction tag is one of the `Extended` ones.
/// Uses the `extended` union field.
@ -1145,7 +1149,6 @@ pub const Inst = struct {
.shl,
.shl_sat,
.shr,
.store,
.store_node,
.store_to_inferred_ptr,
.str,
@ -1265,7 +1268,6 @@ pub const Inst = struct {
.@"defer",
.defer_err_code,
.save_err_ret_index,
.restore_err_ret_index,
.for_len,
.opt_eu_base_ptr_init,
.coerce_ptr_elem_ty,
@ -1290,6 +1292,8 @@ pub const Inst = struct {
.array_init_elem_type,
.array_init_elem_ptr,
.validate_ref_ty,
.restore_err_ret_index_unconditional,
.restore_err_ret_index_fn_entry,
=> false,
.@"break",
@ -1338,7 +1342,6 @@ pub const Inst = struct {
.ensure_err_union_payload_void,
.set_eval_branch_quota,
.atomic_store,
.store,
.store_node,
.store_to_inferred_ptr,
.resolve_inferred_alloc,
@ -1352,8 +1355,9 @@ pub const Inst = struct {
.check_comptime_control_flow,
.@"defer",
.defer_err_code,
.restore_err_ret_index,
.save_err_ret_index,
.restore_err_ret_index_unconditional,
.restore_err_ret_index_fn_entry,
.validate_struct_init_ty,
.validate_struct_init_result_ty,
.validate_ptr_struct_init,
@ -1635,8 +1639,8 @@ pub const Inst = struct {
.declaration = .pl_node,
.suspend_block = .pl_node,
.bool_not = .un_node,
.bool_br_and = .bool_br,
.bool_br_or = .bool_br,
.bool_br_and = .pl_node,
.bool_br_or = .pl_node,
.@"break" = .@"break",
.break_inline = .@"break",
.check_comptime_control_flow = .un_node,
@ -1713,9 +1717,8 @@ pub const Inst = struct {
.slice_end = .pl_node,
.slice_sentinel = .pl_node,
.slice_length = .pl_node,
.store = .bin,
.store_node = .pl_node,
.store_to_inferred_ptr = .bin,
.store_to_inferred_ptr = .pl_node,
.str = .str,
.negate = .un_node,
.negate_wrap = .un_node,
@ -1845,7 +1848,8 @@ pub const Inst = struct {
.defer_err_code = .defer_err_code,
.save_err_ret_index = .save_err_ret_index,
.restore_err_ret_index = .restore_err_ret_index,
.restore_err_ret_index_unconditional = .un_node,
.restore_err_ret_index_fn_entry = .un_node,
.struct_init_empty = .un_node,
.struct_init_empty_result = .un_node,
@ -2075,6 +2079,13 @@ pub const Inst = struct {
/// Implements the `@inComptime` builtin.
/// `operand` is `src_node: i32`.
in_comptime,
/// Restores the error return index to its last saved state in a given
/// block. If the block is `.none`, restores to the state from the point
/// of function entry. If the operand is not `.none`, the restore is
/// conditional on the operand value not being an error.
/// `operand` is payload index to `RestoreErrRetIndex`.
/// `small` is undefined.
restore_err_ret_index,
/// Used as a placeholder instruction which is just a dummy index for Sema to replace
/// with a specific value. For instance, this is used for the capture of an `errdefer`.
/// This should never appear in a body.
@ -2345,11 +2356,6 @@ pub const Inst = struct {
return LazySrcLoc.nodeOffset(self.src_node);
}
},
bool_br: struct {
lhs: Ref,
/// Points to a `Block`.
payload_index: u32,
},
@"unreachable": struct {
/// Offset from Decl AST node index.
/// `Tag` determines which kind of AST node this points to.
@ -2396,10 +2402,6 @@ pub const Inst = struct {
save_err_ret_index: struct {
operand: Ref, // If error type (or .none), save new trace index
},
restore_err_ret_index: struct {
block: Ref, // If restored, the index is from this block's entrypoint
operand: Ref, // If non-error (or .none), then restore the index
},
elem_val_imm: struct {
/// The indexable value being accessed.
operand: Ref,
@ -2435,7 +2437,6 @@ pub const Inst = struct {
float,
ptr_type,
int_type,
bool_br,
@"unreachable",
@"break",
dbg_stmt,
@ -2444,7 +2445,6 @@ pub const Inst = struct {
@"defer",
defer_err_code,
save_err_ret_index,
restore_err_ret_index,
elem_val_imm,
};
};
@ -2630,6 +2630,13 @@ pub const Inst = struct {
body_len: u32,
};
/// Trailing:
/// * inst: Index // for each `body_len`
pub const BoolBr = struct {
lhs: Ref,
body_len: u32,
};
/// Trailing:
/// 0. doc_comment: u32 // if `has_doc_comment`; null-terminated string index
/// 1. align_body_len: u32 // if `has_align_linksection_addrspace`; 0 means no `align`
@ -3015,20 +3022,19 @@ pub const Inst = struct {
};
/// Trailing:
/// 0. src_node: i32, // if has_src_node
/// 1. fields_len: u32, // if has_fields_len
/// 2. decls_len: u32, // if has_decls_len
/// 3. backing_int_body_len: u32, // if has_backing_int
/// 4. backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0
/// 5. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0
/// 6. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 7. flags: u32 // for every 8 fields
/// 0. fields_len: u32, // if has_fields_len
/// 1. decls_len: u32, // if has_decls_len
/// 2. backing_int_body_len: u32, // if has_backing_int
/// 3. backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0
/// 4. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0
/// 5. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 6. flags: u32 // for every 8 fields
/// - sets of 4 bits:
/// 0b000X: whether corresponding field has an align expression
/// 0b00X0: whether corresponding field has a default expression
/// 0b0X00: whether corresponding field is comptime
/// 0bX000: whether corresponding field has a type expression
/// 8. fields: { // for every fields_len
/// 7. fields: { // for every fields_len
/// field_name: u32, // if !is_tuple
/// doc_comment: NullTerminatedString, // .empty if no doc comment
/// field_type: Ref, // if corresponding bit is not set. none means anytype.
@ -3036,7 +3042,7 @@ pub const Inst = struct {
/// align_body_len: u32, // if corresponding bit is set
/// init_body_len: u32, // if corresponding bit is set
/// }
/// 10. bodies: { // for every fields_len
/// 8. bodies: { // for every fields_len
/// field_type_body_inst: Inst, // for each field_type_body_len
/// align_body_inst: Inst, // for each align_body_len
/// init_body_inst: Inst, // for each init_body_len
@ -3048,8 +3054,13 @@ pub const Inst = struct {
fields_hash_1: u32,
fields_hash_2: u32,
fields_hash_3: u32,
src_node: i32,
pub fn src(self: StructDecl) LazySrcLoc {
return LazySrcLoc.nodeOffset(self.src_node);
}
pub const Small = packed struct {
has_src_node: bool,
has_fields_len: bool,
has_decls_len: bool,
has_backing_int: bool,
@ -3061,7 +3072,7 @@ pub const Inst = struct {
any_default_inits: bool,
any_comptime_fields: bool,
any_aligned_fields: bool,
_: u2 = undefined,
_: u3 = undefined,
};
};
@ -3095,16 +3106,15 @@ pub const Inst = struct {
};
/// Trailing:
/// 0. src_node: i32, // if has_src_node
/// 1. tag_type: Ref, // if has_tag_type
/// 2. body_len: u32, // if has_body_len
/// 3. fields_len: u32, // if has_fields_len
/// 4. decls_len: u32, // if has_decls_len
/// 5. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 6. inst: Index // for every body_len
/// 7. has_bits: u32 // for every 32 fields
/// 0. tag_type: Ref, // if has_tag_type
/// 1. body_len: u32, // if has_body_len
/// 2. fields_len: u32, // if has_fields_len
/// 3. decls_len: u32, // if has_decls_len
/// 4. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 5. inst: Index // for every body_len
/// 6. has_bits: u32 // for every 32 fields
/// - the bit is whether corresponding field has an value expression
/// 8. fields: { // for every fields_len
/// 7. fields: { // for every fields_len
/// field_name: u32,
/// doc_comment: u32, // .empty if no doc_comment
/// value: Ref, // if corresponding bit is set
@ -3116,33 +3126,37 @@ pub const Inst = struct {
fields_hash_1: u32,
fields_hash_2: u32,
fields_hash_3: u32,
src_node: i32,
pub fn src(self: EnumDecl) LazySrcLoc {
return LazySrcLoc.nodeOffset(self.src_node);
}
pub const Small = packed struct {
has_src_node: bool,
has_tag_type: bool,
has_body_len: bool,
has_fields_len: bool,
has_decls_len: bool,
name_strategy: NameStrategy,
nonexhaustive: bool,
_: u8 = undefined,
_: u9 = undefined,
};
};
/// Trailing:
/// 0. src_node: i32, // if has_src_node
/// 1. tag_type: Ref, // if has_tag_type
/// 2. body_len: u32, // if has_body_len
/// 3. fields_len: u32, // if has_fields_len
/// 4. decls_len: u32, // if has_decls_len
/// 5. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 6. inst: Index // for every body_len
/// 7. has_bits: u32 // for every 8 fields
/// 0. tag_type: Ref, // if has_tag_type
/// 1. body_len: u32, // if has_body_len
/// 2. fields_len: u32, // if has_fields_len
/// 3. decls_len: u32, // if has_decls_len
/// 4. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 5. inst: Index // for every body_len
/// 6. has_bits: u32 // for every 8 fields
/// - sets of 4 bits:
/// 0b000X: whether corresponding field has a type expression
/// 0b00X0: whether corresponding field has a align expression
/// 0b0X00: whether corresponding field has a tag value expression
/// 0bX000: unused
/// 8. fields: { // for every fields_len
/// 7. fields: { // for every fields_len
/// field_name: NullTerminatedString, // null terminated string index
/// doc_comment: NullTerminatedString, // .empty if no doc comment
/// field_type: Ref, // if corresponding bit is set
@ -3157,8 +3171,13 @@ pub const Inst = struct {
fields_hash_1: u32,
fields_hash_2: u32,
fields_hash_3: u32,
src_node: i32,
pub fn src(self: UnionDecl) LazySrcLoc {
return LazySrcLoc.nodeOffset(self.src_node);
}
pub const Small = packed struct {
has_src_node: bool,
has_tag_type: bool,
has_body_len: bool,
has_fields_len: bool,
@ -3173,20 +3192,24 @@ pub const Inst = struct {
/// true | false | union(T) { }
auto_enum_tag: bool,
any_aligned_fields: bool,
_: u5 = undefined,
_: u6 = undefined,
};
};
/// Trailing:
/// 0. src_node: i32, // if has_src_node
/// 1. decls_len: u32, // if has_decls_len
/// 2. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 0. decls_len: u32, // if has_decls_len
/// 1. decl: Index, // for every decls_len; points to a `declaration` instruction
pub const OpaqueDecl = struct {
src_node: i32,
pub fn src(self: OpaqueDecl) LazySrcLoc {
return LazySrcLoc.nodeOffset(self.src_node);
}
pub const Small = packed struct {
has_src_node: bool,
has_decls_len: bool,
name_strategy: NameStrategy,
_: u12 = undefined,
_: u13 = undefined,
};
};
@ -3439,6 +3462,18 @@ pub const Inst = struct {
/// The RHS of the array multiplication.
rhs: Ref,
};
pub const RestoreErrRetIndex = struct {
src_node: i32,
/// If `.none`, restore the trace to its state upon function entry.
block: Ref,
/// If `.none`, restore unconditionally.
operand: Ref,
pub fn src(self: RestoreErrRetIndex) LazySrcLoc {
return LazySrcLoc.nodeOffset(self.src_node);
}
};
};
pub const SpecialProng = enum { none, @"else", under };
@ -3476,7 +3511,6 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
.struct_decl => {
const small: Inst.StructDecl.Small = @bitCast(extended.small);
var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.StructDecl).Struct.fields.len);
extra_index += @intFromBool(small.has_src_node);
extra_index += @intFromBool(small.has_fields_len);
const decls_len = if (small.has_decls_len) decls_len: {
const decls_len = zir.extra[extra_index];
@ -3503,7 +3537,6 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
.enum_decl => {
const small: Inst.EnumDecl.Small = @bitCast(extended.small);
var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.EnumDecl).Struct.fields.len);
extra_index += @intFromBool(small.has_src_node);
extra_index += @intFromBool(small.has_tag_type);
extra_index += @intFromBool(small.has_body_len);
extra_index += @intFromBool(small.has_fields_len);
@ -3522,7 +3555,6 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
.union_decl => {
const small: Inst.UnionDecl.Small = @bitCast(extended.small);
var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.UnionDecl).Struct.fields.len);
extra_index += @intFromBool(small.has_src_node);
extra_index += @intFromBool(small.has_tag_type);
extra_index += @intFromBool(small.has_body_len);
extra_index += @intFromBool(small.has_fields_len);
@ -3540,8 +3572,7 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
},
.opaque_decl => {
const small: Inst.OpaqueDecl.Small = @bitCast(extended.small);
var extra_index: u32 = extended.operand;
extra_index += @intFromBool(small.has_src_node);
var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.OpaqueDecl).Struct.fields.len);
const decls_len = if (small.has_decls_len) decls_len: {
const decls_len = zir.extra[extra_index];
extra_index += 1;

View File

@ -199,10 +199,6 @@ const Writer = struct {
const tag = tags[@intFromEnum(inst)];
try stream.print("= {s}(", .{@tagName(tags[@intFromEnum(inst)])});
switch (tag) {
.store,
.store_to_inferred_ptr,
=> try self.writeBin(stream, inst),
.alloc,
.alloc_mut,
.alloc_comptime_mut,
@ -280,6 +276,8 @@ const Writer = struct {
.validate_deref,
.check_comptime_control_flow,
.opt_eu_base_ptr_init,
.restore_err_ret_index_unconditional,
.restore_err_ret_index_fn_entry,
=> try self.writeUnNode(stream, inst),
.ref,
@ -303,7 +301,6 @@ const Writer = struct {
.int_type => try self.writeIntType(stream, inst),
.save_err_ret_index => try self.writeSaveErrRetIndex(stream, inst),
.restore_err_ret_index => try self.writeRestoreErrRetIndex(stream, inst),
.@"break",
.break_inline,
@ -392,6 +389,7 @@ const Writer = struct {
.shr_exact,
.xor,
.store_node,
.store_to_inferred_ptr,
.error_union_type,
.merge_error_sets,
.bit_and,
@ -615,6 +613,8 @@ const Writer = struct {
.cmpxchg => try self.writeCmpxchg(stream, extended),
.ptr_cast_full => try self.writePtrCastFull(stream, extended),
.ptr_cast_no_dest => try self.writePtrCastNoDest(stream, extended),
.restore_err_ret_index => try self.writeRestoreErrRetIndex(stream, extended),
}
}
@ -624,14 +624,6 @@ const Writer = struct {
try self.writeSrc(stream, src);
}
fn writeBin(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].bin;
try self.writeInstRef(stream, inst_data.lhs);
try stream.writeAll(", ");
try self.writeInstRef(stream, inst_data.rhs);
try stream.writeByte(')');
}
fn writeArrayInitElemType(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].bin;
try self.writeInstRef(stream, inst_data.lhs);
@ -1413,12 +1405,6 @@ const Writer = struct {
var extra_index: usize = extra.end;
const src_node: ?i32 = if (small.has_src_node) blk: {
const src_node = @as(i32, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
break :blk src_node;
} else null;
const fields_len = if (small.has_fields_len) blk: {
const fields_len = self.code.extra[extra_index];
extra_index += 1;
@ -1461,7 +1447,7 @@ const Writer = struct {
try stream.writeAll("{}, ");
} else {
const prev_parent_decl_node = self.parent_decl_node;
if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
self.parent_decl_node = self.relativeToNodeIndex(extra.data.src_node);
defer self.parent_decl_node = prev_parent_decl_node;
try stream.writeAll("{\n");
@ -1542,7 +1528,7 @@ const Writer = struct {
}
const prev_parent_decl_node = self.parent_decl_node;
if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
self.parent_decl_node = self.relativeToNodeIndex(extra.data.src_node);
try stream.writeAll("{\n");
self.indent += 2;
@ -1595,7 +1581,7 @@ const Writer = struct {
try stream.writeByteNTimes(' ', self.indent);
try stream.writeAll("})");
}
try self.writeSrcNode(stream, src_node);
try self.writeSrcNode(stream, extra.data.src_node);
}
fn writeUnionDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
@ -1613,12 +1599,6 @@ const Writer = struct {
var extra_index: usize = extra.end;
const src_node: ?i32 = if (small.has_src_node) blk: {
const src_node = @as(i32, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
break :blk src_node;
} else null;
const tag_type_ref = if (small.has_tag_type) blk: {
const tag_type_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
extra_index += 1;
@ -1652,7 +1632,7 @@ const Writer = struct {
try stream.writeAll("{}");
} else {
const prev_parent_decl_node = self.parent_decl_node;
if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
self.parent_decl_node = self.relativeToNodeIndex(extra.data.src_node);
defer self.parent_decl_node = prev_parent_decl_node;
try stream.writeAll("{\n");
@ -1671,7 +1651,7 @@ const Writer = struct {
if (fields_len == 0) {
try stream.writeAll("})");
try self.writeSrcNode(stream, src_node);
try self.writeSrcNode(stream, extra.data.src_node);
return;
}
try stream.writeAll(", ");
@ -1680,7 +1660,7 @@ const Writer = struct {
extra_index += body.len;
const prev_parent_decl_node = self.parent_decl_node;
if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
self.parent_decl_node = self.relativeToNodeIndex(extra.data.src_node);
try self.writeBracedDecl(stream, body);
try stream.writeAll(", {\n");
@ -1748,7 +1728,7 @@ const Writer = struct {
self.indent -= 2;
try stream.writeByteNTimes(' ', self.indent);
try stream.writeAll("})");
try self.writeSrcNode(stream, src_node);
try self.writeSrcNode(stream, extra.data.src_node);
}
fn writeEnumDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
@ -1766,12 +1746,6 @@ const Writer = struct {
var extra_index: usize = extra.end;
const src_node: ?i32 = if (small.has_src_node) blk: {
const src_node = @as(i32, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
break :blk src_node;
} else null;
const tag_type_ref = if (small.has_tag_type) blk: {
const tag_type_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
extra_index += 1;
@ -1803,7 +1777,7 @@ const Writer = struct {
try stream.writeAll("{}, ");
} else {
const prev_parent_decl_node = self.parent_decl_node;
if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
self.parent_decl_node = self.relativeToNodeIndex(extra.data.src_node);
defer self.parent_decl_node = prev_parent_decl_node;
try stream.writeAll("{\n");
@ -1824,7 +1798,7 @@ const Writer = struct {
extra_index += body.len;
const prev_parent_decl_node = self.parent_decl_node;
if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
self.parent_decl_node = self.relativeToNodeIndex(extra.data.src_node);
try self.writeBracedDecl(stream, body);
if (fields_len == 0) {
try stream.writeAll(", {})");
@ -1872,7 +1846,7 @@ const Writer = struct {
try stream.writeByteNTimes(' ', self.indent);
try stream.writeAll("})");
}
try self.writeSrcNode(stream, src_node);
try self.writeSrcNode(stream, extra.data.src_node);
}
fn writeOpaqueDecl(
@ -1881,13 +1855,8 @@ const Writer = struct {
extended: Zir.Inst.Extended.InstData,
) !void {
const small = @as(Zir.Inst.OpaqueDecl.Small, @bitCast(extended.small));
var extra_index: usize = extended.operand;
const src_node: ?i32 = if (small.has_src_node) blk: {
const src_node = @as(i32, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
break :blk src_node;
} else null;
const extra = self.code.extraData(Zir.Inst.OpaqueDecl, extended.operand);
var extra_index: usize = extra.end;
const decls_len = if (small.has_decls_len) blk: {
const decls_len = self.code.extra[extra_index];
@ -1901,7 +1870,7 @@ const Writer = struct {
try stream.writeAll("{})");
} else {
const prev_parent_decl_node = self.parent_decl_node;
if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
self.parent_decl_node = self.relativeToNodeIndex(extra.data.src_node);
defer self.parent_decl_node = prev_parent_decl_node;
try stream.writeAll("{\n");
@ -1911,7 +1880,7 @@ const Writer = struct {
try stream.writeByteNTimes(' ', self.indent);
try stream.writeAll("})");
}
try self.writeSrcNode(stream, src_node);
try self.writeSrcNode(stream, extra.data.src_node);
}
fn writeErrorSetDecl(
@ -2505,12 +2474,14 @@ const Writer = struct {
}
fn writeBoolBr(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].bool_br;
const extra = self.code.extraData(Zir.Inst.Block, inst_data.payload_index);
const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = self.code.extraData(Zir.Inst.BoolBr, inst_data.payload_index);
const body = self.code.bodySlice(extra.end, extra.data.body_len);
try self.writeInstRef(stream, inst_data.lhs);
try self.writeInstRef(stream, extra.data.lhs);
try stream.writeAll(", ");
try self.writeBracedBody(stream, body);
try stream.writeAll(") ");
try self.writeSrc(stream, inst_data.src());
}
fn writeIntType(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
@ -2531,13 +2502,14 @@ const Writer = struct {
try stream.writeAll(")");
}
fn writeRestoreErrRetIndex(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].restore_err_ret_index;
fn writeRestoreErrRetIndex(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
const extra = self.code.extraData(Zir.Inst.RestoreErrRetIndex, extended.operand).data;
try self.writeInstRef(stream, inst_data.block);
try self.writeInstRef(stream, inst_data.operand);
try self.writeInstRef(stream, extra.block);
try self.writeInstRef(stream, extra.operand);
try stream.writeAll(")");
try stream.writeAll(") ");
try self.writeSrc(stream, extra.src());
}
fn writeBreak(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {

View File

@ -14,6 +14,6 @@ export fn entry() usize {
// backend=stage2
// target=native
//
// :6:5: error: unable to resolve comptime value
// :6:5: note: value being returned at comptime must be comptime-known
// :6:12: error: unable to resolve comptime value
// :6:12: note: value being returned at comptime must be comptime-known
// :2:12: note: called from here