diff --git a/CMakeLists.txt b/CMakeLists.txt index de4fdfac96..d47c020ca8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -515,6 +515,7 @@ set(ZIG_STAGE2_SOURCES "${CMAKE_SOURCE_DIR}/lib/std/zig/tokenizer.zig" "${CMAKE_SOURCE_DIR}/src/Air.zig" "${CMAKE_SOURCE_DIR}/src/AstGen.zig" + "${CMAKE_SOURCE_DIR}/src/AstRlAnnotate.zig" "${CMAKE_SOURCE_DIR}/src/Compilation.zig" "${CMAKE_SOURCE_DIR}/src/Liveness.zig" "${CMAKE_SOURCE_DIR}/src/Module.zig" diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index d0cd067a4a..35e92ee04e 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -370,10 +370,9 @@ fn render_cmake( } }, - else => { - break :blk value; - }, + else => {}, } + break :blk value; }; if (booldefine) { diff --git a/src/AstGen.zig b/src/AstGen.zig index bff56614fe..388b3c7a83 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -17,9 +17,13 @@ const refToIndex = Zir.refToIndex; const indexToRef = Zir.indexToRef; const trace = @import("tracy.zig").trace; const BuiltinFn = @import("BuiltinFn.zig"); +const AstRlAnnotate = @import("AstRlAnnotate.zig"); gpa: Allocator, tree: *const Ast, +/// The set of nodes which, given the choice, must expose a result pointer to +/// sub-expressions. See `AstRlAnnotate` for details. +nodes_need_rl: *const AstRlAnnotate.RlNeededSet, instructions: std.MultiArrayList(Zir.Inst) = .{}, extra: ArrayListUnmanaged(u32) = .{}, string_bytes: ArrayListUnmanaged(u8) = .{}, @@ -113,10 +117,14 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir { var arena = std.heap.ArenaAllocator.init(gpa); defer arena.deinit(); + var nodes_need_rl = try AstRlAnnotate.annotate(gpa, arena.allocator(), tree); + defer nodes_need_rl.deinit(gpa); + var astgen: AstGen = .{ .gpa = gpa, .arena = arena.allocator(), .tree = &tree, + .nodes_need_rl = &nodes_need_rl, }; defer astgen.deinit(gpa); @@ -272,69 +280,12 @@ const ResultInfo = struct { /// The result instruction from the expression must be ignored. /// Always an instruction with tag `alloc_inferred`. inferred_ptr: Zir.Inst.Ref, - /// There is a pointer for the expression to store its result into, however, its type - /// is inferred based on peer type resolution for a `Zir.Inst.Block`. - /// The result instruction from the expression must be ignored. - block_ptr: *GenZir, const PtrResultLoc = struct { inst: Zir.Inst.Ref, src_node: ?Ast.Node.Index = null, }; - const Strategy = struct { - elide_store_to_block_ptr_instructions: bool, - tag: Tag, - - const Tag = enum { - /// Both branches will use break_void; result location is used to communicate the - /// result instruction. - break_void, - /// Use break statements to pass the block result value, and call rvalue() at - /// the end depending on rl. Also elide the store_to_block_ptr instructions - /// depending on rl. - break_operand, - }; - }; - - fn strategy(rl: Loc, block_scope: *GenZir) Strategy { - switch (rl) { - // In this branch there will not be any store_to_block_ptr instructions. - .none, .ty, .coerced_ty, .ref => return .{ - .tag = .break_operand, - .elide_store_to_block_ptr_instructions = false, - }, - .discard => return .{ - .tag = .break_void, - .elide_store_to_block_ptr_instructions = false, - }, - // The pointer got passed through to the sub-expressions, so we will use - // break_void here. - // In this branch there will not be any store_to_block_ptr instructions. 
- .ptr => return .{ - .tag = .break_void, - .elide_store_to_block_ptr_instructions = false, - }, - .inferred_ptr, .block_ptr => { - if (block_scope.rvalue_rl_count == block_scope.break_count) { - // Neither prong of the if consumed the result location, so we can - // use break instructions to create an rvalue. - return .{ - .tag = .break_operand, - .elide_store_to_block_ptr_instructions = true, - }; - } else { - // Allow the store_to_block_ptr instructions to remain so that - // semantic analysis can turn them into bitcasts. - return .{ - .tag = .break_void, - .elide_store_to_block_ptr_instructions = false, - }; - } - }, - } - } - /// Find the result type for a cast builtin given the result location. /// If the location does not have a known result type, emits an error on /// the given node. @@ -347,13 +298,6 @@ const ResultInfo = struct { const ptr_ty = try gz.addUnNode(.typeof, ptr.inst, node); return gz.addUnNode(.elem_type, ptr_ty, node); }, - .block_ptr => |block_scope| { - if (block_scope.rl_ty_inst != .none) return block_scope.rl_ty_inst; - if (block_scope.break_result_info.rl == .ptr) { - const ptr_ty = try gz.addUnNode(.typeof, block_scope.break_result_info.rl.ptr.inst, node); - return gz.addUnNode(.elem_type, ptr_ty, node); - } - }, } return astgen.failNodeNotes(node, "{s} must have a known result type", .{builtin_name}, &.{ @@ -1516,7 +1460,7 @@ fn arrayInitExpr( return rvalue(gz, ri, result, node); }, .ptr => |ptr_res| { - return arrayInitExprRlPtr(gz, scope, ri, node, ptr_res.inst, array_init.ast.elements, types.array); + return arrayInitExprRlPtr(gz, scope, node, ptr_res.inst, array_init.ast.elements, types.array); }, .inferred_ptr => |ptr_inst| { if (types.array == .none) { @@ -1526,18 +1470,9 @@ fn arrayInitExpr( const result = try arrayInitExprRlNone(gz, scope, node, array_init.ast.elements, .array_init_anon); return rvalue(gz, ri, result, node); } else { - return arrayInitExprRlPtr(gz, scope, ri, node, ptr_inst, array_init.ast.elements, types.array); + return arrayInitExprRlPtr(gz, scope, node, ptr_inst, array_init.ast.elements, types.array); } }, - .block_ptr => |block_gz| { - // This condition is here for the same reason as the above condition in `inferred_ptr`. - // See corresponding logic in structInitExpr. 
- if (types.array == .none and astgen.isInferred(block_gz.rl_ptr)) { - const result = try arrayInitExprRlNone(gz, scope, node, array_init.ast.elements, .array_init_anon); - return rvalue(gz, ri, result, node); - } - return arrayInitExprRlPtr(gz, scope, ri, node, block_gz.rl_ptr, array_init.ast.elements, types.array); - }, } } @@ -1609,7 +1544,6 @@ fn arrayInitExprInner( fn arrayInitExprRlPtr( gz: *GenZir, scope: *Scope, - ri: ResultInfo, node: Ast.Node.Index, result_ptr: Zir.Inst.Ref, elements: []const Ast.Node.Index, @@ -1620,11 +1554,8 @@ fn arrayInitExprRlPtr( return arrayInitExprRlPtrInner(gz, scope, node, base_ptr, elements); } - var as_scope = try gz.makeCoercionScope(scope, array_ty, result_ptr, node); - defer as_scope.unstack(); - - const result = try arrayInitExprRlPtrInner(&as_scope, scope, node, as_scope.rl_ptr, elements); - return as_scope.finishCoercion(gz, ri, node, result, array_ty); + const casted_ptr = try gz.addPlNode(.coerce_result_ptr, node, Zir.Inst.Bin{ .lhs = array_ty, .rhs = result_ptr }); + return arrayInitExprRlPtrInner(gz, scope, node, casted_ptr, elements); } fn arrayInitExprRlPtrInner( @@ -1759,7 +1690,7 @@ fn structInitExpr( const result = try structInitExprRlTy(gz, scope, node, struct_init, inner_ty_inst, .struct_init); return rvalue(gz, ri, result, node); }, - .ptr => |ptr_res| return structInitExprRlPtr(gz, scope, ri, node, struct_init, ptr_res.inst), + .ptr => |ptr_res| return structInitExprRlPtr(gz, scope, node, struct_init, ptr_res.inst), .inferred_ptr => |ptr_inst| { if (struct_init.ast.type_expr == 0) { // We treat this case differently so that we don't get a crash when @@ -1768,19 +1699,9 @@ fn structInitExpr( const result = try structInitExprRlNone(gz, scope, node, struct_init, .none, .struct_init_anon); return rvalue(gz, ri, result, node); } else { - return structInitExprRlPtr(gz, scope, ri, node, struct_init, ptr_inst); + return structInitExprRlPtr(gz, scope, node, struct_init, ptr_inst); } }, - .block_ptr => |block_gz| { - // This condition is here for the same reason as the above condition in `inferred_ptr`. - // See corresponding logic in arrayInitExpr. 
- if (struct_init.ast.type_expr == 0 and astgen.isInferred(block_gz.rl_ptr)) { - const result = try structInitExprRlNone(gz, scope, node, struct_init, .none, .struct_init_anon); - return rvalue(gz, ri, result, node); - } - - return structInitExprRlPtr(gz, scope, ri, node, struct_init, block_gz.rl_ptr); - }, } } @@ -1824,7 +1745,6 @@ fn structInitExprRlNone( fn structInitExprRlPtr( gz: *GenZir, scope: *Scope, - ri: ResultInfo, node: Ast.Node.Index, struct_init: Ast.full.StructInit, result_ptr: Zir.Inst.Ref, @@ -1836,11 +1756,8 @@ fn structInitExprRlPtr( const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); _ = try gz.addUnNode(.validate_struct_init_ty, ty_inst, node); - var as_scope = try gz.makeCoercionScope(scope, ty_inst, result_ptr, node); - defer as_scope.unstack(); - - const result = try structInitExprRlPtrInner(&as_scope, scope, node, struct_init, as_scope.rl_ptr); - return as_scope.finishCoercion(gz, ri, node, result, ty_inst); + const casted_ptr = try gz.addPlNode(.coerce_result_ptr, node, Zir.Inst.Bin{ .lhs = ty_inst, .rhs = result_ptr }); + return structInitExprRlPtrInner(gz, scope, node, struct_init, casted_ptr); } fn structInitExprRlPtrInner( @@ -2039,22 +1956,13 @@ fn restoreErrRetIndex( .maybe => switch (ri.ctx) { .error_handling_expr, .@"return", .fn_arg, .const_init => switch (ri.rl) { .ptr => |ptr_res| try gz.addUnNode(.load, ptr_res.inst, node), - .inferred_ptr => |ptr| try gz.addUnNode(.load, ptr, node), - .block_ptr => |block_scope| if (block_scope.rvalue_rl_count != block_scope.break_count) b: { - // The result location may have been used by this expression, in which case - // the operand is not the result and we need to load the rl ptr. - switch (gz.astgen.instructions.items(.tag)[Zir.refToIndex(block_scope.rl_ptr).?]) { - .alloc_inferred, .alloc_inferred_mut => { - // This is a terrible workaround for Sema's inability to load from a .alloc_inferred ptr - // before its type has been resolved. The operand we use here instead is not guaranteed - // to be valid, and when it's not, we will pop error traces prematurely. - // - // TODO: Update this to do a proper load from the rl_ptr, once Sema can support it. - break :b result; - }, - else => break :b try gz.addUnNode(.load, block_scope.rl_ptr, node), - } - } else result, + .inferred_ptr => blk: { + // This is a terrible workaround for Sema's inability to load from a .alloc_inferred ptr + // before its type has been resolved. There is no valid operand to use here, so error + // traces will be popped prematurely. + // TODO: Update this to do a proper load from the rl_ptr, once Sema can support it. 
+ break :blk .none; + }, else => result, }, else => .none, // always restore/pop @@ -2110,7 +2018,6 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn else .@"break"; - block_gz.break_count += 1; if (rhs == 0) { _ = try rvalue(parent_gz, block_gz.break_result_info, .void_value, node); @@ -2125,7 +2032,6 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn } const operand = try reachableExpr(parent_gz, parent_scope, block_gz.break_result_info, rhs, node); - const search_index: Zir.Inst.Index = @intCast(astgen.instructions.len); try genDefers(parent_gz, scope, parent_scope, .normal_only); @@ -2134,10 +2040,6 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn try restoreErrRetIndex(parent_gz, .{ .block = block_inst }, block_gz.break_result_info, rhs, operand); switch (block_gz.break_result_info.rl) { - .block_ptr => { - const br = try parent_gz.addBreakWithSrcNode(break_tag, block_inst, operand, rhs); - try block_gz.labeled_breaks.append(astgen.gpa, .{ .br = br, .search = search_index }); - }, .ptr => { // In this case we don't have any mechanism to intercept it; // we assume the result location is written, and we break with void. @@ -2346,6 +2248,20 @@ fn labeledBlockExpr( try astgen.checkLabelRedefinition(parent_scope, label_token); + const need_rl = astgen.nodes_need_rl.contains(block_node); + const block_ri: ResultInfo = if (need_rl) ri else .{ + .rl = switch (ri.rl) { + .ptr => .{ .ty = try ri.rl.resultType(gz, block_node, undefined) }, + .inferred_ptr => .none, + else => ri.rl, + }, + .ctx = ri.ctx, + }; + // We need to call `rvalue` to write through to the pointer only if we had a + // result pointer and aren't forwarding it. + const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; + const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); + // Reserve the Block ZIR instruction index so that we can put it into the GenZir struct // so that break statements can reference it. const block_tag: Zir.Inst.Tag = if (force_comptime) .block_comptime else .block; @@ -2356,10 +2272,9 @@ fn labeledBlockExpr( .token = label_token, .block_inst = block_inst, }; - block_scope.setBreakResultInfo(ri); + block_scope.setBreakResultInfo(block_ri); if (force_comptime) block_scope.is_comptime = true; defer block_scope.unstack(); - defer block_scope.labeled_breaks.deinit(astgen.gpa); try blockExprStmts(&block_scope, &block_scope.base, statements); if (!block_scope.endsWithNoReturn()) { @@ -2372,75 +2287,11 @@ fn labeledBlockExpr( try astgen.appendErrorTok(label_token, "unused block label", .{}); } - const zir_datas = astgen.instructions.items(.data); - const zir_tags = astgen.instructions.items(.tag); - const strat = ri.rl.strategy(&block_scope); - switch (strat.tag) { - .break_void => { - // The code took advantage of the result location as a pointer. - // Turn the break instruction operands into void. - for (block_scope.labeled_breaks.items) |br| { - zir_datas[br.br].@"break".operand = .void_value; - } - try block_scope.setBlockBody(block_inst); - - return indexToRef(block_inst); - }, - .break_operand => { - // All break operands are values that did not use the result location pointer - // (except for a single .store_to_block_ptr inst which we re-write here). - // The break instructions need to have their operands coerced if the - // block's result location is a `ty`. 
In this case we overwrite the - // `store_to_block_ptr` instruction with an `as` instruction and repurpose - // it as the break operand. - // This corresponds to similar code in `setCondBrPayloadElideBlockStorePtr`. - if (block_scope.rl_ty_inst != .none) { - try astgen.extra.ensureUnusedCapacity( - astgen.gpa, - @typeInfo(Zir.Inst.As).Struct.fields.len * block_scope.labeled_breaks.items.len, - ); - for (block_scope.labeled_breaks.items) |br| { - // We expect the `store_to_block_ptr` to be created between 1-3 instructions - // prior to the break. - var search_index = br.search -| 3; - while (search_index < br.search) : (search_index += 1) { - if (zir_tags[search_index] == .store_to_block_ptr and - zir_datas[search_index].bin.lhs == block_scope.rl_ptr) - { - const break_data = &zir_datas[br.br].@"break"; - const break_src: i32 = @bitCast(astgen.extra.items[ - break_data.payload_index + - std.meta.fieldIndex(Zir.Inst.Break, "operand_src_node").? - ]); - if (break_src == Zir.Inst.Break.no_src_node) { - zir_tags[search_index] = .as; - zir_datas[search_index].bin = .{ - .lhs = block_scope.rl_ty_inst, - .rhs = break_data.operand, - }; - } else { - zir_tags[search_index] = .as_node; - zir_datas[search_index] = .{ .pl_node = .{ - .src_node = break_src, - .payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.As{ - .dest_type = block_scope.rl_ty_inst, - .operand = break_data.operand, - }), - } }; - } - break_data.operand = indexToRef(search_index); - break; - } - } else unreachable; - } - } - try block_scope.setBlockBody(block_inst); - const block_ref = indexToRef(block_inst); - switch (ri.rl) { - .ref => return block_ref, - else => return rvalue(gz, ri, block_ref, block_node), - } - }, + try block_scope.setBlockBody(block_inst); + if (need_result_rvalue) { + return rvalue(gz, ri, indexToRef(block_inst), block_node); + } else { + return indexToRef(block_inst); } } @@ -2818,7 +2669,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As .atomic_store, .store, .store_node, - .store_to_block_ptr, .store_to_inferred_ptr, .resolve_inferred_alloc, .validate_struct_init, @@ -3137,7 +2987,7 @@ fn varDecl( // the variable, no memory location needed. const type_node = var_decl.ast.type_node; if (align_inst == .none and - !nodeMayNeedMemoryLocation(tree, var_decl.ast.init_node, type_node != 0)) + !astgen.nodes_need_rl.contains(node)) { const result_info: ResultInfo = if (type_node != 0) .{ .rl = .{ .ty = try typeExpr(gz, scope, type_node) }, @@ -3170,132 +3020,62 @@ fn varDecl( const is_comptime = gz.is_comptime or tree.nodes.items(.tag)[var_decl.ast.init_node] == .@"comptime"; - // Detect whether the initialization expression actually uses the - // result location pointer. 
- var init_scope = gz.makeSubBlock(scope); - // we may add more instructions to gz before stacking init_scope - init_scope.instructions_top = GenZir.unstacked_top; - init_scope.anon_name_strategy = .dbg_var; - defer init_scope.unstack(); - var resolve_inferred_alloc: Zir.Inst.Ref = .none; var opt_type_inst: Zir.Inst.Ref = .none; - if (type_node != 0) { - const type_inst = try typeExpr(gz, &init_scope.base, type_node); + const init_rl: ResultInfo.Loc = if (type_node != 0) init_rl: { + const type_inst = try typeExpr(gz, scope, type_node); opt_type_inst = type_inst; if (align_inst == .none) { - init_scope.instructions_top = gz.instructions.items.len; - init_scope.rl_ptr = try init_scope.addUnNode(.alloc, type_inst, node); + break :init_rl .{ .ptr = .{ .inst = try gz.addUnNode(.alloc, type_inst, node) } }; } else { - init_scope.rl_ptr = try gz.addAllocExtended(.{ + break :init_rl .{ .ptr = .{ .inst = try gz.addAllocExtended(.{ .node = node, .type_inst = type_inst, .align_inst = align_inst, .is_const = true, .is_comptime = is_comptime, - }); - init_scope.instructions_top = gz.instructions.items.len; + }) } }; } - init_scope.rl_ty_inst = type_inst; - } else { - const alloc = if (align_inst == .none) alloc: { - init_scope.instructions_top = gz.instructions.items.len; + } else init_rl: { + const alloc_inst = if (align_inst == .none) ptr: { const tag: Zir.Inst.Tag = if (is_comptime) .alloc_inferred_comptime else .alloc_inferred; - break :alloc try init_scope.addNode(tag, node); - } else alloc: { - const ref = try gz.addAllocExtended(.{ + break :ptr try gz.addNode(tag, node); + } else ptr: { + break :ptr try gz.addAllocExtended(.{ .node = node, .type_inst = .none, .align_inst = align_inst, .is_const = true, .is_comptime = is_comptime, }); - init_scope.instructions_top = gz.instructions.items.len; - break :alloc ref; }; - resolve_inferred_alloc = alloc; - init_scope.rl_ptr = alloc; - init_scope.rl_ty_inst = .none; - } - const init_result_info: ResultInfo = .{ .rl = .{ .block_ptr = &init_scope }, .ctx = .const_init }; - const init_inst = try reachableExpr(&init_scope, &init_scope.base, init_result_info, var_decl.ast.init_node, node); + resolve_inferred_alloc = alloc_inst; + break :init_rl .{ .inferred_ptr = alloc_inst }; + }; + const var_ptr = switch (init_rl) { + .ptr => |ptr| ptr.inst, + .inferred_ptr => |inst| inst, + else => unreachable, + }; + const init_result_info: ResultInfo = .{ .rl = init_rl, .ctx = .const_init }; + + const prev_anon_name_strategy = gz.anon_name_strategy; + gz.anon_name_strategy = .dbg_var; + defer gz.anon_name_strategy = prev_anon_name_strategy; + const init_inst = try reachableExpr(gz, scope, init_result_info, var_decl.ast.init_node, node); // The const init expression may have modified the error return trace, so signal // to Sema that it should save the new index for restoring later. if (nodeMayAppendToErrorTrace(tree, var_decl.ast.init_node)) - _ = try init_scope.addSaveErrRetIndex(.{ .if_of_error_type = init_inst }); + _ = try gz.addSaveErrRetIndex(.{ .if_of_error_type = init_inst }); - const zir_tags = astgen.instructions.items(.tag); - const zir_datas = astgen.instructions.items(.data); - - if (align_inst == .none and init_scope.rvalue_rl_count == 1) { - // Result location pointer not used. We don't need an alloc for this - // const local, and type inference becomes trivial. - // Implicitly move the init_scope instructions into the parent scope, - // then elide the alloc instruction and the store_to_block_ptr instruction. 
- var src = init_scope.instructions_top; - var dst = src; - init_scope.instructions_top = GenZir.unstacked_top; - while (src < gz.instructions.items.len) : (src += 1) { - const src_inst = gz.instructions.items[src]; - if (indexToRef(src_inst) == init_scope.rl_ptr) continue; - if (zir_tags[src_inst] == .store_to_block_ptr) { - if (zir_datas[src_inst].bin.lhs == init_scope.rl_ptr) continue; - } - gz.instructions.items[dst] = src_inst; - dst += 1; - } - gz.instructions.items.len = dst; - - // In case the result location did not do the coercion - // for us so we must do it here. - const coerced_init = if (opt_type_inst != .none) - try gz.addPlNode(.as_node, var_decl.ast.init_node, Zir.Inst.As{ - .dest_type = opt_type_inst, - .operand = init_inst, - }) - else - init_inst; - - try gz.addDbgVar(.dbg_var_val, ident_name, coerced_init); - - const sub_scope = try block_arena.create(Scope.LocalVal); - sub_scope.* = .{ - .parent = scope, - .gen_zir = gz, - .name = ident_name, - .inst = coerced_init, - .token_src = name_token, - .id_cat = .@"local constant", - }; - return &sub_scope.base; - } - // The initialization expression took advantage of the result location - // of the const local. In this case we will create an alloc and a LocalPtr for it. - // Implicitly move the init_scope instructions into the parent scope, then swap - // store_to_block_ptr for store_to_inferred_ptr. - - var src = init_scope.instructions_top; - init_scope.instructions_top = GenZir.unstacked_top; - while (src < gz.instructions.items.len) : (src += 1) { - const src_inst = gz.instructions.items[src]; - if (zir_tags[src_inst] == .store_to_block_ptr) { - if (zir_datas[src_inst].bin.lhs == init_scope.rl_ptr) { - if (type_node != 0) { - zir_tags[src_inst] = .store; - } else { - zir_tags[src_inst] = .store_to_inferred_ptr; - } - } - } - } if (resolve_inferred_alloc != .none) { _ = try gz.addUnNode(.resolve_inferred_alloc, resolve_inferred_alloc, node); } - const const_ptr = try gz.addUnNode(.make_ptr_const, init_scope.rl_ptr, node); + const const_ptr = try gz.addUnNode(.make_ptr_const, var_ptr, node); try gz.addDbgVar(.dbg_var_ptr, ident_name, const_ptr); @@ -3312,9 +3092,6 @@ fn varDecl( return &sub_scope.base; }, .keyword_var => { - const old_rl_ty_inst = gz.rl_ty_inst; - defer gz.rl_ty_inst = old_rl_ty_inst; - const is_comptime = var_decl.comptime_token != null or gz.is_comptime; var resolve_inferred_alloc: Zir.Inst.Ref = .none; const var_data: struct { @@ -3339,7 +3116,6 @@ fn varDecl( }); } }; - gz.rl_ty_inst = type_inst; break :a .{ .alloc = alloc, .result_info = .{ .rl = .{ .ptr = .{ .inst = alloc } } } }; } else a: { const alloc = alloc: { @@ -3359,7 +3135,6 @@ fn varDecl( }); } }; - gz.rl_ty_inst = .none; resolve_inferred_alloc = alloc; break :a .{ .alloc = alloc, .result_info = .{ .rl = .{ .inferred_ptr = alloc } } }; }; @@ -5530,17 +5305,30 @@ fn orelseCatchExpr( const astgen = parent_gz.astgen; const tree = astgen.tree; + const need_rl = astgen.nodes_need_rl.contains(node); + const block_ri: ResultInfo = if (need_rl) ri else .{ + .rl = switch (ri.rl) { + .ptr => .{ .ty = try ri.rl.resultType(parent_gz, node, undefined) }, + .inferred_ptr => .none, + else => ri.rl, + }, + .ctx = ri.ctx, + }; + // We need to call `rvalue` to write through to the pointer only if we had a + // result pointer and aren't forwarding it. 
+ const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; + const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); + const do_err_trace = astgen.fn_block != null and (cond_op == .is_non_err or cond_op == .is_non_err_ptr); var block_scope = parent_gz.makeSubBlock(scope); - block_scope.setBreakResultInfo(ri); + block_scope.setBreakResultInfo(block_ri); defer block_scope.unstack(); const operand_ri: ResultInfo = switch (block_scope.break_result_info.rl) { .ref => .{ .rl = .ref, .ctx = if (do_err_trace) .error_handling_expr else .none }, else => .{ .rl = .none, .ctx = if (do_err_trace) .error_handling_expr else .none }, }; - block_scope.break_count += 1; // This could be a pointer or value depending on the `operand_ri` parameter. // We cannot use `block_scope.break_result_info` because that has the bare // type, whereas this expression has the optional type. Later we make @@ -5563,6 +5351,7 @@ fn orelseCatchExpr( .ref => unwrapped_payload, else => try rvalue(&then_scope, block_scope.break_result_info, unwrapped_payload, node), }; + _ = try then_scope.addBreakWithSrcNode(.@"break", block, then_result, node); var else_scope = block_scope.makeSubBlock(scope); defer else_scope.unstack(); @@ -5596,93 +5385,20 @@ fn orelseCatchExpr( const else_result = try expr(&else_scope, else_sub_scope, block_scope.break_result_info, rhs); if (!else_scope.endsWithNoReturn()) { - block_scope.break_count += 1; - // As our last action before the break, "pop" the error trace if needed if (do_err_trace) try restoreErrRetIndex(&else_scope, .{ .block = block }, block_scope.break_result_info, rhs, else_result); + + _ = try else_scope.addBreakWithSrcNode(.@"break", block, else_result, rhs); } try checkUsed(parent_gz, &else_scope.base, else_sub_scope); - // We hold off on the break instructions as well as copying the then/else - // instructions into place until we know whether to keep store_to_block_ptr - // instructions or not. + try setCondBrPayload(condbr, cond, &then_scope, &else_scope); - const result = try finishThenElseBlock( - parent_gz, - ri, - node, - &block_scope, - &then_scope, - &else_scope, - condbr, - cond, - then_result, - node, - else_result, - rhs, - block, - block, - .@"break", - ); - return result; -} - -/// Supports `else_scope` stacked on `then_scope` stacked on `block_scope`. Unstacks `else_scope` then `then_scope`. -fn finishThenElseBlock( - parent_gz: *GenZir, - ri: ResultInfo, - node: Ast.Node.Index, - block_scope: *GenZir, - then_scope: *GenZir, - else_scope: *GenZir, - condbr: Zir.Inst.Index, - cond: Zir.Inst.Ref, - then_result: Zir.Inst.Ref, - then_src_node: Ast.Node.Index, - else_result: Zir.Inst.Ref, - else_src_node: Ast.Node.Index, - main_block: Zir.Inst.Index, - then_break_block: Zir.Inst.Index, - break_tag: Zir.Inst.Tag, -) InnerError!Zir.Inst.Ref { - // We now have enough information to decide whether the result instruction should - // be communicated via result location pointer or break instructions. 
- const strat = ri.rl.strategy(block_scope); - // else_scope may be stacked on then_scope, so check for no-return on then_scope manually - const tags = parent_gz.astgen.instructions.items(.tag); - const then_slice = then_scope.instructionsSliceUpto(else_scope); - const then_no_return = then_slice.len > 0 and tags[then_slice[then_slice.len - 1]].isNoReturn(); - const else_no_return = else_scope.endsWithNoReturn(); - - switch (strat.tag) { - .break_void => { - const then_break = if (!then_no_return) try then_scope.makeBreak(break_tag, then_break_block, .void_value) else 0; - const else_break = if (!else_no_return) try else_scope.makeBreak(break_tag, main_block, .void_value) else 0; - assert(!strat.elide_store_to_block_ptr_instructions); - try setCondBrPayload(condbr, cond, then_scope, then_break, else_scope, else_break); - return indexToRef(main_block); - }, - .break_operand => { - const then_break = if (!then_no_return) try then_scope.makeBreakWithSrcNode(break_tag, then_break_block, then_result, then_src_node) else 0; - const else_break = if (else_result == .none) - try else_scope.makeBreak(break_tag, main_block, .void_value) - else if (!else_no_return) - try else_scope.makeBreakWithSrcNode(break_tag, main_block, else_result, else_src_node) - else - 0; - - if (strat.elide_store_to_block_ptr_instructions) { - try setCondBrPayloadElideBlockStorePtr(condbr, cond, then_scope, then_break, then_src_node, else_scope, else_break, else_src_node, block_scope.rl_ptr); - } else { - try setCondBrPayload(condbr, cond, then_scope, then_break, else_scope, else_break); - } - const block_ref = indexToRef(main_block); - switch (ri.rl) { - .ref => return block_ref, - else => return rvalue(parent_gz, ri, block_ref, node), - } - }, + if (need_result_rvalue) { + return rvalue(parent_gz, ri, indexToRef(block), node); + } else { + return indexToRef(block); } } @@ -5858,8 +5574,22 @@ fn ifExpr( const do_err_trace = astgen.fn_block != null and if_full.error_token != null; + const need_rl = astgen.nodes_need_rl.contains(node); + const block_ri: ResultInfo = if (need_rl) ri else .{ + .rl = switch (ri.rl) { + .ptr => .{ .ty = try ri.rl.resultType(parent_gz, node, undefined) }, + .inferred_ptr => .none, + else => ri.rl, + }, + .ctx = ri.ctx, + }; + // We need to call `rvalue` to write through to the pointer only if we had a + // result pointer and aren't forwarding it. + const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; + const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); + var block_scope = parent_gz.makeSubBlock(scope); - block_scope.setBreakResultInfo(ri); + block_scope.setBreakResultInfo(block_ri); defer block_scope.unstack(); const payload_is_ref = if (if_full.payload_token) |payload_token| @@ -5967,14 +5697,11 @@ fn ifExpr( }; const then_result = try expr(&then_scope, then_sub_scope, block_scope.break_result_info, then_node); - if (!then_scope.endsWithNoReturn()) { - block_scope.break_count += 1; - } try checkUsed(parent_gz, &then_scope.base, then_sub_scope); - try then_scope.addDbgBlockEnd(); - // We hold off on the break instructions as well as copying the then/else - // instructions into place until we know whether to keep store_to_block_ptr - // instructions or not. 
+ if (!then_scope.endsWithNoReturn()) { + try then_scope.addDbgBlockEnd(); + _ = try then_scope.addBreakWithSrcNode(.@"break", block, then_result, then_node); + } var else_scope = parent_gz.makeSubBlock(scope); defer else_scope.unstack(); @@ -5985,10 +5712,7 @@ fn ifExpr( _ = try else_scope.addSaveErrRetIndex(.always); const else_node = if_full.ast.else_expr; - const else_info: struct { - src: Ast.Node.Index, - result: Zir.Inst.Ref, - } = if (else_node != 0) blk: { + if (else_node != 0) { try else_scope.addDbgBlockBegin(); const sub_scope = s: { if (if_full.error_token) |error_token| { @@ -6016,47 +5740,27 @@ fn ifExpr( break :s &else_scope.base; } }; - const e = try expr(&else_scope, sub_scope, block_scope.break_result_info, else_node); + const else_result = try expr(&else_scope, sub_scope, block_scope.break_result_info, else_node); + try else_scope.addDbgBlockEnd(); if (!else_scope.endsWithNoReturn()) { - block_scope.break_count += 1; - // As our last action before the break, "pop" the error trace if needed if (do_err_trace) - try restoreErrRetIndex(&else_scope, .{ .block = block }, block_scope.break_result_info, else_node, e); + try restoreErrRetIndex(&else_scope, .{ .block = block }, block_scope.break_result_info, else_node, else_result); + _ = try else_scope.addBreakWithSrcNode(.@"break", block, else_result, else_node); } try checkUsed(parent_gz, &else_scope.base, sub_scope); - try else_scope.addDbgBlockEnd(); - break :blk .{ - .src = else_node, - .result = e, - }; - } else .{ - .src = then_node, - .result = switch (ri.rl) { - // Explicitly store void to ptr result loc if there is no else branch - .ptr, .block_ptr => try rvalue(&else_scope, ri, .void_value, node), - else => .none, - }, - }; + } else { + const result = try rvalue(&else_scope, ri, .void_value, node); + _ = try else_scope.addBreak(.@"break", block, result); + } - const result = try finishThenElseBlock( - parent_gz, - ri, - node, - &block_scope, - &then_scope, - &else_scope, - condbr, - cond.bool_bit, - then_result, - then_node, - else_info.result, - else_info.src, - block, - block, - .@"break", - ); - return result; + try setCondBrPayload(condbr, cond.bool_bit, &then_scope, &else_scope); + + if (need_result_rvalue) { + return rvalue(parent_gz, ri, indexToRef(block), node); + } else { + return indexToRef(block); + } } /// Supports `else_scope` stacked on `then_scope`. Unstacks `else_scope` then `then_scope`. 
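
(Illustrative note on the pattern above; the sketch is not taken from the patch.) The `need_rl` preamble gained by `ifExpr` here is the same one this patch adds to `labeledBlockExpr`, `orelseCatchExpr`, `whileExpr`, `forExpr`, and `switchExpr`: when `AstRlAnnotate` did not mark the node, a caller-provided `.ptr` result location is demoted to a `.ty` of its pointee and `.inferred_ptr` to `.none`, so branches break with plain operands; `need_result_rvalue` then records that the block's result must be written back through the original pointer by `rvalue`. A rough sketch of the two cases as hypothetical user code — which nodes the annotation pass actually marks is an assumption made for illustration:

    fn demo(cond: bool) void {
        // Both branches initialize an array in place, so AstRlAnnotate would
        // presumably mark this `if` node: the `.ptr` result location for `a`
        // is forwarded into the branches and each writes its elements directly.
        const a: [2]u32 = if (cond) .{ 1, 2 } else .{ 3, 4 };

        var slot: u32 = 0;
        // A scalar `if` would be left unmarked: the `.ptr` location provided
        // by the assignment is demoted to `.ty`, the branches break with
        // plain operands, and `need_result_rvalue` makes AstGen store the
        // block result through the pointer afterwards.
        slot = if (cond) 1 else 2;
        _ = a;
        _ = slot;
    }

Compared to the deleted `Strategy`/`break_count` machinery, the decision is now made once, up front, from the annotation set, rather than reverse-engineered from the generated ZIR.
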
@@ -6064,17 +5768,15 @@ fn setCondBrPayload( condbr: Zir.Inst.Index, cond: Zir.Inst.Ref, then_scope: *GenZir, - then_break: Zir.Inst.Index, else_scope: *GenZir, - else_break: Zir.Inst.Index, ) !void { defer then_scope.unstack(); defer else_scope.unstack(); const astgen = then_scope.astgen; const then_body = then_scope.instructionsSliceUpto(else_scope); const else_body = else_scope.instructionsSlice(); - const then_body_len = astgen.countBodyLenAfterFixups(then_body) + @intFromBool(then_break != 0); - const else_body_len = astgen.countBodyLenAfterFixups(else_body) + @intFromBool(else_break != 0); + const then_body_len = astgen.countBodyLenAfterFixups(then_body); + const else_body_len = astgen.countBodyLenAfterFixups(else_body); try astgen.extra.ensureUnusedCapacity( astgen.gpa, @typeInfo(Zir.Inst.CondBr).Struct.fields.len + then_body_len + else_body_len, @@ -6087,112 +5789,7 @@ fn setCondBrPayload( .else_body_len = else_body_len, }); astgen.appendBodyWithFixups(then_body); - if (then_break != 0) astgen.extra.appendAssumeCapacity(then_break); astgen.appendBodyWithFixups(else_body); - if (else_break != 0) astgen.extra.appendAssumeCapacity(else_break); -} - -/// Supports `else_scope` stacked on `then_scope`. Unstacks `else_scope` then `then_scope`. -fn setCondBrPayloadElideBlockStorePtr( - condbr: Zir.Inst.Index, - cond: Zir.Inst.Ref, - then_scope: *GenZir, - then_break: Zir.Inst.Index, - then_src_node: Ast.Node.Index, - else_scope: *GenZir, - else_break: Zir.Inst.Index, - else_src_node: Ast.Node.Index, - block_ptr: Zir.Inst.Ref, -) !void { - defer then_scope.unstack(); - defer else_scope.unstack(); - const astgen = then_scope.astgen; - const then_body = then_scope.instructionsSliceUpto(else_scope); - const else_body = else_scope.instructionsSlice(); - const has_then_break = then_break != 0; - const has_else_break = else_break != 0; - const then_body_len = astgen.countBodyLenAfterFixups(then_body) + @intFromBool(has_then_break); - const else_body_len = astgen.countBodyLenAfterFixups(else_body) + @intFromBool(has_else_break); - try astgen.extra.ensureUnusedCapacity( - astgen.gpa, - @typeInfo(Zir.Inst.CondBr).Struct.fields.len + then_body_len + else_body_len + - @typeInfo(Zir.Inst.As).Struct.fields.len * 2, - ); - - const zir_tags = astgen.instructions.items(.tag); - const zir_datas = astgen.instructions.items(.data); - - const condbr_extra = astgen.addExtraAssumeCapacity(Zir.Inst.CondBr{ - .condition = cond, - .then_body_len = then_body_len, - .else_body_len = else_body_len, - }); - zir_datas[condbr].pl_node.payload_index = condbr_extra; - - // The break instructions need to have their operands coerced if the - // switch's result location is a `ty`. In this case we overwrite the - // `store_to_block_ptr` instruction with an `as` instruction and repurpose - // it as the break operand. - // This corresponds to similar code in `labeledBlockExpr`. - var then_as_inst: Zir.Inst.Index = 0; - for (then_body) |src_inst| { - if (zir_tags[src_inst] == .store_to_block_ptr and - zir_datas[src_inst].bin.lhs == block_ptr) - { - if (then_scope.rl_ty_inst != .none and has_then_break) { - then_as_inst = src_inst; - } else { - astgen.extra.items[ - condbr_extra + std.meta.fieldIndex(Zir.Inst.CondBr, "then_body_len").? 
- ] -= 1; - continue; - } - } - appendPossiblyRefdBodyInst(astgen, &astgen.extra, src_inst); - } - if (has_then_break) astgen.extra.appendAssumeCapacity(then_break); - - var else_as_inst: Zir.Inst.Index = 0; - for (else_body) |src_inst| { - if (zir_tags[src_inst] == .store_to_block_ptr and - zir_datas[src_inst].bin.lhs == block_ptr) - { - if (else_scope.rl_ty_inst != .none and has_else_break) { - else_as_inst = src_inst; - } else { - astgen.extra.items[ - condbr_extra + std.meta.fieldIndex(Zir.Inst.CondBr, "else_body_len").? - ] -= 1; - continue; - } - } - appendPossiblyRefdBodyInst(astgen, &astgen.extra, src_inst); - } - if (has_else_break) astgen.extra.appendAssumeCapacity(else_break); - - if (then_as_inst != 0) { - zir_tags[then_as_inst] = .as_node; - zir_datas[then_as_inst] = .{ .pl_node = .{ - .src_node = then_scope.nodeIndexToRelative(then_src_node), - .payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.As{ - .dest_type = then_scope.rl_ty_inst, - .operand = zir_datas[then_break].@"break".operand, - }), - } }; - zir_datas[then_break].@"break".operand = indexToRef(then_as_inst); - } - - if (else_as_inst != 0) { - zir_tags[else_as_inst] = .as_node; - zir_datas[else_as_inst] = .{ .pl_node = .{ - .src_node = else_scope.nodeIndexToRelative(else_src_node), - .payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.As{ - .dest_type = else_scope.rl_ty_inst, - .operand = zir_datas[else_break].@"break".operand, - }), - } }; - zir_datas[else_break].@"break".operand = indexToRef(else_as_inst); - } } fn whileExpr( @@ -6207,6 +5804,20 @@ fn whileExpr( const tree = astgen.tree; const token_tags = tree.tokens.items(.tag); + const need_rl = astgen.nodes_need_rl.contains(node); + const block_ri: ResultInfo = if (need_rl) ri else .{ + .rl = switch (ri.rl) { + .ptr => .{ .ty = try ri.rl.resultType(parent_gz, node, undefined) }, + .inferred_ptr => .none, + else => ri.rl, + }, + .ctx = ri.ctx, + }; + // We need to call `rvalue` to write through to the pointer only if we had a + // result pointer and aren't forwarding it. 
+ const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; + const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); + if (while_full.label_token) |label_token| { try astgen.checkLabelRedefinition(scope, label_token); } @@ -6218,9 +5829,8 @@ fn whileExpr( var loop_scope = parent_gz.makeSubBlock(scope); loop_scope.is_inline = is_inline; - loop_scope.setBreakResultInfo(ri); + loop_scope.setBreakResultInfo(block_ri); defer loop_scope.unstack(); - defer loop_scope.labeled_breaks.deinit(astgen.gpa); var cond_scope = parent_gz.makeSubBlock(&loop_scope.base); defer cond_scope.unstack(); @@ -6378,19 +5988,16 @@ fn whileExpr( try checkUsed(parent_gz, &then_scope.base, then_sub_scope); const break_tag: Zir.Inst.Tag = if (is_inline) .break_inline else .@"break"; if (!continue_scope.endsWithNoReturn()) { - const break_inst = try continue_scope.makeBreak(break_tag, continue_block, .void_value); - try then_scope.instructions.append(astgen.gpa, break_inst); + _ = try continue_scope.addBreak(break_tag, continue_block, .void_value); } try continue_scope.setBlockBody(continue_block); + _ = try then_scope.addBreak(break_tag, cond_block, .void_value); var else_scope = parent_gz.makeSubBlock(&cond_scope.base); defer else_scope.unstack(); const else_node = while_full.ast.else_expr; - const else_info: struct { - src: Ast.Node.Index, - result: Zir.Inst.Ref, - } = if (else_node != 0) blk: { + if (else_node != 0) { try else_scope.addDbgBlockBegin(); const sub_scope = s: { if (while_full.error_token) |error_token| { @@ -6427,45 +6034,33 @@ fn whileExpr( _ = try addEnsureResult(&else_scope, else_result, else_node); } - if (!else_scope.endsWithNoReturn()) { - loop_scope.break_count += 1; - } try checkUsed(parent_gz, &else_scope.base, sub_scope); try else_scope.addDbgBlockEnd(); - break :blk .{ - .src = else_node, - .result = else_result, - }; - } else .{ - .src = then_node, - .result = .none, - }; + if (!else_scope.endsWithNoReturn()) { + _ = try else_scope.addBreakWithSrcNode(break_tag, loop_block, else_result, else_node); + } + } else { + const result = try rvalue(&else_scope, ri, .void_value, node); + _ = try else_scope.addBreak(break_tag, loop_block, result); + } if (loop_scope.label) |some| { if (!some.used) { try astgen.appendErrorTok(some.token, "unused while loop label", .{}); } } - const result = try finishThenElseBlock( - parent_gz, - ri, - node, - &loop_scope, - &then_scope, - &else_scope, - condbr, - cond.bool_bit, - .void_value, - then_node, - else_info.result, - else_info.src, - loop_block, - cond_block, - break_tag, - ); + + try setCondBrPayload(condbr, cond.bool_bit, &then_scope, &else_scope); + + const result = if (need_result_rvalue) + try rvalue(parent_gz, ri, indexToRef(loop_block), node) + else + indexToRef(loop_block); + if (is_statement) { _ = try parent_gz.addUnNode(.ensure_result_used, result, node); } + return result; } @@ -6483,6 +6078,20 @@ fn forExpr( try astgen.checkLabelRedefinition(scope, label_token); } + const need_rl = astgen.nodes_need_rl.contains(node); + const block_ri: ResultInfo = if (need_rl) ri else .{ + .rl = switch (ri.rl) { + .ptr => .{ .ty = try ri.rl.resultType(parent_gz, node, undefined) }, + .inferred_ptr => .none, + else => ri.rl, + }, + .ctx = ri.ctx, + }; + // We need to call `rvalue` to write through to the pointer only if we had a + // result pointer and aren't forwarding it. 
+ const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; + const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); + const is_inline = for_full.inline_token != null; const tree = astgen.tree; const token_tags = tree.tokens.items(.tag); @@ -6603,9 +6212,8 @@ fn forExpr( var loop_scope = parent_gz.makeSubBlock(scope); loop_scope.is_inline = is_inline; - loop_scope.setBreakResultInfo(ri); + loop_scope.setBreakResultInfo(block_ri); defer loop_scope.unstack(); - defer loop_scope.labeled_breaks.deinit(gpa); // We need to finish loop_scope later once we have the deferred refs from then_scope. However, the // load must be removed from instructions in the meantime or it appears to be part of parent_gz. @@ -6709,14 +6317,15 @@ fn forExpr( try checkUsed(parent_gz, &then_scope.base, then_sub_scope); try then_scope.addDbgBlockEnd(); + const break_tag: Zir.Inst.Tag = if (is_inline) .break_inline else .@"break"; + + _ = try then_scope.addBreak(break_tag, cond_block, .void_value); + var else_scope = parent_gz.makeSubBlock(&cond_scope.base); defer else_scope.unstack(); const else_node = for_full.ast.else_expr; - const else_info: struct { - src: Ast.Node.Index, - result: Zir.Inst.Ref, - } = if (else_node != 0) blk: { + if (else_node != 0) { const sub_scope = &else_scope.base; // Remove the continue block and break block so that `continue` and `break` // control flow apply to outer loops; not this one. @@ -6726,42 +6335,21 @@ fn forExpr( if (is_statement) { _ = try addEnsureResult(&else_scope, else_result, else_node); } - if (!else_scope.endsWithNoReturn()) { - loop_scope.break_count += 1; + _ = try else_scope.addBreakWithSrcNode(break_tag, loop_block, else_result, else_node); } - break :blk .{ - .src = else_node, - .result = else_result, - }; - } else .{ - .src = then_node, - .result = .none, - }; + } else { + const result = try rvalue(&else_scope, ri, .void_value, node); + _ = try else_scope.addBreak(break_tag, loop_block, result); + } if (loop_scope.label) |some| { if (!some.used) { try astgen.appendErrorTok(some.token, "unused for loop label", .{}); } } - const break_tag: Zir.Inst.Tag = if (is_inline) .break_inline else .@"break"; - const result = try finishThenElseBlock( - parent_gz, - ri, - node, - &loop_scope, - &then_scope, - &else_scope, - condbr, - cond, - then_result, - then_node, - else_info.result, - else_info.src, - loop_block, - cond_block, - break_tag, - ); + + try setCondBrPayload(condbr, cond, &then_scope, &else_scope); // then_block and else_block unstacked now, can resurrect loop_scope to finally finish it { @@ -6780,9 +6368,11 @@ fn forExpr( try loop_scope.setBlockBody(loop_block); } - if (ri.rl.strategy(&loop_scope).tag == .break_void and loop_scope.break_count == 0) { - _ = try rvalue(parent_gz, ri, .void_value, node); - } + const result = if (need_result_rvalue) + try rvalue(parent_gz, ri, indexToRef(loop_block), node) + else + indexToRef(loop_block); + if (is_statement) { _ = try parent_gz.addUnNode(.ensure_result_used, result, node); } @@ -6806,6 +6396,20 @@ fn switchExpr( const extra = tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange); const case_nodes = tree.extra_data[extra.start..extra.end]; + const need_rl = astgen.nodes_need_rl.contains(switch_node); + const block_ri: ResultInfo = if (need_rl) ri else .{ + .rl = switch (ri.rl) { + .ptr => .{ .ty = try ri.rl.resultType(parent_gz, switch_node, undefined) }, + .inferred_ptr => .none, + else => ri.rl, + }, + .ctx = ri.ctx, + }; + // We need to call `rvalue` to write through to the pointer only 
if we had a + // result pointer and aren't forwarding it. + const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; + const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); + // We perform two passes over the AST. This first pass is to collect information // for the following variables, make note of the special prong AST node index, // and bail out with a compile error if there are multiple special prongs present. @@ -6952,7 +6556,7 @@ fn switchExpr( var block_scope = parent_gz.makeSubBlock(scope); // block_scope not used for collecting instructions block_scope.instructions_top = GenZir.unstacked_top; - block_scope.setBreakResultInfo(ri); + block_scope.setBreakResultInfo(block_ri); // Sema expects a dbg_stmt immediately before switch_block(_ref) try emitDbgStmt(parent_gz, operand_lc); @@ -6973,7 +6577,7 @@ fn switchExpr( .opcode = .value_placeholder, .small = undefined, .operand = undefined, - } }, // TODO rename opcode + } }, }); break :tag_inst inst; } else undefined; @@ -7122,7 +6726,6 @@ fn switchExpr( try checkUsed(parent_gz, &case_scope.base, sub_scope); try case_scope.addDbgBlockEnd(); if (!parent_gz.refIsNoReturn(case_result)) { - block_scope.break_count += 1; _ = try case_scope.addBreakWithSrcNode(.@"break", switch_block, case_result, target_expr_node); } @@ -7195,119 +6798,33 @@ fn switchExpr( } const zir_datas = astgen.instructions.items(.data); - const zir_tags = astgen.instructions.items(.tag); - zir_datas[switch_block].pl_node.payload_index = payload_index; - const strat = ri.rl.strategy(&block_scope); - inline for (.{ .body, .breaks }) |pass| { - for (payloads.items[case_table_start..case_table_end], 0..) |start_index, i| { - var body_len_index = start_index; - var end_index = start_index; - const table_index = case_table_start + i; - if (table_index < scalar_case_table) { - end_index += 1; - } else if (table_index < multi_case_table) { - body_len_index += 1; - end_index += 2; - } else { - body_len_index += 2; - const items_len = payloads.items[start_index]; - const ranges_len = payloads.items[start_index + 1]; - end_index += 3 + items_len + 2 * ranges_len; - } - - const prong_info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(payloads.items[body_len_index]); - end_index += prong_info.body_len; - - switch (strat.tag) { - .break_operand => blk: { - // Switch expressions return `true` for `nodeMayNeedMemoryLocation` thus - // `elide_store_to_block_ptr_instructions` will either be true, - // or all prongs are noreturn. - if (!strat.elide_store_to_block_ptr_instructions) - break :blk; - - // There will necessarily be a store_to_block_ptr for - // all prongs, except for prongs that ended with a noreturn instruction. - // Elide all the `store_to_block_ptr` instructions. - - // The break instructions need to have their operands coerced if the - // switch's result location is a `ty`. In this case we overwrite the - // `store_to_block_ptr` instruction with an `as` instruction and repurpose - // it as the break operand. 
- if (prong_info.body_len < 2) - break :blk; - - var store_index = end_index - 2; - while (true) : (store_index -= 1) switch (zir_tags[payloads.items[store_index]]) { - .dbg_block_end, .dbg_block_begin, .dbg_stmt, .dbg_var_val, .dbg_var_ptr => {}, - else => break, - }; - const store_inst = payloads.items[store_index]; - if (zir_tags[store_inst] != .store_to_block_ptr or - zir_datas[store_inst].bin.lhs != block_scope.rl_ptr) - break :blk; - const break_inst = payloads.items[end_index - 1]; - if (block_scope.rl_ty_inst != .none) { - if (pass == .breaks) { - const break_data = &zir_datas[break_inst].@"break"; - const break_src: i32 = @bitCast(astgen.extra.items[ - break_data.payload_index + - std.meta.fieldIndex(Zir.Inst.Break, "operand_src_node").? - ]); - if (break_src == Zir.Inst.Break.no_src_node) { - zir_tags[store_inst] = .as; - zir_datas[store_inst].bin = .{ - .lhs = block_scope.rl_ty_inst, - .rhs = break_data.operand, - }; - } else { - zir_tags[store_inst] = .as_node; - zir_datas[store_inst] = .{ .pl_node = .{ - .src_node = break_src, - .payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.As{ - .dest_type = block_scope.rl_ty_inst, - .operand = break_data.operand, - }), - } }; - } - break_data.operand = indexToRef(store_inst); - } - } else { - if (pass == .body) { - payloads.items[body_len_index] -= 1; - astgen.extra.appendSliceAssumeCapacity( - payloads.items[start_index .. end_index - 2], - ); - astgen.extra.appendAssumeCapacity(break_inst); - } - continue; - } - }, - .break_void => if (pass == .breaks) { - assert(!strat.elide_store_to_block_ptr_instructions); - const last_inst = payloads.items[end_index - 1]; - if (zir_tags[last_inst] == .@"break") { - const break_data = &zir_datas[last_inst].@"break"; - const block_inst = astgen.extra.items[ - break_data.payload_index + - std.meta.fieldIndex(Zir.Inst.Break, "block_inst").? - ]; - if (block_inst == switch_block) break_data.operand = .void_value; - } - }, - } - - if (pass == .body) - astgen.extra.appendSliceAssumeCapacity(payloads.items[start_index..end_index]); + for (payloads.items[case_table_start..case_table_end], 0..) 
|start_index, i| { + var body_len_index = start_index; + var end_index = start_index; + const table_index = case_table_start + i; + if (table_index < scalar_case_table) { + end_index += 1; + } else if (table_index < multi_case_table) { + body_len_index += 1; + end_index += 2; + } else { + body_len_index += 2; + const items_len = payloads.items[start_index]; + const ranges_len = payloads.items[start_index + 1]; + end_index += 3 + items_len + 2 * ranges_len; } + const prong_info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(payloads.items[body_len_index]); + end_index += prong_info.body_len; + astgen.extra.appendSliceAssumeCapacity(payloads.items[start_index..end_index]); } - const block_ref = indexToRef(switch_block); - if (strat.tag == .break_operand and strat.elide_store_to_block_ptr_instructions and ri.rl != .ref) - return rvalue(parent_gz, ri, block_ref, switch_node); - return block_ref; + if (need_result_rvalue) { + return rvalue(parent_gz, ri, indexToRef(switch_block), switch_node); + } else { + return indexToRef(switch_block); + } } fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { @@ -7372,7 +6889,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref return Zir.Inst.Ref.unreachable_value; } - const ri: ResultInfo = if (nodeMayNeedMemoryLocation(tree, operand_node, true)) .{ + const ri: ResultInfo = if (astgen.nodes_need_rl.contains(node)) .{ .rl = .{ .ptr = .{ .inst = try gz.addNode(.ret_ptr, node) } }, .ctx = .@"return", } else .{ @@ -7445,7 +6962,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref try emitDbgStmt(&else_scope, ret_lc); try else_scope.addRet(ri, operand, node); - try setCondBrPayload(condbr, is_non_err, &then_scope, 0, &else_scope, 0); + try setCondBrPayload(condbr, is_non_err, &then_scope, &else_scope); return Zir.Inst.Ref.unreachable_value; }, @@ -8003,9 +7520,6 @@ fn as( .inferred_ptr => |result_ptr| { return asRlPtr(gz, scope, ri, node, result_ptr, rhs, dest_type); }, - .block_ptr => |block_scope| { - return asRlPtr(gz, scope, ri, node, block_scope.rl_ptr, rhs, dest_type); - }, } } @@ -8032,7 +7546,7 @@ fn unionInit( } fn asRlPtr( - parent_gz: *GenZir, + gz: *GenZir, scope: *Scope, ri: ResultInfo, src_node: Ast.Node.Index, @@ -8040,11 +7554,13 @@ fn asRlPtr( operand_node: Ast.Node.Index, dest_type: Zir.Inst.Ref, ) InnerError!Zir.Inst.Ref { - var as_scope = try parent_gz.makeCoercionScope(scope, dest_type, result_ptr, src_node); - defer as_scope.unstack(); - - const result = try reachableExpr(&as_scope, &as_scope.base, .{ .rl = .{ .block_ptr = &as_scope } }, operand_node, src_node); - return as_scope.finishCoercion(parent_gz, ri, operand_node, result, dest_type); + if (gz.astgen.nodes_need_rl.contains(src_node)) { + const casted_ptr = try gz.addPlNode(.coerce_result_ptr, src_node, Zir.Inst.Bin{ .lhs = dest_type, .rhs = result_ptr }); + return reachableExpr(gz, scope, .{ .rl = .{ .ptr = .{ .inst = casted_ptr } } }, operand_node, src_node); + } else { + const result = try reachableExpr(gz, scope, .{ .rl = .{ .ty = dest_type } }, operand_node, src_node); + return rvalue(gz, ri, result, src_node); + } } fn bitCast( @@ -9226,7 +8742,6 @@ fn callExpr( defer arg_block.unstack(); // `call_inst` is reused to provide the param type. 
- arg_block.rl_ty_inst = call_inst; const arg_ref = try expr(&arg_block, &arg_block.base, .{ .rl = .{ .coerced_ty = call_inst }, .ctx = .fn_arg }, param_node); _ = try arg_block.addBreakWithSrcNode(.break_inline, call_index, arg_ref, param_node); @@ -9420,250 +8935,6 @@ fn nodeIsTriviallyZero(tree: *const Ast, node: Ast.Node.Index) bool { } } -fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index, have_res_ty: bool) bool { - const node_tags = tree.nodes.items(.tag); - const node_datas = tree.nodes.items(.data); - const main_tokens = tree.nodes.items(.main_token); - const token_tags = tree.tokens.items(.tag); - - var node = start_node; - while (true) { - switch (node_tags[node]) { - .root, - .@"usingnamespace", - .test_decl, - .switch_case, - .switch_case_inline, - .switch_case_one, - .switch_case_inline_one, - .container_field_init, - .container_field_align, - .container_field, - .asm_output, - .asm_input, - => unreachable, - - .@"return", - .@"break", - .@"continue", - .bit_not, - .bool_not, - .global_var_decl, - .local_var_decl, - .simple_var_decl, - .aligned_var_decl, - .@"defer", - .@"errdefer", - .address_of, - .optional_type, - .negation, - .negation_wrap, - .@"resume", - .array_type, - .array_type_sentinel, - .ptr_type_aligned, - .ptr_type_sentinel, - .ptr_type, - .ptr_type_bit_range, - .@"suspend", - .fn_proto_simple, - .fn_proto_multi, - .fn_proto_one, - .fn_proto, - .fn_decl, - .anyframe_type, - .anyframe_literal, - .number_literal, - .enum_literal, - .string_literal, - .multiline_string_literal, - .char_literal, - .unreachable_literal, - .identifier, - .error_set_decl, - .container_decl, - .container_decl_trailing, - .container_decl_two, - .container_decl_two_trailing, - .container_decl_arg, - .container_decl_arg_trailing, - .tagged_union, - .tagged_union_trailing, - .tagged_union_two, - .tagged_union_two_trailing, - .tagged_union_enum_tag, - .tagged_union_enum_tag_trailing, - .@"asm", - .asm_simple, - .add, - .add_wrap, - .add_sat, - .array_cat, - .array_mult, - .assign, - .assign_bit_and, - .assign_bit_or, - .assign_shl, - .assign_shl_sat, - .assign_shr, - .assign_bit_xor, - .assign_div, - .assign_sub, - .assign_sub_wrap, - .assign_sub_sat, - .assign_mod, - .assign_add, - .assign_add_wrap, - .assign_add_sat, - .assign_mul, - .assign_mul_wrap, - .assign_mul_sat, - .bang_equal, - .bit_and, - .bit_or, - .shl, - .shl_sat, - .shr, - .bit_xor, - .bool_and, - .bool_or, - .div, - .equal_equal, - .error_union, - .greater_or_equal, - .greater_than, - .less_or_equal, - .less_than, - .merge_error_sets, - .mod, - .mul, - .mul_wrap, - .mul_sat, - .switch_range, - .for_range, - .field_access, - .sub, - .sub_wrap, - .sub_sat, - .slice, - .slice_open, - .slice_sentinel, - .deref, - .array_access, - .error_value, - .while_simple, // This variant cannot have an else expression. - .while_cont, // This variant cannot have an else expression. - .for_simple, // This variant cannot have an else expression. - .if_simple, // This variant cannot have an else expression. - => return false, - - // Forward the question to the LHS sub-expression. - .grouped_expression, - .@"try", - .@"await", - .@"comptime", - .@"nosuspend", - .unwrap_optional, - => node = node_datas[node].lhs, - - // Forward the question to the RHS sub-expression. - .@"catch", - .@"orelse", - => node = node_datas[node].rhs, - - // Array and struct init exprs write to result locs, but anon literals do not. 
- .array_init_one, - .array_init_one_comma, - .struct_init_one, - .struct_init_one_comma, - .array_init, - .array_init_comma, - .struct_init, - .struct_init_comma, - => return have_res_ty or node_datas[node].lhs != 0, - - // Anon literals do not need result location. - .array_init_dot_two, - .array_init_dot_two_comma, - .array_init_dot, - .array_init_dot_comma, - .struct_init_dot_two, - .struct_init_dot_two_comma, - .struct_init_dot, - .struct_init_dot_comma, - => return have_res_ty, - - // True because depending on comptime conditions, sub-expressions - // may be the kind that need memory locations. - .@"while", // This variant always has an else expression. - .@"if", // This variant always has an else expression. - .@"for", // This variant always has an else expression. - .@"switch", - .switch_comma, - .async_call_one, - .async_call_one_comma, - .async_call, - .async_call_comma, - => return true, - - // https://github.com/ziglang/zig/issues/2765 would change this. - .call_one, - .call_one_comma, - .call, - .call_comma, - => return false, - - .block_two, - .block_two_semicolon, - .block, - .block_semicolon, - => { - const lbrace = main_tokens[node]; - if (token_tags[lbrace - 1] == .colon) { - // Labeled blocks may need a memory location to forward - // to their break statements. - return true; - } else { - return false; - } - }, - - .builtin_call_two, .builtin_call_two_comma => { - const builtin_token = main_tokens[node]; - const builtin_name = tree.tokenSlice(builtin_token); - // If the builtin is an invalid name, we don't cause an error here; instead - // let it pass, and the error will be "invalid builtin function" later. - const builtin_info = BuiltinFn.list.get(builtin_name) orelse return false; - switch (builtin_info.needs_mem_loc) { - .never => return false, - .always => return true, - .forward0 => node = node_datas[node].lhs, - .forward1 => node = node_datas[node].rhs, - } - // Missing builtin arg is not a parsing error, expect an error later. - if (node == 0) return false; - }, - - .builtin_call, .builtin_call_comma => { - const params = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs]; - const builtin_token = main_tokens[node]; - const builtin_name = tree.tokenSlice(builtin_token); - // If the builtin is an invalid name, we don't cause an error here; instead - // let it pass, and the error will be "invalid builtin function" later. - const builtin_info = BuiltinFn.list.get(builtin_name) orelse return false; - switch (builtin_info.needs_mem_loc) { - .never => return false, - .always => return true, - .forward0 => node = params[0], - .forward1 => node = params[1], - } - // Missing builtin arg is not a parsing error, expect an error later. - if (node == 0) return false; - }, - } - } -} - fn nodeMayAppendToErrorTrace(tree: *const Ast, start_node: Ast.Node.Index) bool { const node_tags = tree.nodes.items(.tag); const node_datas = tree.nodes.items(.data); @@ -10450,7 +9721,7 @@ fn rvalue( .discard => { // Emit a compile error for discarding error values. _ = try gz.addUnNode(.ensure_result_non_error, result, src_node); - return result; + return .void_value; }, .ref => { // We need a pointer but we have a value. 
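
(Illustrative note, not part of the patch.) This `rvalue` hunk and the one below change the function's contract: whenever the result location consumes the value — a discard, a store through a concrete `.ptr`, or a store to an inferred alloc — `rvalue` now returns `.void_value` instead of echoing `result`, so callers can no longer reuse a value that the location has already claimed. A toy model of the new contract, using hypothetical types for illustration only (not the compiler's):

    const std = @import("std");

    // `?u32` stands in for the real return type; `null` plays the role of
    // `.void_value`, i.e. "the result location consumed the value".
    const Loc = union(enum) { none, discard, ptr: *u32 };

    fn rvalueModel(loc: Loc, value: u32) ?u32 {
        switch (loc) {
            .none => return value, // nothing consumed it; pass the value on
            .discard => return null, // consumed: the expression is void
            .ptr => |p| {
                p.* = value; // the store itself communicates the result...
                return null; // ...so the caller gets void, not the value
            },
        }
    }

    test "consuming result locations yield void" {
        var slot: u32 = 0;
        try std.testing.expectEqual(@as(?u32, 7), rvalueModel(.none, 7));
        try std.testing.expectEqual(@as(?u32, null), rvalueModel(.{ .ptr = &slot }, 7));
        try std.testing.expectEqual(@as(u32, 7), slot);
    }
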
@@ -10561,16 +9832,11 @@ fn rvalue( .lhs = ptr_res.inst, .rhs = result, }); - return result; + return .void_value; }, .inferred_ptr => |alloc| { _ = try gz.addBin(.store_to_inferred_ptr, alloc, result); - return result; - }, - .block_ptr => |block_scope| { - block_scope.rvalue_rl_count += 1; - _ = try gz.addBin(.store_to_block_ptr, block_scope.rl_ptr, result); - return result; + return .void_value; }, } } @@ -11260,22 +10526,6 @@ const GenZir = struct { continue_block: Zir.Inst.Index = 0, /// Only valid when setBreakResultInfo is called. break_result_info: AstGen.ResultInfo = undefined, - /// When a block has a pointer result location, here it is. - rl_ptr: Zir.Inst.Ref = .none, - /// When a block has a type result location, here it is. - rl_ty_inst: Zir.Inst.Ref = .none, - /// Keeps track of how many branches of a block did not actually - /// consume the result location. astgen uses this to figure out - /// whether to rely on break instructions or writing to the result - /// pointer for the result instruction. - rvalue_rl_count: usize = 0, - /// Keeps track of how many break instructions there are. When astgen is finished - /// with a block, it can check this against rvalue_rl_count to find out whether - /// the break instructions should be downgraded to break_void. - break_count: usize = 0, - /// Tracks `break :foo bar` instructions so they can possibly be elided later if - /// the labeled block ends up not needing a result location pointer. - labeled_breaks: ArrayListUnmanaged(struct { br: Zir.Inst.Index, search: Zir.Inst.Index }) = .{}, suspend_node: Ast.Node.Index = 0, nosuspend_node: Ast.Node.Index = 0, @@ -11327,7 +10577,6 @@ const GenZir = struct { .decl_node_index = gz.decl_node_index, .decl_line = gz.decl_line, .parent = scope, - .rl_ty_inst = gz.rl_ty_inst, .astgen = gz.astgen, .suspend_node = gz.suspend_node, .nosuspend_node = gz.nosuspend_node, @@ -11337,67 +10586,6 @@ const GenZir = struct { }; } - fn makeCoercionScope( - parent_gz: *GenZir, - scope: *Scope, - dest_type: Zir.Inst.Ref, - result_ptr: Zir.Inst.Ref, - src_node: Ast.Node.Index, - ) !GenZir { - // Detect whether this expr() call goes into rvalue() to store the result into the - // result location. If it does, elide the coerce_result_ptr instruction - // as well as the store instruction, instead passing the result as an rvalue. - var as_scope = parent_gz.makeSubBlock(scope); - errdefer as_scope.unstack(); - as_scope.rl_ptr = try as_scope.addPlNode(.coerce_result_ptr, src_node, Zir.Inst.Bin{ .lhs = dest_type, .rhs = result_ptr }); - - // `rl_ty_inst` needs to be set in case the stores to `rl_ptr` are eliminated. - as_scope.rl_ty_inst = dest_type; - - return as_scope; - } - - /// Assumes `as_scope` is stacked immediately on top of `parent_gz`. Unstacks `as_scope`. - fn finishCoercion( - as_scope: *GenZir, - parent_gz: *GenZir, - ri: ResultInfo, - src_node: Ast.Node.Index, - result: Zir.Inst.Ref, - dest_type: Zir.Inst.Ref, - ) InnerError!Zir.Inst.Ref { - assert(as_scope.instructions == parent_gz.instructions); - const astgen = as_scope.astgen; - if (as_scope.rvalue_rl_count == 1) { - // Busted! This expression didn't actually need a pointer. 
- const zir_tags = astgen.instructions.items(.tag); - const zir_datas = astgen.instructions.items(.data); - var src: usize = as_scope.instructions_top; - var dst: usize = src; - while (src < as_scope.instructions.items.len) : (src += 1) { - const src_inst = as_scope.instructions.items[src]; - if (indexToRef(src_inst) == as_scope.rl_ptr) continue; - if (zir_tags[src_inst] == .store_to_block_ptr) { - if (zir_datas[src_inst].bin.lhs == as_scope.rl_ptr) continue; - } - as_scope.instructions.items[dst] = src_inst; - dst += 1; - } - parent_gz.instructions.items.len -= src - dst; - as_scope.instructions_top = GenZir.unstacked_top; - // as_scope now unstacked, can add new instructions to parent_gz - const casted_result = try parent_gz.addPlNode(.as_node, src_node, Zir.Inst.As{ - .dest_type = dest_type, - .operand = result, - }); - return rvalue(parent_gz, ri, casted_result, src_node); - } else { - // implicitly move all as_scope instructions to parent_gz - as_scope.instructions_top = GenZir.unstacked_top; - return result; - } - } - const Label = struct { token: Ast.TokenIndex, block_inst: Zir.Inst.Index, @@ -11438,46 +10626,20 @@ const GenZir = struct { // ZIR needs to be generated. In the former case we rely on storing to the // pointer to communicate the result, and use breakvoid; in the latter case // the block break instructions will have the result values. - // One more complication: when the result location is a pointer, we detect - // the scenario where the result location is not consumed. In this case - // we emit ZIR for the block break instructions to have the result values, - // and then rvalue() on that to pass the value to the result location. switch (parent_ri.rl) { .coerced_ty => |ty_inst| { - // Type coercion needs to happend before breaks. - gz.rl_ty_inst = ty_inst; - gz.break_result_info = .{ .rl = .{ .ty = ty_inst } }; + // Type coercion needs to happen before breaks. + gz.break_result_info = .{ .rl = .{ .ty = ty_inst }, .ctx = parent_ri.ctx }; }, - .ty => |ty_inst| { - gz.rl_ty_inst = ty_inst; - gz.break_result_info = parent_ri; - }, - - .none, .ref => { - gz.rl_ty_inst = .none; - gz.break_result_info = parent_ri; - }, - .discard => { - gz.rl_ty_inst = .none; + // We don't forward the result context here. This prevents + // "unnecessary discard" errors from being caused by expressions + // far from the actual discard, such as a `break` from a + // discarded block. gz.break_result_info = .{ .rl = .discard }; }, - - .ptr => |ptr_res| { - gz.rl_ty_inst = .none; - gz.break_result_info = .{ .rl = .{ .ptr = .{ .inst = ptr_res.inst } }, .ctx = parent_ri.ctx }; - }, - - .inferred_ptr => |ptr| { - gz.rl_ty_inst = .none; - gz.rl_ptr = ptr; - gz.break_result_info = .{ .rl = .{ .block_ptr = gz }, .ctx = parent_ri.ctx }; - }, - - .block_ptr => |parent_block_scope| { - gz.rl_ty_inst = parent_block_scope.rl_ty_inst; - gz.rl_ptr = parent_block_scope.rl_ptr; - gz.break_result_info = .{ .rl = .{ .block_ptr = gz }, .ctx = parent_ri.ctx }; + else => { + gz.break_result_info = parent_ri; }, } } diff --git a/src/AstRlAnnotate.zig b/src/AstRlAnnotate.zig new file mode 100644 index 0000000000..2a84de64d0 --- /dev/null +++ b/src/AstRlAnnotate.zig @@ -0,0 +1,1088 @@ +//! AstRlAnnotate is a simple pass which runs over the AST before AstGen to +//! determine which expressions require result locations. +//! +//! In some cases, AstGen can choose whether to provide a result pointer or to +//! just use standard `break` instructions from a block. The latter choice can +//! 
result in more efficient ZIR and runtime code, but does not allow for RLS to +//! occur. Thus, we want to provide a real result pointer (from an alloc) only +//! when necessary. +//! +//! To achieve this, we need to determine which expressions require a result +//! pointer. This pass is responsible for analyzing all syntax forms which may +//! provide a result location and, if sub-expressions consume this result +//! pointer non-trivially (e.g. writing through field pointers), marking the +//! node as requiring a result location. + +const std = @import("std"); +const AstRlAnnotate = @This(); +const Ast = std.zig.Ast; +const Allocator = std.mem.Allocator; +const AutoHashMapUnmanaged = std.AutoHashMapUnmanaged; +const BuiltinFn = @import("BuiltinFn.zig"); +const assert = std.debug.assert; + +gpa: Allocator, +arena: Allocator, +tree: *const Ast, + +/// Certain nodes are placed in this set under the following conditions: +/// * if-else: either branch consumes the result location +/// * labeled block: any break consumes the result location +/// * switch: any prong consumes the result location +/// * orelse/catch: the RHS expression consumes the result location +/// * while/for: any break consumes the result location +/// * @as: the second operand consumes the result location +/// * const: the init expression consumes the result location +/// * return: the return expression consumes the result location +nodes_need_rl: RlNeededSet = .{}, + +pub const RlNeededSet = AutoHashMapUnmanaged(Ast.Node.Index, void); + +const ResultInfo = packed struct { + /// Do we have a known result type? + have_type: bool, + /// Do we (potentially) have a result pointer? Note that this pointer's type + /// may not be known due to it being an inferred alloc. + have_ptr: bool, + + const none: ResultInfo = .{ .have_type = false, .have_ptr = false }; + const typed_ptr: ResultInfo = .{ .have_type = true, .have_ptr = true }; + const inferred_ptr: ResultInfo = .{ .have_type = false, .have_ptr = true }; + const type_only: ResultInfo = .{ .have_type = true, .have_ptr = false }; +}; + +/// A labeled block or a loop. When this block is broken from, `consumes_res_ptr` +/// should be set if the break expression consumed the result pointer. +const Block = struct { + parent: ?*Block, + label: ?[]const u8, + is_loop: bool, + ri: ResultInfo, + consumes_res_ptr: bool, +}; + +pub fn annotate(gpa: Allocator, arena: Allocator, tree: Ast) Allocator.Error!RlNeededSet { + var astrl: AstRlAnnotate = .{ + .gpa = gpa, + .arena = arena, + .tree = &tree, + }; + defer astrl.deinit(gpa); + + if (tree.errors.len != 0) { + // We can't perform analysis on a broken AST. AstGen will not run in + // this case.
+ return .{}; + } + + for (tree.containerDeclRoot().ast.members) |member_node| { + _ = try astrl.expr(member_node, null, ResultInfo.none); + } + + return astrl.nodes_need_rl.move(); +} + +fn deinit(astrl: *AstRlAnnotate, gpa: Allocator) void { + astrl.nodes_need_rl.deinit(gpa); +} + +fn containerDecl( + astrl: *AstRlAnnotate, + block: ?*Block, + full: Ast.full.ContainerDecl, +) !void { + const tree = astrl.tree; + const token_tags = tree.tokens.items(.tag); + switch (token_tags[full.ast.main_token]) { + .keyword_struct => { + if (full.ast.arg != 0) { + _ = try astrl.expr(full.ast.arg, block, ResultInfo.type_only); + } + for (full.ast.members) |member_node| { + _ = try astrl.expr(member_node, block, ResultInfo.none); + } + }, + .keyword_union => { + if (full.ast.arg != 0) { + _ = try astrl.expr(full.ast.arg, block, ResultInfo.type_only); + } + for (full.ast.members) |member_node| { + _ = try astrl.expr(member_node, block, ResultInfo.none); + } + }, + .keyword_enum => { + if (full.ast.arg != 0) { + _ = try astrl.expr(full.ast.arg, block, ResultInfo.type_only); + } + for (full.ast.members) |member_node| { + _ = try astrl.expr(member_node, block, ResultInfo.none); + } + }, + .keyword_opaque => { + for (full.ast.members) |member_node| { + _ = try astrl.expr(member_node, block, ResultInfo.none); + } + }, + else => unreachable, + } +} + +/// Returns true if `rl` provides a result pointer and the expression consumes it. +fn expr(astrl: *AstRlAnnotate, node: Ast.Node.Index, block: ?*Block, ri: ResultInfo) Allocator.Error!bool { + const tree = astrl.tree; + const token_tags = tree.tokens.items(.tag); + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + switch (node_tags[node]) { + .root, + .switch_case_one, + .switch_case_inline_one, + .switch_case, + .switch_case_inline, + .switch_range, + .for_range, + .asm_output, + .asm_input, + => unreachable, + + .@"errdefer", .@"defer" => { + _ = try astrl.expr(node_datas[node].rhs, block, ResultInfo.none); + return false; + }, + + .container_field_init, + .container_field_align, + .container_field, + => { + const full = tree.fullContainerField(node).?; + _ = try astrl.expr(full.ast.type_expr, block, ResultInfo.type_only); + if (full.ast.align_expr != 0) { + _ = try astrl.expr(full.ast.align_expr, block, ResultInfo.type_only); + } + if (full.ast.value_expr != 0) { + _ = try astrl.expr(full.ast.value_expr, block, ResultInfo.type_only); + } + return false; + }, + .@"usingnamespace" => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.type_only); + return false; + }, + .test_decl => { + _ = try astrl.expr(node_datas[node].rhs, block, ResultInfo.none); + return false; + }, + .global_var_decl, + .local_var_decl, + .simple_var_decl, + .aligned_var_decl, + => { + const full = tree.fullVarDecl(node).?; + const init_ri = if (full.ast.type_node != 0) init_ri: { + _ = try astrl.expr(full.ast.type_node, block, ResultInfo.type_only); + break :init_ri ResultInfo.typed_ptr; + } else ResultInfo.inferred_ptr; + if (full.ast.init_node == 0) { + // No init node, so we're done. + return false; + } + switch (token_tags[full.ast.mut_token]) { + .keyword_const => { + const init_consumes_rl = try astrl.expr(full.ast.init_node, block, init_ri); + if (init_consumes_rl) { + try astrl.nodes_need_rl.putNoClobber(astrl.gpa, node, {}); + } + return false; + }, + .keyword_var => { + // We'll create an alloc either way, so don't care if the + // result pointer is consumed. 
+ _ = try astrl.expr(full.ast.init_node, block, init_ri); + return false; + }, + else => unreachable, + } + }, + .assign => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + _ = try astrl.expr(node_datas[node].rhs, block, ResultInfo.typed_ptr); + return false; + }, + .assign_shl, + .assign_shl_sat, + .assign_shr, + .assign_bit_and, + .assign_bit_or, + .assign_bit_xor, + .assign_div, + .assign_sub, + .assign_sub_wrap, + .assign_sub_sat, + .assign_mod, + .assign_add, + .assign_add_wrap, + .assign_add_sat, + .assign_mul, + .assign_mul_wrap, + .assign_mul_sat, + => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + _ = try astrl.expr(node_datas[node].rhs, block, ResultInfo.none); + return false; + }, + .shl, .shr => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + _ = try astrl.expr(node_datas[node].rhs, block, ResultInfo.type_only); + return false; + }, + .add, + .add_wrap, + .add_sat, + .sub, + .sub_wrap, + .sub_sat, + .mul, + .mul_wrap, + .mul_sat, + .div, + .mod, + .shl_sat, + .bit_and, + .bit_or, + .bit_xor, + .bang_equal, + .equal_equal, + .greater_than, + .greater_or_equal, + .less_than, + .less_or_equal, + .array_cat, + => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + _ = try astrl.expr(node_datas[node].rhs, block, ResultInfo.none); + return false; + }, + .array_mult => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + _ = try astrl.expr(node_datas[node].rhs, block, ResultInfo.type_only); + return false; + }, + .error_union, .merge_error_sets => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + _ = try astrl.expr(node_datas[node].rhs, block, ResultInfo.none); + return false; + }, + .bool_and, + .bool_or, + => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.type_only); + _ = try astrl.expr(node_datas[node].rhs, block, ResultInfo.type_only); + return false; + }, + .bool_not => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.type_only); + return false; + }, + .bit_not, .negation, .negation_wrap => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + return false; + }, + + // These nodes are leaves and never consume a result location. 
+ .identifier, + .string_literal, + .multiline_string_literal, + .number_literal, + .unreachable_literal, + .asm_simple, + .@"asm", + .enum_literal, + .error_value, + .anyframe_literal, + .@"continue", + .char_literal, + .error_set_decl, + => return false, + + .builtin_call_two, .builtin_call_two_comma => { + if (node_datas[node].lhs == 0) { + return astrl.builtinCall(block, ri, node, &.{}); + } else if (node_datas[node].rhs == 0) { + return astrl.builtinCall(block, ri, node, &.{node_datas[node].lhs}); + } else { + return astrl.builtinCall(block, ri, node, &.{ node_datas[node].lhs, node_datas[node].rhs }); + } + }, + .builtin_call, .builtin_call_comma => { + const params = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs]; + return astrl.builtinCall(block, ri, node, params); + }, + + .call_one, + .call_one_comma, + .async_call_one, + .async_call_one_comma, + .call, + .call_comma, + .async_call, + .async_call_comma, + => { + var buf: [1]Ast.Node.Index = undefined; + const full = tree.fullCall(&buf, node).?; + _ = try astrl.expr(full.ast.fn_expr, block, ResultInfo.none); + for (full.ast.params) |param_node| { + _ = try astrl.expr(param_node, block, ResultInfo.type_only); + } + return switch (node_tags[node]) { + .call_one, + .call_one_comma, + .call, + .call_comma, + => false, // TODO: once function calls are passed result locations this will change + .async_call_one, + .async_call_one_comma, + .async_call, + .async_call_comma, + => ri.have_ptr, // always use result ptr for frames + else => unreachable, + }; + }, + + .@"return" => { + if (node_datas[node].lhs != 0) { + const ret_val_consumes_rl = try astrl.expr(node_datas[node].lhs, block, ResultInfo.typed_ptr); + if (ret_val_consumes_rl) { + try astrl.nodes_need_rl.putNoClobber(astrl.gpa, node, {}); + } + } + return false; + }, + + .field_access => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + return false; + }, + + .if_simple, .@"if" => { + const full = tree.fullIf(node).?; + if (full.error_token != null or full.payload_token != null) { + _ = try astrl.expr(full.ast.cond_expr, block, ResultInfo.none); + } else { + _ = try astrl.expr(full.ast.cond_expr, block, ResultInfo.type_only); // bool + } + + if (full.ast.else_expr == 0) { + _ = try astrl.expr(full.ast.then_expr, block, ResultInfo.none); + return false; + } else { + const then_uses_rl = try astrl.expr(full.ast.then_expr, block, ri); + const else_uses_rl = try astrl.expr(full.ast.else_expr, block, ri); + const uses_rl = then_uses_rl or else_uses_rl; + if (uses_rl) try astrl.nodes_need_rl.putNoClobber(astrl.gpa, node, {}); + return uses_rl; + } + }, + + .while_simple, .while_cont, .@"while" => { + const full = tree.fullWhile(node).?; + const label: ?[]const u8 = if (full.label_token) |label_token| label: { + break :label try astrl.identString(label_token); + } else null; + if (full.error_token != null or full.payload_token != null) { + _ = try astrl.expr(full.ast.cond_expr, block, ResultInfo.none); + } else { + _ = try astrl.expr(full.ast.cond_expr, block, ResultInfo.type_only); // bool + } + var new_block: Block = .{ + .parent = block, + .label = label, + .is_loop = true, + .ri = ri, + .consumes_res_ptr = false, + }; + if (full.ast.cont_expr != 0) { + _ = try astrl.expr(full.ast.cont_expr, &new_block, ResultInfo.none); + } + _ = try astrl.expr(full.ast.then_expr, &new_block, ResultInfo.none); + const else_consumes_rl = if (full.ast.else_expr != 0) else_rl: { + break :else_rl try astrl.expr(full.ast.else_expr, block, ri); + } else false; + if 
(new_block.consumes_res_ptr or else_consumes_rl) { + try astrl.nodes_need_rl.putNoClobber(astrl.gpa, node, {}); + return true; + } else { + return false; + } + }, + + .for_simple, .@"for" => { + const full = tree.fullFor(node).?; + const label: ?[]const u8 = if (full.label_token) |label_token| label: { + break :label try astrl.identString(label_token); + } else null; + for (full.ast.inputs) |input| { + if (node_tags[input] == .for_range) { + _ = try astrl.expr(node_datas[input].lhs, block, ResultInfo.type_only); + if (node_datas[input].rhs != 0) { + _ = try astrl.expr(node_datas[input].rhs, block, ResultInfo.type_only); + } + } else { + _ = try astrl.expr(input, block, ResultInfo.none); + } + } + var new_block: Block = .{ + .parent = block, + .label = label, + .is_loop = true, + .ri = ri, + .consumes_res_ptr = false, + }; + _ = try astrl.expr(full.ast.then_expr, &new_block, ResultInfo.none); + const else_consumes_rl = if (full.ast.else_expr != 0) else_rl: { + break :else_rl try astrl.expr(full.ast.else_expr, block, ri); + } else false; + if (new_block.consumes_res_ptr or else_consumes_rl) { + try astrl.nodes_need_rl.putNoClobber(astrl.gpa, node, {}); + return true; + } else { + return false; + } + }, + + .slice_open => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + _ = try astrl.expr(node_datas[node].rhs, block, ResultInfo.type_only); + return false; + }, + .slice => { + const extra = tree.extraData(node_datas[node].rhs, Ast.Node.Slice); + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + _ = try astrl.expr(extra.start, block, ResultInfo.type_only); + _ = try astrl.expr(extra.end, block, ResultInfo.type_only); + return false; + }, + .slice_sentinel => { + const extra = tree.extraData(node_datas[node].rhs, Ast.Node.SliceSentinel); + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + _ = try astrl.expr(extra.start, block, ResultInfo.type_only); + if (extra.end != 0) { + _ = try astrl.expr(extra.end, block, ResultInfo.type_only); + } + _ = try astrl.expr(extra.sentinel, block, ResultInfo.none); + return false; + }, + .deref => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + return false; + }, + .address_of => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + return false; + }, + .optional_type => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.type_only); + return false; + }, + .grouped_expression, + .@"try", + .@"await", + .@"nosuspend", + .unwrap_optional, + => return astrl.expr(node_datas[node].lhs, block, ri), + + .block_two, .block_two_semicolon => { + if (node_datas[node].lhs == 0) { + return astrl.blockExpr(block, ri, node, &.{}); + } else if (node_datas[node].rhs == 0) { + return astrl.blockExpr(block, ri, node, &.{node_datas[node].lhs}); + } else { + return astrl.blockExpr(block, ri, node, &.{ node_datas[node].lhs, node_datas[node].rhs }); + } + }, + .block, .block_semicolon => { + const statements = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs]; + return astrl.blockExpr(block, ri, node, statements); + }, + .anyframe_type => { + _ = try astrl.expr(node_datas[node].rhs, block, ResultInfo.type_only); + return false; + }, + .@"catch", .@"orelse" => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + const rhs_consumes_rl = try astrl.expr(node_datas[node].rhs, block, ri); + if (rhs_consumes_rl) { + try astrl.nodes_need_rl.putNoClobber(astrl.gpa, node, {}); + } + return rhs_consumes_rl; + }, + + .ptr_type_aligned, + .ptr_type_sentinel, 
+ .ptr_type, + .ptr_type_bit_range, + => { + const full = tree.fullPtrType(node).?; + _ = try astrl.expr(full.ast.child_type, block, ResultInfo.type_only); + if (full.ast.sentinel != 0) { + _ = try astrl.expr(full.ast.sentinel, block, ResultInfo.type_only); + } + if (full.ast.addrspace_node != 0) { + _ = try astrl.expr(full.ast.addrspace_node, block, ResultInfo.type_only); + } + if (full.ast.align_node != 0) { + _ = try astrl.expr(full.ast.align_node, block, ResultInfo.type_only); + } + if (full.ast.bit_range_start != 0) { + assert(full.ast.bit_range_end != 0); + _ = try astrl.expr(full.ast.bit_range_start, block, ResultInfo.type_only); + _ = try astrl.expr(full.ast.bit_range_end, block, ResultInfo.type_only); + } + return false; + }, + + .container_decl, + .container_decl_trailing, + .container_decl_arg, + .container_decl_arg_trailing, + .container_decl_two, + .container_decl_two_trailing, + .tagged_union, + .tagged_union_trailing, + .tagged_union_enum_tag, + .tagged_union_enum_tag_trailing, + .tagged_union_two, + .tagged_union_two_trailing, + => { + var buf: [2]Ast.Node.Index = undefined; + try astrl.containerDecl(block, tree.fullContainerDecl(&buf, node).?); + return false; + }, + + .@"break" => { + if (node_datas[node].rhs == 0) { + // Breaks with void are not interesting + return false; + } + + var opt_cur_block = block; + if (node_datas[node].lhs == 0) { + // No label - we're breaking from a loop. + while (opt_cur_block) |cur_block| : (opt_cur_block = cur_block.parent) { + if (cur_block.is_loop) break; + } + } else { + const break_label = try astrl.identString(node_datas[node].lhs); + while (opt_cur_block) |cur_block| : (opt_cur_block = cur_block.parent) { + const block_label = cur_block.label orelse continue; + if (std.mem.eql(u8, block_label, break_label)) break; + } + } + + if (opt_cur_block) |target_block| { + const consumes_break_rl = try astrl.expr(node_datas[node].rhs, block, target_block.ri); + if (consumes_break_rl) target_block.consumes_res_ptr = true; + } else { + // No corresponding scope to break from - AstGen will emit an error. + _ = try astrl.expr(node_datas[node].rhs, block, ResultInfo.none); + } + + return false; + }, + + .array_type => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.type_only); + _ = try astrl.expr(node_datas[node].rhs, block, ResultInfo.type_only); + return false; + }, + .array_type_sentinel => { + const extra = tree.extraData(node_datas[node].rhs, Ast.Node.ArrayTypeSentinel); + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.type_only); + _ = try astrl.expr(extra.elem_type, block, ResultInfo.type_only); + _ = try astrl.expr(extra.sentinel, block, ResultInfo.type_only); + return false; + }, + .array_access => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + _ = try astrl.expr(node_datas[node].rhs, block, ResultInfo.type_only); + return false; + }, + .@"comptime" => { + // AstGen will emit an error if the scope is already comptime, so we can assume it is + // not. This means the result location is not forwarded. 
+ _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + return false; + }, + .@"switch", .switch_comma => { + const operand_node = node_datas[node].lhs; + const extra = tree.extraData(node_datas[node].rhs, Ast.Node.SubRange); + const case_nodes = tree.extra_data[extra.start..extra.end]; + + _ = try astrl.expr(operand_node, block, ResultInfo.none); + + var any_prong_consumed_rl = false; + for (case_nodes) |case_node| { + const case = tree.fullSwitchCase(case_node).?; + for (case.ast.values) |item_node| { + if (node_tags[item_node] == .switch_range) { + _ = try astrl.expr(node_datas[item_node].lhs, block, ResultInfo.none); + _ = try astrl.expr(node_datas[item_node].rhs, block, ResultInfo.none); + } else { + _ = try astrl.expr(item_node, block, ResultInfo.none); + } + } + if (try astrl.expr(case.ast.target_expr, block, ri)) { + any_prong_consumed_rl = true; + } + } + if (any_prong_consumed_rl) { + try astrl.nodes_need_rl.putNoClobber(astrl.gpa, node, {}); + } + return any_prong_consumed_rl; + }, + .@"suspend" => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + return false; + }, + .@"resume" => { + _ = try astrl.expr(node_datas[node].lhs, block, ResultInfo.none); + return false; + }, + + .array_init_one, + .array_init_one_comma, + .array_init_dot_two, + .array_init_dot_two_comma, + .array_init_dot, + .array_init_dot_comma, + .array_init, + .array_init_comma, + => { + var buf: [2]Ast.Node.Index = undefined; + const full = tree.fullArrayInit(&buf, node).?; + const have_type = if (full.ast.type_expr != 0) have_type: { + _ = try astrl.expr(full.ast.type_expr, block, ResultInfo.none); + break :have_type true; + } else ri.have_type; + if (have_type) { + const elem_ri: ResultInfo = .{ + .have_type = true, + .have_ptr = ri.have_ptr, + }; + for (full.ast.elements) |elem_init| { + _ = try astrl.expr(elem_init, block, elem_ri); + } + return ri.have_ptr; + } else { + // Untyped init does not consume result location + for (full.ast.elements) |elem_init| { + _ = try astrl.expr(elem_init, block, ResultInfo.none); + } + return false; + } + }, + + .struct_init_one, + .struct_init_one_comma, + .struct_init_dot_two, + .struct_init_dot_two_comma, + .struct_init_dot, + .struct_init_dot_comma, + .struct_init, + .struct_init_comma, + => { + var buf: [2]Ast.Node.Index = undefined; + const full = tree.fullStructInit(&buf, node).?; + const have_type = if (full.ast.type_expr != 0) have_type: { + _ = try astrl.expr(full.ast.type_expr, block, ResultInfo.none); + break :have_type true; + } else ri.have_type; + if (have_type) { + const elem_ri: ResultInfo = .{ + .have_type = true, + .have_ptr = ri.have_ptr, + }; + for (full.ast.fields) |field_init| { + _ = try astrl.expr(field_init, block, elem_ri); + } + return ri.have_ptr; + } else { + // Untyped init does not consume result location + for (full.ast.fields) |field_init| { + _ = try astrl.expr(field_init, block, ResultInfo.none); + } + return false; + } + }, + + .fn_proto_simple, + .fn_proto_multi, + .fn_proto_one, + .fn_proto, + .fn_decl, + => { + var buf: [1]Ast.Node.Index = undefined; + const full = tree.fullFnProto(&buf, node).?; + const body_node = if (node_tags[node] == .fn_decl) node_datas[node].rhs else 0; + { + var it = full.iterate(tree); + while (it.next()) |param| { + if (param.anytype_ellipsis3 == null) { + _ = try astrl.expr(param.type_expr, block, ResultInfo.type_only); + } + } + } + if (full.ast.align_expr != 0) { + _ = try astrl.expr(full.ast.align_expr, block, ResultInfo.type_only); + } + if (full.ast.addrspace_expr 
!= 0) { + _ = try astrl.expr(full.ast.addrspace_expr, block, ResultInfo.type_only); + } + if (full.ast.section_expr != 0) { + _ = try astrl.expr(full.ast.section_expr, block, ResultInfo.type_only); + } + if (full.ast.callconv_expr != 0) { + _ = try astrl.expr(full.ast.callconv_expr, block, ResultInfo.type_only); + } + _ = try astrl.expr(full.ast.return_type, block, ResultInfo.type_only); + if (body_node != 0) { + _ = try astrl.expr(body_node, block, ResultInfo.none); + } + return false; + }, + } +} + +fn identString(astrl: *AstRlAnnotate, token: Ast.TokenIndex) ![]const u8 { + const tree = astrl.tree; + const token_tags = tree.tokens.items(.tag); + assert(token_tags[token] == .identifier); + const ident_name = tree.tokenSlice(token); + if (!std.mem.startsWith(u8, ident_name, "@")) { + return ident_name; + } + return std.zig.string_literal.parseAlloc(astrl.arena, ident_name[1..]) catch |err| switch (err) { + error.OutOfMemory => error.OutOfMemory, + error.InvalidLiteral => "", // This pass can safely return garbage on invalid AST + }; +} + +fn blockExpr(astrl: *AstRlAnnotate, parent_block: ?*Block, ri: ResultInfo, node: Ast.Node.Index, statements: []const Ast.Node.Index) !bool { + const tree = astrl.tree; + const token_tags = tree.tokens.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + + const lbrace = main_tokens[node]; + if (token_tags[lbrace - 1] == .colon and + token_tags[lbrace - 2] == .identifier) + { + // Labeled block + var new_block: Block = .{ + .parent = parent_block, + .label = try astrl.identString(lbrace - 2), + .is_loop = false, + .ri = ri, + .consumes_res_ptr = false, + }; + for (statements) |statement| { + _ = try astrl.expr(statement, &new_block, ResultInfo.none); + } + if (new_block.consumes_res_ptr) { + try astrl.nodes_need_rl.putNoClobber(astrl.gpa, node, {}); + } + return new_block.consumes_res_ptr; + } else { + // Unlabeled block + for (statements) |statement| { + _ = try astrl.expr(statement, parent_block, ResultInfo.none); + } + return false; + } +} + +fn builtinCall(astrl: *AstRlAnnotate, block: ?*Block, ri: ResultInfo, node: Ast.Node.Index, args: []const Ast.Node.Index) !bool { + const tree = astrl.tree; + const main_tokens = tree.nodes.items(.main_token); + const builtin_token = main_tokens[node]; + const builtin_name = tree.tokenSlice(builtin_token); + const info = BuiltinFn.list.get(builtin_name) orelse return false; + if (info.param_count) |expected| { + if (expected != args.len) return false; + } + switch (info.tag) { + .import => return false, + .compile_log, .TypeOf => { + for (args) |arg_node| { + _ = try astrl.expr(arg_node, block, ResultInfo.none); + } + return false; + }, + .as => { + _ = try astrl.expr(args[0], block, ResultInfo.type_only); + const rhs_consumes_rl = try astrl.expr(args[1], block, ri); + if (rhs_consumes_rl) { + try astrl.nodes_need_rl.putNoClobber(astrl.gpa, node, {}); + } + return rhs_consumes_rl; + }, + .bit_cast => { + _ = try astrl.expr(args[0], block, ResultInfo.none); + return false; + }, + .union_init => { + _ = try astrl.expr(args[0], block, ResultInfo.type_only); + _ = try astrl.expr(args[1], block, ResultInfo.type_only); + _ = try astrl.expr(args[2], block, ResultInfo.type_only); + return false; + }, + .c_import => { + _ = try astrl.expr(args[0], block, ResultInfo.none); + return false; + }, + .min, .max => { + for (args) |arg_node| { + _ = try astrl.expr(arg_node, block, ResultInfo.none); + } + return false; + }, + .@"export" => { + _ = try astrl.expr(args[0], block, ResultInfo.none); + _ = try 
astrl.expr(args[1], block, ResultInfo.type_only); + return false; + }, + .@"extern" => { + _ = try astrl.expr(args[0], block, ResultInfo.type_only); + _ = try astrl.expr(args[1], block, ResultInfo.type_only); + return false; + }, + // These builtins take no args and do not consume the result pointer. + .src, + .This, + .return_address, + .error_return_trace, + .frame, + .breakpoint, + .in_comptime, + .panic, + .trap, + .c_va_start, + => return false, + // TODO: this is a workaround for llvm/llvm-project#68409 + // Zig tracking issue: #16876 + .frame_address => return true, + // These builtins take a single argument with a known result type, but do not consume their + // result pointer. + .size_of, + .bit_size_of, + .align_of, + .compile_error, + .set_eval_branch_quota, + .int_from_bool, + .int_from_error, + .error_from_int, + .embed_file, + .error_name, + .set_runtime_safety, + .Type, + .c_undef, + .c_include, + .wasm_memory_size, + .splat, + .fence, + .set_float_mode, + .set_align_stack, + .set_cold, + .type_info, + .work_item_id, + .work_group_size, + .work_group_id, + => { + _ = try astrl.expr(args[0], block, ResultInfo.type_only); + return false; + }, + // These builtins take a single argument with no result information and do not consume their + // result pointer. + .int_from_ptr, + .int_from_enum, + .sqrt, + .sin, + .cos, + .tan, + .exp, + .exp2, + .log, + .log2, + .log10, + .fabs, + .floor, + .ceil, + .trunc, + .round, + .tag_name, + .type_name, + .Frame, + .frame_size, + .int_from_float, + .float_from_int, + .ptr_from_int, + .enum_from_int, + .float_cast, + .int_cast, + .truncate, + .err_set_cast, + .ptr_cast, + .align_cast, + .addrspace_cast, + .const_cast, + .volatile_cast, + .clz, + .ctz, + .pop_count, + .byte_swap, + .bit_reverse, + => { + _ = try astrl.expr(args[0], block, ResultInfo.none); + return false; + }, + .div_exact, + .div_floor, + .div_trunc, + .mod, + .rem, + => { + _ = try astrl.expr(args[0], block, ResultInfo.none); + _ = try astrl.expr(args[1], block, ResultInfo.none); + return false; + }, + .shl_exact, .shr_exact => { + _ = try astrl.expr(args[0], block, ResultInfo.none); + _ = try astrl.expr(args[1], block, ResultInfo.type_only); + return false; + }, + .bit_offset_of, + .offset_of, + .field_parent_ptr, + .has_decl, + .has_field, + .field, + => { + _ = try astrl.expr(args[0], block, ResultInfo.type_only); + _ = try astrl.expr(args[1], block, ResultInfo.type_only); + return false; + }, + .wasm_memory_grow => { + _ = try astrl.expr(args[0], block, ResultInfo.type_only); + _ = try astrl.expr(args[1], block, ResultInfo.type_only); + return false; + }, + .c_define => { + _ = try astrl.expr(args[0], block, ResultInfo.type_only); + _ = try astrl.expr(args[1], block, ResultInfo.none); + return false; + }, + .reduce => { + _ = try astrl.expr(args[0], block, ResultInfo.type_only); + _ = try astrl.expr(args[1], block, ResultInfo.none); + return false; + }, + .add_with_overflow, .sub_with_overflow, .mul_with_overflow, .shl_with_overflow => { + _ = try astrl.expr(args[0], block, ResultInfo.none); + _ = try astrl.expr(args[1], block, ResultInfo.none); + return false; + }, + .atomic_load => { + _ = try astrl.expr(args[0], block, ResultInfo.type_only); + _ = try astrl.expr(args[1], block, ResultInfo.none); + _ = try astrl.expr(args[2], block, ResultInfo.type_only); + return false; + }, + .atomic_rmw => { + _ = try astrl.expr(args[0], block, ResultInfo.type_only); + _ = try astrl.expr(args[1], block, ResultInfo.none); + _ = try astrl.expr(args[2], block, ResultInfo.type_only); + 
_ = try astrl.expr(args[3], block, ResultInfo.type_only); + _ = try astrl.expr(args[4], block, ResultInfo.type_only); + return false; + }, + .atomic_store => { + _ = try astrl.expr(args[0], block, ResultInfo.type_only); + _ = try astrl.expr(args[1], block, ResultInfo.none); + _ = try astrl.expr(args[2], block, ResultInfo.type_only); + _ = try astrl.expr(args[3], block, ResultInfo.type_only); + return false; + }, + .mul_add => { + _ = try astrl.expr(args[0], block, ResultInfo.type_only); + _ = try astrl.expr(args[1], block, ResultInfo.type_only); + _ = try astrl.expr(args[2], block, ResultInfo.type_only); + return false; + }, + .call => { + _ = try astrl.expr(args[0], block, ResultInfo.type_only); + _ = try astrl.expr(args[1], block, ResultInfo.none); + _ = try astrl.expr(args[2], block, ResultInfo.none); + return false; + }, + .memcpy => { + _ = try astrl.expr(args[0], block, ResultInfo.none); + _ = try astrl.expr(args[1], block, ResultInfo.none); + return false; + }, + .memset => { + _ = try astrl.expr(args[0], block, ResultInfo.none); + _ = try astrl.expr(args[1], block, ResultInfo.type_only); + return false; + }, + .shuffle => { + _ = try astrl.expr(args[0], block, ResultInfo.type_only); + _ = try astrl.expr(args[1], block, ResultInfo.none); + _ = try astrl.expr(args[2], block, ResultInfo.none); + _ = try astrl.expr(args[3], block, ResultInfo.none); + return false; + }, + .select => { + _ = try astrl.expr(args[0], block, ResultInfo.type_only); + _ = try astrl.expr(args[1], block, ResultInfo.none); + _ = try astrl.expr(args[2], block, ResultInfo.none); + _ = try astrl.expr(args[3], block, ResultInfo.none); + return false; + }, + .async_call => { + _ = try astrl.expr(args[0], block, ResultInfo.none); + _ = try astrl.expr(args[1], block, ResultInfo.none); + _ = try astrl.expr(args[2], block, ResultInfo.none); + _ = try astrl.expr(args[3], block, ResultInfo.none); + return false; // buffer passed as arg for frame data + }, + .Vector => { + _ = try astrl.expr(args[0], block, ResultInfo.type_only); + _ = try astrl.expr(args[1], block, ResultInfo.type_only); + return false; + }, + .prefetch => { + _ = try astrl.expr(args[0], block, ResultInfo.none); + _ = try astrl.expr(args[1], block, ResultInfo.type_only); + return false; + }, + .c_va_arg => { + _ = try astrl.expr(args[0], block, ResultInfo.none); + _ = try astrl.expr(args[1], block, ResultInfo.type_only); + return false; + }, + .c_va_copy => { + _ = try astrl.expr(args[0], block, ResultInfo.none); + return false; + }, + .c_va_end => { + _ = try astrl.expr(args[0], block, ResultInfo.none); + return false; + }, + .cmpxchg_strong, .cmpxchg_weak => { + _ = try astrl.expr(args[0], block, ResultInfo.none); + _ = try astrl.expr(args[1], block, ResultInfo.type_only); + _ = try astrl.expr(args[2], block, ResultInfo.type_only); + _ = try astrl.expr(args[3], block, ResultInfo.type_only); + _ = try astrl.expr(args[4], block, ResultInfo.type_only); + return false; + }, + } +} diff --git a/src/Sema.zig b/src/Sema.zig index be5242db2b..c1f7eb2268 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1346,11 +1346,6 @@ fn analyzeBodyInner( i += 1; continue; }, - .store_to_block_ptr => { - try sema.zirStoreToBlockPtr(block, inst); - i += 1; - continue; - }, .store_to_inferred_ptr => { try sema.zirStoreToInferredPtr(block, inst); i += 1; @@ -5269,35 +5264,6 @@ fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !vo try mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category}); } -fn zirStoreToBlockPtr(sema: *Sema, block: 
*Block, inst: Zir.Inst.Index) CompileError!void { - const tracy = trace(@src()); - defer tracy.end(); - - const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const ptr = sema.inst_map.get(Zir.refToIndex(bin_inst.lhs).?) orelse { - // This is an elided instruction, but AstGen was unable to omit it. - return; - }; - const operand = try sema.resolveInst(bin_inst.rhs); - const src: LazySrcLoc = sema.src; - blk: { - const ptr_inst = Air.refToIndex(ptr) orelse break :blk; - switch (sema.air_instructions.items(.tag)[ptr_inst]) { - .inferred_alloc_comptime => { - const iac = &sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime; - return sema.storeToInferredAllocComptime(block, src, operand, iac); - }, - .inferred_alloc => { - const ia = sema.unresolved_inferred_allocs.getPtr(ptr_inst).?; - return sema.storeToInferredAlloc(block, ptr, operand, ia); - }, - else => break :blk, - } - } - - return sema.storePtr(block, src, ptr, operand); -} - fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); diff --git a/src/Zir.zig b/src/Zir.zig index 704fdcac03..d38b2d01e3 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -602,20 +602,9 @@ pub const Inst = struct { /// Same as `store` except provides a source location. /// Uses the `pl_node` union field. Payload is `Bin`. store_node, - /// This instruction is not really supposed to be emitted from AstGen; nevertheless it - /// is sometimes emitted due to deficiencies in AstGen. When Sema sees this instruction, - /// it must clean up after AstGen's mess by looking at various context clues and - /// then treating it as one of the following: - /// * no-op - /// * store_to_inferred_ptr - /// * store - /// Uses the `bin` union field with LHS as the pointer to store to. - store_to_block_ptr, /// Same as `store` but the type of the value being stored will be used to infer /// the pointer type. - /// Uses the `bin` union field - Astgen.zig depends on the ability to change - /// the tag of an instruction from `store_to_block_ptr` to `store_to_inferred_ptr` - /// without changing the data. + /// Uses the `bin` union field. store_to_inferred_ptr, /// String Literal. Makes an anonymous Decl and then takes a pointer to it. /// Uses the `str` union field. 
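As a hypothetical sketch (helper name and shape assumed, not from this diff), the query that replaces the removed strategy() bookkeeping: AstGen consults the set produced by AstRlAnnotate, a plain hash set keyed by AST node index, instead of counting stores and breaks after the fact.

const std = @import("std");
const Ast = std.zig.Ast;

pub const RlNeededSet = std.AutoHashMapUnmanaged(Ast.Node.Index, void);

/// Returns true if the annotation pass decided that sub-expressions of
/// `node` consume a result pointer, in which case AstGen must provide a
/// real alloc; otherwise plain `break` instructions yield simpler ZIR and
/// Sema never sees an ambiguous store.
fn nodeNeedsRl(set: *const RlNeededSet, node: Ast.Node.Index) bool {
    return set.contains(node);
}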
@@ -1109,7 +1098,6 @@ pub const Inst = struct { .shr, .store, .store_node, - .store_to_block_ptr, .store_to_inferred_ptr, .str, .sub, @@ -1295,7 +1283,6 @@ pub const Inst = struct { .atomic_store, .store, .store_node, - .store_to_block_ptr, .store_to_inferred_ptr, .resolve_inferred_alloc, .validate_array_init_ty, @@ -1667,7 +1654,6 @@ pub const Inst = struct { .slice_length = .pl_node, .store = .bin, .store_node = .pl_node, - .store_to_block_ptr = .bin, .store_to_inferred_ptr = .bin, .str = .str, .negate = .un_node, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index bea3c02985..330a9b33e8 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -4285,7 +4285,7 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const inst_ty = f.typeOfIndex(inst); - const result = if (inst_ty.ip_index != .void_type and !f.liveness.isUnused(inst)) + const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !f.liveness.isUnused(inst)) try f.allocLocal(inst, inst_ty) else .none; diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 9632beef3a..00b3826d7d 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -10092,8 +10092,8 @@ fn vectorTypeAssumeCapacity( .data = self.addTypeExtraAssumeCapacity(data), }); if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity(switch (kind) { - .normal => llvm.Type.vectorType, - .scalable => llvm.Type.scalableVectorType, + .normal => &llvm.Type.vectorType, + .scalable => &llvm.Type.scalableVectorType, }(child.toLlvm(self), @intCast(len))); } return @enumFromInt(gop.index); @@ -11274,8 +11274,8 @@ fn gepConstAssumeCapacity( for (llvm_indices, indices) |*llvm_index, index| llvm_index.* = index.toLlvm(self); self.llvm.constants.appendAssumeCapacity(switch (kind) { - .normal => llvm.Type.constGEP, - .inbounds => llvm.Type.constInBoundsGEP, + .normal => &llvm.Type.constGEP, + .inbounds => &llvm.Type.constInBoundsGEP, }(ty.toLlvm(self), base.toLlvm(self), llvm_indices.ptr, @intCast(llvm_indices.len))); } } diff --git a/src/print_zir.zig b/src/print_zir.zig index 3a158375ea..86f4a45e29 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -145,7 +145,6 @@ const Writer = struct { switch (tag) { .as, .store, - .store_to_block_ptr, .store_to_inferred_ptr, => try self.writeBin(stream, inst), diff --git a/stage1/zig.h b/stage1/zig.h index 2224e71764..fc8995b887 100644 --- a/stage1/zig.h +++ b/stage1/zig.h @@ -1176,10 +1176,10 @@ typedef signed __int128 zig_i128; #if zig_little_endian typedef struct { zig_align(16) uint64_t lo; uint64_t hi; } zig_u128; -typedef struct { zig_align(16) uint64_t lo; int64_t hi; } zig_i128; +typedef struct { zig_align(16) uint64_t lo; int64_t hi; } zig_i128; #else typedef struct { zig_align(16) uint64_t hi; uint64_t lo; } zig_u128; -typedef struct { zig_align(16) int64_t hi; uint64_t lo; } zig_i128; +typedef struct { zig_align(16) int64_t hi; uint64_t lo; } zig_i128; #endif #define zig_make_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) }) @@ -3197,11 +3197,9 @@ zig_convert_builtin(zig_compiler_rt_f16, zig_f16, trunc, zig_f64, zig_convert_builtin(zig_f16, zig_f16, trunc, zig_f80, zig_f80, 2) zig_convert_builtin(zig_f16, zig_f16, trunc, zig_f128, zig_f128, 2) zig_convert_builtin(zig_f32, zig_f32, extend, zig_compiler_rt_f16, zig_f16, 2) -zig_convert_builtin(zig_f32, zig_f32, trunc, zig_f64, zig_f64, 2) zig_convert_builtin(zig_f32, zig_f32, trunc, zig_f80, zig_f80, 2) zig_convert_builtin(zig_f32, zig_f32, trunc, zig_f128, zig_f128, 2) 
zig_convert_builtin(zig_f64, zig_f64, extend, zig_compiler_rt_f16, zig_f16, 2) -zig_convert_builtin(zig_f64, zig_f64, extend, zig_f32, zig_f32, 2) zig_convert_builtin(zig_f64, zig_f64, trunc, zig_f80, zig_f80, 2) zig_convert_builtin(zig_f64, zig_f64, trunc, zig_f128, zig_f128, 2) zig_convert_builtin(zig_f80, zig_f80, extend, zig_f16, zig_f16, 2) @@ -3213,6 +3211,21 @@ zig_convert_builtin(zig_f128, zig_f128, extend, zig_f32, zig_convert_builtin(zig_f128, zig_f128, extend, zig_f64, zig_f64, 2) zig_convert_builtin(zig_f128, zig_f128, extend, zig_f80, zig_f80, 2) +#ifdef __ARM_EABI__ + +zig_extern zig_callconv(pcs("aapcs")) zig_f32 __aeabi_d2f(zig_f64); +static inline zig_f32 zig_truncdfsf(zig_f64 arg) { return __aeabi_d2f(arg); } + +zig_extern zig_callconv(pcs("aapcs")) zig_f64 __aeabi_f2d(zig_f32); +static inline zig_f64 zig_extendsfdf(zig_f32 arg) { return __aeabi_f2d(arg); } + +#else /* __ARM_EABI__ */ + +zig_convert_builtin(zig_f32, zig_f32, trunc, zig_f64, zig_f64, 2) +zig_convert_builtin(zig_f64, zig_f64, extend, zig_f32, zig_f32, 2) + +#endif /* __ARM_EABI__ */ + #define zig_float_negate_builtin_0(w, c, sb) \ zig_expand_concat(zig_xor_, zig_repr_f##w)(arg, zig_make_f##w(-0x0.0p0, c sb)) #define zig_float_negate_builtin_1(w, c, sb) -arg @@ -3255,17 +3268,11 @@ zig_float_negate_builtin(128, zig_make_u128, (UINT64_C(1) << 63, UINT64_C(0))) return lhs operator rhs; \ } -#define zig_float_builtins(w) \ - zig_convert_builtin( int32_t, int32_t, fix, zig_f##w, zig_f##w, ) \ - zig_convert_builtin(uint32_t, uint32_t, fixuns, zig_f##w, zig_f##w, ) \ +#define zig_common_float_builtins(w) \ zig_convert_builtin( int64_t, int64_t, fix, zig_f##w, zig_f##w, ) \ - zig_convert_builtin(uint64_t, uint64_t, fixuns, zig_f##w, zig_f##w, ) \ zig_convert_builtin(zig_i128, zig_i128, fix, zig_f##w, zig_f##w, ) \ zig_convert_builtin(zig_u128, zig_u128, fixuns, zig_f##w, zig_f##w, ) \ - zig_convert_builtin(zig_f##w, zig_f##w, float, int32_t, int32_t, ) \ - zig_convert_builtin(zig_f##w, zig_f##w, floatun, uint32_t, uint32_t, ) \ zig_convert_builtin(zig_f##w, zig_f##w, float, int64_t, int64_t, ) \ - zig_convert_builtin(zig_f##w, zig_f##w, floatun, uint64_t, uint64_t, ) \ zig_convert_builtin(zig_f##w, zig_f##w, float, zig_i128, zig_i128, ) \ zig_convert_builtin(zig_f##w, zig_f##w, floatun, zig_u128, zig_u128, ) \ zig_expand_concat(zig_float_less_builtin_, zig_has_f##w)(f##w, cmp) \ @@ -3309,12 +3316,68 @@ zig_float_negate_builtin(128, zig_make_u128, (UINT64_C(1) << 63, UINT64_C(0))) static inline zig_f##w zig_mod_f##w(zig_f##w lhs, zig_f##w rhs) { \ return zig_sub_f##w(lhs, zig_mul_f##w(zig_div_floor_f##w(lhs, rhs), rhs)); \ } +zig_common_float_builtins(16) +zig_common_float_builtins(32) +zig_common_float_builtins(64) +zig_common_float_builtins(80) +zig_common_float_builtins(128) + +#define zig_float_builtins(w) \ + zig_convert_builtin( int32_t, int32_t, fix, zig_f##w, zig_f##w, ) \ + zig_convert_builtin(uint32_t, uint32_t, fixuns, zig_f##w, zig_f##w, ) \ + zig_convert_builtin(uint64_t, uint64_t, fixuns, zig_f##w, zig_f##w, ) \ + zig_convert_builtin(zig_f##w, zig_f##w, float, int32_t, int32_t, ) \ + zig_convert_builtin(zig_f##w, zig_f##w, floatun, uint32_t, uint32_t, ) \ + zig_convert_builtin(zig_f##w, zig_f##w, floatun, uint64_t, uint64_t, ) zig_float_builtins(16) -zig_float_builtins(32) -zig_float_builtins(64) zig_float_builtins(80) zig_float_builtins(128) +#ifdef __ARM_EABI__ + +zig_extern zig_callconv(pcs("aapcs")) int32_t __aeabi_f2iz(zig_f32); +static inline int32_t zig_fixsfsi(zig_f32 arg) { return 
__aeabi_f2iz(arg); } + +zig_extern zig_callconv(pcs("aapcs")) uint32_t __aeabi_f2uiz(zig_f32); +static inline uint32_t zig_fixunssfsi(zig_f32 arg) { return __aeabi_f2uiz(arg); } + +zig_extern zig_callconv(pcs("aapcs")) uint64_t __aeabi_f2ulz(zig_f32); +static inline uint64_t zig_fixunssfdi(zig_f32 arg) { return __aeabi_f2ulz(arg); } + +zig_extern zig_callconv(pcs("aapcs")) zig_f32 __aeabi_i2f(int32_t); +static inline zig_f32 zig_floatsisf(int32_t arg) { return __aeabi_i2f(arg); } + +zig_extern zig_callconv(pcs("aapcs")) zig_f32 __aeabi_ui2f(uint32_t); +static inline zig_f32 zig_floatunsisf(uint32_t arg) { return __aeabi_ui2f(arg); } + +zig_extern zig_callconv(pcs("aapcs")) zig_f32 __aeabi_ul2f(uint64_t); +static inline zig_f32 zig_floatundisf(uint64_t arg) { return __aeabi_ul2f(arg); } + +zig_extern zig_callconv(pcs("aapcs")) int32_t __aeabi_d2iz(zig_f64); +static inline int32_t zig_fixdfsi(zig_f64 arg) { return __aeabi_d2iz(arg); } + +zig_extern zig_callconv(pcs("aapcs")) uint32_t __aeabi_d2uiz(zig_f64); +static inline uint32_t zig_fixunsdfsi(zig_f64 arg) { return __aeabi_d2uiz(arg); } + +zig_extern zig_callconv(pcs("aapcs")) uint64_t __aeabi_d2ulz(zig_f64); +static inline uint64_t zig_fixunsdfdi(zig_f64 arg) { return __aeabi_d2ulz(arg); } + +zig_extern zig_callconv(pcs("aapcs")) zig_f64 __aeabi_i2d(int32_t); +static inline zig_f64 zig_floatsidf(int32_t arg) { return __aeabi_i2d(arg); } + +zig_extern zig_callconv(pcs("aapcs")) zig_f64 __aeabi_ui2d(uint32_t); +static inline zig_f64 zig_floatunsidf(uint32_t arg) { return __aeabi_ui2d(arg); } + +zig_extern zig_callconv(pcs("aapcs")) zig_f64 __aeabi_ul2d(uint64_t); +static inline zig_f64 zig_floatundidf(uint64_t arg) { return __aeabi_ul2d(arg); } + +#else /* __ARM_EABI__ */ + +zig_float_builtins(32) +zig_float_builtins(64) + +#endif /* __ARM_EABI__ */ + /* ============================ Atomics Support ============================= */ /* Note that atomics should be implemented as macros because most diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm index 2a3ecafb48..ed3654be43 100644 Binary files a/stage1/zig1.wasm and b/stage1/zig1.wasm differ diff --git a/test/cases/compile_errors/break_void_result_location.zig b/test/cases/compile_errors/break_void_result_location.zig index bd15aa6a77..140ddb6ed1 100644 --- a/test/cases/compile_errors/break_void_result_location.zig +++ b/test/cases/compile_errors/break_void_result_location.zig @@ -29,4 +29,4 @@ export fn f4() void { // :2:22: error: expected type 'usize', found 'void' // :7:9: error: expected type 'usize', found 'void' // :14:9: error: expected type 'usize', found 'void' -// :19:27: error: expected type 'usize', found 'void' +// :20:9: error: expected type 'usize', found 'void' diff --git a/test/cases/compile_errors/missing_else_clause.zig b/test/cases/compile_errors/missing_else_clause.zig index 13a164ddea..8e4d4334b6 100644 --- a/test/cases/compile_errors/missing_else_clause.zig +++ b/test/cases/compile_errors/missing_else_clause.zig @@ -34,8 +34,7 @@ export fn entry() void { // backend=stage2 // target=native // -// :2:20: error: incompatible types: 'i32' and 'void' -// :2:30: note: type 'i32' here +// :2:20: error: expected type 'i32', found 'void' // :8:15: error: incompatible types: 'i32' and 'void' // :8:25: note: type 'i32' here // :16:16: error: expected type 'tmp.h.T', found 'void'
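A reconstruction (assumed; the test's actual source is not shown in this diff) of the pattern missing_else_clause.zig exercises, illustrating the improved diagnostic:

// Expected to fail compilation: an `if` without an `else` has type 'void',
// so it cannot satisfy an `i32` result type.
fn f(cond: bool) i32 {
    const x: i32 = if (cond) 1;
    return x;
}

Because the result type now reaches the `if` directly, the failure is reported as a single coercion error, "expected type 'i32', found 'void'", at the `if`, rather than the older two-part "incompatible types: 'i32' and 'void'" with a note pointing at the result type.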