Mirror of https://github.com/ziglang/zig.git, synced 2025-12-28 17:13:19 +00:00
Merge pull request #12124 from ziglang/stage2-coerce-result-ptr
Sema: fix coerce_result_ptr in case of inferred result type
Commit 4c7fe74b2c

src/Sema.zig: 127 lines changed
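
For context on the bug: when a runtime `if` has branches that peer-resolve to an optional, the result location begins as an inferred allocation, so coerce_result_ptr instructions must be emitted before the result pointer type exists. A minimal reproduction, distilled from the behavior test added at the bottom of this diff:

const std = @import("std");

test "runtime if result location is an inferred optional" {
    const A = struct { b: u64, c: u64 };
    var d: bool = true;
    // `e` gets an inferred result location; its pointer type (*?A here)
    // only exists once the two branches have been peer-type-resolved.
    const e = if (d) A{ .b = 15, .c = 30 } else null;
    try std.testing.expect(e != null);
}

The new `prongs` bookkeeping introduced below exists to patch those early instructions up once the type is finally known.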
@@ -1974,8 +1974,6 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
             defer trash_block.instructions.deinit(sema.gpa);
             const operand = try trash_block.addBitCast(pointee_ty, .void_value);
 
-            try inferred_alloc.stored_inst_list.append(sema.arena, operand);
-
             try sema.requireRuntimeBlock(block, src);
             const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
                 .pointee_type = pointee_ty,
@@ -1983,6 +1981,12 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
                 .@"addrspace" = addr_space,
             });
             const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
+
+            try inferred_alloc.prongs.append(sema.arena, .{
+                .stored_inst = operand,
+                .placeholder = Air.refToIndex(bitcasted_ptr).?,
+            });
+
             return bitcasted_ptr;
         },
         .inferred_alloc_comptime => {
@@ -2027,7 +2031,24 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
 
     const dummy_ptr = try trash_block.addTy(.alloc, sema.typeOf(ptr));
     const dummy_operand = try trash_block.addBitCast(pointee_ty, .void_value);
-    try sema.storePtr2(&trash_block, src, dummy_ptr, src, dummy_operand, src, .bitcast);
+    return coerceResultPtr(sema, block, src, ptr, dummy_ptr, dummy_operand, &trash_block);
+}
+
+fn coerceResultPtr(
+    sema: *Sema,
+    block: *Block,
+    src: LazySrcLoc,
+    ptr: Air.Inst.Ref,
+    dummy_ptr: Air.Inst.Ref,
+    dummy_operand: Air.Inst.Ref,
+    trash_block: *Block,
+) CompileError!Air.Inst.Ref {
+    const target = sema.mod.getTarget();
+    const addr_space = target_util.defaultAddressSpace(target, .local);
+    const pointee_ty = sema.typeOf(dummy_operand);
+    const prev_trash_len = trash_block.instructions.items.len;
+
+    try sema.storePtr2(trash_block, src, dummy_ptr, src, dummy_operand, src, .bitcast);
 
     {
         const air_tags = sema.air_instructions.items(.tag);
@@ -2059,15 +2080,24 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
     while (true) {
         const air_tags = sema.air_instructions.items(.tag);
         const air_datas = sema.air_instructions.items(.data);
 
+        if (trash_block.instructions.items.len == prev_trash_len) {
+            if (try sema.resolveDefinedValue(block, src, new_ptr)) |ptr_val| {
+                return sema.addConstant(ptr_ty, ptr_val);
+            }
+            if (pointee_ty.eql(Type.@"null", sema.mod)) {
+                const opt_ty = sema.typeOf(new_ptr).childType();
+                const null_inst = try sema.addConstant(opt_ty, Value.@"null");
+                _ = try block.addBinOp(.store, new_ptr, null_inst);
+                return Air.Inst.Ref.void_value;
+            }
+            return sema.bitCast(block, ptr_ty, new_ptr, src);
+        }
 
         const trash_inst = trash_block.instructions.pop();
 
         switch (air_tags[trash_inst]) {
             .bitcast => {
                 if (Air.indexToRef(trash_inst) == dummy_operand) {
                     if (try sema.resolveDefinedValue(block, src, new_ptr)) |ptr_val| {
                         return sema.addConstant(ptr_ty, ptr_val);
                     }
                     return sema.bitCast(block, ptr_ty, new_ptr, src);
                 }
                 const ty_op = air_datas[trash_inst].ty_op;
                 const operand_ty = sema.typeOf(ty_op.operand);
                 const ptr_operand_ty = try Type.ptr(sema.arena, sema.mod, .{
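
The new helper funnels all coercion analysis through a scratch block: a dummy store is analyzed into `trash_block`, then instructions are popped back off until the block returns to `prev_trash_len`, mirroring each coercion step onto the real `block`. A self-contained sketch of that speculate, inspect, discard pattern (the names and element type are illustrative, not the compiler's):

const std = @import("std");

test "speculate into a scratch list, inspect, then discard" {
    const gpa = std.testing.allocator;

    // Real output and a throwaway scratch, like `block` vs `trash_block`.
    var real: std.ArrayListUnmanaged(u8) = .{};
    defer real.deinit(gpa);
    var trash: std.ArrayListUnmanaged(u8) = .{};
    defer trash.deinit(gpa);

    const prev_trash_len = trash.items.len;

    // "Analyze" a dummy operation into the scratch list only.
    try trash.appendSlice(gpa, "abc");

    // Pop the speculative results and mirror decisions onto `real`,
    // stopping once the scratch is back to its previous length.
    while (trash.items.len != prev_trash_len) {
        const inst = trash.pop();
        try real.append(gpa, inst);
    }

    try std.testing.expectEqualSlices(u8, "cba", real.items);
}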
@@ -3141,7 +3171,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
         },
         .inferred_alloc => {
             const inferred_alloc = ptr_val.castTag(.inferred_alloc).?;
-            const peer_inst_list = inferred_alloc.data.stored_inst_list.items;
+            const peer_inst_list = inferred_alloc.data.prongs.items(.stored_inst);
             const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none);
 
             const final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
@@ -3250,6 +3280,70 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
             .tag = .alloc,
             .data = .{ .ty = final_ptr_ty },
         });
+
+        // Now we need to go back over all the coerce_result_ptr instructions, which
+        // previously inserted a bitcast as a placeholder, and do the logic as if
+        // the new result ptr type was available.
+        const placeholders = inferred_alloc.data.prongs.items(.placeholder);
+        const gpa = sema.gpa;
+
+        var trash_block = block.makeSubBlock();
+        trash_block.is_comptime = false;
+        trash_block.is_coerce_result_ptr = true;
+        defer trash_block.instructions.deinit(gpa);
+
+        const mut_final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
+            .pointee_type = final_elem_ty,
+            .mutable = true,
+            .@"align" = inferred_alloc.data.alignment,
+            .@"addrspace" = target_util.defaultAddressSpace(target, .local),
+        });
+        const dummy_ptr = try trash_block.addTy(.alloc, mut_final_ptr_ty);
+        const empty_trash_count = trash_block.instructions.items.len;
+
+        for (placeholders) |bitcast_inst, i| {
+            const sub_ptr_ty = sema.typeOf(Air.indexToRef(bitcast_inst));
+
+            if (mut_final_ptr_ty.eql(sub_ptr_ty, sema.mod)) {
+                // New result location type is the same as the old one; nothing
+                // to do here.
+                continue;
+            }
+
+            var bitcast_block = block.makeSubBlock();
+            defer bitcast_block.instructions.deinit(gpa);
+
+            trash_block.instructions.shrinkRetainingCapacity(empty_trash_count);
+            const sub_ptr = try coerceResultPtr(sema, &bitcast_block, src, ptr, dummy_ptr, peer_inst_list[i], &trash_block);
+
+            assert(bitcast_block.instructions.items.len > 0);
+            // If only one instruction is produced then we can replace the bitcast
+            // placeholder instruction with this instruction; no need for an entire block.
+            if (bitcast_block.instructions.items.len == 1) {
+                const only_inst = bitcast_block.instructions.items[0];
+                sema.air_instructions.set(bitcast_inst, sema.air_instructions.get(only_inst));
+                continue;
+            }
+
+            // Here we replace the placeholder bitcast instruction with a block
+            // that does the coerce_result_ptr logic.
+            _ = try bitcast_block.addBr(bitcast_inst, sub_ptr);
+            const ty_inst = sema.air_instructions.items(.data)[bitcast_inst].ty_op.ty;
+            try sema.air_extra.ensureUnusedCapacity(
+                gpa,
+                @typeInfo(Air.Block).Struct.fields.len + bitcast_block.instructions.items.len,
+            );
+            sema.air_instructions.set(bitcast_inst, .{
+                .tag = .block,
+                .data = .{ .ty_pl = .{
+                    .ty = ty_inst,
+                    .payload = sema.addExtraAssumeCapacity(Air.Block{
+                        .body_len = @intCast(u32, bitcast_block.instructions.items.len),
+                    }),
+                } },
+            });
+            sema.air_extra.appendSliceAssumeCapacity(bitcast_block.instructions.items);
+        }
+    },
     else => unreachable,
 }
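
The patch-up pass above works because AIR instructions live in a `std.MultiArrayList`, so an instruction emitted earlier as a placeholder can be overwritten in place via `set`, either with the single replacement instruction or with a `block` wrapping several. A minimal sketch of that in-place replacement (the `Inst` type here is made up for illustration):

const std = @import("std");

const Inst = struct {
    tag: enum { bitcast, block, alloc },
    operand: u32,
};

test "overwrite a placeholder instruction in place" {
    const gpa = std.testing.allocator;
    var instructions: std.MultiArrayList(Inst) = .{};
    defer instructions.deinit(gpa);

    // Emit a placeholder now; its index stays valid.
    try instructions.append(gpa, .{ .tag = .bitcast, .operand = 0 });
    const placeholder: usize = 0;

    // Later, once the real result type is known, rewrite it in place.
    instructions.set(placeholder, .{ .tag = .block, .operand = 42 });

    try std.testing.expect(instructions.items(.tag)[placeholder] == .block);
}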
@@ -4086,9 +4180,6 @@ fn storeToInferredAlloc(
     inferred_alloc: *Value.Payload.InferredAlloc,
 ) CompileError!void {
     const operand_ty = sema.typeOf(operand);
-    // Add the stored instruction to the set we will use to resolve peer types
-    // for the inferred allocation.
-    try inferred_alloc.data.stored_inst_list.append(sema.arena, operand);
     // Create a runtime bitcast instruction with exactly the type the pointer wants.
     const target = sema.mod.getTarget();
     const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
@@ -4097,7 +4188,13 @@ fn storeToInferredAlloc(
         .@"addrspace" = target_util.defaultAddressSpace(target, .local),
     });
     const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
-    return sema.storePtr(block, src, bitcasted_ptr, operand);
+    // Add the stored instruction to the set we will use to resolve peer types
+    // for the inferred allocation.
+    try inferred_alloc.data.prongs.append(sema.arena, .{
+        .stored_inst = operand,
+        .placeholder = Air.refToIndex(bitcasted_ptr).?,
+    });
+    return sema.storePtr2(block, src, bitcasted_ptr, src, operand, src, .bitcast);
 }
 
 fn storeToInferredAllocComptime(
@@ -21614,8 +21711,6 @@ fn storePtr2(
     if ((try sema.typeHasOnePossibleValue(block, src, elem_ty)) != null)
         return;
 
     // TODO handle if the element type requires comptime
 
     if (air_tag == .bitcast) {
         // `air_tag == .bitcast` is used as a special case for `zirCoerceResultPtr`
         // to avoid calling `requireRuntimeBlock` for the dummy block.
 
@@ -232,13 +232,13 @@ const Writer = struct {
         .validate_array_init_ty,
         .validate_struct_init_ty,
         .make_ptr_const,
+        .validate_deref,
         => try self.writeUnNode(stream, inst),
 
         .ref,
         .ret_tok,
         .ensure_err_payload_void,
         .closure_capture,
-        .validate_deref,
         => try self.writeUnTok(stream, inst),
 
         .bool_br_and,
@@ -4935,7 +4935,16 @@ pub const Value = extern union {
         /// peer type resolution. This is stored in a separate list so that
         /// the items are contiguous in memory and thus can be passed to
         /// `Module.resolvePeerTypes`.
-        stored_inst_list: std.ArrayListUnmanaged(Air.Inst.Ref) = .{},
+        prongs: std.MultiArrayList(struct {
+            /// The dummy instruction used as a peer to resolve the type.
+            /// Although this has a redundant type with placeholder, this is
+            /// needed in addition because it may be a constant value, which
+            /// affects peer type resolution.
+            stored_inst: Air.Inst.Ref,
+            /// The bitcast instruction used as a placeholder when the
+            /// new result pointer type is not yet known.
+            placeholder: Air.Inst.Index,
+        }) = .{},
         /// 0 means ABI-aligned.
         alignment: u32,
     },
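
Switching from `ArrayListUnmanaged` to `std.MultiArrayList` preserves the contiguity the doc comment promises: each field lives in its own array, so `prongs.items(.stored_inst)` is still a plain slice that can be passed to `Module.resolvePeerTypes`, while each placeholder travels with its peer. A short sketch of that property (field types simplified to `u32`):

const std = @import("std");

const Prong = struct {
    stored_inst: u32, // stand-in for Air.Inst.Ref
    placeholder: u32, // stand-in for Air.Inst.Index
};

test "MultiArrayList stores each field contiguously" {
    const gpa = std.testing.allocator;
    var prongs: std.MultiArrayList(Prong) = .{};
    defer prongs.deinit(gpa);

    try prongs.append(gpa, .{ .stored_inst = 1, .placeholder = 10 });
    try prongs.append(gpa, .{ .stored_inst = 2, .placeholder = 20 });

    // One contiguous slice per field, ready for peer type resolution.
    try std.testing.expectEqualSlices(u32, &[_]u32{ 1, 2 }, prongs.items(.stored_inst));
    try std.testing.expectEqualSlices(u32, &[_]u32{ 10, 20 }, prongs.items(.placeholder));
}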
@@ -570,10 +570,7 @@ test "type coercion of pointer to anon struct literal to pointer to array" {
 }
 
 test "array with comptime only element type" {
-    const a = [_]type{
-        u32,
-        i32,
-    };
+    const a = [_]type{ u32, i32 };
     try testing.expect(a[0] == u32);
     try testing.expect(a[1] == i32);
 }
@@ -128,3 +128,31 @@ test "if peer expressions inferred optional type" {
     try expect(left.? == 98);
     try expect(right.? == 99);
 }
+
+test "if-else expression with runtime condition result location is inferred optional" {
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+
+    const A = struct { b: u64, c: u64 };
+    var d: bool = true;
+    const e = if (d) A{ .b = 15, .c = 30 } else null;
+    try expect(e != null);
+}
+
+test "result location with inferred type ends up being pointer to comptime_int" {
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+
+    var a: ?u32 = 1234;
+    var b: u32 = 2000;
+    var c = if (a) |d| blk: {
+        if (d < b) break :blk @as(u32, 1);
+        break :blk 0;
+    } else @as(u32, 0);
+    try expect(c == 1);
+}