Sema: implement comptime variables

Sema now properly handles `alloc_inferred` and `alloc_inferred_mut` ZIR
instructions inside a comptime execution context. In that case it
creates Decl objects and points to them with the new `decl_ref_mut`
Value Tag. `storePtr` is updated to mutate such Decls' types and
values; on each such store it destroys the old arena and makes a new
one, preventing memory growth during comptime code execution.
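
As an illustration of the user-facing effect, a minimal sketch (the test
name and variables are made up, not taken from this commit's test
changes):

    test "mutate a comptime var in an inline loop" {
        comptime var total: u32 = 0;
        comptime var i: u32 = 0;
        // Each comptime store below goes through the new `decl_ref_mut`
        // mechanism: the store replaces the Decl's value and swaps in a
        // fresh arena, so memory stays bounded across iterations.
        inline while (i < 10) : (i += 1) {
            total += i;
        }
        try @import("std").testing.expect(total == 45);
    }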

Additionally:

 * Fix `storePtr` to emit a compile error for a pointer comptime-known
   to be undefined.
 * Fix `storePtr` to emit runtime instructions for all the cases in
   which a pointer is comptime-known but does not support comptime
   dereferencing, such as `@intToPtr` on a hard-coded address (sketched
   after this list), or an extern function.
 * Fix `ret_coerce` not coercing inside an inline function call context.
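
A sketch of the `@intToPtr` case from the second bullet above; the
address and names are hypothetical, for illustration only:

    // Hypothetical MMIO register address; not a real device.
    const uart_status = @intToPtr(*volatile u32, 0x1000_0000);

    fn clearStatus() void {
        // The pointer value is comptime-known but cannot be dereferenced
        // at comptime, so `storePtr` emits a runtime store instruction
        // here instead.
        uart_status.* = 0;
    }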
Andrew Kelley 2021-08-01 12:27:22 -07:00
parent 7e52a096db
commit 6ae0825e7f
6 changed files with 226 additions and 162 deletions


@@ -154,9 +154,6 @@ pub fn analyzeBody(
// We use a while(true) loop here to avoid a redundant way of breaking out of
// the loop. The only way to break out of the loop is with a `noreturn`
// instruction.
// TODO: As an optimization, make sure the codegen for these switch prongs
// directly jump to the next one, rather than detouring through the loop
// continue expression. Related: https://github.com/ziglang/zig/issues/8220
var i: usize = 0;
while (true) {
const inst = body[i];
@@ -391,7 +388,7 @@ pub fn analyzeBody(
.condbr => return sema.zirCondbr(block, inst),
.@"break" => return sema.zirBreak(block, inst),
.compile_error => return sema.zirCompileError(block, inst),
.ret_coerce => return sema.zirRetCoerce(block, inst, true),
.ret_coerce => return sema.zirRetCoerce(block, inst),
.ret_node => return sema.zirRetNode(block, inst),
.ret_err_value => return sema.zirRetErrValue(block, inst),
.@"unreachable" => return sema.zirUnreachable(block, inst),
@@ -1396,14 +1393,19 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Comp
const var_type = try sema.resolveType(block, ty_src, inst_data.operand);
const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One);
const val_payload = try sema.arena.create(Value.Payload.ComptimeAlloc);
val_payload.* = .{
.data = .{
.runtime_index = block.runtime_index,
.val = undefined, // astgen guarantees there will be a store before the first load
},
};
return sema.addConstant(ptr_type, Value.initPayload(&val_payload.base));
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const decl = try anon_decl.finish(
try var_type.copy(anon_decl.arena()),
// AstGen guarantees there will be a store before the first load, so we put a value
// here indicating there is no valid value.
Value.initTag(.unreachable_value),
);
try sema.mod.declareDeclDependency(sema.owner_decl, decl);
return sema.addConstant(ptr_type, try Value.Tag.decl_ref_mut.create(sema.arena, .{
.runtime_index = block.runtime_index,
.decl = decl,
}));
}
fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -1450,16 +1452,23 @@ fn zirAllocInferred(
const src_node = sema.code.instructions.items(.data)[inst].node;
const src: LazySrcLoc = .{ .node_offset = src_node };
sema.src = src;
const val_payload = try sema.arena.create(Value.Payload.InferredAlloc);
val_payload.* = .{
.data = .{},
};
// `Module.constInst` does not add the instruction to the block because it is
if (block.is_comptime) {
return sema.addConstant(
inferred_alloc_ty,
try Value.Tag.inferred_alloc_comptime.create(sema.arena, undefined),
);
}
// `Sema.addConstant` does not add the instruction to the block because it is
// not needed in the case of constant values. However here, we plan to "downgrade"
// to a normal instruction when we hit `resolve_inferred_alloc`. So we append
// to the block even though it is currently a `.constant`.
const result = try sema.addConstant(inferred_alloc_ty, Value.initPayload(&val_payload.base));
const result = try sema.addConstant(
inferred_alloc_ty,
try Value.Tag.inferred_alloc.create(sema.arena, .{}),
);
try sema.requireFunctionBlock(block, src);
try block.instructions.append(sema.gpa, Air.refToIndex(result).?);
return result;
@@ -1475,25 +1484,47 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde
const ptr_inst = Air.refToIndex(ptr).?;
assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant);
const air_datas = sema.air_instructions.items(.data);
const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload];
const inferred_alloc = ptr_val.castTag(.inferred_alloc).?;
const peer_inst_list = inferred_alloc.data.stored_inst_list.items;
const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list);
const value_index = air_datas[ptr_inst].ty_pl.payload;
const ptr_val = sema.air_values.items[value_index];
const var_is_mut = switch (sema.typeOf(ptr).tag()) {
.inferred_alloc_const => false,
.inferred_alloc_mut => true,
else => unreachable,
};
if (var_is_mut) {
try sema.validateVarType(block, ty_src, final_elem_ty);
}
const final_ptr_ty = try Module.simplePtrType(sema.arena, final_elem_ty, true, .One);
// Change it to a normal alloc.
sema.air_instructions.set(ptr_inst, .{
.tag = .alloc,
.data = .{ .ty = final_ptr_ty },
});
if (ptr_val.castTag(.inferred_alloc_comptime)) |iac| {
const decl = iac.data;
try sema.mod.declareDeclDependency(sema.owner_decl, decl);
const final_elem_ty = try decl.ty.copy(sema.arena);
const final_ptr_ty = try Module.simplePtrType(sema.arena, final_elem_ty, true, .One);
air_datas[ptr_inst].ty_pl.ty = try sema.addType(final_ptr_ty);
if (var_is_mut) {
sema.air_values.items[value_index] = try Value.Tag.decl_ref_mut.create(sema.arena, .{
.decl = decl,
.runtime_index = block.runtime_index,
});
} else {
sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, decl);
}
return;
}
if (ptr_val.castTag(.inferred_alloc)) |inferred_alloc| {
const peer_inst_list = inferred_alloc.data.stored_inst_list.items;
const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list);
if (var_is_mut) {
try sema.validateVarType(block, ty_src, final_elem_ty);
}
// Change it to a normal alloc.
const final_ptr_ty = try Module.simplePtrType(sema.arena, final_elem_ty, true, .One);
sema.air_instructions.set(ptr_inst, .{
.tag = .alloc,
.data = .{ .ty = final_ptr_ty },
});
return;
}
}
fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void {
@@ -1654,23 +1685,45 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index)
const tracy = trace(@src());
defer tracy.end();
const src: LazySrcLoc = .unneeded;
const src: LazySrcLoc = sema.src;
const bin_inst = sema.code.instructions.items(.data)[inst].bin;
const ptr = sema.resolveInst(bin_inst.lhs);
const value = sema.resolveInst(bin_inst.rhs);
const operand = sema.resolveInst(bin_inst.rhs);
const operand_ty = sema.typeOf(operand);
const ptr_inst = Air.refToIndex(ptr).?;
assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant);
const air_datas = sema.air_instructions.items(.data);
const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload];
const inferred_alloc = ptr_val.castTag(.inferred_alloc).?;
// Add the stored instruction to the set we will use to resolve peer types
// for the inferred allocation.
try inferred_alloc.data.stored_inst_list.append(sema.arena, value);
// Create a runtime bitcast instruction with exactly the type the pointer wants.
const ptr_ty = try Module.simplePtrType(sema.arena, sema.typeOf(value), true, .One);
try sema.requireRuntimeBlock(block, src);
const bitcasted_ptr = try block.addTyOp(.bitcast, ptr_ty, ptr);
return sema.storePtr(block, src, bitcasted_ptr, value);
if (ptr_val.castTag(.inferred_alloc_comptime)) |iac| {
// There will be only one store_to_inferred_ptr because we are running at comptime.
// The alloc will turn into a Decl.
if (try sema.resolveMaybeUndefValAllowVariables(block, src, operand)) |operand_val| {
if (operand_val.tag() == .variable) {
return sema.failWithNeededComptime(block, src);
}
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
iac.data = try anon_decl.finish(
try operand_ty.copy(anon_decl.arena()),
try operand_val.copy(anon_decl.arena()),
);
return;
} else {
return sema.failWithNeededComptime(block, src);
}
}
if (ptr_val.castTag(.inferred_alloc)) |inferred_alloc| {
// Add the stored instruction to the set we will use to resolve peer types
// for the inferred allocation.
try inferred_alloc.data.stored_inst_list.append(sema.arena, operand);
// Create a runtime bitcast instruction with exactly the type the pointer wants.
const ptr_ty = try Module.simplePtrType(sema.arena, operand_ty, true, .One);
const bitcasted_ptr = try block.addTyOp(.bitcast, ptr_ty, ptr);
return sema.storePtr(block, src, bitcasted_ptr, operand);
}
unreachable;
}
fn zirSetEvalBranchQuota(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void {
@@ -5643,7 +5696,6 @@ fn zirRetCoerce(
sema: *Sema,
block: *Scope.Block,
inst: Zir.Inst.Index,
need_coercion: bool,
) CompileError!Zir.Inst.Index {
const tracy = trace(@src());
defer tracy.end();
@@ -5652,7 +5704,7 @@ fn zirRetCoerce(
const operand = sema.resolveInst(inst_data.operand);
const src = inst_data.src();
return sema.analyzeRet(block, operand, src, need_coercion);
return sema.analyzeRet(block, operand, src, true);
}
fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
@@ -5673,23 +5725,20 @@ fn analyzeRet(
src: LazySrcLoc,
need_coercion: bool,
) CompileError!Zir.Inst.Index {
const casted_operand = if (!need_coercion) operand else op: {
const func = sema.func.?;
const fn_ty = func.owner_decl.ty;
const fn_ret_ty = fn_ty.fnReturnType();
break :op try sema.coerce(block, fn_ret_ty, operand, src);
};
if (block.inlining) |inlining| {
// We are inlining a function call; rewrite the `ret` as a `break`.
try inlining.merges.results.append(sema.gpa, operand);
_ = try block.addBr(inlining.merges.block_inst, operand);
try inlining.merges.results.append(sema.gpa, casted_operand);
_ = try block.addBr(inlining.merges.block_inst, casted_operand);
return always_noreturn;
}
if (need_coercion) {
if (sema.func) |func| {
const fn_ty = func.owner_decl.ty;
const fn_ret_ty = fn_ty.fnReturnType();
const casted_operand = try sema.coerce(block, fn_ret_ty, operand, src);
_ = try block.addUnOp(.ret, casted_operand);
return always_noreturn;
}
}
_ = try block.addUnOp(.ret, operand);
_ = try block.addUnOp(.ret, casted_operand);
return always_noreturn;
}
@@ -7603,37 +7652,45 @@ fn storePtr(
if ((try sema.typeHasOnePossibleValue(block, src, elem_ty)) != null)
return;
if (try sema.resolveMaybeUndefVal(block, src, ptr)) |ptr_val| blk: {
const const_val = (try sema.resolveMaybeUndefVal(block, src, value)) orelse
return sema.mod.fail(&block.base, src, "cannot store runtime value in compile time variable", .{});
if (try sema.resolveDefinedValue(block, src, ptr)) |ptr_val| {
if (ptr_val.castTag(.decl_ref_mut)) |decl_ref_mut| {
const const_val = (try sema.resolveMaybeUndefVal(block, src, value)) orelse
return sema.mod.fail(&block.base, src, "cannot store runtime value in compile time variable", .{});
if (ptr_val.tag() == .int_u64)
break :blk; // propogate it down to runtime
const comptime_alloc = ptr_val.castTag(.comptime_alloc).?;
if (comptime_alloc.data.runtime_index < block.runtime_index) {
if (block.runtime_cond) |cond_src| {
const msg = msg: {
const msg = try sema.mod.errMsg(&block.base, src, "store to comptime variable depends on runtime condition", .{});
errdefer msg.destroy(sema.gpa);
try sema.mod.errNote(&block.base, cond_src, msg, "runtime condition here", .{});
break :msg msg;
};
return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
if (decl_ref_mut.data.runtime_index < block.runtime_index) {
if (block.runtime_cond) |cond_src| {
const msg = msg: {
const msg = try sema.mod.errMsg(&block.base, src, "store to comptime variable depends on runtime condition", .{});
errdefer msg.destroy(sema.gpa);
try sema.mod.errNote(&block.base, cond_src, msg, "runtime condition here", .{});
break :msg msg;
};
return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
}
if (block.runtime_loop) |loop_src| {
const msg = msg: {
const msg = try sema.mod.errMsg(&block.base, src, "cannot store to comptime variable in non-inline loop", .{});
errdefer msg.destroy(sema.gpa);
try sema.mod.errNote(&block.base, loop_src, msg, "non-inline loop here", .{});
break :msg msg;
};
return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
}
unreachable;
}
if (block.runtime_loop) |loop_src| {
const msg = msg: {
const msg = try sema.mod.errMsg(&block.base, src, "cannot store to comptime variable in non-inline loop", .{});
errdefer msg.destroy(sema.gpa);
try sema.mod.errNote(&block.base, loop_src, msg, "non-inline loop here", .{});
break :msg msg;
};
return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
}
unreachable;
var new_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_arena.deinit();
const new_ty = try elem_ty.copy(&new_arena.allocator);
const new_val = try const_val.copy(&new_arena.allocator);
const decl = decl_ref_mut.data.decl;
var old_arena = decl.value_arena.?.promote(sema.gpa);
decl.value_arena = null;
try decl.finalizeNewArena(&new_arena);
decl.ty = new_ty;
decl.val = new_val;
old_arena.deinit();
return;
}
comptime_alloc.data.val = const_val;
return;
}
// TODO handle if the element type requires comptime


@@ -100,11 +100,13 @@ pub const Value = extern union {
function,
extern_fn,
variable,
/// Represents a comptime variables storage.
comptime_alloc,
/// Represents a pointer to a decl, not the value of the decl.
/// Represents a pointer to a Decl.
/// When machine codegen backend sees this, it must set the Decl's `alive` field to true.
decl_ref,
/// Pointer to a Decl, but allows comptime code to mutate the Decl's Value.
/// This Tag will never be seen by machine codegen backends. It is changed into a
/// `decl_ref` when a comptime variable goes out of scope.
decl_ref_mut,
elem_ptr,
field_ptr,
/// A slice of u8 whose memory is managed externally.
@@ -134,6 +136,9 @@ pub const Value = extern union {
/// This is a special value that tracks a set of types that have been stored
/// to an inferred allocation. It does not support any of the normal value queries.
inferred_alloc,
/// Used to coordinate alloc_inferred, store_to_inferred_ptr, and resolve_inferred_alloc
/// instructions for comptime code.
inferred_alloc_comptime,
pub const last_no_payload_tag = Tag.empty_array;
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
@@ -213,6 +218,7 @@ pub const Value = extern union {
.extern_fn,
.decl_ref,
.inferred_alloc_comptime,
=> Payload.Decl,
.repeated,
@@ -235,7 +241,7 @@ pub const Value = extern union {
.int_i64 => Payload.I64,
.function => Payload.Function,
.variable => Payload.Variable,
.comptime_alloc => Payload.ComptimeAlloc,
.decl_ref_mut => Payload.DeclRefMut,
.elem_ptr => Payload.ElemPtr,
.field_ptr => Payload.FieldPtr,
.float_16 => Payload.Float_16,
@@ -408,8 +414,8 @@ pub const Value = extern union {
.function => return self.copyPayloadShallow(allocator, Payload.Function),
.extern_fn => return self.copyPayloadShallow(allocator, Payload.Decl),
.variable => return self.copyPayloadShallow(allocator, Payload.Variable),
.comptime_alloc => return self.copyPayloadShallow(allocator, Payload.ComptimeAlloc),
.decl_ref => return self.copyPayloadShallow(allocator, Payload.Decl),
.decl_ref_mut => return self.copyPayloadShallow(allocator, Payload.DeclRefMut),
.elem_ptr => {
const payload = self.castTag(.elem_ptr).?;
const new_payload = try allocator.create(Payload.ElemPtr);
@@ -485,6 +491,7 @@ pub const Value = extern union {
.@"union" => @panic("TODO can't copy union value without knowing the type"),
.inferred_alloc => unreachable,
.inferred_alloc_comptime => unreachable,
}
}
@@ -592,10 +599,9 @@ pub const Value = extern union {
.function => return out_stream.print("(function '{s}')", .{val.castTag(.function).?.data.owner_decl.name}),
.extern_fn => return out_stream.writeAll("(extern function)"),
.variable => return out_stream.writeAll("(variable)"),
.comptime_alloc => {
const ref_val = val.castTag(.comptime_alloc).?.data.val;
try out_stream.writeAll("&");
val = ref_val;
.decl_ref_mut => {
const decl = val.castTag(.decl_ref_mut).?.data.decl;
return out_stream.print("(decl_ref_mut '{s}')", .{decl.name});
},
.decl_ref => return out_stream.writeAll("(decl ref)"),
.elem_ptr => {
@@ -626,6 +632,7 @@ pub const Value = extern union {
// TODO to print this it should be error{ Set, Items }!T(val), but we need the type for that
.error_union => return out_stream.print("error_union_val({})", .{val.castTag(.error_union).?.data}),
.inferred_alloc => return out_stream.writeAll("(inferred allocation value)"),
.inferred_alloc_comptime => return out_stream.writeAll("(inferred comptime allocation value)"),
.eu_payload_ptr => {
try out_stream.writeAll("(eu_payload_ptr)");
val = val.castTag(.eu_payload_ptr).?.data;
@@ -741,8 +748,8 @@ pub const Value = extern union {
.function,
.extern_fn,
.variable,
.comptime_alloc,
.decl_ref,
.decl_ref_mut,
.elem_ptr,
.field_ptr,
.bytes,
@@ -761,6 +768,7 @@ pub const Value = extern union {
.@"struct",
.@"union",
.inferred_alloc,
.inferred_alloc_comptime,
.abi_align_default,
.eu_payload_ptr,
=> unreachable,
@@ -1234,7 +1242,13 @@ pub const Value = extern union {
allocator: *Allocator,
) error{ AnalysisFail, OutOfMemory }!?Value {
const sub_val: Value = switch (self.tag()) {
.comptime_alloc => self.castTag(.comptime_alloc).?.data.val,
.decl_ref_mut => val: {
// The decl whose value we are obtaining here may be overwritten with
// a different value, which would invalidate this memory. So we must
// copy here.
const val = try self.castTag(.decl_ref_mut).?.data.decl.value();
break :val try val.copy(allocator);
},
.decl_ref => try self.castTag(.decl_ref).?.data.value(),
.elem_ptr => blk: {
const elem_ptr = self.castTag(.elem_ptr).?.data;
@@ -1351,6 +1365,7 @@ pub const Value = extern union {
.undef => unreachable,
.unreachable_value => unreachable,
.inferred_alloc => unreachable,
.inferred_alloc_comptime => unreachable,
.null_value => true,
else => false,
@@ -1371,6 +1386,7 @@ pub const Value = extern union {
.undef => unreachable,
.unreachable_value => unreachable,
.inferred_alloc => unreachable,
.inferred_alloc_comptime => unreachable,
else => null,
};
@@ -1380,6 +1396,7 @@ pub const Value = extern union {
return switch (self.tag()) {
.undef => unreachable,
.inferred_alloc => unreachable,
.inferred_alloc_comptime => unreachable,
.float_16,
.float_32,
@@ -1443,12 +1460,12 @@ pub const Value = extern union {
data: Value,
};
pub const ComptimeAlloc = struct {
pub const base_tag = Tag.comptime_alloc;
pub const DeclRefMut = struct {
pub const base_tag = Tag.decl_ref_mut;
base: Payload = Payload{ .tag = base_tag },
data: struct {
val: Value,
decl: *Module.Decl,
runtime_index: u32,
},
};


@@ -3,6 +3,7 @@ const builtin = @import("builtin");
test {
// Tests that pass for both.
_ = @import("behavior/bool.zig");
_ = @import("behavior/basic.zig");
if (!builtin.zig_is_stage2) {
// Tests that only pass for stage1.

test/behavior/basic.zig (new file, 9 additions)

@@ -0,0 +1,9 @@
// normal comment
/// this is a documentation comment
/// doc comment line 2
fn emptyFunctionWithComments() void {}
test "empty function with comments" {
emptyFunctionWithComments();
}


@@ -33,3 +33,47 @@ test "compile time bool not" {
try expect(not_global_f);
try expect(!not_global_t);
}
test "short circuit" {
try testShortCircuit(false, true);
comptime try testShortCircuit(false, true);
}
fn testShortCircuit(f: bool, t: bool) !void {
var hit_1 = f;
var hit_2 = f;
var hit_3 = f;
var hit_4 = f;
if (t or x: {
try expect(f);
break :x f;
}) {
hit_1 = t;
}
if (f or x: {
hit_2 = t;
break :x f;
}) {
try expect(f);
}
if (t and x: {
hit_3 = t;
break :x f;
}) {
try expect(f);
}
if (f and x: {
try expect(f);
break :x f;
}) {
try expect(f);
} else {
hit_4 = t;
}
try expect(hit_1);
try expect(hit_2);
try expect(hit_3);
try expect(hit_4);
}


@@ -5,70 +5,6 @@ const expectEqualStrings = std.testing.expectEqualStrings;
const mem = std.mem;
const builtin = @import("builtin");
// normal comment
/// this is a documentation comment
/// doc comment line 2
fn emptyFunctionWithComments() void {}
test "empty function with comments" {
emptyFunctionWithComments();
}
comptime {
@export(disabledExternFn, .{ .name = "disabledExternFn", .linkage = .Internal });
}
fn disabledExternFn() callconv(.C) void {}
test "call disabled extern fn" {
disabledExternFn();
}
test "short circuit" {
try testShortCircuit(false, true);
comptime try testShortCircuit(false, true);
}
fn testShortCircuit(f: bool, t: bool) !void {
var hit_1 = f;
var hit_2 = f;
var hit_3 = f;
var hit_4 = f;
if (t or x: {
try expect(f);
break :x f;
}) {
hit_1 = t;
}
if (f or x: {
hit_2 = t;
break :x f;
}) {
try expect(f);
}
if (t and x: {
hit_3 = t;
break :x f;
}) {
try expect(f);
}
if (f and x: {
try expect(f);
break :x f;
}) {
try expect(f);
} else {
hit_4 = t;
}
try expect(hit_1);
try expect(hit_2);
try expect(hit_3);
try expect(hit_4);
}
test "truncate" {
try expect(testTruncate(0x10fd) == 0xfd);
}