Mirror of https://github.com/ziglang/zig.git (synced 2026-01-14 19:35:12 +00:00)

Merge pull request #21135 from mlugg/incremental
incremental: more progress

Commit 61919fe63d

lib/compiler/aro/aro/Builtins/Builtin.zig (vendored, 2 lines changed)
@@ -5165,7 +5165,7 @@ const dafsa = [_]Node{
     .{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 4913 },
 };
 pub const data = blk: {
-    @setEvalBranchQuota(3986);
+    @setEvalBranchQuota(30_000);
     break :blk [_]@This(){
         // _Block_object_assign
         .{ .tag = @enumFromInt(0), .properties = .{ .param_str = "vv*vC*iC", .header = .blocks, .attributes = .{ .lib_function_without_prefix = true } } },

lib/compiler/aro/aro/Parser.zig (vendored, 2 lines changed)
@@ -4802,6 +4802,7 @@ const CallExpr = union(enum) {
     }

     fn shouldPromoteVarArg(self: CallExpr, arg_idx: u32) bool {
+        @setEvalBranchQuota(2000);
         return switch (self) {
             .standard => true,
             .builtin => |builtin| switch (builtin.tag) {
@@ -4902,6 +4903,7 @@ const CallExpr = union(enum) {
     }

     fn returnType(self: CallExpr, p: *Parser, callable_ty: Type) Type {
+        @setEvalBranchQuota(6000);
         return switch (self) {
             .standard => callable_ty.returnType(),
             .builtin => |builtin| switch (builtin.tag) {

@@ -629,6 +629,8 @@ fn generateSbox(invert: bool) [256]u8 {

 // Generate lookup tables.
 fn generateTable(invert: bool) [4][256]u32 {
+    @setEvalBranchQuota(50000);
+
     var table: [4][256]u32 = undefined;

     for (generateSbox(invert), 0..) |value, index| {

@@ -786,7 +786,7 @@ test "blake2b384 streaming" {

 test "comptime blake2b384" {
     comptime {
-        @setEvalBranchQuota(10000);
+        @setEvalBranchQuota(20000);
         var block = [_]u8{0} ** Blake2b384.block_length;
         var out: [Blake2b384.digest_length]u8 = undefined;

@@ -878,7 +878,7 @@ test "blake2b512 keyed" {

 test "comptime blake2b512" {
     comptime {
-        @setEvalBranchQuota(10000);
+        @setEvalBranchQuota(12000);
         var block = [_]u8{0} ** Blake2b512.block_length;
         var out: [Blake2b512.digest_length]u8 = undefined;

@@ -393,7 +393,7 @@ pub const P384 = struct {
    }

    const basePointPc = pc: {
-        @setEvalBranchQuota(50000);
+        @setEvalBranchQuota(70000);
        break :pc precompute(P384.basePoint, 15);
    };

@@ -6,7 +6,7 @@ const testing = std.testing;
 const EnumField = std.builtin.Type.EnumField;

 /// Increment this value when adding APIs that add single backwards branches.
-const eval_branch_quota_cushion = 5;
+const eval_branch_quota_cushion = 10;

 /// Returns a struct with a field matching each unique named enum element.
 /// If the enum is extern and has multiple names for the same value, only

@@ -890,7 +890,7 @@ test "xxhash32 smhasher" {
        }
    };
    try Test.do();
-    @setEvalBranchQuota(75000);
+    @setEvalBranchQuota(85000);
    comptime try Test.do();
 }

@@ -749,31 +749,23 @@ test rotl {
     try testing.expect(rotl(@Vector(1, u32), @Vector(1, u32){1 << 31}, @as(isize, -1))[0] == @as(u32, 1) << 30);
 }

-/// Returns an unsigned int type that can hold the number of bits in T
-/// - 1. Suitable for 0-based bit indices of T.
+/// Returns an unsigned int type that can hold the number of bits in T - 1.
+/// Suitable for 0-based bit indices of T.
 pub fn Log2Int(comptime T: type) type {
     // comptime ceil log2
     if (T == comptime_int) return comptime_int;
-    comptime var count = 0;
-    comptime var s = @typeInfo(T).Int.bits - 1;
-    inline while (s != 0) : (s >>= 1) {
-        count += 1;
-    }
-
-    return std.meta.Int(.unsigned, count);
+    const bits: u16 = @typeInfo(T).Int.bits;
+    const log2_bits = 16 - @clz(bits - 1);
+    return std.meta.Int(.unsigned, log2_bits);
 }

 /// Returns an unsigned int type that can hold the number of bits in T.
 pub fn Log2IntCeil(comptime T: type) type {
     // comptime ceil log2
     if (T == comptime_int) return comptime_int;
-    comptime var count = 0;
-    comptime var s = @typeInfo(T).Int.bits;
-    inline while (s != 0) : (s >>= 1) {
-        count += 1;
-    }
-
-    return std.meta.Int(.unsigned, count);
+    const bits: u16 = @typeInfo(T).Int.bits;
+    const log2_bits = 16 - @clz(bits);
+    return std.meta.Int(.unsigned, log2_bits);
 }

 /// Returns the smallest integer type that can hold both from and to.
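
Not part of the diff: a minimal sketch (hypothetical helper and test names, plain Zig) checking that the @clz-based computation introduced above agrees with the comptime loop it replaces.

const std = @import("std");

// Old approach: count how many right-shifts it takes to reduce `bits - 1` to zero.
fn ceilLog2ViaLoop(bits: u16) u16 {
    var count: u16 = 0;
    var s: u16 = bits - 1;
    while (s != 0) : (s >>= 1) count += 1;
    return count;
}

test "clz form agrees with the old loop" {
    var bits: u16 = 1;
    while (bits <= 512) : (bits += 1) {
        // New approach: the number of significant bits in `bits - 1`.
        const via_clz = 16 - @as(u16, @clz(bits - 1));
        try std.testing.expectEqual(ceilLog2ViaLoop(bits), via_clz);
    }
}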

@@ -114,6 +114,7 @@ test "hypot.precise" {
 }

 test "hypot.special" {
+    @setEvalBranchQuota(2000);
     inline for (.{ f16, f32, f64, f128 }) |T| {
         try expect(math.isNan(hypot(nan(T), 0.0)));
         try expect(math.isNan(hypot(0.0, nan(T))));

@@ -144,7 +144,7 @@ test "int" {
 }

 test "float" {
-    @setEvalBranchQuota(3000);
+    @setEvalBranchQuota(4000);

     // normal -> normal
     try expect(nextAfter(f16, 0x1.234p0, 2.0) == 0x1.238p0);

@@ -535,6 +535,7 @@ fn testUtf16CountCodepoints() !void {
 }

 test "utf16 count codepoints" {
+    @setEvalBranchQuota(2000);
     try testUtf16CountCodepoints();
     try comptime testUtf16CountCodepoints();
 }

@@ -66,6 +66,10 @@ scratch: std.ArrayListUnmanaged(u32) = .{},
 /// of ZIR.
 /// The key is the ref operand; the value is the ref instruction.
 ref_table: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
+/// Any information which should trigger invalidation of incremental compilation
+/// data should be used to update this hasher. The result is the final source
+/// hash of the enclosing declaration/etc.
+src_hasher: std.zig.SrcHasher,

 const InnerError = error{ OutOfMemory, AnalysisFail };
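
Not part of the diff: a minimal standalone sketch of the save/swap/restore pattern the following AstGen hunks apply around src_hasher. std.hash.Wyhash stands in for std.zig.SrcHasher, and the struct, function, and test names are hypothetical.

const std = @import("std");

const Gen = struct {
    // Stand-in for `src_hasher: std.zig.SrcHasher` (simplified).
    src_hasher: std.hash.Wyhash,

    fn hashDecl(gen: *Gen, decl_source: []const u8, source_column: u32) u64 {
        // Save the enclosing declaration's hasher and restore it on exit,
        // so nested declarations do not pollute their parent's hash.
        const old_hasher = gen.src_hasher;
        defer gen.src_hasher = old_hasher;

        gen.src_hasher = std.hash.Wyhash.init(0);
        gen.src_hasher.update(decl_source);
        gen.src_hasher.update(std.mem.asBytes(&source_column));
        return gen.src_hasher.final();
    }
};

test "nested decls hash independently" {
    var gen: Gen = .{ .src_hasher = std.hash.Wyhash.init(0) };
    const a = gen.hashDecl("fn foo() void {}", 0);
    const b = gen.hashDecl("fn foo() void {}", 4); // same source, different column
    try std.testing.expect(a != b);
}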

@@ -137,6 +141,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
         .arena = arena.allocator(),
         .tree = &tree,
         .nodes_need_rl = &nodes_need_rl,
+        .src_hasher = undefined, // `structDeclInner` for the root struct will set this
     };
     defer astgen.deinit(gpa);

@@ -1422,6 +1427,8 @@ fn fnProtoExpr(
         .is_extern = false,
         .is_noinline = false,
         .noalias_bits = noalias_bits,
+
+        .proto_hash = undefined, // ignored for `body_gz == null`
     });

     _ = try block_scope.addBreak(.break_inline, block_inst, result);

@@ -4007,6 +4014,13 @@ fn fnDecl(
     const tree = astgen.tree;
     const token_tags = tree.tokens.items(.tag);

+    const old_hasher = astgen.src_hasher;
+    defer astgen.src_hasher = old_hasher;
+    astgen.src_hasher = std.zig.SrcHasher.init(.{});
+    // We don't add the full source yet, because we also need the prototype hash!
+    // The source slice is added towards the *end* of this function.
+    astgen.src_hasher.update(std.mem.asBytes(&astgen.source_column));
+
     // missing function name already happened in scanDecls()
     const fn_name_token = fn_proto.name_token orelse return error.AnalysisFail;

@@ -4300,11 +4314,21 @@ fn fnDecl(
             .is_extern = true,
             .is_noinline = is_noinline,
             .noalias_bits = noalias_bits,
+            .proto_hash = undefined, // ignored for `body_gz == null`
         });
     } else func: {
         // as a scope, fn_gz encloses ret_gz, but for instruction list, fn_gz stacks on ret_gz
         fn_gz.instructions_top = ret_gz.instructions.items.len;
+
+        // Construct the prototype hash.
+        // Leave `astgen.src_hasher` unmodified; this will be used for hashing
+        // the *whole* function declaration, including its body.
+        var proto_hasher = astgen.src_hasher;
+        const proto_node = tree.nodes.items(.data)[decl_node].lhs;
+        proto_hasher.update(tree.getNodeSource(proto_node));
+        var proto_hash: std.zig.SrcHash = undefined;
+        proto_hasher.final(&proto_hash);

         const prev_fn_block = astgen.fn_block;
         const prev_fn_ret_ty = astgen.fn_ret_ty;
         defer {

@@ -4362,16 +4386,22 @@ fn fnDecl(
             .is_extern = false,
             .is_noinline = is_noinline,
             .noalias_bits = noalias_bits,
+            .proto_hash = proto_hash,
         });
     };

+    // *Now* we can incorporate the full source code into the hasher.
+    astgen.src_hasher.update(tree.getNodeSource(decl_node));
+
     // We add this at the end so that its instruction index marks the end range
     // of the top level declaration. addFunc already unstacked fn_gz and ret_gz.
     _ = try decl_gz.addBreak(.break_inline, decl_inst, func_inst);

+    var hash: std.zig.SrcHash = undefined;
+    astgen.src_hasher.final(&hash);
     try setDeclaration(
         decl_inst,
-        std.zig.hashSrc(tree.getNodeSource(decl_node)),
+        hash,
         .{ .named = fn_name_token },
         decl_gz.decl_line,
         is_pub,

@@ -4395,6 +4425,12 @@ fn globalVarDecl(
     const tree = astgen.tree;
     const token_tags = tree.tokens.items(.tag);

+    const old_hasher = astgen.src_hasher;
+    defer astgen.src_hasher = old_hasher;
+    astgen.src_hasher = std.zig.SrcHasher.init(.{});
+    astgen.src_hasher.update(tree.getNodeSource(node));
+    astgen.src_hasher.update(std.mem.asBytes(&astgen.source_column));
+
     const is_mutable = token_tags[var_decl.ast.mut_token] == .keyword_var;
     // We do this at the beginning so that the instruction index marks the range start
     // of the top level declaration.

@@ -4534,9 +4570,11 @@ fn globalVarDecl(
         _ = try addrspace_gz.addBreakWithSrcNode(.break_inline, decl_inst, addrspace_inst, node);
     }

+    var hash: std.zig.SrcHash = undefined;
+    astgen.src_hasher.final(&hash);
     try setDeclaration(
         decl_inst,
-        std.zig.hashSrc(tree.getNodeSource(node)),
+        hash,
         .{ .named = name_token },
         block_scope.decl_line,
         is_pub,

@@ -4562,6 +4600,12 @@ fn comptimeDecl(
     const node_datas = tree.nodes.items(.data);
     const body_node = node_datas[node].lhs;

+    const old_hasher = astgen.src_hasher;
+    defer astgen.src_hasher = old_hasher;
+    astgen.src_hasher = std.zig.SrcHasher.init(.{});
+    astgen.src_hasher.update(tree.getNodeSource(node));
+    astgen.src_hasher.update(std.mem.asBytes(&astgen.source_column));
+
     // Up top so the ZIR instruction index marks the start range of this
     // top-level declaration.
     const decl_inst = try gz.makeDeclaration(node);

@@ -4584,9 +4628,11 @@ fn comptimeDecl(
         _ = try decl_block.addBreak(.break_inline, decl_inst, .void_value);
     }

+    var hash: std.zig.SrcHash = undefined;
+    astgen.src_hasher.final(&hash);
     try setDeclaration(
         decl_inst,
-        std.zig.hashSrc(tree.getNodeSource(node)),
+        hash,
         .@"comptime",
         decl_block.decl_line,
         false,

@@ -4607,6 +4653,12 @@ fn usingnamespaceDecl(
     const tree = astgen.tree;
     const node_datas = tree.nodes.items(.data);

+    const old_hasher = astgen.src_hasher;
+    defer astgen.src_hasher = old_hasher;
+    astgen.src_hasher = std.zig.SrcHasher.init(.{});
+    astgen.src_hasher.update(tree.getNodeSource(node));
+    astgen.src_hasher.update(std.mem.asBytes(&astgen.source_column));
+
     const type_expr = node_datas[node].lhs;
     const is_pub = blk: {
         const main_tokens = tree.nodes.items(.main_token);

@@ -4634,9 +4686,11 @@ fn usingnamespaceDecl(
     const namespace_inst = try typeExpr(&decl_block, &decl_block.base, type_expr);
     _ = try decl_block.addBreak(.break_inline, decl_inst, namespace_inst);

+    var hash: std.zig.SrcHash = undefined;
+    astgen.src_hasher.final(&hash);
     try setDeclaration(
         decl_inst,
-        std.zig.hashSrc(tree.getNodeSource(node)),
+        hash,
         .@"usingnamespace",
         decl_block.decl_line,
         is_pub,

@@ -4658,6 +4712,12 @@ fn testDecl(
     const node_datas = tree.nodes.items(.data);
     const body_node = node_datas[node].rhs;

+    const old_hasher = astgen.src_hasher;
+    defer astgen.src_hasher = old_hasher;
+    astgen.src_hasher = std.zig.SrcHasher.init(.{});
+    astgen.src_hasher.update(tree.getNodeSource(node));
+    astgen.src_hasher.update(std.mem.asBytes(&astgen.source_column));
+
     // Up top so the ZIR instruction index marks the start range of this
     // top-level declaration.
     const decl_inst = try gz.makeDeclaration(node);

@@ -4819,13 +4879,18 @@ fn testDecl(
         .is_extern = false,
         .is_noinline = false,
         .noalias_bits = 0,
+
+        // Tests don't have a prototype that needs hashing
+        .proto_hash = .{0} ** 16,
     });

     _ = try decl_block.addBreak(.break_inline, decl_inst, func_inst);

+    var hash: std.zig.SrcHash = undefined;
+    astgen.src_hasher.final(&hash);
     try setDeclaration(
         decl_inst,
-        std.zig.hashSrc(tree.getNodeSource(node)),
+        hash,
         test_name,
         decl_block.decl_line,
         false,

@@ -4983,10 +5048,12 @@ fn structDeclInner(
         }
     };

-    var fields_hasher = std.zig.SrcHasher.init(.{});
-    fields_hasher.update(@tagName(layout));
+    const old_hasher = astgen.src_hasher;
+    defer astgen.src_hasher = old_hasher;
+    astgen.src_hasher = std.zig.SrcHasher.init(.{});
+    astgen.src_hasher.update(@tagName(layout));
     if (backing_int_node != 0) {
-        fields_hasher.update(tree.getNodeSource(backing_int_node));
+        astgen.src_hasher.update(tree.getNodeSource(backing_int_node));
     }

     var sfba = std.heap.stackFallback(256, astgen.arena);

@@ -5009,7 +5076,7 @@ fn structDeclInner(
             .field => |field| field,
         };

-        fields_hasher.update(tree.getNodeSource(member_node));
+        astgen.src_hasher.update(tree.getNodeSource(member_node));

         if (!is_tuple) {
             const field_name = try astgen.identAsString(member.ast.main_token);

@@ -5139,7 +5206,7 @@ fn structDeclInner(
     }

     var fields_hash: std.zig.SrcHash = undefined;
-    fields_hasher.final(&fields_hash);
+    astgen.src_hasher.final(&fields_hash);

     try gz.setStruct(decl_inst, .{
         .src_node = node,

@@ -5240,11 +5307,13 @@ fn unionDeclInner(
     var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, field_count, bits_per_field, max_field_size);
     defer wip_members.deinit();

-    var fields_hasher = std.zig.SrcHasher.init(.{});
-    fields_hasher.update(@tagName(layout));
-    fields_hasher.update(&.{@intFromBool(auto_enum_tok != null)});
+    const old_hasher = astgen.src_hasher;
+    defer astgen.src_hasher = old_hasher;
+    astgen.src_hasher = std.zig.SrcHasher.init(.{});
+    astgen.src_hasher.update(@tagName(layout));
+    astgen.src_hasher.update(&.{@intFromBool(auto_enum_tok != null)});
     if (arg_node != 0) {
-        fields_hasher.update(astgen.tree.getNodeSource(arg_node));
+        astgen.src_hasher.update(astgen.tree.getNodeSource(arg_node));
     }

     var sfba = std.heap.stackFallback(256, astgen.arena);

@@ -5261,7 +5330,7 @@ fn unionDeclInner(
             .decl => continue,
             .field => |field| field,
         };
-        fields_hasher.update(astgen.tree.getNodeSource(member_node));
+        astgen.src_hasher.update(astgen.tree.getNodeSource(member_node));
         member.convertToNonTupleLike(astgen.tree.nodes);
         if (member.ast.tuple_like) {
             return astgen.failTok(member.ast.main_token, "union field missing name", .{});

@@ -5364,7 +5433,7 @@ fn unionDeclInner(
     }

     var fields_hash: std.zig.SrcHash = undefined;
-    fields_hasher.final(&fields_hash);
+    astgen.src_hasher.final(&fields_hash);

     if (!block_scope.isEmpty()) {
         _ = try block_scope.addBreak(.break_inline, decl_inst, .void_value);

@@ -5578,11 +5647,13 @@ fn containerDecl(
             var wip_members = try WipMembers.init(gpa, &astgen.scratch, @intCast(counts.decls), @intCast(counts.total_fields), bits_per_field, max_field_size);
             defer wip_members.deinit();

-            var fields_hasher = std.zig.SrcHasher.init(.{});
+            const old_hasher = astgen.src_hasher;
+            defer astgen.src_hasher = old_hasher;
+            astgen.src_hasher = std.zig.SrcHasher.init(.{});
             if (container_decl.ast.arg != 0) {
-                fields_hasher.update(tree.getNodeSource(container_decl.ast.arg));
+                astgen.src_hasher.update(tree.getNodeSource(container_decl.ast.arg));
             }
-            fields_hasher.update(&.{@intFromBool(nonexhaustive)});
+            astgen.src_hasher.update(&.{@intFromBool(nonexhaustive)});

            var sfba = std.heap.stackFallback(256, astgen.arena);
            const sfba_allocator = sfba.get();

@@ -5596,7 +5667,7 @@ fn containerDecl(
             for (container_decl.ast.members) |member_node| {
                 if (member_node == counts.nonexhaustive_node)
                     continue;
-                fields_hasher.update(tree.getNodeSource(member_node));
+                astgen.src_hasher.update(tree.getNodeSource(member_node));
                 var member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) {
                     .decl => continue,
                     .field => |field| field,

@@ -5676,7 +5747,7 @@ fn containerDecl(
             }

             var fields_hash: std.zig.SrcHash = undefined;
-            fields_hasher.final(&fields_hash);
+            astgen.src_hasher.final(&fields_hash);

             const body = block_scope.instructionsSlice();
             const body_len = astgen.countBodyLenAfterFixups(body);

@@ -8478,6 +8549,10 @@ fn tunnelThroughClosure(
         });
     }

+    // Incorporate the capture index into the source hash, so that changes in
+    // the order of captures cause suitable re-analysis.
+    astgen.src_hasher.update(std.mem.asBytes(&cur_capture_index));
+
     // Add an instruction to get the value from the closure.
     return gz.addExtendedNodeSmall(.closure_get, inner_ref_node, cur_capture_index);
 }

@@ -9306,6 +9381,13 @@ fn builtinCall(
        },

        .src => {
+            // Incorporate the source location into the source hash, so that
+            // changes in the source location of `@src()` result in re-analysis.
+            astgen.src_hasher.update(
+                std.mem.asBytes(&astgen.source_line) ++
+                    std.mem.asBytes(&astgen.source_column),
+            );
+
            const token_starts = tree.tokens.items(.start);
            const node_start = token_starts[tree.firstToken(node)];
            astgen.advanceSourceCursor(node_start);

@@ -12122,6 +12204,9 @@ const GenZir = struct {
         is_test: bool,
         is_extern: bool,
         is_noinline: bool,
+
+        /// Ignored if `body_gz == null`.
+        proto_hash: std.zig.SrcHash,
     }) !Zir.Inst.Ref {
         assert(args.src_node != 0);
         const astgen = gz.astgen;

@@ -12150,15 +12235,7 @@ const GenZir = struct {

         const columns = args.lbrace_column | (rbrace_column << 16);

-        const proto_hash: std.zig.SrcHash = switch (node_tags[fn_decl]) {
-            .fn_decl => sig_hash: {
-                const proto_node = node_datas[fn_decl].lhs;
-                break :sig_hash std.zig.hashSrc(tree.getNodeSource(proto_node));
-            },
-            .test_decl => std.zig.hashSrc(""), // tests don't have a prototype
-            else => unreachable,
-        };
-        const proto_hash_arr: [4]u32 = @bitCast(proto_hash);
+        const proto_hash_arr: [4]u32 = @bitCast(args.proto_hash);

         src_locs_and_hash_buffer = .{
             args.lbrace_line,

@@ -2391,6 +2391,7 @@ pub const Key = union(enum) {
         func: Index,
         arg_values: []const Index,
         result: Index,
+        branch_count: u32,
     };

     pub fn hash32(key: Key, ip: *const InternPool) u32 {

@@ -6157,6 +6158,7 @@ pub const MemoizedCall = struct {
     func: Index,
     args_len: u32,
     result: Index,
+    branch_count: u32,
 };

 pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {

@@ -6785,6 +6787,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
             .func = extra.data.func,
             .arg_values = @ptrCast(extra_list.view().items(.@"0")[extra.end..][0..extra.data.args_len]),
             .result = extra.data.result,
+            .branch_count = extra.data.branch_count,
         } };
     },
 };

@@ -7955,6 +7958,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
             .func = memoized_call.func,
             .args_len = @intCast(memoized_call.arg_values.len),
             .result = memoized_call.result,
+            .branch_count = memoized_call.branch_count,
         }),
     });
     extra.appendSliceAssumeCapacity(.{@ptrCast(memoized_call.arg_values)});

src/Sema.zig (192 lines changed)
@@ -113,6 +113,11 @@ type_references: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
 /// `AnalUnit` multiple times.
 dependencies: std.AutoArrayHashMapUnmanaged(InternPool.Dependee, void) = .{},

+/// Whether memoization of this call is permitted. Operations with side effects global
+/// to the `Sema`, such as `@setEvalBranchQuota`, set this to `false`. It is observed
+/// by `analyzeCall`.
+allow_memoize: bool = true,
+
 const MaybeComptimeAlloc = struct {
     /// The runtime index of the `alloc` instruction.
     runtime_index: Value.RuntimeIndex,
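
Not part of the diff: a small self-contained example (hypothetical function and test names) of the behavior this new field documents. Because bump calls @setEvalBranchQuota, which has a side effect global to the Sema, a comptime call of it is not memoized under this change; it still evaluates normally.

const std = @import("std");

// `bump` raises the eval branch quota, a side effect global to the `Sema`;
// per the new `allow_memoize` field, a comptime call of it is not cached.
fn bump(n: usize) usize {
    @setEvalBranchQuota(10_000);
    var acc: usize = 0;
    var i: usize = 0;
    while (i < n) : (i += 1) acc += i;
    return acc;
}

test "quota-raising comptime call still evaluates correctly" {
    try std.testing.expectEqual(@as(usize, 4950), comptime bump(100));
}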

@@ -5524,6 +5529,7 @@ fn zirSetEvalBranchQuota(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
         .needed_comptime_reason = "eval branch quota must be comptime-known",
     }));
     sema.branch_quota = @max(sema.branch_quota, quota);
+    sema.allow_memoize = false;
 }

 fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {

@@ -6416,6 +6422,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
     }

     zcu.intern_pool.funcMaxStackAlignment(sema.func_index, alignment);
+    sema.allow_memoize = false;
 }

 fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {

@@ -6434,6 +6441,7 @@ fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData)
         .cau => return, // does nothing outside a function
     };
     ip.funcSetCold(func, is_cold);
+    sema.allow_memoize = false;
 }

 fn zirDisableInstrumentation(sema: *Sema) CompileError!void {

@@ -6445,6 +6453,7 @@ fn zirDisableInstrumentation(sema: *Sema) CompileError!void {
         .cau => return, // does nothing outside a function
     };
     ip.funcSetDisableInstrumentation(func);
+    sema.allow_memoize = false;
 }

 fn zirSetFloatMode(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {

@@ -7589,6 +7598,9 @@ fn analyzeCall(

         const module_fn = zcu.funcInfo(module_fn_index);

+        // The call site definitely depends on the function's signature.
+        try sema.declareDependency(.{ .src_hash = module_fn.zir_body_inst });
+
         // This is not a function instance, so the function's `Nav` has a
         // `Cau` -- we don't need to check `generic_owner`.
         const fn_nav = ip.getNav(module_fn.owner_nav);

@@ -7725,101 +7737,121 @@ fn analyzeCall(
} }));
}

// This `res2` is here instead of directly breaking from `res` due to a stage1
// bug generating invalid LLVM IR.
const res2: Air.Inst.Ref = res2: {
if (should_memoize and is_comptime_call) {
if (zcu.intern_pool.getIfExists(.{ .memoized_call = .{
memoize: {
if (!should_memoize) break :memoize;
if (!is_comptime_call) break :memoize;
const memoized_call_index = ip.getIfExists(.{
.memoized_call = .{
.func = module_fn_index,
.arg_values = memoized_arg_values,
.result = .none,
} })) |memoized_call_index| {
const memoized_call = zcu.intern_pool.indexToKey(memoized_call_index).memoized_call;
break :res2 Air.internedToRef(memoized_call.result);
}
.result = undefined, // ignored by hash+eql
.branch_count = undefined, // ignored by hash+eql
},
}) orelse break :memoize;
const memoized_call = ip.indexToKey(memoized_call_index).memoized_call;
if (sema.branch_count + memoized_call.branch_count > sema.branch_quota) {
// Let the call play out se we get the correct source location for the
// "evaluation exceeded X backwards branches" error.
break :memoize;
}
sema.branch_count += memoized_call.branch_count;
break :res Air.internedToRef(memoized_call.result);
}

new_fn_info.return_type = sema.fn_ret_ty.toIntern();
if (!is_comptime_call and !block.is_typeof) {
const zir_tags = sema.code.instructions.items(.tag);
for (fn_info.param_body) |param| switch (zir_tags[@intFromEnum(param)]) {
.param, .param_comptime => {
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(param)].pl_tok;
const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index);
const param_name = sema.code.nullTerminatedString(extra.data.name);
const inst = sema.inst_map.get(param).?;
// Since we're doing an inline call, we depend on the source code of the whole
// function declaration.
try sema.declareDependency(.{ .src_hash = fn_cau.zir_index });

try sema.addDbgVar(&child_block, inst, .dbg_arg_inline, param_name);
},
.param_anytype, .param_anytype_comptime => {
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(param)].str_tok;
const param_name = inst_data.get(sema.code);
const inst = sema.inst_map.get(param).?;
new_fn_info.return_type = sema.fn_ret_ty.toIntern();
if (!is_comptime_call and !block.is_typeof) {
const zir_tags = sema.code.instructions.items(.tag);
for (fn_info.param_body) |param| switch (zir_tags[@intFromEnum(param)]) {
.param, .param_comptime => {
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(param)].pl_tok;
const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index);
const param_name = sema.code.nullTerminatedString(extra.data.name);
const inst = sema.inst_map.get(param).?;

try sema.addDbgVar(&child_block, inst, .dbg_arg_inline, param_name);
},
else => continue,
};
}
try sema.addDbgVar(&child_block, inst, .dbg_arg_inline, param_name);
},
.param_anytype, .param_anytype_comptime => {
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(param)].str_tok;
const param_name = inst_data.get(sema.code);
const inst = sema.inst_map.get(param).?;

if (is_comptime_call and ensure_result_used) {
try sema.ensureResultUsed(block, sema.fn_ret_ty, call_src);
}

if (is_comptime_call or block.is_typeof) {
// Save the error trace as our first action in the function
// to match the behavior of runtime function calls.
const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&child_block);
sema.error_return_trace_index_on_fn_entry = error_return_trace_index;
child_block.error_return_trace_index = error_return_trace_index;
}

const result = result: {
sema.analyzeFnBody(&child_block, fn_info.body) catch |err| switch (err) {
error.ComptimeReturn => break :result inlining.comptime_result,
else => |e| return e,
};
break :result try sema.resolveAnalyzedBlock(block, call_src, &child_block, merges, need_debug_scope);
try sema.addDbgVar(&child_block, inst, .dbg_arg_inline, param_name);
},
else => continue,
};
}

if (is_comptime_call) {
const result_val = try sema.resolveConstValue(block, LazySrcLoc.unneeded, result, undefined);
const result_interned = result_val.toIntern();
if (is_comptime_call and ensure_result_used) {
try sema.ensureResultUsed(block, sema.fn_ret_ty, call_src);
}

// Transform ad-hoc inferred error set types into concrete error sets.
const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_interned);
if (is_comptime_call or block.is_typeof) {
// Save the error trace as our first action in the function
// to match the behavior of runtime function calls.
const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&child_block);
sema.error_return_trace_index_on_fn_entry = error_return_trace_index;
child_block.error_return_trace_index = error_return_trace_index;
}

// If the result can mutate comptime vars, we must not memoize it, as it contains
// a reference to `comptime_allocs` so is not stable across instances of `Sema`.
// TODO: check whether any external comptime memory was mutated by the
// comptime function call. If so, then do not memoize the call here.
if (should_memoize and !Value.fromInterned(result_interned).canMutateComptimeVarState(zcu)) {
_ = try pt.intern(.{ .memoized_call = .{
.func = module_fn_index,
.arg_values = memoized_arg_values,
.result = result_transformed,
} });
}
// We temporarily set `allow_memoize` to `true` to track this comptime call.
// It is restored after this call finishes analysis, so that a caller may
// know whether an in-progress call (containing this call) may be memoized.
const old_allow_memoize = sema.allow_memoize;
defer sema.allow_memoize = old_allow_memoize and sema.allow_memoize;
sema.allow_memoize = true;

break :res2 Air.internedToRef(result_transformed);
}
// Store the current eval branch count so we can find out how many eval branches
// the comptime call caused.
const old_branch_count = sema.branch_count;

if (try sema.resolveValue(result)) |result_val| {
const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_val.toIntern());
break :res2 Air.internedToRef(result_transformed);
}

const new_ty = try sema.resolveAdHocInferredErrorSetTy(block, call_src, sema.typeOf(result).toIntern());
if (new_ty != .none) {
// TODO: mutate in place the previous instruction if possible
// rather than adding a bitcast instruction.
break :res2 try block.addBitCast(Type.fromInterned(new_ty), result);
}

break :res2 result;
const result = result: {
sema.analyzeFnBody(&child_block, fn_info.body) catch |err| switch (err) {
error.ComptimeReturn => break :result inlining.comptime_result,
else => |e| return e,
};
break :result try sema.resolveAnalyzedBlock(block, call_src, &child_block, merges, need_debug_scope);
};

break :res res2;
if (is_comptime_call) {
const result_val = try sema.resolveConstValue(block, LazySrcLoc.unneeded, result, undefined);
const result_interned = result_val.toIntern();

// Transform ad-hoc inferred error set types into concrete error sets.
const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_interned);

// If the result can mutate comptime vars, we must not memoize it, as it contains
// a reference to `comptime_allocs` so is not stable across instances of `Sema`.
// TODO: check whether any external comptime memory was mutated by the
// comptime function call. If so, then do not memoize the call here.
if (should_memoize and sema.allow_memoize and !Value.fromInterned(result_interned).canMutateComptimeVarState(zcu)) {
_ = try pt.intern(.{ .memoized_call = .{
.func = module_fn_index,
.arg_values = memoized_arg_values,
.result = result_transformed,
.branch_count = sema.branch_count - old_branch_count,
} });
}

break :res Air.internedToRef(result_transformed);
}

if (try sema.resolveValue(result)) |result_val| {
const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_val.toIntern());
break :res Air.internedToRef(result_transformed);
}

const new_ty = try sema.resolveAdHocInferredErrorSetTy(block, call_src, sema.typeOf(result).toIntern());
if (new_ty != .none) {
// TODO: mutate in place the previous instruction if possible
// rather than adding a bitcast instruction.
break :res try block.addBitCast(Type.fromInterned(new_ty), result);
}

break :res result;
} else res: {
assert(!func_ty_info.is_generic);
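
Not part of the diff: a simplified model (hypothetical names; a plain hash map stands in for the InternPool, integers for interned values) of the branch-count bookkeeping added above. A memoized hit replays the recorded branch count against the caller's quota, and a successful call records how many branches it consumed, unless a side effect cleared allow_memoize.

const std = @import("std");

const Memo = struct {
    result: u64,
    branch_count: u32,
};

const Evaluator = struct {
    branch_count: u32 = 0,
    branch_quota: u32 = 1000,
    allow_memoize: bool = true,
    cache: std.AutoHashMap(u64, Memo),

    fn call(ev: *Evaluator, key: u64, body: *const fn (*Evaluator) u64) !u64 {
        // Memoization hit: charge the recorded branches to the caller, unless that
        // would exceed the quota (then re-evaluate so the "exceeded backwards
        // branches" error points at real code).
        if (ev.cache.get(key)) |memo| {
            if (ev.branch_count + memo.branch_count <= ev.branch_quota) {
                ev.branch_count += memo.branch_count;
                return memo.result;
            }
        }

        const old_allow_memoize = ev.allow_memoize;
        defer ev.allow_memoize = old_allow_memoize and ev.allow_memoize;
        ev.allow_memoize = true;

        const old_branch_count = ev.branch_count;
        const result = body(ev);

        // Memoization miss: record the result together with the number of
        // branches this call consumed.
        if (ev.allow_memoize) {
            try ev.cache.put(key, .{
                .result = result,
                .branch_count = ev.branch_count - old_branch_count,
            });
        }
        return result;
    }
};

fn addSome(ev: *Evaluator) u64 {
    ev.branch_count += 10; // pretend the body performed 10 backwards branches
    return 42;
}

test "second call is served from the cache" {
    var ev: Evaluator = .{ .cache = std.AutoHashMap(u64, Memo).init(std.testing.allocator) };
    defer ev.cache.deinit();

    try std.testing.expectEqual(@as(u64, 42), try ev.call(1, &addSome));
    try std.testing.expectEqual(@as(u64, 42), try ev.call(1, &addSome));
    // 10 branches charged for the real run, 10 more replayed from the memo entry.
    try std.testing.expectEqual(@as(u32, 20), ev.branch_count);
}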

@@ -93,6 +93,8 @@ pub fn RegisterManager(
        comptime set: []const Register,
        reg: Register,
    ) ?std.math.IntFittingRange(0, set.len - 1) {
+        @setEvalBranchQuota(3000);
+
        const Id = @TypeOf(reg.id());
        comptime var min_id: Id = std.math.maxInt(Id);
        comptime var max_id: Id = std.math.minInt(Id);

@@ -31,7 +31,7 @@ pub fn main() void {}
comptime {
    const x: [*c]u8 = null;
    var runtime_len: usize = undefined;
    runtime_len = 0;
    runtime_len = 0;
    const y = x[0..runtime_len];
    _ = y;
}

test/incremental/modify_inline_fn (new file, 23 lines)
@@ -0,0 +1,23 @@
+#target=x86_64-linux
+#update=initial version
+#file=main.zig
+const std = @import("std");
+pub fn main() !void {
+    const str = getStr();
+    try std.io.getStdOut().writeAll(str);
+}
+inline fn getStr() []const u8 {
+    return "foo\n";
+}
+#expect_stdout="foo\n"
+#update=change the string
+#file=main.zig
+const std = @import("std");
+pub fn main() !void {
+    const str = getStr();
+    try std.io.getStdOut().writeAll(str);
+}
+inline fn getStr() []const u8 {
+    return "bar\n";
+}
+#expect_stdout="bar\n"

test/incremental/move_src (new file, 29 lines)
@@ -0,0 +1,29 @@
+#target=x86_64-linux
+#update=initial version
+#file=main.zig
+const std = @import("std");
+pub fn main() !void {
+    try std.io.getStdOut().writer().print("{d} {d}\n", .{ foo(), bar() });
+}
+fn foo() u32 {
+    return @src().line;
+}
+fn bar() u32 {
+    return 123;
+}
+#expect_stdout="6 123\n"
+
+#update=add newline
+#file=main.zig
+const std = @import("std");
+pub fn main() !void {
+    try std.io.getStdOut().writer().print("{d} {d}\n", .{ foo(), bar() });
+}
+
+fn foo() u32 {
+    return @src().line;
+}
+fn bar() u32 {
+    return 123;
+}
+#expect_stdout="7 123\n"