stage2: fix incremental compilation Decl deletion logic

* `analyzeContainer` now has an `outdated_decls` set as well as
   `deleted_decls`. Instead of queuing up outdated Decls for re-analysis
   right away, they are added to this new set. When processing the
   `deleted_decls` set, we remove deleted Decls from the
   `outdated_decls` set, to prevent deleted Decl pointers from ending up in
   the work_queue. Only after processing the deleted decls do we add
   analyze_decl work items to the queue.

 * Module.deletion_set is now an `AutoArrayHashMap` rather than `ArrayList`.
   `declareDeclDependency` will now remove a Decl from it as appropriate.
   When processing the `deletion_set` in `Compilation.performAllTheWork`,
   it now assumes all Decls in the set are to be deleted.

 * Fix crash when handling parse errors. Currently we unload the
   `ast.Tree` if any parse errors occur. Previously the code emitted a
   LazySrcLoc pointing to a token index, but then when we try to resolve
   the token index to a byte offset to create a compile error message,
   the `ast.Tree` would be unloaded. Now we use
   `LazySrcLoc.byte_abs` instead of `token_abs` so the error message can
   be created even with the `ast.Tree` unloaded.

Together, these changes solve a crash that happened with incremental
compilation when Decls were added and removed in some combinations.
This commit is contained in:
Andrew Kelley 2021-04-07 19:38:00 -07:00
parent 8f28e26e7a
commit 4996c2b6a9
5 changed files with 145 additions and 70 deletions

View File

@ -1899,9 +1899,9 @@ fn containerDecl(
if (member.ast.type_expr != 0) {
return mod.failNode(scope, member.ast.type_expr, "enum fields do not have types", .{});
}
if (member.ast.align_expr != 0) {
return mod.failNode(scope, member.ast.align_expr, "enum fields do not have alignments", .{});
}
// Alignment expressions in enums are caught by the parser.
assert(member.ast.align_expr == 0);
const name_token = member.ast.name_token;
if (mem.eql(u8, tree.tokenSlice(name_token), "_")) {
if (nonexhaustive_node != 0) {

View File

@ -1377,14 +1377,17 @@ pub fn update(self: *Compilation) !void {
if (!use_stage1) {
if (self.bin_file.options.module) |module| {
// Process the deletion set.
while (module.deletion_set.popOrNull()) |decl| {
if (decl.dependants.items().len != 0) {
decl.deletion_flag = false;
continue;
}
try module.deleteDecl(decl);
// Process the deletion set. We use a while loop here because the
// deletion set may grow as we call `deleteDecl` within this loop,
// and more unreferenced Decls are revealed.
var entry_i: usize = 0;
while (entry_i < module.deletion_set.entries.items.len) : (entry_i += 1) {
const decl = module.deletion_set.entries.items[entry_i].key;
assert(decl.deletion_flag);
assert(decl.dependants.items().len == 0);
try module.deleteDecl(decl, null);
}
module.deletion_set.shrinkRetainingCapacity(0);
}
}

View File

@ -75,7 +75,7 @@ next_anon_name_index: usize = 0,
/// Candidates for deletion. After a semantic analysis update completes, this list
/// contains Decls that need to be deleted if they end up having no references to them.
deletion_set: ArrayListUnmanaged(*Decl) = .{},
deletion_set: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{},
/// Error tags and their values, tag names are duped with mod.gpa.
/// Corresponds with `error_name_list`.
@ -192,7 +192,7 @@ pub const Decl = struct {
/// to require re-analysis.
outdated,
},
/// This flag is set when this Decl is added to a check_for_deletion set, and cleared
/// This flag is set when this Decl is added to `Module.deletion_set`, and cleared
/// when removed.
deletion_flag: bool,
/// Whether the corresponding AST decl has a `pub` keyword.
@ -2393,7 +2393,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) InnerError!void {
// We don't perform a deletion here, because this Decl or another one
// may end up referencing it before the update is complete.
dep.deletion_flag = true;
try mod.deletion_set.append(mod.gpa, dep);
try mod.deletion_set.put(mod.gpa, dep, {});
}
}
decl.dependencies.clearRetainingCapacity();
@ -3197,6 +3197,11 @@ pub fn declareDeclDependency(mod: *Module, depender: *Decl, dependee: *Decl) !u3
try depender.dependencies.ensureCapacity(mod.gpa, depender.dependencies.count() + 1);
try dependee.dependants.ensureCapacity(mod.gpa, dependee.dependants.count() + 1);
if (dependee.deletion_flag) {
dependee.deletion_flag = false;
mod.deletion_set.removeAssertDiscard(dependee);
}
dependee.dependants.putAssumeCapacity(depender, {});
const gop = depender.dependencies.getOrPutAssumeCapacity(dependee);
return @intCast(u32, gop.index);
@ -3224,12 +3229,14 @@ pub fn getAstTree(mod: *Module, root_scope: *Scope.File) !*const ast.Tree {
var msg = std.ArrayList(u8).init(mod.gpa);
defer msg.deinit();
const token_starts = tree.tokens.items(.start);
try tree.renderError(parse_err, msg.writer());
const err_msg = try mod.gpa.create(ErrorMsg);
err_msg.* = .{
.src_loc = .{
.container = .{ .file_scope = root_scope },
.lazy = .{ .token_abs = parse_err.token },
.lazy = .{ .byte_abs = token_starts[parse_err.token] },
},
.msg = msg.toOwnedSlice(),
};
@ -3274,6 +3281,14 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
deleted_decls.putAssumeCapacityNoClobber(entry.key, {});
}
// Keep track of decls that are invalidated from the update. Ultimately,
// the goal is to queue up `analyze_decl` tasks in the work queue for
// the outdated decls, but we cannot queue up the tasks until after
// we find out which ones have been deleted, otherwise there would be
// deleted Decl pointers in the work queue.
var outdated_decls = std.AutoArrayHashMap(*Decl, void).init(mod.gpa);
defer outdated_decls.deinit();
for (decls) |decl_node, decl_i| switch (node_tags[decl_node]) {
.fn_decl => {
const fn_proto = node_datas[decl_node].lhs;
@ -3284,6 +3299,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
try mod.semaContainerFn(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@ -3294,6 +3310,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
.fn_proto_multi => try mod.semaContainerFn(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@ -3305,6 +3322,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
try mod.semaContainerFn(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@ -3315,6 +3333,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
.fn_proto => try mod.semaContainerFn(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@ -3329,6 +3348,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
try mod.semaContainerFn(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@ -3339,6 +3359,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
.fn_proto_multi => try mod.semaContainerFn(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@ -3350,6 +3371,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
try mod.semaContainerFn(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@ -3360,6 +3382,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
.fn_proto => try mod.semaContainerFn(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@ -3370,6 +3393,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
.global_var_decl => try mod.semaContainerVar(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@ -3378,6 +3402,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
.local_var_decl => try mod.semaContainerVar(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@ -3386,6 +3411,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
.simple_var_decl => try mod.semaContainerVar(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@ -3394,6 +3420,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
.aligned_var_decl => try mod.semaContainerVar(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@ -3446,11 +3473,27 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
},
else => unreachable,
};
// Handle explicitly deleted decls from the source code. Not to be confused
// with when we delete decls because they are no longer referenced.
// Handle explicitly deleted decls from the source code. This is one of two
// places that Decl deletions happen. The other is in `Compilation`, after
// `performAllTheWork`, where we iterate over `Module.deletion_set` and
// delete Decls which are no longer referenced.
// If a Decl is explicitly deleted from source, and also no longer referenced,
// it may be both in this `deleted_decls` set, as well as in the
// `Module.deletion_set`. To avoid deleting it twice, we remove it from the
// deletion set at this time.
for (deleted_decls.items()) |entry| {
log.debug("noticed '{s}' deleted from source", .{entry.key.name});
try mod.deleteDecl(entry.key);
const decl = entry.key;
log.debug("'{s}' deleted from source", .{decl.name});
if (decl.deletion_flag) {
log.debug("'{s}' redundantly in deletion set; removing", .{decl.name});
mod.deletion_set.removeAssertDiscard(decl);
}
try mod.deleteDecl(decl, &outdated_decls);
}
// Finally we can queue up re-analysis tasks after we have processed
// the deleted decls.
for (outdated_decls.items()) |entry| {
try mod.markOutdatedDecl(entry.key);
}
}
@ -3458,6 +3501,7 @@ fn semaContainerFn(
mod: *Module,
container_scope: *Scope.Container,
deleted_decls: *std.AutoArrayHashMap(*Decl, void),
outdated_decls: *std.AutoArrayHashMap(*Decl, void),
decl_node: ast.Node.Index,
decl_i: usize,
tree: ast.Tree,
@ -3489,7 +3533,7 @@ fn semaContainerFn(
try mod.failed_decls.putNoClobber(mod.gpa, decl, msg);
} else {
if (!srcHashEql(decl.contents_hash, contents_hash)) {
try mod.markOutdatedDecl(decl);
try outdated_decls.put(decl, {});
decl.contents_hash = contents_hash;
} else switch (mod.comp.bin_file.tag) {
.coff => {
@ -3524,6 +3568,7 @@ fn semaContainerVar(
mod: *Module,
container_scope: *Scope.Container,
deleted_decls: *std.AutoArrayHashMap(*Decl, void),
outdated_decls: *std.AutoArrayHashMap(*Decl, void),
decl_node: ast.Node.Index,
decl_i: usize,
tree: ast.Tree,
@ -3549,7 +3594,7 @@ fn semaContainerVar(
errdefer err_msg.destroy(mod.gpa);
try mod.failed_decls.putNoClobber(mod.gpa, decl, err_msg);
} else if (!srcHashEql(decl.contents_hash, contents_hash)) {
try mod.markOutdatedDecl(decl);
try outdated_decls.put(decl, {});
decl.contents_hash = contents_hash;
}
} else {
@ -3579,17 +3624,27 @@ fn semaContainerField(
log.err("TODO: analyze container field", .{});
}
pub fn deleteDecl(mod: *Module, decl: *Decl) !void {
pub fn deleteDecl(
mod: *Module,
decl: *Decl,
outdated_decls: ?*std.AutoArrayHashMap(*Decl, void),
) !void {
const tracy = trace(@src());
defer tracy.end();
try mod.deletion_set.ensureCapacity(mod.gpa, mod.deletion_set.items.len + decl.dependencies.items().len);
log.debug("deleting decl '{s}'", .{decl.name});
if (outdated_decls) |map| {
_ = map.swapRemove(decl);
try map.ensureCapacity(map.count() + decl.dependants.count());
}
try mod.deletion_set.ensureCapacity(mod.gpa, mod.deletion_set.count() +
decl.dependencies.count());
// Remove from the namespace it resides in. In the case of an anonymous Decl it will
// not be present in the set, and this does nothing.
decl.container.removeDecl(decl);
log.debug("deleting decl '{s}'", .{decl.name});
const name_hash = decl.fullyQualifiedNameHash();
mod.decl_table.removeAssertDiscard(name_hash);
// Remove itself from its dependencies, because we are about to destroy the decl pointer.
@ -3600,16 +3655,22 @@ pub fn deleteDecl(mod: *Module, decl: *Decl) !void {
// We don't recursively perform a deletion here, because during the update,
// another reference to it may turn up.
dep.deletion_flag = true;
mod.deletion_set.appendAssumeCapacity(dep);
mod.deletion_set.putAssumeCapacity(dep, {});
}
}
// Anything that depends on this deleted decl certainly needs to be re-analyzed.
// Anything that depends on this deleted decl needs to be re-analyzed.
for (decl.dependants.items()) |entry| {
const dep = entry.key;
dep.removeDependency(decl);
if (dep.analysis != .outdated) {
// TODO Move this failure possibility to the top of the function.
try mod.markOutdatedDecl(dep);
if (outdated_decls) |map| {
map.putAssumeCapacity(dep, {});
} else if (std.debug.runtime_safety) {
// If `outdated_decls` is `null`, it means we're being called from
// `Compilation` after `performAllTheWork` and we cannot queue up any
// more work. `dep` must necessarily be another Decl that is no longer
// being referenced, and will be in the `deletion_set`. Otherwise,
// something has gone wrong.
assert(mod.deletion_set.contains(dep));
}
}
if (mod.failed_decls.swapRemove(decl)) |entry| {

View File

@ -538,6 +538,45 @@ pub fn addCases(ctx: *TestContext) !void {
{
var case = ctx.exeFromCompiledC("enums", .{});
case.addError(
\\const E1 = packed enum { a, b, c };
\\const E2 = extern enum { a, b, c };
\\export fn foo() void {
\\ const x = E1.a;
\\}
\\export fn bar() void {
\\ const x = E2.a;
\\}
, &.{
":1:12: error: enums do not support 'packed' or 'extern'; instead provide an explicit integer tag type",
":2:12: error: enums do not support 'packed' or 'extern'; instead provide an explicit integer tag type",
});
// comptime and types are caught in AstGen.
case.addError(
\\const E1 = enum {
\\ a,
\\ comptime b,
\\ c,
\\};
\\const E2 = enum {
\\ a,
\\ b: i32,
\\ c,
\\};
\\export fn foo() void {
\\ const x = E1.a;
\\}
\\export fn bar() void {
\\ const x = E2.a;
\\}
, &.{
":3:5: error: enum fields cannot be marked comptime",
":8:8: error: enum fields do not have types",
});
// @enumToInt, @intToEnum, enum literal coercion, field access syntax, comparison, switch
case.addCompareOutput(
\\const Number = enum { One, Two, Three };
\\
@ -559,6 +598,20 @@ pub fn addCases(ctx: *TestContext) !void {
\\ }
\\}
, "");
// Specifying alignment is a parse error.
case.addError(
\\const E1 = enum {
\\ a,
\\ b align(4),
\\ c,
\\};
\\export fn foo() void {
\\ const x = E1.a;
\\}
, &.{
":3:7: error: expected ',', found 'align'",
});
}
ctx.c("empty start function", linux_x64,

View File

@ -1598,46 +1598,4 @@ pub fn addCases(ctx: *TestContext) !void {
"",
);
}
{
var case = ctx.exe("enum_literal -> enum", linux_x64);
case.addCompareOutput(
\\const E = enum { a, b };
\\export fn _start() noreturn {
\\ const a: E = .a;
\\ const b: E = .b;
\\ exit();
\\}
\\fn exit() noreturn {
\\ asm volatile ("syscall"
\\ :
\\ : [number] "{rax}" (231),
\\ [arg1] "{rdi}" (0)
\\ : "rcx", "r11", "memory"
\\ );
\\ unreachable;
\\}
,
"",
);
case.addError(
\\export fn _start() noreturn {
\\ const a: E = .c;
\\ exit();
\\}
\\const E = enum { a, b };
\\fn exit() noreturn {
\\ asm volatile ("syscall"
\\ :
\\ : [number] "{rax}" (231),
\\ [arg1] "{rdi}" (0)
\\ : "rcx", "r11", "memory"
\\ );
\\ unreachable;
\\}
, &.{
":2:19: error: enum 'E' has no field named 'c'",
":5:11: note: enum declared here",
});
}
}