stage2: rewire the frontend driver to whole-file-zir
* Remove some unused imports in AstGen.zig. I think it would make sense
to start decoupling AstGen from the rest of the compiler code,
similar to how the tokenizer and parser are decoupled.
* AstGen: For decls, move the block_inline instructions to the top of
the function so that they get lower ZIR instruction indexes. With
this, the block_inline instruction index combined with its corresponding
break_inline instruction index can be used to form a ZIR instruction
range, which is useful for allocating an array that maps ZIR instructions
to semantically analyzed instructions (see the first sketch after this list).
* Module: extract emit-h functionality into a struct, and only allocate
it when emit-h is activated.
* Module: remove the `decl_table` field. This previously was a table of
all Decls in the entire Module. A "name hash" strategy was used to
find decls within a given namespace via this global table. Now,
each Namespace has its own map from name to child Decls (see the
second sketch after this list).
- Additionally, there were 3 places that relied on iterating over
decl_table in order to function:
- C backend and SPIR-V backend. These now have their own decl_table
that they keep populated when `updateDecl` and `removeDecl` are
called.
- emit-h. A `decl_table` field has been added to the new GlobalEmitH
struct which is only allocated when emit-h is activated.
* Module: fix a ZIR serialization/deserialization bug in debug mode
caused by the secret safety tag that untagged unions carry in
safety-checked builds (see the third sketch after this list). There is
still an open TODO to investigate a friendlier language-level solution
to this problem.
* Module: improve ZIR deserialization to allocate exactly as much
capacity as length for the instructions array, so that no space is
wasted.
* Module: move `srcHashEql` to `std.zig` to live next to the definition
of `SrcHash` itself.
* Module: re-introduce the logic for scanning top level declarations
within a namespace.
* Compilation: add an `analyze_pkg` Job which is used to kick off the
start of semantic analysis by doing the equivalent of
`_ = @import("std");`. The `analyze_pkg` job is unconditionally added
to the work queue on every update(), with pkg set to the std lib pkg.
* Rename TZIR to AIR in a few places. A more comprehensive rename will
come later.
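
First sketch: a minimal illustration of the instruction-range technique
from the AstGen bullet above. It reuses the names that appear in the
fnDecl hunk further down (gz, decl_gz, fn_proto, and func_inst stand in
for surrounding AstGen state elided here), and it assumes addBreak
returns the new instruction's index, which the actual diff discards:

    // Emitted first, so block_inst is the lowest ZIR index of the decl.
    const block_inst = try gz.addBlock(.block_inline, fn_proto.ast.proto_node);

    // ... all ZIR instructions for the declaration body go here ...

    // Emitted last, so the break_inline index is the highest of the decl.
    const break_inst = try decl_gz.addBreak(.break_inline, block_inst, func_inst);
    try decl_gz.setBlockBody(block_inst);

    // Every instruction belonging to the decl now falls in the inclusive
    // range [block_inst, break_inst], so Sema can allocate a flat array of
    // (break_inst - block_inst + 1) slots mapping ZIR instructions to
    // semantically analyzed instructions.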
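Second sketch: the decl_table removal swaps a global, name-hash-keyed
lookup for a per-Namespace map. The two field types are copied from the
src/Module.zig hunks below; the lookup lines are illustrative rather
than verbatim from the commit:

    // Before: one Module-wide table, keyed by a 16-byte fully qualified
    // name hash:
    //   decl_table: std.ArrayHashMapUnmanaged(Scope.NameHash, *Decl,
    //       Scope.name_hash_hash, Scope.name_hash_eql, false) = .{},
    //   const decl = mod.decl_table.get(namespace.fullyQualifiedNameHash(name));

    // After: each Namespace owns its direct children, keyed by name, with
    // declaration order preserved by entry order:
    //   decls: std.StringArrayHashMapUnmanaged(*Decl) = .{},
    const decl = namespace.decls.get(name);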
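Third sketch: the union-serialization bullet refers to the
Stage1DataLayout hunk in src/Module.zig below. In safety-checked builds
an untagged union such as Zir.Inst.Data carries a hidden safety tag, so
@sizeOf(Zir.Inst.Data) != 8; the fix pins the 8 payload bytes at offset
zero so the serialized layout no longer depends on where the hidden tag
lands. (The explanatory comments here are an interpretation, not text
from the commit.)

    // True when the hidden safety tag inflates Data past its payload size:
    const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8;

    // The old field order put safety_tag first, which shifted the payload.
    // The new order keeps the payload at a stable, aligned offset:
    const Stage1DataLayout = extern struct {
        data: [8]u8 align(8),
        safety_tag: u8,
    };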

This commit is contained in:
parent 91c317bb9a
commit bfded492f0

BRANCH_TODO: 359 changed lines
@@ -1,3 +1,6 @@
* reimplement semaDecl
* use a hash map for instructions because the array is too big

* keep track of file dependencies/dependants
* unload files from memory when a dependency is dropped

@@ -5,6 +8,7 @@

* get rid of failed_root_src_file
* get rid of Scope.DeclRef
* get rid of NameHash
* handle decl collision with usingnamespace
* the decl doing the looking up needs to create a decl dependency
  on each usingnamespace decl
@@ -35,58 +39,6 @@
* AstGen: add result location pointers to function calls
* nested function decl: how to refer to params?

* detect when to put cached ZIR into the local cache instead of the global one

const container_name_hash: Scope.NameHash = if (found_pkg) |pkg|
    pkg.namespace_hash
else
    std.zig.hashName(cur_pkg.namespace_hash, "/", resolved_path);

file_scope.* = .{
    .root_container = .{
        .parent = null,
        .file_scope = file_scope,
        .decls = .{},
        .ty = struct_ty,
        .parent_name_hash = container_name_hash,
    },
};
mod.analyzeContainer(&file_scope.root_container) catch |err| switch (err) {
    error.AnalysisFail => {
        assert(mod.comp.totalErrorCount() != 0);
    },
    else => |e| return e,
};
return file_scope;

// Until then we simulate a full cache miss. Source files could have been loaded
// for any reason; to force a refresh we unload now.
module.unloadFile(module.root_scope);
module.failed_root_src_file = null;
module.analyzeNamespace(&module.root_scope.root_container) catch |err| switch (err) {
    error.AnalysisFail => {
        assert(self.totalErrorCount() != 0);
    },
    error.OutOfMemory => return error.OutOfMemory,
    else => |e| {
        module.failed_root_src_file = e;
    },
};

// TODO only analyze imports if they are still referenced
for (module.import_table.items()) |entry| {
    module.unloadFile(entry.value);
    module.analyzeNamespace(&entry.value.root_container) catch |err| switch (err) {
        error.AnalysisFail => {
            assert(self.totalErrorCount() != 0);
        },
        else => |e| return e,
    };
}

pub fn createContainerDecl(
    mod: *Module,
    scope: *Scope,
@@ -131,123 +83,6 @@ fn getAnonTypeName(mod: *Module, scope: *Scope, base_token: std.zig.ast.TokenInd
}

const parent_name_hash: Scope.NameHash = if (found_pkg) |pkg|
    pkg.namespace_hash
else
    std.zig.hashName(cur_pkg.namespace_hash, "/", resolved_path);

// We need a Decl to pass to AstGen and collect dependencies. But ultimately we
// want to pass them on to the Decl for the struct that represents the file.
var tmp_namespace: Scope.Namespace = .{
    .parent = null,
    .file_scope = new_file,
    .parent_name_hash = parent_name_hash,
    .ty = Type.initTag(.type),
};

const tree = try mod.getAstTree(new_file);

const top_decl = try mod.createNewDecl(
    &tmp_namespace,
    resolved_path,
    0,
    parent_name_hash,
    std.zig.hashSrc(tree.source),
);
defer {
    mod.decl_table.removeAssertDiscard(parent_name_hash);
    top_decl.destroy(mod);
}

var gen_scope_arena = std.heap.ArenaAllocator.init(gpa);
defer gen_scope_arena.deinit();

var astgen = try AstGen.init(mod, top_decl, &gen_scope_arena.allocator);
defer astgen.deinit();

var gen_scope: Scope.GenZir = .{
    .force_comptime = true,
    .parent = &new_file.base,
    .astgen = &astgen,
};
defer gen_scope.instructions.deinit(gpa);

const container_decl: ast.full.ContainerDecl = .{
    .layout_token = null,
    .ast = .{
        .main_token = undefined,
        .enum_token = null,
        .members = tree.rootDecls(),
        .arg = 0,
    },
};

const struct_decl_ref = try AstGen.structDeclInner(
    &gen_scope,
    &gen_scope.base,
    0,
    container_decl,
    .struct_decl,
);
_ = try gen_scope.addBreak(.break_inline, 0, struct_decl_ref);

var code = try gen_scope.finish();
defer code.deinit(gpa);
if (std.builtin.mode == .Debug and mod.comp.verbose_ir) {
    code.dump(gpa, "import", &gen_scope.base, 0) catch {};
}

var sema: Sema = .{
    .mod = mod,
    .gpa = gpa,
    .arena = &gen_scope_arena.allocator,
    .code = code,
    .inst_map = try gen_scope_arena.allocator.alloc(*ir.Inst, code.instructions.len),
    .owner_decl = top_decl,
    .namespace = top_decl.namespace,
    .func = null,
    .owner_func = null,
    .param_inst_list = &.{},
};
var block_scope: Scope.Block = .{
    .parent = null,
    .sema = &sema,
    .src_decl = top_decl,
    .instructions = .{},
    .inlining = null,
    .is_comptime = true,
};
defer block_scope.instructions.deinit(gpa);

const init_inst_zir_ref = try sema.rootAsRef(&block_scope);
const analyzed_struct_inst = try sema.resolveInst(init_inst_zir_ref);
assert(analyzed_struct_inst.ty.zigTypeTag() == .Type);
const val = analyzed_struct_inst.value().?;
const struct_ty = try val.toType(&gen_scope_arena.allocator);
const struct_decl = struct_ty.getOwnerDecl();

struct_decl.contents_hash = top_decl.contents_hash;
new_file.namespace = struct_ty.getNamespace().?;
new_file.namespace.parent = null;
//new_file.namespace.parent_name_hash = tmp_namespace.parent_name_hash;

// Transfer the dependencies to `owner_decl`.
assert(top_decl.dependants.count() == 0);
for (top_decl.dependencies.items()) |entry| {
    const dep = entry.key;
    dep.removeDependant(top_decl);
    if (dep == struct_decl) continue;
    _ = try mod.declareDeclDependency(struct_decl, dep);
}

return new_file;

pub fn analyzeFile(mod: *Module, file: *Scope.File) !void {
    // We call `getAstTree` here so that `analyzeFile` has the error set that includes
    // file system operations, but `analyzeNamespace` does not.
@@ -467,38 +302,6 @@ fn astgenAndSemaFn(
    }
    return type_changed or is_inline != prev_is_inline;
}

fn astgenAndSemaVarDecl(
    mod: *Module,
    decl: *Decl,
    tree: ast.Tree,
    var_decl: ast.full.VarDecl,
) !bool {
    const token_tags = tree.tokens.items(.tag);

}

/// Asserts the scope is a child of a File and has an AST tree and returns the tree.
pub fn tree(scope: *Scope) *const ast.Tree {
    switch (scope.tag) {
        .file => return &scope.cast(File).?.tree,
        .block => return &scope.cast(Block).?.src_decl.namespace.file_scope.tree,
        .gen_zir => return scope.cast(GenZir).?.tree(),
        .local_val => return &scope.cast(LocalVal).?.gen_zir.astgen.decl.namespace.file_scope.tree,
        .local_ptr => return &scope.cast(LocalPtr).?.gen_zir.astgen.decl.namespace.file_scope.tree,
        .namespace => return &scope.cast(Namespace).?.file_scope.tree,
        .decl_ref => return &scope.cast(DeclRef).?.decl.namespace.file_scope.tree,
    }
}

error.FileNotFound => {
    return mod.fail(&block.base, src, "unable to find '{s}'", .{operand});
},

log.debug("extern fn symbol expected in lib '{s}'", .{lib_name_str});
mod.comp.stage1AddLinkLib(lib_name_str) catch |err| {
    return mod.failTok(
@@ -540,86 +343,6 @@ fn astgenAndSemaVarDecl(
);
}

if (counts.values == 0 and counts.decls == 0 and arg_inst == .none) {
    // No explicitly provided tag values and no top level declarations! In this case,
    // we can construct the enum type in AstGen and it will be correctly shared by all
    // generic function instantiations and comptime function calls.
    var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
    errdefer new_decl_arena.deinit();
    const arena = &new_decl_arena.allocator;

    var fields_map: std.StringArrayHashMapUnmanaged(void) = .{};
    try fields_map.ensureCapacity(arena, counts.total_fields);
    for (container_decl.ast.members) |member_node| {
        if (member_node == counts.nonexhaustive_node)
            continue;
        const member = switch (node_tags[member_node]) {
            .container_field_init => tree.containerFieldInit(member_node),
            .container_field_align => tree.containerFieldAlign(member_node),
            .container_field => tree.containerField(member_node),
            else => unreachable, // We checked earlier.
        };
        const name_token = member.ast.name_token;
        const tag_name = try mod.identifierTokenStringTreeArena(
            scope,
            name_token,
            tree,
            arena,
        );
        const gop = fields_map.getOrPutAssumeCapacity(tag_name);
        if (gop.found_existing) {
            const msg = msg: {
                const msg = try mod.errMsg(
                    scope,
                    gz.tokSrcLoc(name_token),
                    "duplicate enum tag",
                    .{},
                );
                errdefer msg.destroy(gpa);
                // Iterate to find the other tag. We don't eagerly store it in a hash
                // map because in the hot path there will be no compile error and we
                // don't need to waste time with a hash map.
                const bad_node = for (container_decl.ast.members) |other_member_node| {
                    const other_member = switch (node_tags[other_member_node]) {
                        .container_field_init => tree.containerFieldInit(other_member_node),
                        .container_field_align => tree.containerFieldAlign(other_member_node),
                        .container_field => tree.containerField(other_member_node),
                        else => unreachable, // We checked earlier.
                    };
                    const other_tag_name = try mod.identifierTokenStringTreeArena(
                        scope,
                        other_member.ast.name_token,
                        tree,
                        arena,
                    );
                    if (mem.eql(u8, tag_name, other_tag_name))
                        break other_member_node;
                } else unreachable;
                const other_src = gz.nodeSrcLoc(bad_node);
                try mod.errNote(scope, other_src, msg, "other tag here", .{});
                break :msg msg;
            };
            return mod.failWithOwnedErrorMsg(scope, msg);
        }
    }
    const enum_simple = try arena.create(Module.EnumSimple);
    enum_simple.* = .{
        .owner_decl = astgen.decl,
        .node_offset = astgen.decl.nodeIndexToRelative(node),
        .fields = fields_map,
    };
    const enum_ty = try Type.Tag.enum_simple.create(arena, enum_simple);
    const enum_val = try Value.Tag.ty.create(arena, enum_ty);
    const new_decl = try mod.createAnonymousDecl(scope, &new_decl_arena, .{
        .ty = Type.initTag(.type),
        .val = enum_val,
    });
    const decl_index = try mod.declareDeclDependency(astgen.decl, new_decl);
    const result = try gz.addDecl(.decl_val, decl_index, node);
    return rvalue(gz, scope, rl, result, node);
}

if (mod.lookupIdentifier(scope, ident_name)) |decl| {
    const msg = msg: {
        const msg = try mod.errMsg(
@@ -687,29 +410,6 @@ fn astgenAndSemaVarDecl(
    }
}

fn writeFuncExtra(
    self: *Writer,
    stream: anytype,
    inst: Inst.Index,
    var_args: bool,
) !void {
    const inst_data = self.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = self.code.extraData(Inst.FuncExtra, inst_data.payload_index);
    const param_types = self.code.refSlice(extra.end, extra.data.param_types_len);
    const cc = extra.data.cc;
    const body = self.code.extra[extra.end + param_types.len ..][0..extra.data.body_len];
    return self.writeFuncCommon(
        stream,
        param_types,
        extra.data.return_type,
        var_args,
        cc,
        body,
        src,
    );
}

const error_set = try arena.create(Module.ErrorSet);
error_set.* = .{
@@ -732,3 +432,54 @@ fn astgenAndSemaVarDecl(

// when implementing this be sure to add test coverage for the asm return type
// not resolving into a type (the node_offset_asm_ret_ty field of LazySrcLoc)

pub fn analyzeNamespace(
    mod: *Module,
    namespace: *Scope.Namespace,
    decls: []const ast.Node.Index,
) InnerError!void {
    for (decls) |decl_node| switch (node_tags[decl_node]) {
        .@"comptime" => {
            const name_index = mod.getNextAnonNameIndex();
            const name = try std.fmt.allocPrint(mod.gpa, "__comptime_{d}", .{name_index});
            defer mod.gpa.free(name);

            const contents_hash = std.zig.hashSrc(tree.getNodeSource(decl_node));

            const new_decl = try mod.createNewDecl(namespace, name, decl_node, name_hash, contents_hash);
            namespace.decls.putAssumeCapacity(new_decl, {});
            mod.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
        },

        // Container fields are handled in AstGen.
        .container_field_init,
        .container_field_align,
        .container_field,
        => continue,

        .test_decl => {
            if (mod.comp.bin_file.options.is_test) {
                log.err("TODO: analyze test decl", .{});
            }
        },
        .@"usingnamespace" => {
            const name_index = mod.getNextAnonNameIndex();
            const name = try std.fmt.allocPrint(mod.gpa, "__usingnamespace_{d}", .{name_index});
            defer mod.gpa.free(name);

            const contents_hash = std.zig.hashSrc(tree.getNodeSource(decl_node));

            const new_decl = try mod.createNewDecl(namespace, name, decl_node, name_hash, contents_hash);
            namespace.decls.putAssumeCapacity(new_decl, {});

            mod.ensureDeclAnalyzed(new_decl) catch |err| switch (err) {
                error.OutOfMemory => return error.OutOfMemory,
                error.AnalysisFail => continue,
            };
        },
        else => unreachable,
    };
}
@@ -24,6 +24,10 @@ pub fn hashSrc(src: []const u8) SrcHash {
    return out;
}

pub fn srcHashEql(a: SrcHash, b: SrcHash) bool {
    return @bitCast(u128, a) == @bitCast(u128, b);
}

pub fn hashName(parent_hash: SrcHash, sep: []const u8, name: []const u8) SrcHash {
    var out: SrcHash = undefined;
    var hasher = std.crypto.hash.Blake3.init(.{});
@@ -12,9 +12,6 @@ const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const ArrayListUnmanaged = std.ArrayListUnmanaged;

const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
const Zir = @import("Zir.zig");
const Module = @import("Module.zig");
const trace = @import("tracy.zig").trace;
@@ -2648,6 +2645,10 @@ fn fnDecl(
    const tree = &astgen.file.tree;
    const token_tags = tree.tokens.items(.tag);

    // We insert this at the beginning so that its instruction index marks the
    // start of the top level declaration.
    const block_inst = try gz.addBlock(.block_inline, fn_proto.ast.proto_node);

    var decl_gz: GenZir = .{
        .force_comptime = true,
        .decl_node_index = fn_proto.ast.proto_node,
@@ -2843,7 +2844,8 @@ fn fnDecl(
    };
    const fn_name_str_index = try decl_gz.identAsString(fn_name_token);

    const block_inst = try gz.addBlock(.block_inline, fn_proto.ast.proto_node);
    // We add this at the end so that its instruction index marks the end range
    // of the top level declaration.
    _ = try decl_gz.addBreak(.break_inline, block_inst, func_inst);
    try decl_gz.setBlockBody(block_inst);

@@ -2875,6 +2877,12 @@ fn globalVarDecl(
    const tree = &astgen.file.tree;
    const token_tags = tree.tokens.items(.tag);

    const is_mutable = token_tags[var_decl.ast.mut_token] == .keyword_var;
    const tag: Zir.Inst.Tag = if (is_mutable) .block_inline_var else .block_inline;
    // We do this at the beginning so that the instruction index marks the range start
    // of the top level declaration.
    const block_inst = try gz.addBlock(tag, node);

    var block_scope: GenZir = .{
        .parent = scope,
        .decl_node_index = node,
@@ -2900,7 +2908,6 @@ fn globalVarDecl(
    };
    try wip_decls.next(gpa, is_pub, is_export, align_inst != .none, section_inst != .none);

    const is_mutable = token_tags[var_decl.ast.mut_token] == .keyword_var;
    const is_threadlocal = if (var_decl.threadlocal_token) |tok| blk: {
        if (!is_mutable) {
            return astgen.failTok(tok, "threadlocal variable cannot be constant", .{});
@@ -2940,8 +2947,8 @@ fn globalVarDecl(
        var_decl.ast.init_node,
    );

    const tag: Zir.Inst.Tag = if (is_mutable) .block_inline_var else .block_inline;
    const block_inst = try gz.addBlock(tag, node);
    // We do this at the end so that the instruction index marks the end
    // range of a top level declaration.
    _ = try block_scope.addBreak(.break_inline, block_inst, init_inst);
    try block_scope.setBlockBody(block_inst);
    break :vi block_inst;
@@ -180,6 +180,8 @@ const Job = union(enum) {
    /// The source file containing the Decl has been updated, and so the
    /// Decl may need its line number information updated in the debug info.
    update_line_number: *Module.Decl,
    /// The main source file for the package needs to be analyzed.
    analyze_pkg: *Package,

    /// one of the glibc static objects
    glibc_crt_file: glibc.CRTFile,
@@ -278,6 +280,7 @@ pub const MiscTask = enum {
    compiler_rt,
    libssp,
    zig_libc,
    analyze_pkg,
};

pub const MiscError = struct {
@@ -1155,6 +1158,13 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
        .path = try options.global_cache_directory.join(arena, &[_][]const u8{zir_sub_dir}),
    };

    const emit_h: ?*Module.GlobalEmitH = if (options.emit_h) |loc| eh: {
        const eh = try gpa.create(Module.GlobalEmitH);
        eh.* = .{ .loc = loc };
        break :eh eh;
    } else null;
    errdefer if (emit_h) |eh| gpa.destroy(eh);

    // TODO when we implement serialization and deserialization of incremental
    // compilation metadata, this is where we would load it. We have open a handle
    // to the directory where the output either already is, or will be.
@@ -1170,7 +1180,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
        .zig_cache_artifact_directory = zig_cache_artifact_directory,
        .global_zir_cache = global_zir_cache,
        .local_zir_cache = local_zir_cache,
        .emit_h = options.emit_h,
        .emit_h = emit_h,
        .error_name_list = try std.ArrayListUnmanaged([]const u8).initCapacity(gpa, 1),
    };
    module.error_name_list.appendAssumeCapacity("(no error)");
@@ -1595,6 +1605,8 @@ pub fn update(self: *Compilation) !void {
        for (module.import_table.items()) |entry| {
            self.astgen_work_queue.writeItemAssumeCapacity(entry.value);
        }

        try self.work_queue.writeItem(.{ .analyze_pkg = std_pkg });
    }
}

@@ -1672,11 +1684,13 @@ pub fn totalErrorCount(self: *Compilation) usize {
        }
        total += 1;
    }
    for (module.emit_h_failed_decls.items()) |entry| {
        if (entry.key.namespace.file_scope.status == .parse_failure) {
            continue;
    if (module.emit_h) |emit_h| {
        for (emit_h.failed_decls.items()) |entry| {
            if (entry.key.namespace.file_scope.status == .parse_failure) {
                continue;
            }
            total += 1;
        }
        total += 1;
    }
}

@@ -1743,13 +1757,15 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
        }
        try AllErrors.add(module, &arena, &errors, entry.value.*);
    }
    for (module.emit_h_failed_decls.items()) |entry| {
        if (entry.key.namespace.file_scope.status == .parse_failure) {
            // Skip errors for Decls within files that had a parse failure.
            // We'll try again once parsing succeeds.
            continue;
    if (module.emit_h) |emit_h| {
        for (emit_h.failed_decls.items()) |entry| {
            if (entry.key.namespace.file_scope.status == .parse_failure) {
                // Skip errors for Decls within files that had a parse failure.
                // We'll try again once parsing succeeds.
                continue;
            }
            try AllErrors.add(module, &arena, &errors, entry.value.*);
        }
        try AllErrors.add(module, &arena, &errors, entry.value.*);
    }
    for (module.failed_exports.items()) |entry| {
        try AllErrors.add(module, &arena, &errors, entry.value.*);
@@ -1942,10 +1958,11 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
    if (build_options.omit_stage2)
        @panic("sadly stage2 is omitted from this build to save memory on the CI server");
    const module = self.bin_file.options.module.?;
    const emit_loc = module.emit_h.?;
    const emit_h = module.emit_h.?;
    _ = try emit_h.decl_table.getOrPut(module.gpa, decl);
    const tv = decl.typed_value.most_recent.typed_value;
    const emit_h = decl.getEmitH(module);
    const fwd_decl = &emit_h.fwd_decl;
    const decl_emit_h = decl.getEmitH(module);
    const fwd_decl = &decl_emit_h.fwd_decl;
    fwd_decl.shrinkRetainingCapacity(0);

    var dg: c_codegen.DeclGen = .{
@@ -1960,7 +1977,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor

    c_codegen.genHeader(&dg) catch |err| switch (err) {
        error.AnalysisFail => {
            try module.emit_h_failed_decls.put(module.gpa, decl, dg.error_msg.?);
            try emit_h.failed_decls.put(module.gpa, decl, dg.error_msg.?);
            continue;
        },
        else => |e| return e,
@@ -1994,6 +2011,22 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
            decl.analysis = .codegen_failure_retryable;
        };
    },
    .analyze_pkg => |pkg| {
        if (build_options.omit_stage2)
            @panic("sadly stage2 is omitted from this build to save memory on the CI server");
        const module = self.bin_file.options.module.?;
        module.semaPkg(pkg) catch |err| switch (err) {
            error.CurrentWorkingDirectoryUnlinked,
            error.Unexpected,
            => try self.setMiscFailure(
                .analyze_pkg,
                "unexpected problem analyzing package '{s}'",
                .{pkg.root_src_path},
            ),
            error.OutOfMemory => return error.OutOfMemory,
            error.AnalysisFail => continue,
        };
    },
    .glibc_crt_file => |crt_file| {
        glibc.buildCRTFile(self, crt_file) catch |err| {
            // TODO Surface more error details.
src/Module.zig: 703 changed lines
@@ -54,8 +54,6 @@ symbol_exports: std.StringArrayHashMapUnmanaged(*Export) = .{},
/// is performing the export of another Decl.
/// This table owns the Export memory.
export_owners: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
/// Maps fully qualified namespaced names to the Decl struct for them.
decl_table: std.ArrayHashMapUnmanaged(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql, false) = .{},
/// The set of all the files in the Module. We keep track of this in order to iterate
/// over it and check which source files have been modified on the file system when
/// an update is requested, as well as to cache `@import` results.
@@ -68,10 +66,6 @@ import_table: std.StringArrayHashMapUnmanaged(*Scope.File) = .{},
/// Note that a Decl can succeed but the Fn it represents can fail. In this case,
/// a Decl can have a failed_decls entry but have analysis status of success.
failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{},
/// When emit_h is non-null, each Decl gets one more compile error slot for
/// emit-h failing for that Decl. This table is also how we tell if a Decl has
/// failed emit-h or succeeded.
emit_h_failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{},
/// Keep track of one `@compileLog` callsite per owner Decl.
compile_log_decls: std.AutoArrayHashMapUnmanaged(*Decl, SrcLoc) = .{},
/// Using a map here for consistency with the other fields here.
@@ -113,12 +107,24 @@ stage1_flags: packed struct {
    reserved: u2 = 0,
} = .{},

emit_h: ?Compilation.EmitLoc,

job_queued_update_builtin_zig: bool = true,

compile_log_text: ArrayListUnmanaged(u8) = .{},

emit_h: ?*GlobalEmitH,

/// A `Module` has zero or one of these depending on whether `-femit-h` is enabled.
pub const GlobalEmitH = struct {
    /// Where to put the output.
    loc: Compilation.EmitLoc,
    /// When emit_h is non-null, each Decl gets one more compile error slot for
    /// emit-h failing for that Decl. This table is also how we tell if a Decl has
    /// failed emit-h or succeeded.
    failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{},
    /// Tracks all decls in order to iterate over them and emit .h code for them.
    decl_table: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{},
};

pub const ErrorInt = u32;

pub const Export = struct {
@@ -293,10 +299,6 @@ pub const Decl = struct {
        return tree.tokens.items(.start)[decl.srcToken()];
    }

    pub fn fullyQualifiedNameHash(decl: Decl) Scope.NameHash {
        return decl.namespace.fullyQualifiedNameHash(mem.spanZ(decl.name));
    }

    pub fn renderFullyQualifiedName(decl: Decl, writer: anytype) !void {
        const unqualified_name = mem.spanZ(decl.name);
        return decl.namespace.renderFullyQualifiedName(unqualified_name, writer);
@@ -318,6 +320,11 @@ pub const Decl = struct {
        return (try decl.typedValue()).val;
    }

    pub fn isFunction(decl: *Decl) !bool {
        const tv = try decl.typedValue();
        return tv.ty.zigTypeTag() == .Fn;
    }

    pub fn dump(decl: *Decl) void {
        const loc = std.zig.findLineColumn(decl.scope.source.bytes, decl.src);
        std.debug.print("{s}:{d}:{d} name={s} status={s}", .{
@@ -611,14 +618,6 @@ pub const Scope = struct {
        }
    }

    fn name_hash_hash(x: NameHash) u32 {
        return @truncate(u32, @bitCast(u128, x));
    }

    fn name_hash_eql(a: NameHash, b: NameHash) bool {
        return @bitCast(u128, a) == @bitCast(u128, b);
    }

    pub const Tag = enum {
        /// .zig source code.
        file,
@@ -643,28 +642,32 @@ pub const Scope = struct {

        parent: ?*Namespace,
        file_scope: *Scope.File,
        parent_name_hash: NameHash,
        /// Will be a struct, enum, union, or opaque.
        ty: Type,
        /// Direct children of the namespace. Used during an update to detect
        /// which decls have been added/removed from source.
        decls: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{},
        usingnamespace_set: std.AutoHashMapUnmanaged(*Namespace, bool) = .{},
        /// Declaration order is preserved via entry order.
        /// Key memory references the string table of the containing `File` ZIR.
        /// TODO save memory with https://github.com/ziglang/zig/issues/8619.
        /// Does not contain anonymous decls.
        decls: std.StringArrayHashMapUnmanaged(*Decl) = .{},
        /// Names imported into the namespace via `usingnamespace`.
        /// The key memory is owned by the ZIR of the `File` containing the `Namespace`.
        usingnamespace_decls: std.StringArrayHashMapUnmanaged(*Namespace) = .{},

        pub fn deinit(ns: *Namespace, gpa: *Allocator) void {
        pub fn deinit(ns: *Namespace, mod: *Module) void {
            const gpa = mod.gpa;

            for (ns.decls.items()) |entry| {
                entry.value.destroy(mod);
            }
            ns.decls.deinit(gpa);
            ns.* = undefined;
        }

        pub fn removeDecl(ns: *Namespace, child: *Decl) void {
            _ = ns.decls.swapRemove(child);
        }

        /// Must generate unique bytes with no collisions with other decls.
        /// The point of hashing here is only to limit the number of bytes of
        /// the unique identifier to a fixed size (16 bytes).
        pub fn fullyQualifiedNameHash(ns: Namespace, name: []const u8) NameHash {
            return std.zig.hashName(ns.parent_name_hash, ".", name);
            // Preserve declaration order.
            _ = ns.decls.orderedRemove(mem.spanZ(child.name));
        }

        pub fn renderFullyQualifiedName(ns: Namespace, name: []const u8, writer: anytype) !void {
@@ -738,7 +741,9 @@ pub const Scope = struct {
            }
        }

        pub fn deinit(file: *File, gpa: *Allocator) void {
        pub fn deinit(file: *File, mod: *Module) void {
            const gpa = mod.gpa;
            file.namespace.deinit(mod);
            gpa.free(file.sub_file_path);
            file.unload(gpa);
            file.* = undefined;
@@ -786,8 +791,9 @@ pub const Scope = struct {
            return &file.tree;
        }

        pub fn destroy(file: *File, gpa: *Allocator) void {
            file.deinit(gpa);
        pub fn destroy(file: *File, mod: *Module) void {
            const gpa = mod.gpa;
            file.deinit(mod);
            gpa.destroy(file);
        }

@@ -798,7 +804,7 @@ pub const Scope = struct {
    };

    /// This is the context needed to semantically analyze ZIR instructions and
    /// produce TZIR instructions.
    /// produce AIR instructions.
    /// This is a temporary structure stored on the stack; references to it are valid only
    /// during semantic analysis of the block.
    pub const Block = struct {
@@ -818,7 +824,7 @@ pub const Scope = struct {
        is_comptime: bool,

        /// This `Block` maps a block ZIR instruction to the corresponding
        /// TZIR instruction for break instruction analysis.
        /// AIR instruction for break instruction analysis.
        pub const Label = struct {
            zir_block: Zir.Inst.Index,
            merges: Merges,
@@ -826,7 +832,7 @@ pub const Scope = struct {

        /// This `Block` indicates that an inline function call is happening
        /// and return instructions should be analyzed as a break instruction
        /// to this TZIR block instruction.
        /// to this AIR block instruction.
        /// It is shared among all the blocks in an inline or comptime called
        /// function.
        pub const Inlining = struct {
@@ -2632,20 +2638,19 @@ pub fn deinit(mod: *Module) void {

    mod.deletion_set.deinit(gpa);

    for (mod.decl_table.items()) |entry| {
        entry.value.destroy(mod);
    }
    mod.decl_table.deinit(gpa);

    for (mod.failed_decls.items()) |entry| {
        entry.value.destroy(gpa);
    }
    mod.failed_decls.deinit(gpa);

    for (mod.emit_h_failed_decls.items()) |entry| {
        entry.value.destroy(gpa);
    if (mod.emit_h) |emit_h| {
        for (emit_h.failed_decls.items()) |entry| {
            entry.value.destroy(gpa);
        }
        emit_h.failed_decls.deinit(gpa);
        emit_h.decl_table.deinit(gpa);
        gpa.destroy(emit_h);
    }
    mod.emit_h_failed_decls.deinit(gpa);

    for (mod.failed_files.items()) |entry| {
        if (entry.value) |msg| msg.destroy(gpa);
@@ -2682,7 +2687,7 @@ pub fn deinit(mod: *Module) void {

    for (mod.import_table.items()) |entry| {
        gpa.free(entry.key);
        entry.value.destroy(gpa);
        entry.value.destroy(mod);
    }
    mod.import_table.deinit(gpa);
}
@@ -2700,8 +2705,8 @@ const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8;
// We need a better language feature for initializing a union with
// a runtime known tag.
const Stage1DataLayout = extern struct {
    safety_tag: u8,
    data: [8]u8 align(8),
    safety_tag: u8,
};
comptime {
    if (data_has_safety_tag) {
@@ -2783,12 +2788,15 @@ pub fn astGenFile(mod: *Module, file: *Scope.File, prog_node: *std.Progress.Node
        log.debug("AstGen cache stale: {s}", .{file.sub_file_path});
        break :cached;
    }
    log.debug("AstGen cache hit: {s}", .{file.sub_file_path});
    log.debug("AstGen cache hit: {s} instructions_len={d}", .{
        file.sub_file_path, header.instructions_len,
    });

    var instructions: std.MultiArrayList(Zir.Inst) = .{};
    defer instructions.deinit(gpa);

    try instructions.resize(gpa, header.instructions_len);
    try instructions.setCapacity(gpa, header.instructions_len);
    instructions.len = header.instructions_len;

    var zir: Zir = .{
        .instructions = instructions.toOwnedSlice(),
@@ -3126,6 +3134,88 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) InnerError!void {
    }
}

pub fn semaPkg(mod: *Module, pkg: *Package) !void {
    const file = (try mod.importPkg(mod.root_pkg, pkg)).file;
    return mod.semaFile(file);
}

pub fn semaFile(mod: *Module, file: *Scope.File) InnerError!void {
    const tracy = trace(@src());
    defer tracy.end();

    assert(file.zir_loaded);
    assert(!file.zir.hasCompileErrors());

    const gpa = mod.gpa;
    var decl_arena = std.heap.ArenaAllocator.init(gpa);
    defer decl_arena.deinit();

    // We need a Decl to pass to Sema and collect dependencies. But ultimately we
    // want to pass them on to the Decl for the struct that represents the file.
    var tmp_namespace: Scope.Namespace = .{
        .parent = null,
        .file_scope = file,
        .ty = Type.initTag(.type),
    };
    var top_decl: Decl = .{
        .name = "",
        .namespace = &tmp_namespace,
        .generation = mod.generation,
        .src_node = 0, // the root AST node for the file
        .typed_value = .never_succeeded,
        .analysis = .in_progress,
        .deletion_flag = false,
        .is_pub = true,
        .link = undefined, // don't try to codegen this
        .fn_link = undefined, // not a function
        .contents_hash = undefined, // top-level struct has no contents hash
    };
    defer top_decl.dependencies.deinit(gpa);

    var sema: Sema = .{
        .mod = mod,
        .gpa = gpa,
        .arena = &decl_arena.allocator,
        .code = file.zir,
        // TODO use a map because this array is too big
        .inst_map = try decl_arena.allocator.alloc(*ir.Inst, file.zir.instructions.len),
        .owner_decl = &top_decl,
        .namespace = &tmp_namespace,
        .func = null,
        .owner_func = null,
        .param_inst_list = &.{},
    };
    var block_scope: Scope.Block = .{
        .parent = null,
        .sema = &sema,
        .src_decl = &top_decl,
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
    };
    defer block_scope.instructions.deinit(gpa);

    const main_struct_inst = file.zir.extra[@enumToInt(Zir.ExtraIndex.main_struct)] -
        @intCast(u32, Zir.Inst.Ref.typed_value_map.len);
    const air_inst = try sema.zirStructDecl(&block_scope, main_struct_inst, .Auto);
    assert(air_inst.ty.zigTypeTag() == .Type);
    const val = air_inst.value().?;
    const struct_ty = try val.toType(&decl_arena.allocator);
    const struct_decl = struct_ty.getOwnerDecl();

    file.namespace = struct_ty.getNamespace().?;
    file.namespace.parent = null;

    // Transfer the dependencies to `owner_decl`.
    assert(top_decl.dependants.count() == 0);
    for (top_decl.dependencies.items()) |entry| {
        const dep = entry.key;
        dep.removeDependant(&top_decl);
        if (dep == struct_decl) continue;
        _ = try mod.declareDeclDependency(struct_decl, dep);
    }
}

/// Returns `true` if the Decl type changed.
/// Returns `true` if this is the first time analyzing the Decl.
/// Returns `false` otherwise.
@@ -3268,31 +3358,32 @@ pub fn importFile(
    };
}

pub fn analyzeNamespace(
pub fn scanNamespace(
    mod: *Module,
    namespace: *Scope.Namespace,
    decls: []const ast.Node.Index,
) InnerError!void {
    extra_start: usize,
    decls_len: u32,
    parent_decl: *Decl,
) InnerError!usize {
    const tracy = trace(@src());
    defer tracy.end();

    // We may be analyzing it for the first time, or this may be
    // an incremental update. This code handles both cases.
    assert(namespace.file_scope.tree_loaded); // Caller must ensure tree loaded.
    const tree: *const ast.Tree = &namespace.file_scope.tree;
    const node_tags = tree.nodes.items(.tag);
    const node_datas = tree.nodes.items(.data);
    const gpa = mod.gpa;
    const zir = namespace.file_scope.zir;

    try mod.comp.work_queue.ensureUnusedCapacity(decls.len);
    try namespace.decls.ensureCapacity(mod.gpa, decls.len);
    try mod.comp.work_queue.ensureUnusedCapacity(decls_len);
    try namespace.decls.ensureCapacity(gpa, decls_len);

    // Keep track of the decls that we expect to see in this namespace so that
    // we know which ones have been deleted.
    var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(mod.gpa);
    var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(gpa);
    defer deleted_decls.deinit();
    try deleted_decls.ensureCapacity(namespace.decls.items().len);
    for (namespace.decls.items()) |entry| {
        deleted_decls.putAssumeCapacityNoClobber(entry.key, {});
    {
        const namespace_decls = namespace.decls.items();
        try deleted_decls.ensureCapacity(namespace_decls.len);
        for (namespace_decls) |entry| {
            deleted_decls.putAssumeCapacityNoClobber(entry.value, {});
        }
    }

    // Keep track of decls that are invalidated from the update. Ultimately,
@@ -3300,177 +3391,61 @@ pub fn analyzeNamespace(
    // the outdated decls, but we cannot queue up the tasks until after
    // we find out which ones have been deleted, otherwise there would be
    // deleted Decl pointers in the work queue.
    var outdated_decls = std.AutoArrayHashMap(*Decl, void).init(mod.gpa);
    var outdated_decls = std.AutoArrayHashMap(*Decl, void).init(gpa);
    defer outdated_decls.deinit();

    for (decls) |decl_node| switch (node_tags[decl_node]) {
        .fn_decl => {
            const fn_proto = node_datas[decl_node].lhs;
            const body = node_datas[decl_node].rhs;
            switch (node_tags[fn_proto]) {
                .fn_proto_simple => {
                    var params: [1]ast.Node.Index = undefined;
                    try mod.semaContainerFn(
                        namespace,
                        &deleted_decls,
                        &outdated_decls,
                        decl_node,
                        tree.*,
                        body,
                        tree.fnProtoSimple(&params, fn_proto),
                    );
                },
                .fn_proto_multi => try mod.semaContainerFn(
                    namespace,
                    &deleted_decls,
                    &outdated_decls,
                    decl_node,
                    tree.*,
                    body,
                    tree.fnProtoMulti(fn_proto),
                ),
                .fn_proto_one => {
                    var params: [1]ast.Node.Index = undefined;
                    try mod.semaContainerFn(
                        namespace,
                        &deleted_decls,
                        &outdated_decls,
                        decl_node,
                        tree.*,
                        body,
                        tree.fnProtoOne(&params, fn_proto),
                    );
                },
                .fn_proto => try mod.semaContainerFn(
                    namespace,
                    &deleted_decls,
                    &outdated_decls,
                    decl_node,
                    tree.*,
                    body,
                    tree.fnProto(fn_proto),
                ),
                else => unreachable,
            }
        },
        .fn_proto_simple => {
            var params: [1]ast.Node.Index = undefined;
            try mod.semaContainerFn(
                namespace,
                &deleted_decls,
                &outdated_decls,
                decl_node,
                tree.*,
                0,
                tree.fnProtoSimple(&params, decl_node),
            );
        },
        .fn_proto_multi => try mod.semaContainerFn(
    const bit_bags_count = std.math.divCeil(usize, decls_len, 8) catch unreachable;
    var extra_index = extra_start + bit_bags_count;
    var bit_bag_index: usize = extra_start;
    var cur_bit_bag: u32 = undefined;
    var decl_i: u32 = 0;
    while (decl_i < decls_len) : (decl_i += 1) {
        if (decl_i % 8 == 0) {
            cur_bit_bag = zir.extra[bit_bag_index];
            bit_bag_index += 1;
        }
        const is_pub = @truncate(u1, cur_bit_bag) != 0;
        cur_bit_bag >>= 1;
        const is_exported = @truncate(u1, cur_bit_bag) != 0;
        cur_bit_bag >>= 1;
        const has_align = @truncate(u1, cur_bit_bag) != 0;
        cur_bit_bag >>= 1;
        const has_section = @truncate(u1, cur_bit_bag) != 0;
        cur_bit_bag >>= 1;

        const hash_u32s = zir.extra[extra_index..][0..4];
        extra_index += 4;
        const name_idx = zir.extra[extra_index];
        extra_index += 1;
        const decl_index = zir.extra[extra_index];
        extra_index += 1;
        const align_inst: Zir.Inst.Ref = if (!has_align) .none else inst: {
            const inst = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
            extra_index += 1;
            break :inst inst;
        };
        const section_inst: Zir.Inst.Ref = if (!has_section) .none else inst: {
            const inst = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
            extra_index += 1;
            break :inst inst;
        };
        const decl_name: ?[]const u8 = if (name_idx == 0) null else zir.nullTerminatedString(name_idx);
        const contents_hash = @bitCast(std.zig.SrcHash, hash_u32s.*);

        try mod.scanDecl(
            namespace,
            &deleted_decls,
            &outdated_decls,
            decl_node,
            tree.*,
            0,
            tree.fnProtoMulti(decl_node),
        ),
        .fn_proto_one => {
            var params: [1]ast.Node.Index = undefined;
            try mod.semaContainerFn(
                namespace,
                &deleted_decls,
                &outdated_decls,
                decl_node,
                tree.*,
                0,
                tree.fnProtoOne(&params, decl_node),
            );
        },
        .fn_proto => try mod.semaContainerFn(
            namespace,
            &deleted_decls,
            &outdated_decls,
            decl_node,
            tree.*,
            0,
            tree.fnProto(decl_node),
        ),

        .global_var_decl => try mod.semaContainerVar(
            namespace,
            &deleted_decls,
            &outdated_decls,
            decl_node,
            tree.*,
            tree.globalVarDecl(decl_node),
        ),
        .local_var_decl => try mod.semaContainerVar(
            namespace,
            &deleted_decls,
            &outdated_decls,
            decl_node,
            tree.*,
            tree.localVarDecl(decl_node),
        ),
        .simple_var_decl => try mod.semaContainerVar(
            namespace,
            &deleted_decls,
            &outdated_decls,
            decl_node,
            tree.*,
            tree.simpleVarDecl(decl_node),
        ),
        .aligned_var_decl => try mod.semaContainerVar(
            namespace,
            &deleted_decls,
            &outdated_decls,
            decl_node,
            tree.*,
            tree.alignedVarDecl(decl_node),
        ),

        .@"comptime" => {
            const name_index = mod.getNextAnonNameIndex();
            const name = try std.fmt.allocPrint(mod.gpa, "__comptime_{d}", .{name_index});
            defer mod.gpa.free(name);

            const name_hash = namespace.fullyQualifiedNameHash(name);
            const contents_hash = std.zig.hashSrc(tree.getNodeSource(decl_node));

            const new_decl = try mod.createNewDecl(namespace, name, decl_node, name_hash, contents_hash);
            namespace.decls.putAssumeCapacity(new_decl, {});
            mod.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
        },

        // Container fields are handled in AstGen.
        .container_field_init,
        .container_field_align,
        .container_field,
        => continue,

        .test_decl => {
            if (mod.comp.bin_file.options.is_test) {
                log.err("TODO: analyze test decl", .{});
            }
        },
        .@"usingnamespace" => {
            const name_index = mod.getNextAnonNameIndex();
            const name = try std.fmt.allocPrint(mod.gpa, "__usingnamespace_{d}", .{name_index});
            defer mod.gpa.free(name);

            const name_hash = namespace.fullyQualifiedNameHash(name);
            const contents_hash = std.zig.hashSrc(tree.getNodeSource(decl_node));

            const new_decl = try mod.createNewDecl(namespace, name, decl_node, name_hash, contents_hash);
            namespace.decls.putAssumeCapacity(new_decl, {});

            mod.ensureDeclAnalyzed(new_decl) catch |err| switch (err) {
                error.OutOfMemory => return error.OutOfMemory,
                error.AnalysisFail => continue,
            };
        },
        else => unreachable,
    };
            contents_hash,
            decl_name,
            decl_index,
            is_pub,
            is_exported,
            align_inst,
            section_inst,
            parent_decl,
        );
    }
    // Handle explicitly deleted decls from the source code. This is one of two
    // places that Decl deletions happen. The other is in `Compilation`, after
    // `performAllTheWork`, where we iterate over `Module.deletion_set` and
@@ -3493,133 +3468,98 @@ pub fn analyzeNamespace(
    for (outdated_decls.items()) |entry| {
        try mod.markOutdatedDecl(entry.key);
    }
    return extra_index;
}

fn semaContainerFn(
fn scanDecl(
    mod: *Module,
    namespace: *Scope.Namespace,
    deleted_decls: *std.AutoArrayHashMap(*Decl, void),
    outdated_decls: *std.AutoArrayHashMap(*Decl, void),
    decl_node: ast.Node.Index,
    tree: ast.Tree,
    body_node: ast.Node.Index,
    fn_proto: ast.full.FnProto,
) !void {
    contents_hash: std.zig.SrcHash,
    decl_name: ?[]const u8,
    decl_index: Zir.Inst.Index,
    is_pub: bool,
    is_exported: bool,
    align_inst: Zir.Inst.Ref,
    section_inst: Zir.Inst.Ref,
    parent_decl: *Decl,
) InnerError!void {
    const tracy = trace(@src());
    defer tracy.end();

    // We will create a Decl for it regardless of analysis status.
    const name_token = fn_proto.name_token orelse {
        // This problem will go away with #1717.
        @panic("TODO missing function name");
    };
    const name = tree.tokenSlice(name_token); // TODO use identifierTokenString
    const name_hash = namespace.fullyQualifiedNameHash(name);
    const contents_hash = std.zig.hashSrc(tree.getNodeSource(decl_node));
    if (mod.decl_table.get(name_hash)) |decl| {
        // Update the AST node of the decl; even if its contents are unchanged, it may
        // have been re-ordered.
        const prev_src_node = decl.src_node;
        decl.src_node = decl_node;
        if (deleted_decls.swapRemove(decl) == null) {
            decl.analysis = .sema_failure;
            const msg = try ErrorMsg.create(mod.gpa, .{
                .file_scope = namespace.file_scope,
                .parent_decl_node = 0,
                .lazy = .{ .token_abs = name_token },
            }, "redeclaration of '{s}'", .{decl.name});
            errdefer msg.destroy(mod.gpa);
            const other_src_loc: SrcLoc = .{
                .file_scope = namespace.file_scope,
                .parent_decl_node = 0,
                .lazy = .{ .node_abs = prev_src_node },
            };
            try mod.errNoteNonLazy(other_src_loc, msg, "previously declared here", .{});
            try mod.failed_decls.putNoClobber(mod.gpa, decl, msg);
        } else {
            if (!srcHashEql(decl.contents_hash, contents_hash)) {
                try outdated_decls.put(decl, {});
                decl.contents_hash = contents_hash;
            } else switch (mod.comp.bin_file.tag) {
                .coff => {
                    // TODO Implement for COFF
                },
                .elf => if (decl.fn_link.elf.len != 0) {
                    // TODO Look into detecting when this would be unnecessary by storing enough state
                    // in `Decl` to notice that the line number did not change.
                    mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
                },
                .macho => if (decl.fn_link.macho.len != 0) {
                    // TODO Look into detecting when this would be unnecessary by storing enough state
                    // in `Decl` to notice that the line number did not change.
                    mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
                },
                .c, .wasm, .spirv => {},
            }
    const gpa = mod.gpa;
    const zir = namespace.file_scope.zir;
    const decl_block_inst_data = zir.instructions.items(.data)[decl_index].pl_node;
    const decl_node = parent_decl.relativeToNodeIndex(decl_block_inst_data.src_node);

    // We create a Decl for it regardless of analysis status.
    // Decls that have names are keyed in the namespace by the name. Decls without
    // names are keyed by their contents hash. This way we can detect if, for example,
    // a comptime decl gets moved around in the file.
    const decl_key = decl_name orelse &contents_hash;
    const gop = try namespace.decls.getOrPut(gpa, decl_key);
    if (!gop.found_existing) {
        if (align_inst != .none) {
            return mod.fail(&namespace.base, .{ .node_abs = decl_node }, "TODO: implement decls with align()", .{});
        }
    } else {
        const new_decl = try mod.createNewDecl(namespace, name, decl_node, name_hash, contents_hash);
        namespace.decls.putAssumeCapacity(new_decl, {});
        if (fn_proto.extern_export_token) |maybe_export_token| {
            const token_tags = tree.tokens.items(.tag);
            if (token_tags[maybe_export_token] == .keyword_export) {
                mod.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
            }
        if (section_inst != .none) {
            return mod.fail(&namespace.base, .{ .node_abs = decl_node }, "TODO: implement decls with linksection()", .{});
        }
        new_decl.is_pub = fn_proto.visib_token != null;
        const new_decl = try mod.createNewDecl(namespace, decl_key, decl_node, contents_hash);
        // Update the key reference to the longer-lived memory.
        gop.entry.key = &new_decl.contents_hash;
        gop.entry.value = new_decl;
        // exported decls, comptime, test, and usingnamespace decls get analyzed.
        if (decl_name == null or is_exported) {
            mod.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
        }
        new_decl.is_pub = is_pub;
        return;
    }
}

fn semaContainerVar(
    mod: *Module,
    namespace: *Scope.Namespace,
    deleted_decls: *std.AutoArrayHashMap(*Decl, void),
    outdated_decls: *std.AutoArrayHashMap(*Decl, void),
    decl_node: ast.Node.Index,
    tree: ast.Tree,
    var_decl: ast.full.VarDecl,
) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const name_token = var_decl.ast.mut_token + 1;
    const name = tree.tokenSlice(name_token); // TODO identifierTokenString
    const name_hash = namespace.fullyQualifiedNameHash(name);
    const contents_hash = std.zig.hashSrc(tree.getNodeSource(decl_node));
    if (mod.decl_table.get(name_hash)) |decl| {
        // Update the AST Node index of the decl, even if its contents are unchanged, it may
        // have been re-ordered.
        const prev_src_node = decl.src_node;
        decl.src_node = decl_node;
        if (deleted_decls.swapRemove(decl) == null) {
            decl.analysis = .sema_failure;
            const msg = try ErrorMsg.create(mod.gpa, .{
                .file_scope = namespace.file_scope,
                .parent_decl_node = 0,
                .lazy = .{ .token_abs = name_token },
            }, "redeclaration of '{s}'", .{decl.name});
            errdefer msg.destroy(mod.gpa);
            const other_src_loc: SrcLoc = .{
                .file_scope = decl.namespace.file_scope,
                .parent_decl_node = 0,
                .lazy = .{ .node_abs = prev_src_node },
            };
            try mod.errNoteNonLazy(other_src_loc, msg, "previously declared here", .{});
            try mod.failed_decls.putNoClobber(mod.gpa, decl, msg);
        } else if (!srcHashEql(decl.contents_hash, contents_hash)) {
    const decl = gop.entry.value;
    // Update the AST node of the decl; even if its contents are unchanged, it may
    // have been re-ordered.
    const prev_src_node = decl.src_node;
    decl.src_node = decl_node;
    if (deleted_decls.swapRemove(decl) == null) {
        if (true) {
            @panic("TODO I think this code path is unreachable; should be caught by AstGen.");
        }
        decl.analysis = .sema_failure;
        const msg = try ErrorMsg.create(gpa, .{
            .file_scope = namespace.file_scope,
            .parent_decl_node = 0,
            .lazy = .{ .token_abs = name_token },
        }, "redeclaration of '{s}'", .{decl.name});
        errdefer msg.destroy(gpa);
        const other_src_loc: SrcLoc = .{
            .file_scope = namespace.file_scope,
            .parent_decl_node = 0,
            .lazy = .{ .node_abs = prev_src_node },
        };
        try mod.errNoteNonLazy(other_src_loc, msg, "previously declared here", .{});
        try mod.failed_decls.putNoClobber(gpa, decl, msg);
    } else {
        if (!std.zig.srcHashEql(decl.contents_hash, contents_hash)) {
            try outdated_decls.put(decl, {});
            decl.contents_hash = contents_hash;
        }
    } else {
        const new_decl = try mod.createNewDecl(namespace, name, decl_node, name_hash, contents_hash);
        namespace.decls.putAssumeCapacity(new_decl, {});
        if (var_decl.extern_export_token) |maybe_export_token| {
            const token_tags = tree.tokens.items(.tag);
            if (token_tags[maybe_export_token] == .keyword_export) {
                mod.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
            }
        }
        new_decl.is_pub = var_decl.visib_token != null;
    } else if (try decl.isFunction()) switch (mod.comp.bin_file.tag) {
        .coff => {
            // TODO Implement for COFF
        },
        .elf => if (decl.fn_link.elf.len != 0) {
            // TODO Look into detecting when this would be unnecessary by storing enough state
            // in `Decl` to notice that the line number did not change.
            mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
        },
        .macho => if (decl.fn_link.macho.len != 0) {
            // TODO Look into detecting when this would be unnecessary by storing enough state
            // in `Decl` to notice that the line number did not change.
            mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
        },
        .c, .wasm, .spirv => {},
    };
    }
}
@@ -3644,8 +3584,6 @@ pub fn deleteDecl(
    // not be present in the set, and this does nothing.
    decl.namespace.removeDecl(decl);

    const name_hash = decl.fullyQualifiedNameHash();
    mod.decl_table.removeAssertDiscard(name_hash);
    // Remove itself from its dependencies, because we are about to destroy the decl pointer.
    for (decl.dependencies.items()) |entry| {
        const dep = entry.key;
@@ -3675,8 +3613,11 @@ pub fn deleteDecl(
    if (mod.failed_decls.swapRemove(decl)) |entry| {
        entry.value.destroy(mod.gpa);
    }
    if (mod.emit_h_failed_decls.swapRemove(decl)) |entry| {
        entry.value.destroy(mod.gpa);
    if (mod.emit_h) |emit_h| {
        if (emit_h.failed_decls.swapRemove(decl)) |entry| {
            entry.value.destroy(mod.gpa);
        }
        emit_h.decl_table.removeAssertDiscard(decl);
    }
    _ = mod.compile_log_decls.swapRemove(decl);
    mod.deleteDeclExports(decl);
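
Taken together, the `emit_h.loc`, `emit_h.failed_decls`, and `emit_h.decl_table` accesses here and in `flushEmitH` further down imply roughly the following shape for the new per-module emit-h state. This is a hedged reconstruction from call sites only; the actual struct definition is not part of this excerpt, and the field types are inferred:

// Hedged reconstruction from call sites (emit_h.loc, emit_h.failed_decls,
// emit_h.decl_table); field types are inferred, not quoted from the commit.
pub const GlobalEmitH = struct {
    /// Where to write the generated header on flush.
    loc: Compilation.EmitLoc,
    /// Decls whose emit-h lowering failed, with their error messages.
    failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{},
    /// Every decl that gets a forward declaration in the output header.
    decl_table: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{},
};
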
@@ -3776,7 +3717,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void {
    };
    defer inner_block.instructions.deinit(mod.gpa);

    // TZIR currently requires the arg parameters to be the first N instructions
    // AIR currently requires the arg parameters to be the first N instructions
    try inner_block.instructions.appendSlice(mod.gpa, param_inst_list);

    func.state = .in_progress;
@@ -3796,8 +3737,10 @@ fn markOutdatedDecl(mod: *Module, decl: *Decl) !void {
    if (mod.failed_decls.swapRemove(decl)) |entry| {
        entry.value.destroy(mod.gpa);
    }
    if (mod.emit_h_failed_decls.swapRemove(decl)) |entry| {
        entry.value.destroy(mod.gpa);
    if (mod.emit_h) |emit_h| {
        if (emit_h.failed_decls.swapRemove(decl)) |entry| {
            entry.value.destroy(mod.gpa);
        }
    }
    _ = mod.compile_log_decls.swapRemove(decl);
    decl.analysis = .outdated;
@@ -3854,18 +3797,11 @@ fn createNewDecl(
    namespace: *Scope.Namespace,
    decl_name: []const u8,
    src_node: ast.Node.Index,
    name_hash: Scope.NameHash,
    contents_hash: std.zig.SrcHash,
) !*Decl {
    try mod.decl_table.ensureCapacity(mod.gpa, mod.decl_table.items().len + 1);
    const new_decl = try mod.allocateNewDecl(namespace, src_node, contents_hash);
    errdefer mod.gpa.destroy(new_decl);
    new_decl.name = try mem.dupeZ(mod.gpa, u8, decl_name);
    log.debug("insert Decl {s} with hash {}", .{
        new_decl.name,
        std.fmt.fmtSliceHexLower(&name_hash),
    });
    mod.decl_table.putAssumeCapacityNoClobber(name_hash, new_decl);
    return new_decl;
}

@@ -4074,9 +4010,8 @@ pub fn createAnonymousDecl(
    const name = try std.fmt.allocPrint(mod.gpa, "{s}__anon_{d}", .{ scope_decl.name, name_index });
    defer mod.gpa.free(name);
    const namespace = scope_decl.namespace;
    const name_hash = namespace.fullyQualifiedNameHash(name);
    const src_hash: std.zig.SrcHash = undefined;
    const new_decl = try mod.createNewDecl(namespace, name, scope_decl.src_node, name_hash, src_hash);
    const new_decl = try mod.createNewDecl(namespace, name, scope_decl.src_node, src_hash);
    const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);

    decl_arena_state.* = decl_arena.state;
@@ -4125,30 +4060,26 @@ pub fn lookupInNamespace(
    ident_name: []const u8,
    only_pub_usingnamespaces: bool,
) ?*Decl {
    const name_hash = namespace.fullyQualifiedNameHash(ident_name);
    log.debug("lookup Decl {s} with hash {}", .{
        ident_name,
        std.fmt.fmtSliceHexLower(&name_hash),
    });
    // TODO handle decl collision with usingnamespace
    // TODO the decl doing the looking up needs to create a decl dependency
    // on each usingnamespace decl here.
    if (mod.decl_table.get(name_hash)) |decl| {
        return decl;
    }
    {
        var it = namespace.usingnamespace_set.iterator();
        while (it.next()) |entry| {
            const other_ns = entry.key;
            const other_is_pub = entry.value;
            if (only_pub_usingnamespaces and !other_is_pub) continue;
            // TODO handle cycles
            if (mod.lookupInNamespace(other_ns, ident_name, true)) |decl| {
                return decl;
            }
        }
    }
    return null;
    @panic("TODO lookupInNamespace");
    //// TODO handle decl collision with usingnamespace
    //// TODO the decl doing the looking up needs to create a decl dependency
    //// on each usingnamespace decl here.
    //if (mod.decl_table.get(name_hash)) |decl| {
    //    return decl;
    //}
    //{
    //    var it = namespace.usingnamespace_set.iterator();
    //    while (it.next()) |entry| {
    //        const other_ns = entry.key;
    //        const other_is_pub = entry.value;
    //        if (only_pub_usingnamespaces and !other_is_pub) continue;
    //        // TODO handle cycles
    //        if (mod.lookupInNamespace(other_ns, ident_name, true)) |decl| {
    //            return decl;
    //        }
    //    }
    //}
    //return null;
}

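For orientation, here is a sketch of how the commented-out body could be restructured once `Namespace` owns a name-keyed map of its children, as the commit message describes. The `decls_by_name` field is an assumption for illustration, not this commit's API:

// Sketch only: assumes Namespace gains a name-keyed map
// (called decls_by_name here) replacing the removed global decl_table.
pub fn lookupInNamespaceSketch(
    mod: *Module,
    namespace: *Scope.Namespace,
    ident_name: []const u8,
    only_pub_usingnamespaces: bool,
) ?*Decl {
    // Direct children of this namespace first.
    if (namespace.decls_by_name.get(ident_name)) |decl| {
        return decl;
    }
    // Then every namespace pulled in via usingnamespace.
    var it = namespace.usingnamespace_set.iterator();
    while (it.next()) |entry| {
        const other_ns = entry.key;
        const other_is_pub = entry.value;
        if (only_pub_usingnamespaces and !other_is_pub) continue;
        // TODO handle cycles, as the original TODO notes.
        if (mod.lookupInNamespace(other_ns, ident_name, true)) |decl| {
            return decl;
        }
    }
    return null;
}
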
pub fn makeIntType(arena: *Allocator, signedness: std.builtin.Signedness, bits: u16) !Type {

@@ -4274,10 +4205,6 @@ pub fn failWithOwnedErrorMsg(mod: *Module, scope: *Scope, err_msg: *ErrorMsg) In
    return error.AnalysisFail;
}

fn srcHashEql(a: std.zig.SrcHash, b: std.zig.SrcHash) bool {
    return @bitCast(u128, a) == @bitCast(u128, b);
}
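
Because `std.zig.SrcHash` is a 16-byte array, the `u128` bitcast turns the comparison into a single integer compare. A small self-contained illustration; the test body is ours (using era-appropriate builtin syntax), while `std.zig.hashSrc` is the same helper used in `semaContainerVar` above:

const std = @import("std");

test "srcHashEql compares all 16 bytes at once" {
    const a = std.zig.hashSrc("const x = 1;");
    const b = std.zig.hashSrc("const x = 1;");
    const c = std.zig.hashSrc("const x = 2;");
    // Same source text hashes equal; different text differs.
    std.testing.expect(@bitCast(u128, a) == @bitCast(u128, b));
    std.testing.expect(@bitCast(u128, a) != @bitCast(u128, c));
}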

pub fn intAdd(allocator: *Allocator, lhs: Value, rhs: Value) !Value {
    // TODO is this a performance issue? maybe we should try the operation without
    // resorting to BigInt first.

78
src/Sema.zig
@@ -655,7 +655,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In
    return sema.mod.fail(&block.base, sema.src, "TODO implement zirCoerceResultPtr", .{});
}

fn zirStructDecl(
pub fn zirStructDecl(
    sema: *Sema,
    block: *Scope.Block,
    inst: Zir.Inst.Index,
@@ -668,8 +668,8 @@ fn zirStructDecl(
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.StructDecl, inst_data.payload_index);
    const body = sema.code.extra[extra.end..][0..extra.data.body_len];
    const fields_len = extra.data.fields_len;
    const decls_len = extra.data.decls_len;

    var new_decl_arena = std.heap.ArenaAllocator.init(gpa);

@@ -686,37 +686,19 @@ fn zirStructDecl(
        .node_offset = inst_data.src_node,
        .namespace = .{
            .parent = sema.owner_decl.namespace,
            .parent_name_hash = new_decl.fullyQualifiedNameHash(),
            .ty = struct_ty,
            .file_scope = block.getFileScope(),
        },
    };

    {
        const ast = std.zig.ast;
        const node = sema.owner_decl.relativeToNodeIndex(inst_data.src_node);
        const tree: *const ast.Tree = &struct_obj.namespace.file_scope.tree;
        const node_tags = tree.nodes.items(.tag);
        var buf: [2]ast.Node.Index = undefined;
        const members: []const ast.Node.Index = switch (node_tags[node]) {
            .container_decl,
            .container_decl_trailing,
            => tree.containerDecl(node).ast.members,

            .container_decl_two,
            .container_decl_two_trailing,
            => tree.containerDeclTwo(&buf, node).ast.members,

            .container_decl_arg,
            .container_decl_arg_trailing,
            => tree.containerDeclArg(node).ast.members,

            .root => tree.rootDecls(),
            else => unreachable,
        };
        try sema.mod.analyzeNamespace(&struct_obj.namespace, members);
    }
    var extra_index: usize = try sema.mod.scanNamespace(
        &struct_obj.namespace,
        extra.end,
        decls_len,
        new_decl,
    );

    const body = sema.code.extra[extra_index..][0..extra.data.body_len];
    if (fields_len == 0) {
        assert(body.len == 0);
        return sema.analyzeDeclVal(block, src, new_decl);
@@ -760,8 +742,8 @@ fn zirStructDecl(
        sema.branch_quota = struct_sema.branch_quota;
    }
    const bit_bags_count = std.math.divCeil(usize, fields_len, 16) catch unreachable;
    const body_end = extra.end + body.len;
    var extra_index: usize = body_end + bit_bags_count;
    const body_end = extra_index + body.len;
    extra_index += bit_bags_count;
    var bit_bag_index: usize = body_end;
    var cur_bit_bag: u32 = undefined;
    var field_i: u32 = 0;
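
The struct decoder reserves two flag bits per field, which is why `divCeil(fields_len, 16)` bags of `u32` suffice (the enum version below packs one bit per field, hence 32). Here is a standalone sketch of the unpacking rhythm; which semantic each bit carries is an assumption for illustration:

const std = @import("std");

// Standalone sketch of the bit-bag walk: 2 bits per field, 16 fields
// per u32 bag. The has_align/has_default meanings are assumed here.
fn walkBitBags(bags: []const u32, fields_len: u32) void {
    var bit_bag_index: usize = 0;
    var cur_bit_bag: u32 = undefined;
    var field_i: u32 = 0;
    while (field_i < fields_len) : (field_i += 1) {
        // Refill the current bag every 16 fields.
        if (field_i % 16 == 0) {
            cur_bit_bag = bags[bit_bag_index];
            bit_bag_index += 1;
        }
        const has_align = @truncate(u1, cur_bit_bag) != 0;
        cur_bit_bag >>= 1;
        const has_default = @truncate(u1, cur_bit_bag) != 0;
        cur_bit_bag >>= 1;
        std.debug.print("field {d}: align={}, default={}\n", .{ field_i, has_align, has_default });
    }
}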
@@ -829,8 +811,8 @@ fn zirEnumDecl(
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.EnumDecl, inst_data.payload_index);
    const body = sema.code.extra[extra.end..][0..extra.data.body_len];
    const fields_len = extra.data.fields_len;
    const decls_len = extra.data.decls_len;

    var new_decl_arena = std.heap.ArenaAllocator.init(gpa);

@@ -865,44 +847,27 @@ fn zirEnumDecl(
        .node_offset = inst_data.src_node,
        .namespace = .{
            .parent = sema.owner_decl.namespace,
            .parent_name_hash = new_decl.fullyQualifiedNameHash(),
            .ty = enum_ty,
            .file_scope = block.getFileScope(),
        },
    };

    {
        const ast = std.zig.ast;
        const node = sema.owner_decl.relativeToNodeIndex(inst_data.src_node);
        const tree: *const ast.Tree = &enum_obj.namespace.file_scope.tree;
        const node_tags = tree.nodes.items(.tag);
        var buf: [2]ast.Node.Index = undefined;
        const members: []const ast.Node.Index = switch (node_tags[node]) {
            .container_decl,
            .container_decl_trailing,
            => tree.containerDecl(node).ast.members,

            .container_decl_two,
            .container_decl_two_trailing,
            => tree.containerDeclTwo(&buf, node).ast.members,

            .container_decl_arg,
            .container_decl_arg_trailing,
            => tree.containerDeclArg(node).ast.members,

            .root => tree.rootDecls(),
            else => unreachable,
        };
        try sema.mod.analyzeNamespace(&enum_obj.namespace, members);
    }
    var extra_index: usize = try sema.mod.scanNamespace(
        &enum_obj.namespace,
        extra.end,
        decls_len,
        new_decl,
    );

    const body = sema.code.extra[extra_index..][0..extra.data.body_len];
    if (fields_len == 0) {
        assert(body.len == 0);
        return sema.analyzeDeclVal(block, src, new_decl);
    }

    const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable;
    const body_end = extra.end + body.len;
    const body_end = extra_index + body.len;
    extra_index += bit_bags_count;

    try enum_obj.fields.ensureCapacity(&new_decl_arena.allocator, fields_len);
    const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| {
@@ -947,7 +912,6 @@ fn zirEnumDecl(
        sema.branch_count = enum_sema.branch_count;
        sema.branch_quota = enum_sema.branch_quota;
    }
    var extra_index: usize = body_end + bit_bags_count;
    var bit_bag_index: usize = body_end;
    var cur_bit_bag: u32 = undefined;
    var field_i: u32 = 0;

@@ -15,6 +15,10 @@ pub const base_tag: link.File.Tag = .c;
pub const zig_h = @embedFile("C/zig.h");

base: link.File,
/// This linker backend does not try to incrementally link output C source code.
/// Instead, it tracks all declarations in this table, and iterates over it
/// in the flush function, stitching pre-rendered pieces of C code together.
decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, void) = .{},

/// Per-declaration data. For functions this is the body, and
/// the forward declaration is stored in the FnBlock.
@@ -66,10 +70,10 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
}

pub fn deinit(self: *C) void {
    const module = self.base.options.module orelse return;
    for (module.decl_table.items()) |entry| {
        self.freeDecl(entry.value);
    for (self.decl_table.items()) |entry| {
        self.freeDecl(entry.key);
    }
    self.decl_table.deinit(self.base.allocator);
}

pub fn allocateDeclIndexes(self: *C, decl: *Module.Decl) !void {}
@@ -88,6 +92,9 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
    const tracy = trace(@src());
    defer tracy.end();

    // Keep track of all decls so we can iterate over them on flush().
    _ = try self.decl_table.getOrPut(self.base.allocator, decl);

    const fwd_decl = &decl.fn_link.c.fwd_decl;
    const typedefs = &decl.fn_link.c.typedefs;
    const code = &decl.link.c.code;
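
That `getOrPut` is the entire registration story: `updateDecl` stays idempotent per decl, and flush later walks the table. A minimal standalone sketch of the same register-on-update, iterate-on-flush pattern, outside the compiler:

const std = @import("std");

// Minimal sketch of the register-on-update, iterate-on-flush pattern:
// getOrPut makes repeated updates for the same key idempotent.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    var table = std.AutoArrayHashMap(u32, void).init(&gpa.allocator);
    defer table.deinit();

    // "updateDecl" twice for the same decl only stores it once.
    _ = try table.getOrPut(1);
    _ = try table.getOrPut(2);
    _ = try table.getOrPut(1);

    // "flush" iterates everything that was ever updated.
    for (table.items()) |entry| {
        std.debug.print("flushing decl {d}\n", .{entry.key});
    }
}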
@@ -168,7 +175,7 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {
    defer all_buffers.deinit();

    // This is at least enough until we get to the function bodies without error handling.
    try all_buffers.ensureCapacity(module.decl_table.count() + 2);
    try all_buffers.ensureCapacity(self.decl_table.count() + 2);

    var file_size: u64 = zig_h.len;
    all_buffers.appendAssumeCapacity(.{
@@ -197,8 +204,8 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {
    // Typedefs, forward decls and non-functions first.
    // TODO: performance investigation: would keeping a list of Decls that we should
    // generate, rather than querying here, be faster?
    for (module.decl_table.items()) |kv| {
        const decl = kv.value;
    for (self.decl_table.items()) |kv| {
        const decl = kv.key;
        switch (decl.typed_value) {
            .most_recent => |tvm| {
                const buf = buf: {
@@ -237,8 +244,8 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {

    // Now the function bodies.
    try all_buffers.ensureCapacity(all_buffers.items.len + fn_count);
    for (module.decl_table.items()) |kv| {
        const decl = kv.value;
    for (self.decl_table.items()) |kv| {
        const decl = kv.key;
        switch (decl.typed_value) {
            .most_recent => |tvm| {
                if (tvm.typed_value.val.castTag(.function)) |_| {
@@ -263,13 +270,13 @@ pub fn flushEmitH(module: *Module) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const emit_h_loc = module.emit_h orelse return;
    const emit_h = module.emit_h orelse return;

    // We collect a list of buffers to write, and write them all at once with pwritev 😎
    var all_buffers = std.ArrayList(std.os.iovec_const).init(module.gpa);
    defer all_buffers.deinit();

    try all_buffers.ensureCapacity(module.decl_table.count() + 1);
    try all_buffers.ensureCapacity(emit_h.decl_table.count() + 1);

    var file_size: u64 = zig_h.len;
    all_buffers.appendAssumeCapacity(.{
@@ -277,9 +284,10 @@ pub fn flushEmitH(module: *Module) !void {
        .iov_len = zig_h.len,
    });

    for (module.decl_table.items()) |kv| {
        const emit_h = kv.value.getEmitH(module);
        const buf = emit_h.fwd_decl.items;
    for (emit_h.decl_table.items()) |kv| {
        const decl = kv.key;
        const decl_emit_h = decl.getEmitH(module);
        const buf = decl_emit_h.fwd_decl.items;
        all_buffers.appendAssumeCapacity(.{
            .iov_base = buf.ptr,
            .iov_len = buf.len,
@@ -287,8 +295,8 @@ pub fn flushEmitH(module: *Module) !void {
        file_size += buf.len;
    }

    const directory = emit_h_loc.directory orelse module.comp.local_cache_directory;
    const file = try directory.handle.createFile(emit_h_loc.basename, .{
    const directory = emit_h.loc.directory orelse module.comp.local_cache_directory;
    const file = try directory.handle.createFile(emit_h.loc.basename, .{
        // We set the end position explicitly below; by not truncating the file, we possibly
        // make it easier on the file system by doing 1 reallocation instead of two.
        .truncate = false,

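Both flush paths build one iovec per pre-rendered buffer and hand the whole list to the kernel in a single positional write. A standalone sketch of that gather-write pattern; the file name and contents are illustrative only:

const std = @import("std");

// Sketch of the gather-then-write pattern used by flushModule and
// flushEmitH: one iovec per pre-rendered piece, one positional write.
pub fn main() !void {
    const pieces = [_][]const u8{ "// header prelude\n", "int a;\n", "int b;\n" };

    var iovecs: [pieces.len]std.os.iovec_const = undefined;
    var file_size: u64 = 0;
    for (pieces) |piece, i| {
        iovecs[i] = .{ .iov_base = piece.ptr, .iov_len = piece.len };
        file_size += piece.len;
    }

    // Like the code above: don't truncate up front, write everything at
    // offset 0, then set the end position once.
    const file = try std.fs.cwd().createFile("example.h", .{ .truncate = false });
    defer file.close();
    try file.pwritevAll(&iovecs, 0);
    try file.setEndPos(file_size);
}
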
@@ -37,9 +37,14 @@ pub const FnData = struct {

base: link.File,

// TODO: Does this file need to support multiple independent modules?
/// TODO: Does this file need to support multiple independent modules?
spirv_module: codegen.SPIRVModule,

/// This linker backend does not try to incrementally link output SPIR-V code.
/// Instead, it tracks all declarations in this table, and iterates over it
/// in the flush function.
decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, void) = .{},

pub fn createEmpty(gpa: *Allocator, options: link.Options) !*SpirV {
    const spirv = try gpa.create(SpirV);
    spirv.* = .{
@@ -88,6 +93,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
}

pub fn deinit(self: *SpirV) void {
    self.decl_table.deinit(self.base.allocator);
    self.spirv_module.deinit();
}

@@ -95,6 +101,9 @@ pub fn updateDecl(self: *SpirV, module: *Module, decl: *Module.Decl) !void {
    const tracy = trace(@src());
    defer tracy.end();

    // Keep track of all decls so we can iterate over them on flush().
    _ = try self.decl_table.getOrPut(self.base.allocator, decl);

    const fn_data = &decl.fn_link.spirv;
    if (fn_data.id == null) {
        fn_data.id = self.spirv_module.allocId();
@@ -164,12 +173,12 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void {
    defer all_buffers.deinit();

    // Pre-allocate enough for the binary info + all functions
    try all_buffers.ensureCapacity(module.decl_table.count() + 1);
    try all_buffers.ensureCapacity(self.decl_table.count() + 1);

    all_buffers.appendAssumeCapacity(wordsToIovConst(binary.items));

    for (module.decl_table.items()) |entry| {
        const decl = entry.value;
    for (self.decl_table.items()) |entry| {
        const decl = entry.key;
        switch (decl.typed_value) {
            .most_recent => |tvm| {
                const fn_data = &decl.fn_link.spirv;