stage2: entry point via std lib and proper updated file detection

Instead of Module setting up the root_scope with the root source file,
instead, Module relies on the package table graph being set up properly,
and inside `update()`, it does the equivalent of `_ = @import("std");`.
This, in turn, imports start.zig, which has the logic to call main (or
not). `Module` no longer has `root_scope` - the root source file is no
longer special, it's just in the package table mapped to "root".

I also went ahead and implemented proper detection of updated files.
mtime, inode, size, and source hash are kept in `Scope.File`.
During an update, iterate over `import_table` and stat each file to find
out which ones are updated.

The source hash is redundant with the source hash used by the struct
decl that corresponds to the file, so it should be removed in a future
commit before merging the branch.

 * AstGen: add "previously declared here" notes for variables shadowing
   decls.
 * Parse imports as structs. Module now calls `AstGen.structDeclInner`,
   which is called by `AstGen.containerDecl`.
   - `importFile` is a bit kludgy with how it handles the top level Decl
     that kinda gets merged into the struct decl at the end of the
     function. Be on the look out for bugs related to that as well as
     possibly cleaner ways to implement this.
 * Module: factor out lookupDeclName into lookupIdentifier and lookupInNamespace.
 * Rename `Scope.Container` to `Scope.Namespace`.
 * Delete some dead code.

This branch won't work until `usingnamespace` is implemented because it
relies on `@import("builtin").OutputMode` and `OutputMode` comes from a
`usingnamespace`.
This commit is contained in:
Andrew Kelley 2021-04-09 23:17:50 -07:00
parent ccdf55310b
commit f458192e56
10 changed files with 757 additions and 507 deletions

96
BRANCH_TODO Normal file
View File

@ -0,0 +1,96 @@
* get rid of failed_root_src_file
const container_name_hash: Scope.NameHash = if (found_pkg) |pkg|
pkg.namespace_hash
else
std.zig.hashName(cur_pkg.namespace_hash, "/", resolved_path);
file_scope.* = .{
.root_container = .{
.parent = null,
.file_scope = file_scope,
.decls = .{},
.ty = struct_ty,
.parent_name_hash = container_name_hash,
},
};
mod.analyzeContainer(&file_scope.root_container) catch |err| switch (err) {
error.AnalysisFail => {
assert(mod.comp.totalErrorCount() != 0);
},
else => |e| return e,
};
return file_scope;
// Until then we simulate a full cache miss. Source files could have been loaded
// for any reason; to force a refresh we unload now.
module.unloadFile(module.root_scope);
module.failed_root_src_file = null;
module.analyzeNamespace(&module.root_scope.root_container) catch |err| switch (err) {
error.AnalysisFail => {
assert(self.totalErrorCount() != 0);
},
error.OutOfMemory => return error.OutOfMemory,
else => |e| {
module.failed_root_src_file = e;
},
};
// TODO only analyze imports if they are still referenced
for (module.import_table.items()) |entry| {
module.unloadFile(entry.value);
module.analyzeNamespace(&entry.value.root_container) catch |err| switch (err) {
error.AnalysisFail => {
assert(self.totalErrorCount() != 0);
},
else => |e| return e,
};
}
/// Create an anonymous Decl for a container literal (struct/enum/union/opaque).
/// The name is derived from the container keyword token's source location, so
/// it is unique per occurrence. Ownership of `decl_arena`'s allocations moves
/// into the returned Decl; `typed_value` becomes the Decl's resolved value.
pub fn createContainerDecl(
    mod: *Module,
    scope: *Scope,
    base_token: std.zig.ast.TokenIndex,
    decl_arena: *std.heap.ArenaAllocator,
    typed_value: TypedValue,
) !*Decl {
    // The anonymous decl hangs off the decl currently being analyzed.
    const scope_decl = scope.ownerDecl().?;
    // e.g. "struct:12:34"; freed here because createNewDecl copies what it keeps.
    const name = try mod.getAnonTypeName(scope, base_token);
    defer mod.gpa.free(name);
    const name_hash = scope.namespace().fullyQualifiedNameHash(name);
    // NOTE(review): src_hash is deliberately left undefined — presumably
    // anonymous container decls are never re-validated against source text;
    // confirm before relying on this field downstream.
    const src_hash: std.zig.SrcHash = undefined;
    const new_decl = try mod.createNewDecl(scope, name, scope_decl.src_node, name_hash, src_hash);
    // Move the caller's arena state into decl-owned memory so the typed
    // value's allocations live exactly as long as the Decl itself.
    const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);
    decl_arena_state.* = decl_arena.state;
    new_decl.typed_value = .{
        .most_recent = .{
            .typed_value = typed_value,
            .arena = decl_arena_state,
        },
    };
    // The value is already fully resolved; mark analysis complete for this
    // compilation generation so it is not re-analyzed.
    new_decl.analysis = .complete;
    new_decl.generation = mod.generation;
    return new_decl;
}
/// Build a name for an anonymous container from its keyword token and source
/// position, e.g. "struct:12:34". Caller owns the returned memory.
fn getAnonTypeName(mod: *Module, scope: *Scope, base_token: std.zig.ast.TokenIndex) ![]u8 {
    // TODO add namespaces, generic function signatures
    const ast_tree = scope.tree();
    const kind_name: []const u8 = switch (ast_tree.tokens.items(.tag)[base_token]) {
        .keyword_struct => "struct",
        .keyword_enum => "enum",
        .keyword_union => "union",
        .keyword_opaque => "opaque",
        else => unreachable,
    };
    const location = ast_tree.tokenLocation(0, base_token);
    return std.fmt.allocPrint(mod.gpa, "{s}:{d}:{d}", .{
        kind_name, location.line, location.column,
    });
}

View File

@ -1420,6 +1420,7 @@ fn varDecl(
return mod.failNode(scope, var_decl.ast.align_node, "TODO implement alignment on locals", .{});
}
const astgen = gz.astgen;
const gpa = mod.gpa;
const tree = gz.tree();
const token_tags = tree.tokens.items(.tag);
@ -1438,7 +1439,7 @@ fn varDecl(
const msg = try mod.errMsg(scope, name_src, "redefinition of '{s}'", .{
ident_name,
});
errdefer msg.destroy(mod.gpa);
errdefer msg.destroy(gpa);
try mod.errNote(scope, local_val.src, msg, "previous definition is here", .{});
break :msg msg;
};
@ -1453,7 +1454,7 @@ fn varDecl(
const msg = try mod.errMsg(scope, name_src, "redefinition of '{s}'", .{
ident_name,
});
errdefer msg.destroy(mod.gpa);
errdefer msg.destroy(gpa);
try mod.errNote(scope, local_ptr.src, msg, "previous definition is here", .{});
break :msg msg;
};
@ -1467,9 +1468,19 @@ fn varDecl(
}
// Namespace vars shadowing detection
if (mod.lookupDeclName(scope, ident_name)) |_| {
// TODO add note for other definition
return mod.fail(scope, name_src, "redefinition of '{s}'", .{ident_name});
if (mod.lookupIdentifier(scope, ident_name)) |decl| {
const msg = msg: {
const msg = try mod.errMsg(
scope,
name_src,
"redeclaration of '{s}'",
.{ident_name},
);
errdefer msg.destroy(gpa);
try mod.errNoteNonLazy(decl.srcLoc(), msg, "previously declared here", .{});
break :msg msg;
};
return mod.failWithOwnedErrorMsg(scope, msg);
}
if (var_decl.ast.init_node == 0) {
return mod.fail(scope, name_src, "variables must be initialized", .{});
@ -1503,7 +1514,7 @@ fn varDecl(
.force_comptime = gz.force_comptime,
.astgen = astgen,
};
defer init_scope.instructions.deinit(mod.gpa);
defer init_scope.instructions.deinit(gpa);
var resolve_inferred_alloc: zir.Inst.Ref = .none;
var opt_type_inst: zir.Inst.Ref = .none;
@ -1529,7 +1540,7 @@ fn varDecl(
// Move the init_scope instructions into the parent scope, eliding
// the alloc instruction and the store_to_block_ptr instruction.
const expected_len = parent_zir.items.len + init_scope.instructions.items.len - 2;
try parent_zir.ensureCapacity(mod.gpa, expected_len);
try parent_zir.ensureCapacity(gpa, expected_len);
for (init_scope.instructions.items) |src_inst| {
if (astgen.indexToRef(src_inst) == init_scope.rl_ptr) continue;
if (zir_tags[src_inst] == .store_to_block_ptr) {
@ -1554,7 +1565,7 @@ fn varDecl(
// Move the init_scope instructions into the parent scope, swapping
// store_to_block_ptr for store_to_inferred_ptr.
const expected_len = parent_zir.items.len + init_scope.instructions.items.len;
try parent_zir.ensureCapacity(mod.gpa, expected_len);
try parent_zir.ensureCapacity(gpa, expected_len);
for (init_scope.instructions.items) |src_inst| {
if (zir_tags[src_inst] == .store_to_block_ptr) {
if (zir_datas[src_inst].bin.lhs == init_scope.rl_ptr) {
@ -1798,6 +1809,91 @@ fn arrayTypeSentinel(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.I
return rvalue(gz, scope, rl, result, node);
}
/// Lower a struct declaration's fields into ZIR. Emits a `StructDecl` payload
/// followed, in `astgen.extra`, by a bit-bag describing which fields carry
/// align/default expressions (2 bits per field, 16 fields per u32 word) and
/// then the per-field data (name, type, optional align, optional default).
pub fn structDeclInner(
    gz: *GenZir,
    scope: *Scope,
    node: ast.Node.Index,
    container_decl: ast.full.ContainerDecl,
    tag: zir.Inst.Tag,
) InnerError!zir.Inst.Ref {
    if (container_decl.ast.members.len == 0) {
        return gz.addPlNode(tag, node, zir.Inst.StructDecl{ .fields_len = 0 });
    }
    const astgen = gz.astgen;
    const mod = astgen.mod;
    const gpa = mod.gpa;
    const tree = gz.tree();
    const node_tags = tree.nodes.items(.tag);

    var fields_data = ArrayListUnmanaged(u32){};
    defer fields_data.deinit(gpa);
    // field_name and field_type are both mandatory
    try fields_data.ensureCapacity(gpa, container_decl.ast.members.len * 2);

    // We only need this if there are greater than 16 fields.
    var bit_bag = ArrayListUnmanaged(u32){};
    defer bit_bag.deinit(gpa);

    var cur_bit_bag: u32 = 0;
    var field_index: usize = 0;
    for (container_decl.ast.members) |member_node| {
        const member = switch (node_tags[member_node]) {
            .container_field_init => tree.containerFieldInit(member_node),
            .container_field_align => tree.containerFieldAlign(member_node),
            .container_field => tree.containerField(member_node),
            // Non-field members (decls) contribute nothing to the field data.
            else => continue,
        };
        if (field_index % 16 == 0 and field_index != 0) {
            try bit_bag.append(gpa, cur_bit_bag);
            cur_bit_bag = 0;
        }
        if (member.comptime_token) |comptime_token| {
            return mod.failTok(scope, comptime_token, "TODO implement comptime struct fields", .{});
        }
        try fields_data.ensureCapacity(gpa, fields_data.items.len + 4);

        const field_name = try gz.identAsString(member.ast.name_token);
        fields_data.appendAssumeCapacity(field_name);

        const field_type = try typeExpr(gz, scope, member.ast.type_expr);
        fields_data.appendAssumeCapacity(@enumToInt(field_type));

        const have_align = member.ast.align_expr != 0;
        const have_value = member.ast.value_expr != 0;
        // Fill the bag word from the top; older fields shift toward bit 0.
        cur_bit_bag = (cur_bit_bag >> 2) |
            (@as(u32, @boolToInt(have_align)) << 30) |
            (@as(u32, @boolToInt(have_value)) << 31);

        if (have_align) {
            const align_inst = try comptimeExpr(gz, scope, .{ .ty = .u32_type }, member.ast.align_expr);
            fields_data.appendAssumeCapacity(@enumToInt(align_inst));
        }
        if (have_value) {
            const default_inst = try comptimeExpr(gz, scope, .{ .ty = field_type }, member.ast.value_expr);
            fields_data.appendAssumeCapacity(@enumToInt(default_inst));
        }

        field_index += 1;
    }
    if (field_index == 0) {
        // Every member was a declaration; the struct has no fields.
        return gz.addPlNode(tag, node, zir.Inst.StructDecl{ .fields_len = 0 });
    }
    // Finish the partially-filled final bag word. When the field count is an
    // exact multiple of 16 the word is already full; a shift of 32 does not
    // fit in u5 (it previously tripped the @intCast safety check), so only
    // shift when there are genuinely empty slots.
    const empty_slot_count = 16 - (field_index % 16);
    if (empty_slot_count < 16) {
        cur_bit_bag >>= @intCast(u5, empty_slot_count * 2);
    }

    const result = try gz.addPlNode(tag, node, zir.Inst.StructDecl{
        // Count only actual fields; members.len also includes decls that were
        // skipped above, so using it here would overcount.
        .fields_len = @intCast(u32, field_index),
    });
    try astgen.extra.ensureCapacity(gpa, astgen.extra.items.len +
        bit_bag.items.len + 1 + fields_data.items.len);
    astgen.extra.appendSliceAssumeCapacity(bit_bag.items); // Likely empty.
    astgen.extra.appendAssumeCapacity(cur_bit_bag);
    astgen.extra.appendSliceAssumeCapacity(fields_data.items);
    return result;
}
fn containerDecl(
gz: *GenZir,
scope: *Scope,
@ -1827,76 +1923,10 @@ fn containerDecl(
.keyword_extern => zir.Inst.Tag.struct_decl_extern,
else => unreachable,
} else zir.Inst.Tag.struct_decl;
if (container_decl.ast.members.len == 0) {
const result = try gz.addPlNode(tag, node, zir.Inst.StructDecl{
.fields_len = 0,
});
return rvalue(gz, scope, rl, result, node);
}
assert(arg_inst == .none);
var fields_data = ArrayListUnmanaged(u32){};
defer fields_data.deinit(gpa);
// field_name and field_type are both mandatory
try fields_data.ensureCapacity(gpa, container_decl.ast.members.len * 2);
// We only need this if there are greater than 16 fields.
var bit_bag = ArrayListUnmanaged(u32){};
defer bit_bag.deinit(gpa);
var cur_bit_bag: u32 = 0;
var field_index: usize = 0;
for (container_decl.ast.members) |member_node| {
const member = switch (node_tags[member_node]) {
.container_field_init => tree.containerFieldInit(member_node),
.container_field_align => tree.containerFieldAlign(member_node),
.container_field => tree.containerField(member_node),
else => continue,
};
if (field_index % 16 == 0 and field_index != 0) {
try bit_bag.append(gpa, cur_bit_bag);
cur_bit_bag = 0;
}
if (member.comptime_token) |comptime_token| {
return mod.failTok(scope, comptime_token, "TODO implement comptime struct fields", .{});
}
try fields_data.ensureCapacity(gpa, fields_data.items.len + 4);
const field_name = try gz.identAsString(member.ast.name_token);
fields_data.appendAssumeCapacity(field_name);
const field_type = try typeExpr(gz, scope, member.ast.type_expr);
fields_data.appendAssumeCapacity(@enumToInt(field_type));
const have_align = member.ast.align_expr != 0;
const have_value = member.ast.value_expr != 0;
cur_bit_bag = (cur_bit_bag >> 2) |
(@as(u32, @boolToInt(have_align)) << 30) |
(@as(u32, @boolToInt(have_value)) << 31);
if (have_align) {
const align_inst = try comptimeExpr(gz, scope, .{ .ty = .u32_type }, member.ast.align_expr);
fields_data.appendAssumeCapacity(@enumToInt(align_inst));
}
if (have_value) {
const default_inst = try comptimeExpr(gz, scope, .{ .ty = field_type }, member.ast.value_expr);
fields_data.appendAssumeCapacity(@enumToInt(default_inst));
}
field_index += 1;
}
const empty_slot_count = 16 - (field_index % 16);
cur_bit_bag >>= @intCast(u5, empty_slot_count * 2);
const result = try gz.addPlNode(tag, node, zir.Inst.StructDecl{
.fields_len = @intCast(u32, container_decl.ast.members.len),
});
try astgen.extra.ensureCapacity(gpa, astgen.extra.items.len +
bit_bag.items.len + 1 + fields_data.items.len);
astgen.extra.appendSliceAssumeCapacity(bit_bag.items); // Likely empty.
astgen.extra.appendAssumeCapacity(cur_bit_bag);
astgen.extra.appendSliceAssumeCapacity(fields_data.items);
const result = try structDeclInner(gz, scope, node, container_decl, tag);
return rvalue(gz, scope, rl, result, node);
},
.keyword_union => {
@ -2930,7 +2960,7 @@ pub const SwitchProngSrc = union(enum) {
) LazySrcLoc {
@setCold(true);
const switch_node = decl.relativeToNodeIndex(switch_node_offset);
const tree = decl.container.file_scope.base.tree();
const tree = decl.namespace.file_scope.base.tree();
const main_tokens = tree.nodes.items(.main_token);
const node_datas = tree.nodes.items(.data);
const node_tags = tree.nodes.items(.tag);
@ -3692,7 +3722,7 @@ fn identifier(
};
}
const decl = mod.lookupDeclName(scope, ident_name) orelse {
const decl = mod.lookupIdentifier(scope, ident_name) orelse {
// TODO insert a "dependency on the non-existence of a decl" here to make this
// compile error go away when the decl is introduced. This data should be in a global
// sparse map since it is only relevant when a compile error occurs.

View File

@ -385,7 +385,7 @@ pub const AllErrors = struct {
const notes = try arena.allocator.alloc(Message, module_err_msg.notes.len);
for (notes) |*note, i| {
const module_note = module_err_msg.notes[i];
const source = try module_note.src_loc.fileScope().getSource(module);
const source = try module_note.src_loc.fileScope().getSource(module.gpa);
const byte_offset = try module_note.src_loc.byteOffset();
const loc = std.zig.findLineColumn(source, byte_offset);
const sub_file_path = module_note.src_loc.fileScope().sub_file_path;
@ -400,7 +400,7 @@ pub const AllErrors = struct {
},
};
}
const source = try module_err_msg.src_loc.fileScope().getSource(module);
const source = try module_err_msg.src_loc.fileScope().getSource(module.gpa);
const byte_offset = try module_err_msg.src_loc.byteOffset();
const loc = std.zig.findLineColumn(source, byte_offset);
const sub_file_path = module_err_msg.src_loc.fileScope().sub_file_path;
@ -1049,37 +1049,18 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
// However we currently do not have serialization of such metadata, so for now
// we set up an empty Module that does the entire compilation fresh.
const root_scope = try gpa.create(Module.Scope.File);
errdefer gpa.destroy(root_scope);
const struct_ty = try Type.Tag.empty_struct.create(gpa, &root_scope.root_container);
root_scope.* = .{
// TODO this is duped so it can be freed in Container.deinit
.sub_file_path = try gpa.dupe(u8, root_pkg.root_src_path),
.source = .{ .unloaded = {} },
.tree = undefined,
.status = .never_loaded,
.pkg = root_pkg,
.root_container = .{
.file_scope = root_scope,
.decls = .{},
.ty = struct_ty,
.parent_name_hash = root_pkg.namespace_hash,
},
};
const module = try arena.create(Module);
errdefer module.deinit();
module.* = .{
.gpa = gpa,
.comp = comp,
.root_pkg = root_pkg,
.root_scope = root_scope,
.zig_cache_artifact_directory = zig_cache_artifact_directory,
.emit_h = options.emit_h,
.error_name_list = try std.ArrayListUnmanaged([]const u8).initCapacity(gpa, 1),
};
module.error_name_list.appendAssumeCapacity("(no error)");
break :blk module;
} else blk: {
if (options.emit_h != null) return error.NoZigModuleForCHeader;
@ -1485,31 +1466,50 @@ pub fn update(self: *Compilation) !void {
module.compile_log_text.shrinkAndFree(module.gpa, 0);
module.generation += 1;
// TODO Detect which source files changed.
// Until then we simulate a full cache miss. Source files could have been loaded
// for any reason; to force a refresh we unload now.
module.unloadFile(module.root_scope);
module.failed_root_src_file = null;
module.analyzeContainer(&module.root_scope.root_container) catch |err| switch (err) {
error.AnalysisFail => {
assert(self.totalErrorCount() != 0);
},
error.OutOfMemory => return error.OutOfMemory,
else => |e| {
module.failed_root_src_file = e;
},
};
// TODO only analyze imports if they are still referenced
// Detect which source files changed.
for (module.import_table.items()) |entry| {
module.unloadFile(entry.value);
module.analyzeContainer(&entry.value.root_container) catch |err| switch (err) {
error.AnalysisFail => {
assert(self.totalErrorCount() != 0);
},
const file = entry.value;
var f = try file.pkg.root_src_directory.handle.openFile(file.sub_file_path, .{});
defer f.close();
// TODO handle error here by populating a retryable compile error
const stat = try f.stat();
const unchanged_metadata =
stat.size == file.stat_size and
stat.mtime == file.stat_mtime and
stat.inode == file.stat_inode;
if (unchanged_metadata) {
log.debug("unmodified metadata of file: {s}", .{file.sub_file_path});
continue;
}
const prev_hash = file.source_hash;
file.unloadSource(module.gpa);
// TODO handle error here by populating a retryable compile error
try file.finishGettingSource(module.gpa, f, stat);
assert(file.source_loaded);
if (mem.eql(u8, &prev_hash, &file.source_hash)) {
file.updateTreeToNewSource();
log.debug("unmodified source hash of file: {s}", .{file.sub_file_path});
continue;
}
log.debug("source contents changed: {s}", .{file.sub_file_path});
if (file.status == .unloaded_parse_failure) {
module.failed_files.swapRemove(file).?.value.destroy(module.gpa);
}
file.unloadTree(module.gpa);
module.analyzeFile(file) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => continue,
else => |e| return e,
};
}
// Simulate `_ = @import("std");` which in turn imports start.zig.
_ = try module.importFile(module.root_pkg, "std");
}
}
@ -1551,7 +1551,9 @@ pub fn update(self: *Compilation) !void {
// to report error messages. Otherwise we unload all source files to save memory.
if (self.totalErrorCount() == 0 and !self.keep_source_files_loaded) {
if (self.bin_file.options.module) |module| {
module.root_scope.unload(self.gpa);
for (module.import_table.items()) |entry| {
entry.value.unload(self.gpa);
}
}
}
}
@ -1580,13 +1582,13 @@ pub fn totalErrorCount(self: *Compilation) usize {
// the previous parse success, including compile errors, but we cannot
// emit them until the file succeeds parsing.
for (module.failed_decls.items()) |entry| {
if (entry.key.container.file_scope.status == .unloaded_parse_failure) {
if (entry.key.namespace.file_scope.status == .unloaded_parse_failure) {
continue;
}
total += 1;
}
for (module.emit_h_failed_decls.items()) |entry| {
if (entry.key.container.file_scope.status == .unloaded_parse_failure) {
if (entry.key.namespace.file_scope.status == .unloaded_parse_failure) {
continue;
}
total += 1;
@ -1641,7 +1643,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
try AllErrors.add(module, &arena, &errors, entry.value.*);
}
for (module.failed_decls.items()) |entry| {
if (entry.key.container.file_scope.status == .unloaded_parse_failure) {
if (entry.key.namespace.file_scope.status == .unloaded_parse_failure) {
// Skip errors for Decls within files that had a parse failure.
// We'll try again once parsing succeeds.
continue;
@ -1649,7 +1651,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
try AllErrors.add(module, &arena, &errors, entry.value.*);
}
for (module.emit_h_failed_decls.items()) |entry| {
if (entry.key.container.file_scope.status == .unloaded_parse_failure) {
if (entry.key.namespace.file_scope.status == .unloaded_parse_failure) {
// Skip errors for Decls within files that had a parse failure.
// We'll try again once parsing succeeds.
continue;

File diff suppressed because it is too large Load Diff

View File

@ -609,10 +609,11 @@ fn zirStructDecl(
.owner_decl = sema.owner_decl,
.fields = fields_map,
.node_offset = inst_data.src_node,
.container = .{
.namespace = .{
.parent = sema.owner_decl.namespace,
.parent_name_hash = new_decl.fullyQualifiedNameHash(),
.ty = struct_ty,
.file_scope = block.getFileScope(),
.parent_name_hash = new_decl.fullyQualifiedNameHash(),
},
};
return sema.analyzeDeclVal(block, src, new_decl);
@ -3640,42 +3641,43 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError
const mod = sema.mod;
const arena = sema.arena;
const container_scope = container_type.getContainerScope() orelse return mod.fail(
const namespace = container_type.getNamespace() orelse return mod.fail(
&block.base,
lhs_src,
"expected struct, enum, union, or opaque, found '{}'",
.{container_type},
);
if (mod.lookupDeclName(&container_scope.base, decl_name)) |decl| {
// TODO if !decl.is_pub and inDifferentFiles() return false
return mod.constBool(arena, src, true);
} else {
return mod.constBool(arena, src, false);
if (mod.lookupInNamespace(namespace, decl_name)) |decl| {
if (decl.is_pub or decl.namespace.file_scope == block.base.namespace().file_scope) {
return mod.constBool(arena, src, true);
}
}
return mod.constBool(arena, src, false);
}
fn zirImport(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand = try sema.resolveConstString(block, operand_src, inst_data.operand);
const file_scope = sema.analyzeImport(block, src, operand) catch |err| switch (err) {
const file = mod.importFile(block.getFileScope().pkg, operand) catch |err| switch (err) {
error.ImportOutsidePkgPath => {
return sema.mod.fail(&block.base, src, "import of file outside package path: '{s}'", .{operand});
return mod.fail(&block.base, src, "import of file outside package path: '{s}'", .{operand});
},
error.FileNotFound => {
return sema.mod.fail(&block.base, src, "unable to find '{s}'", .{operand});
return mod.fail(&block.base, src, "unable to find '{s}'", .{operand});
},
else => {
// TODO: make sure this gets retried and not cached
return sema.mod.fail(&block.base, src, "unable to open '{s}': {s}", .{ operand, @errorName(err) });
return mod.fail(&block.base, src, "unable to open '{s}': {s}", .{ operand, @errorName(err) });
},
};
return sema.mod.constType(sema.arena, src, file_scope.root_container.ty);
return mod.constType(sema.arena, src, file.namespace.ty);
}
fn zirShl(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
@ -4707,21 +4709,9 @@ fn namedFieldPtr(
});
},
.Struct, .Opaque, .Union => {
if (child_type.getContainerScope()) |container_scope| {
if (mod.lookupDeclName(&container_scope.base, field_name)) |decl| {
if (!decl.is_pub and !(decl.container.file_scope == block.base.namespace().file_scope))
return mod.fail(&block.base, src, "'{s}' is private", .{field_name});
return sema.analyzeDeclRef(block, src, decl);
}
// TODO this will give false positives for structs inside the root file
if (container_scope.file_scope == mod.root_scope) {
return mod.fail(
&block.base,
src,
"root source file has no member named '{s}'",
.{field_name},
);
if (child_type.getNamespace()) |namespace| {
if (try sema.analyzeNamespaceLookup(block, src, namespace, field_name)) |inst| {
return inst;
}
}
// TODO add note: declared here
@ -4736,11 +4726,9 @@ fn namedFieldPtr(
});
},
.Enum => {
if (child_type.getContainerScope()) |container_scope| {
if (mod.lookupDeclName(&container_scope.base, field_name)) |decl| {
if (!decl.is_pub and !(decl.container.file_scope == block.base.namespace().file_scope))
return mod.fail(&block.base, src, "'{s}' is private", .{field_name});
return sema.analyzeDeclRef(block, src, decl);
if (child_type.getNamespace()) |namespace| {
if (try sema.analyzeNamespaceLookup(block, src, namespace, field_name)) |inst| {
return inst;
}
}
const field_index = child_type.enumFieldIndex(field_name) orelse {
@ -4778,6 +4766,32 @@ fn namedFieldPtr(
return mod.fail(&block.base, src, "type '{}' does not support field access", .{elem_ty});
}
/// Look `decl_name` up in `namespace`. Returns a reference to the Decl, or
/// null when no such declaration exists. Referencing a non-pub Decl from a
/// different file is a compile error with a "declared here" note.
fn analyzeNamespaceLookup(
    sema: *Sema,
    block: *Scope.Block,
    src: LazySrcLoc,
    namespace: *Scope.Namespace,
    decl_name: []const u8,
) InnerError!?*Inst {
    const mod = sema.mod;
    const decl = mod.lookupInNamespace(namespace, decl_name) orelse return null;
    // Visible when public, or when the use site lives in the same file.
    const accessible = decl.is_pub or decl.namespace.file_scope == block.getFileScope();
    if (accessible) {
        return try sema.analyzeDeclRef(block, src, decl);
    }
    const gpa = sema.gpa;
    const msg = msg: {
        const msg = try mod.errMsg(&block.base, src, "'{s}' is not marked 'pub'", .{
            decl_name,
        });
        errdefer msg.destroy(gpa);
        try mod.errNoteNonLazy(decl.srcLoc(), msg, "declared here", .{});
        break :msg msg;
    };
    return mod.failWithOwnedErrorMsg(&block.base, msg);
}
fn analyzeStructFieldPtr(
sema: *Sema,
block: *Scope.Block,
@ -5326,65 +5340,6 @@ fn analyzeSlice(
return sema.mod.fail(&block.base, src, "TODO implement analysis of slice", .{});
}
/// Resolve `target_string` — either a package name found in the current
/// package's table, or a file path relative to the importing file's package
/// root — to a File scope, creating and analyzing it on first import.
/// Results are memoized in `mod.import_table`, keyed by the resolved path.
/// Returns error.ImportOutsidePkgPath when a relative import escapes the
/// package root directory.
fn analyzeImport(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, target_string: []const u8) !*Scope.File {
    const cur_pkg = block.getFileScope().pkg;
    const cur_pkg_dir_path = cur_pkg.root_src_directory.path orelse ".";
    // A package-table hit means `target_string` names a declared dependency;
    // otherwise it is treated as a relative path.
    const found_pkg = cur_pkg.table.get(target_string);

    const resolved_path = if (found_pkg) |pkg|
        try std.fs.path.resolve(sema.gpa, &[_][]const u8{ pkg.root_src_directory.path orelse ".", pkg.root_src_path })
    else
        try std.fs.path.resolve(sema.gpa, &[_][]const u8{ cur_pkg_dir_path, target_string });
    errdefer sema.gpa.free(resolved_path);

    if (sema.mod.import_table.get(resolved_path)) |cached_import| {
        // Already imported; the table entry owns its own copy of the path.
        sema.gpa.free(resolved_path);
        return cached_import;
    }

    if (found_pkg == null) {
        // Relative imports may not reach outside the package directory.
        const resolved_root_path = try std.fs.path.resolve(sema.gpa, &[_][]const u8{cur_pkg_dir_path});
        defer sema.gpa.free(resolved_root_path);
        if (!mem.startsWith(u8, resolved_path, resolved_root_path)) {
            return error.ImportOutsidePkgPath;
        }
    }

    // TODO Scope.Container arena for ty and sub_file_path
    const file_scope = try sema.gpa.create(Scope.File);
    errdefer sema.gpa.destroy(file_scope);
    const struct_ty = try Type.Tag.empty_struct.create(sema.gpa, &file_scope.root_container);
    errdefer sema.gpa.destroy(struct_ty.castTag(.empty_struct).?);

    // A package import hashes directly to the package's namespace; a
    // path import hashes relative to the importing package's namespace.
    const container_name_hash: Scope.NameHash = if (found_pkg) |pkg|
        pkg.namespace_hash
    else
        std.zig.hashName(cur_pkg.namespace_hash, "/", resolved_path);

    // `resolved_path` ownership transfers to the file scope here (freed via
    // sub_file_path); the errdefers above cover failure before the table put.
    file_scope.* = .{
        .sub_file_path = resolved_path,
        .source = .{ .unloaded = {} },
        .tree = undefined,
        .status = .never_loaded,
        .pkg = found_pkg orelse cur_pkg,
        .root_container = .{
            .file_scope = file_scope,
            .decls = .{},
            .ty = struct_ty,
            .parent_name_hash = container_name_hash,
        },
    };
    // Analysis failures are reported through the compilation's error list
    // rather than propagated; only unexpected errors bubble up.
    sema.mod.analyzeContainer(&file_scope.root_container) catch |err| switch (err) {
        error.AnalysisFail => {
            assert(sema.mod.comp.totalErrorCount() != 0);
        },
        else => |e| return e,
    };
    try sema.mod.import_table.put(sema.gpa, file_scope.sub_file_path, file_scope);
    return file_scope;
}
/// Asserts that lhs and rhs types are both numeric.
fn cmpNumeric(
sema: *Sema,

View File

@ -411,8 +411,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try branch_stack.append(.{});
const src_data: struct { lbrace_src: usize, rbrace_src: usize, source: []const u8 } = blk: {
const container_scope = module_fn.owner_decl.container;
const tree = container_scope.file_scope.tree;
const namespace = module_fn.owner_decl.namespace;
const tree = namespace.file_scope.tree;
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const token_starts = tree.tokens.items(.start);

View File

@ -2223,7 +2223,7 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
try dbg_line_buffer.ensureCapacity(26);
const line_off: u28 = blk: {
const tree = decl.container.file_scope.tree;
const tree = decl.namespace.file_scope.tree;
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const token_starts = tree.tokens.items(.start);
@ -2749,7 +2749,7 @@ pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Dec
if (self.llvm_object) |_| return;
const tree = decl.container.file_scope.tree;
const tree = decl.namespace.file_scope.tree;
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const token_starts = tree.tokens.items(.start);

View File

@ -904,7 +904,7 @@ pub fn updateDeclLineNumber(self: *DebugSymbols, module: *Module, decl: *const M
const tracy = trace(@src());
defer tracy.end();
const tree = decl.container.file_scope.tree;
const tree = decl.namespace.file_scope.tree;
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const token_starts = tree.tokens.items(.start);
@ -953,7 +953,7 @@ pub fn initDeclDebugBuffers(
try dbg_line_buffer.ensureCapacity(26);
const line_off: u28 = blk: {
const tree = decl.container.file_scope.tree;
const tree = decl.namespace.file_scope.tree;
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const token_starts = tree.tokens.items(.start);

View File

@ -2052,11 +2052,11 @@ pub const Type = extern union {
(self.isSinglePointer() and self.elemType().zigTypeTag() == .Array);
}
/// Returns null if the type has no container.
pub fn getContainerScope(self: Type) ?*Module.Scope.Container {
/// Returns null if the type has no namespace.
pub fn getNamespace(self: Type) ?*Module.Scope.Namespace {
return switch (self.tag()) {
.@"struct" => &self.castTag(.@"struct").?.data.container,
.enum_full => &self.castTag(.enum_full).?.data.container,
.@"struct" => &self.castTag(.@"struct").?.data.namespace,
.enum_full => &self.castTag(.enum_full).?.data.namespace,
.empty_struct => self.castTag(.empty_struct).?.data,
.@"opaque" => &self.castTag(.@"opaque").?.data,
@ -2226,6 +2226,29 @@ pub const Type = extern union {
}
}
/// Return the Decl that owns this type. Asserts the type is one of the
/// decl-backed kinds (enum, struct, error set); opaque is not yet implemented.
pub fn getOwnerDecl(ty: Type) *Module.Decl {
    return switch (ty.tag()) {
        .enum_full, .enum_nonexhaustive => ty.cast(Payload.EnumFull).?.data.owner_decl,
        .enum_simple => ty.castTag(.enum_simple).?.data.owner_decl,
        .@"struct" => ty.castTag(.@"struct").?.data.owner_decl,
        .error_set => ty.castTag(.error_set).?.data.owner_decl,
        .@"opaque" => @panic("TODO"),
        else => unreachable,
    };
}
/// Asserts the type is an enum.
pub fn enumHasInt(ty: Type, int: Value, target: Target) bool {
const S = struct {
@ -2564,12 +2587,12 @@ pub const Type = extern union {
/// Most commonly used for files.
pub const ContainerScope = struct {
base: Payload,
data: *Module.Scope.Container,
data: *Module.Scope.Namespace,
};
pub const Opaque = struct {
base: Payload = .{ .tag = .@"opaque" },
data: Module.Scope.Container,
data: Module.Scope.Namespace,
};
pub const Struct = struct {

View File

@ -1048,7 +1048,7 @@ pub fn addCases(ctx: *TestContext) !void {
"Hello, World!\n",
);
try case.files.append(.{
.src =
.src =
\\pub fn print() void {
\\ asm volatile ("syscall"
\\ :
@ -1082,10 +1082,14 @@ pub fn addCases(ctx: *TestContext) !void {
\\ unreachable;
\\}
,
&.{":2:25: error: 'print' is private"},
&.{
":2:25: error: 'print' is not marked 'pub'",
"print.zig:2:1: note: declared here",
},
);
try case.files.append(.{
.src =
.src =
\\// dummy comment to make print be on line 2
\\fn print() void {
\\ asm volatile ("syscall"
\\ :
@ -1102,22 +1106,22 @@ pub fn addCases(ctx: *TestContext) !void {
});
}
ctx.compileError("function redefinition", linux_x64,
ctx.compileError("function redeclaration", linux_x64,
\\// dummy comment
\\fn entry() void {}
\\fn entry() void {}
, &[_][]const u8{
":3:4: error: redefinition of 'entry'",
":2:1: note: previous definition here",
":3:4: error: redeclaration of 'entry'",
":2:1: note: previously declared here",
});
ctx.compileError("global variable redefinition", linux_x64,
ctx.compileError("global variable redeclaration", linux_x64,
\\// dummy comment
\\var foo = false;
\\var foo = true;
, &[_][]const u8{
":3:5: error: redefinition of 'foo'",
":2:1: note: previous definition here",
":3:5: error: redeclaration of 'foo'",
":2:1: note: previously declared here",
});
ctx.compileError("compileError", linux_x64,