Merge pull request #12456 from Vexu/stage2

Stage2 namespacing fixes
Andrew Kelley 2022-08-16 19:57:22 -04:00 committed by GitHub
commit a12abc6d6c
6 changed files with 103 additions and 9 deletions
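
Note: the AstGen change in this diff reports an error when a declaration inside a nested namespace reuses the name of an enclosing local or function parameter. A minimal sketch of the rejected pattern (not part of this commit; it mirrors the new compile-error test case at the end of the diff):

fn foo(a: usize) void {
    // Rejected after this change: `a` inside the nested struct redeclares
    // the function parameter, and a note points back at the parameter.
    const S = struct {
        const a = 1;
    };
    _ = S;
    _ = a;
}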

View File

@@ -15,16 +15,16 @@ const testing = std.testing;
/// var slice = a.slice(); // a slice of the 64-byte array
/// var a_clone = a; // creates a copy - the structure doesn't use any internal pointers
/// ```
-pub fn BoundedArray(comptime T: type, comptime capacity: usize) type {
+pub fn BoundedArray(comptime T: type, comptime buffer_capacity: usize) type {
    return struct {
        const Self = @This();
-        buffer: [capacity]T = undefined,
+        buffer: [buffer_capacity]T = undefined,
        len: usize = 0,
        /// Set the actual length of the slice.
        /// Returns error.Overflow if it exceeds the length of the backing array.
        pub fn init(len: usize) error{Overflow}!Self {
-            if (len > capacity) return error.Overflow;
+            if (len > buffer_capacity) return error.Overflow;
            return Self{ .len = len };
        }
@@ -41,7 +41,7 @@ pub fn BoundedArray(comptime T: type, comptime capacity: usize) type {
        /// Adjust the slice's length to `len`.
        /// Does not initialize added items if any.
        pub fn resize(self: *Self, len: usize) error{Overflow}!void {
-            if (len > capacity) return error.Overflow;
+            if (len > buffer_capacity) return error.Overflow;
            self.len = len;
        }
@@ -69,7 +69,7 @@ pub fn BoundedArray(comptime T: type, comptime capacity: usize) type {
        /// Check that the slice can hold at least `additional_count` items.
        pub fn ensureUnusedCapacity(self: Self, additional_count: usize) error{Overflow}!void {
-            if (self.len + additional_count > capacity) {
+            if (self.len + additional_count > buffer_capacity) {
                return error.Overflow;
            }
        }
@@ -83,7 +83,7 @@ pub fn BoundedArray(comptime T: type, comptime capacity: usize) type {
        /// Increase length by 1, returning pointer to the new item.
        /// Asserts that there is space for the new item.
        pub fn addOneAssumeCapacity(self: *Self) *T {
-            assert(self.len < capacity);
+            assert(self.len < buffer_capacity);
            self.len += 1;
            return &self.slice()[self.len - 1];
        }
@@ -236,7 +236,7 @@ pub fn BoundedArray(comptime T: type, comptime capacity: usize) type {
        pub fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
            const old_len = self.len;
            self.len += n;
-            assert(self.len <= capacity);
+            assert(self.len <= buffer_capacity);
            mem.set(T, self.slice()[old_len..self.len], value);
        }

View File

@@ -1947,7 +1947,7 @@ noinline fn showMyTrace() usize {
/// For more advanced usage, see `ConfigurableTrace`.
pub const Trace = ConfigurableTrace(2, 4, builtin.mode == .Debug);
-pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize, comptime enabled: bool) type {
+pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize, comptime is_enabled: bool) type {
    return struct {
        addrs: [actual_size][stack_frame_count]usize = undefined,
        notes: [actual_size][]const u8 = undefined,
@@ -1956,7 +1956,7 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
        const actual_size = if (enabled) size else 0;
        const Index = if (enabled) usize else u0;
-        pub const enabled = enabled;
+        pub const enabled = is_enabled;
        pub const add = if (enabled) addNoInline else addNoOp;
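
The rename above (enabled to is_enabled) keeps the struct's pub const enabled decl from colliding with the comptime parameter under the redeclaration check added in the next file; the capacity to buffer_capacity rename in the BoundedArray diff looks like the same kind of adjustment. A reduced sketch of the pattern being avoided (Example is an illustrative name, not from the commit):

pub fn Example(comptime is_enabled: bool) type {
    return struct {
        // Had the parameter kept the name `enabled`, this decl would now be
        // reported as a redeclaration of the function parameter.
        pub const enabled = is_enabled;
    };
}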

View File

@@ -11751,6 +11751,46 @@ fn scanDecls(astgen: *AstGen, namespace: *Scope.Namespace, members: []const Ast.
                error.OutOfMemory => return error.OutOfMemory,
            }
        }
+        // const index_name = try astgen.identAsString(index_token);
+        var s = namespace.parent;
+        while (true) switch (s.tag) {
+            .local_val => {
+                const local_val = s.cast(Scope.LocalVal).?;
+                if (local_val.name == name_str_index) {
+                    return astgen.failTokNotes(name_token, "redeclaration of {s} '{s}'", .{
+                        @tagName(local_val.id_cat), token_bytes,
+                    }, &[_]u32{
+                        try astgen.errNoteTok(
+                            local_val.token_src,
+                            "previous declaration here",
+                            .{},
+                        ),
+                    });
+                }
+                s = local_val.parent;
+            },
+            .local_ptr => {
+                const local_ptr = s.cast(Scope.LocalPtr).?;
+                if (local_ptr.name == name_str_index) {
+                    return astgen.failTokNotes(name_token, "redeclaration of {s} '{s}'", .{
+                        @tagName(local_ptr.id_cat), token_bytes,
+                    }, &[_]u32{
+                        try astgen.errNoteTok(
+                            local_ptr.token_src,
+                            "previous declaration here",
+                            .{},
+                        ),
+                    });
+                }
+                s = local_ptr.parent;
+            },
+            .namespace => s = s.cast(Scope.Namespace).?.parent,
+            .gen_zir => s = s.cast(GenZir).?.parent,
+            .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
+            .defer_gen => s = s.cast(Scope.DeferGen).?.parent,
+            .top => break,
+        };
        gop.value_ptr.* = member_node;
    }
    return decl_count;

View File

@@ -5384,6 +5384,17 @@ fn lookupInNamespace(
        }
    }
+    {
+        var i: usize = 0;
+        while (i < candidates.items.len) {
+            if (candidates.items[i] == sema.owner_decl_index) {
+                _ = candidates.orderedRemove(i);
+            } else {
+                i += 1;
+            }
+        }
+    }
    switch (candidates.items.len) {
        0 => {},
        1 => {
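
The new block drops the decl currently being analyzed (sema.owner_decl_index) from the ambiguity candidates, so a lookup performed from inside a declaration cannot become ambiguous with that same declaration re-exported through usingnamespace. A reduced sketch of the scenario, assumed here and exercised more thoroughly by the behavior test added in the next file:

const S = struct {
    const foo = 666;
    const a = @This();
    const b = struct {
        // Resolving `a.foo` happens while analyzing `b.foo`; previously the
        // in-progress `b.foo` (visible in S via the usingnamespace below)
        // also counted as a candidate and the reference was ambiguous.
        const foo = a.foo;
    };
    usingnamespace b;
};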

View File

@@ -1104,3 +1104,24 @@ test "namespace lookup ignores decl causing the lookup" {
    };
    _ = S.foo();
}
+test "ambiguous reference error ignores current declaration" {
+    const S = struct {
+        const foo = 666;
+        const a = @This();
+        const b = struct {
+            const foo = a.foo;
+            const bar = struct {
+                bar: u32 = b.foo,
+            };
+            comptime {
+                _ = b.foo;
+            }
+        };
+        usingnamespace b;
+    };
+    try expect(S.b.foo == 666);
+}

View File

@@ -0,0 +1,22 @@
+fn foo(a: usize) void {
+    struct {
+        const a = 1;
+    };
+}
+fn bar(a: usize) void {
+    struct {
+        const b = struct {
+            const a = 1;
+        };
+    };
+    _ = a;
+}
+// error
+// backend=stage2
+// target=native
+//
+// :3:15: error: redeclaration of function parameter 'a'
+// :1:8: note: previous declaration here
+// :9:19: error: redeclaration of function parameter 'a'
+// :6:8: note: previous declaration here