From 711bf55eaa643c3d05640bebbf3e4315477b8ed8 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 18 Apr 2022 07:49:23 -0700 Subject: [PATCH 1/5] std: bring back SegmentedList I want to use it for the self-hosted compiler. --- lib/std/segmented_list.zig | 464 +++++++++++++++++++++++++++++++++++++ lib/std/std.zig | 1 + 2 files changed, 465 insertions(+) create mode 100644 lib/std/segmented_list.zig diff --git a/lib/std/segmented_list.zig b/lib/std/segmented_list.zig new file mode 100644 index 0000000000..ae3697904d --- /dev/null +++ b/lib/std/segmented_list.zig @@ -0,0 +1,464 @@ +const std = @import("std.zig"); +const assert = std.debug.assert; +const testing = std.testing; +const Allocator = std.mem.Allocator; + +// Imagine that `fn at(self: *Self, index: usize) &T` is a customer asking for a box +// from a warehouse, based on a flat array, boxes ordered from 0 to N - 1. +// But the warehouse actually stores boxes in shelves of increasing powers of 2 sizes. +// So when the customer requests a box index, we have to translate it to shelf index +// and box index within that shelf. Illustration: +// +// customer indexes: +// shelf 0: 0 +// shelf 1: 1 2 +// shelf 2: 3 4 5 6 +// shelf 3: 7 8 9 10 11 12 13 14 +// shelf 4: 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 +// shelf 5: 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 +// ... +// +// warehouse indexes: +// shelf 0: 0 +// shelf 1: 0 1 +// shelf 2: 0 1 2 3 +// shelf 3: 0 1 2 3 4 5 6 7 +// shelf 4: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 +// shelf 5: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 +// ... +// +// With this arrangement, here are the equations to get the shelf index and +// box index based on customer box index: +// +// shelf_index = floor(log2(customer_index + 1)) +// shelf_count = ceil(log2(box_count + 1)) +// box_index = customer_index + 1 - 2 ** shelf +// shelf_size = 2 ** shelf_index +// +// Now we complicate it a little bit further by adding a preallocated shelf, which must be +// a power of 2: +// prealloc=4 +// +// customer indexes: +// prealloc: 0 1 2 3 +// shelf 0: 4 5 6 7 8 9 10 11 +// shelf 1: 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 +// shelf 2: 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 +// ... +// +// warehouse indexes: +// prealloc: 0 1 2 3 +// shelf 0: 0 1 2 3 4 5 6 7 +// shelf 1: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 +// shelf 2: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 +// ... +// +// Now the equations are: +// +// shelf_index = floor(log2(customer_index + prealloc)) - log2(prealloc) - 1 +// shelf_count = ceil(log2(box_count + prealloc)) - log2(prealloc) - 1 +// box_index = customer_index + prealloc - 2 ** (log2(prealloc) + 1 + shelf) +// shelf_size = prealloc * 2 ** (shelf_index + 1) + +/// This is a stack data structure where pointers to indexes have the same lifetime as the data structure +/// itself, unlike ArrayList where push() invalidates all existing element pointers. +/// The tradeoff is that elements are not guaranteed to be contiguous. For that, use ArrayList. +/// Note however that most elements are contiguous, making this data structure cache-friendly. +/// +/// Because it never has to copy elements from an old location to a new location, it does not require +/// its elements to be copyable, and it avoids wasting memory when backed by an ArenaAllocator. 
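+///
+/// For example, a pointer obtained from at() stays valid across a later push()
+/// (a minimal sketch; `list` is assumed to be an initialized SegmentedList(i32, 0)
+/// holding at least one element):
+///
+///     const first = list.at(0); // *i32 pointing into the first shelf
+///     try list.push(42); // may allocate a new shelf, but never moves existing ones
+///     assert(first == list.at(0)); // same address: nothing was reallocated
+///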
+/// Note that the push() and pop() convenience methods perform a copy, but you can instead use +/// addOne(), at(), setCapacity(), and shrinkCapacity() to avoid copying items. +/// +/// This data structure has O(1) push and O(1) pop. +/// +/// It supports preallocated elements, making it especially well suited when the expected maximum +/// size is small. `prealloc_item_count` must be 0, or a power of 2. +pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type { + return struct { + const Self = @This(); + const ShelfIndex = std.math.Log2Int(usize); + + const prealloc_exp: ShelfIndex = blk: { + // we don't use the prealloc_exp constant when prealloc_item_count is 0 + // but lazy-init may still be triggered by other code so supply a value + if (prealloc_item_count == 0) { + break :blk 0; + } else { + assert(std.math.isPowerOfTwo(prealloc_item_count)); + const value = std.math.log2_int(usize, prealloc_item_count); + break :blk value; + } + }; + + prealloc_segment: [prealloc_item_count]T, + dynamic_segments: [][*]T, + allocator: Allocator, + len: usize, + + pub const prealloc_count = prealloc_item_count; + + fn AtType(comptime SelfType: type) type { + if (@typeInfo(SelfType).Pointer.is_const) { + return *const T; + } else { + return *T; + } + } + + /// Deinitialize with `deinit` + pub fn init(allocator: Allocator) Self { + return Self{ + .allocator = allocator, + .len = 0, + .prealloc_segment = undefined, + .dynamic_segments = &[_][*]T{}, + }; + } + + pub fn deinit(self: *Self) void { + self.freeShelves(@intCast(ShelfIndex, self.dynamic_segments.len), 0); + self.allocator.free(self.dynamic_segments); + self.* = undefined; + } + + pub fn at(self: anytype, i: usize) AtType(@TypeOf(self)) { + assert(i < self.len); + return self.uncheckedAt(i); + } + + pub fn count(self: Self) usize { + return self.len; + } + + pub fn push(self: *Self, item: T) !void { + const new_item_ptr = try self.addOne(); + new_item_ptr.* = item; + } + + pub fn pushMany(self: *Self, items: []const T) !void { + for (items) |item| { + try self.push(item); + } + } + + pub fn pop(self: *Self) ?T { + if (self.len == 0) return null; + + const index = self.len - 1; + const result = uncheckedAt(self, index).*; + self.len = index; + return result; + } + + pub fn addOne(self: *Self) !*T { + const new_length = self.len + 1; + try self.growCapacity(new_length); + const result = uncheckedAt(self, self.len); + self.len = new_length; + return result; + } + + /// Grows or shrinks capacity to match usage. 
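+        /// For example, with prealloc_item_count = 4, setCapacity(20) needs
+        /// ceil(log2(20 + 4)) - log2(4) - 1 = 2 dynamic shelves (sizes 8 and 16),
+        /// for a total capacity of 4 + 8 + 16 = 28. A sketch, assuming an
+        /// initialized `list`:
+        ///
+        ///     try list.setCapacity(20); // allocates whole shelves up front
+        ///     try list.setCapacity(list.len); // may free shelves that are no longer needed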
+ pub fn setCapacity(self: *Self, new_capacity: usize) !void { + if (prealloc_item_count != 0) { + if (new_capacity <= @as(usize, 1) << (prealloc_exp + @intCast(ShelfIndex, self.dynamic_segments.len))) { + return self.shrinkCapacity(new_capacity); + } + } + return self.growCapacity(new_capacity); + } + + /// Only grows capacity, or retains current capacity + pub fn growCapacity(self: *Self, new_capacity: usize) !void { + const new_cap_shelf_count = shelfCount(new_capacity); + const old_shelf_count = @intCast(ShelfIndex, self.dynamic_segments.len); + if (new_cap_shelf_count > old_shelf_count) { + self.dynamic_segments = try self.allocator.realloc(self.dynamic_segments, new_cap_shelf_count); + var i = old_shelf_count; + errdefer { + self.freeShelves(i, old_shelf_count); + self.dynamic_segments = self.allocator.shrink(self.dynamic_segments, old_shelf_count); + } + while (i < new_cap_shelf_count) : (i += 1) { + self.dynamic_segments[i] = (try self.allocator.alloc(T, shelfSize(i))).ptr; + } + } + } + + /// Only shrinks capacity or retains current capacity + pub fn shrinkCapacity(self: *Self, new_capacity: usize) void { + if (new_capacity <= prealloc_item_count) { + const len = @intCast(ShelfIndex, self.dynamic_segments.len); + self.freeShelves(len, 0); + self.allocator.free(self.dynamic_segments); + self.dynamic_segments = &[_][*]T{}; + return; + } + + const new_cap_shelf_count = shelfCount(new_capacity); + const old_shelf_count = @intCast(ShelfIndex, self.dynamic_segments.len); + assert(new_cap_shelf_count <= old_shelf_count); + if (new_cap_shelf_count == old_shelf_count) { + return; + } + + self.freeShelves(old_shelf_count, new_cap_shelf_count); + self.dynamic_segments = self.allocator.shrink(self.dynamic_segments, new_cap_shelf_count); + } + + pub fn shrink(self: *Self, new_len: usize) void { + assert(new_len <= self.len); + // TODO take advantage of the new realloc semantics + self.len = new_len; + } + + pub fn writeToSlice(self: *Self, dest: []T, start: usize) void { + const end = start + dest.len; + assert(end <= self.len); + + var i = start; + if (end <= prealloc_item_count) { + std.mem.copy(T, dest[i - start ..], self.prealloc_segment[i..end]); + return; + } else if (i < prealloc_item_count) { + std.mem.copy(T, dest[i - start ..], self.prealloc_segment[i..]); + i = prealloc_item_count; + } + + while (i < end) { + const shelf_index = shelfIndex(i); + const copy_start = boxIndex(i, shelf_index); + const copy_end = std.math.min(shelfSize(shelf_index), copy_start + end - i); + + std.mem.copy( + T, + dest[i - start ..], + self.dynamic_segments[shelf_index][copy_start..copy_end], + ); + + i += (copy_end - copy_start); + } + } + + pub fn uncheckedAt(self: anytype, index: usize) AtType(@TypeOf(self)) { + if (index < prealloc_item_count) { + return &self.prealloc_segment[index]; + } + const shelf_index = shelfIndex(index); + const box_index = boxIndex(index, shelf_index); + return &self.dynamic_segments[shelf_index][box_index]; + } + + fn shelfCount(box_count: usize) ShelfIndex { + if (prealloc_item_count == 0) { + return log2_int_ceil(usize, box_count + 1); + } + return log2_int_ceil(usize, box_count + prealloc_item_count) - prealloc_exp - 1; + } + + fn shelfSize(shelf_index: ShelfIndex) usize { + if (prealloc_item_count == 0) { + return @as(usize, 1) << shelf_index; + } + return @as(usize, 1) << (shelf_index + (prealloc_exp + 1)); + } + + fn shelfIndex(list_index: usize) ShelfIndex { + if (prealloc_item_count == 0) { + return std.math.log2_int(usize, list_index + 1); + } + return 
std.math.log2_int(usize, list_index + prealloc_item_count) - prealloc_exp - 1; + } + + fn boxIndex(list_index: usize, shelf_index: ShelfIndex) usize { + if (prealloc_item_count == 0) { + return (list_index + 1) - (@as(usize, 1) << shelf_index); + } + return list_index + prealloc_item_count - (@as(usize, 1) << ((prealloc_exp + 1) + shelf_index)); + } + + fn freeShelves(self: *Self, from_count: ShelfIndex, to_count: ShelfIndex) void { + var i = from_count; + while (i != to_count) { + i -= 1; + self.allocator.free(self.dynamic_segments[i][0..shelfSize(i)]); + } + } + + pub const Iterator = struct { + list: *Self, + index: usize, + box_index: usize, + shelf_index: ShelfIndex, + shelf_size: usize, + + pub fn next(it: *Iterator) ?*T { + if (it.index >= it.list.len) return null; + if (it.index < prealloc_item_count) { + const ptr = &it.list.prealloc_segment[it.index]; + it.index += 1; + if (it.index == prealloc_item_count) { + it.box_index = 0; + it.shelf_index = 0; + it.shelf_size = prealloc_item_count * 2; + } + return ptr; + } + + const ptr = &it.list.dynamic_segments[it.shelf_index][it.box_index]; + it.index += 1; + it.box_index += 1; + if (it.box_index == it.shelf_size) { + it.shelf_index += 1; + it.box_index = 0; + it.shelf_size *= 2; + } + return ptr; + } + + pub fn prev(it: *Iterator) ?*T { + if (it.index == 0) return null; + + it.index -= 1; + if (it.index < prealloc_item_count) return &it.list.prealloc_segment[it.index]; + + if (it.box_index == 0) { + it.shelf_index -= 1; + it.shelf_size /= 2; + it.box_index = it.shelf_size - 1; + } else { + it.box_index -= 1; + } + + return &it.list.dynamic_segments[it.shelf_index][it.box_index]; + } + + pub fn peek(it: *Iterator) ?*T { + if (it.index >= it.list.len) + return null; + if (it.index < prealloc_item_count) + return &it.list.prealloc_segment[it.index]; + + return &it.list.dynamic_segments[it.shelf_index][it.box_index]; + } + + pub fn set(it: *Iterator, index: usize) void { + it.index = index; + if (index < prealloc_item_count) return; + it.shelf_index = shelfIndex(index); + it.box_index = boxIndex(index, it.shelf_index); + it.shelf_size = shelfSize(it.shelf_index); + } + }; + + pub fn iterator(self: *Self, start_index: usize) Iterator { + var it = Iterator{ + .list = self, + .index = undefined, + .shelf_index = undefined, + .box_index = undefined, + .shelf_size = undefined, + }; + it.set(start_index); + return it; + } + }; +} + +test "basic usage" { + const a = std.testing.allocator; + + try testSegmentedList(0, a); + try testSegmentedList(1, a); + try testSegmentedList(2, a); + try testSegmentedList(4, a); + try testSegmentedList(8, a); + try testSegmentedList(16, a); +} + +fn testSegmentedList(comptime prealloc: usize, allocator: Allocator) !void { + var list = SegmentedList(i32, prealloc).init(allocator); + defer list.deinit(); + + { + var i: usize = 0; + while (i < 100) : (i += 1) { + try list.push(@intCast(i32, i + 1)); + try testing.expect(list.len == i + 1); + } + } + + { + var i: usize = 0; + while (i < 100) : (i += 1) { + try testing.expect(list.at(i).* == @intCast(i32, i + 1)); + } + } + + { + var it = list.iterator(0); + var x: i32 = 0; + while (it.next()) |item| { + x += 1; + try testing.expect(item.* == x); + } + try testing.expect(x == 100); + while (it.prev()) |item| : (x -= 1) { + try testing.expect(item.* == x); + } + try testing.expect(x == 0); + } + + try testing.expect(list.pop().? 
== 100); + try testing.expect(list.len == 99); + + try list.pushMany(&[_]i32{ 1, 2, 3 }); + try testing.expect(list.len == 102); + try testing.expect(list.pop().? == 3); + try testing.expect(list.pop().? == 2); + try testing.expect(list.pop().? == 1); + try testing.expect(list.len == 99); + + try list.pushMany(&[_]i32{}); + try testing.expect(list.len == 99); + + { + var i: i32 = 99; + while (list.pop()) |item| : (i -= 1) { + try testing.expect(item == i); + list.shrinkCapacity(list.len); + } + } + + { + var control: [100]i32 = undefined; + var dest: [100]i32 = undefined; + + var i: i32 = 0; + while (i < 100) : (i += 1) { + try list.push(i + 1); + control[@intCast(usize, i)] = i + 1; + } + + std.mem.set(i32, dest[0..], 0); + list.writeToSlice(dest[0..], 0); + try testing.expect(std.mem.eql(i32, control[0..], dest[0..])); + + std.mem.set(i32, dest[0..], 0); + list.writeToSlice(dest[50..], 50); + try testing.expect(std.mem.eql(i32, control[50..], dest[50..])); + } + + try list.setCapacity(0); +} + +/// TODO look into why this std.math function was changed in +/// fc9430f56798a53f9393a697f4ccd6bf9981b970. +fn log2_int_ceil(comptime T: type, x: T) std.math.Log2Int(T) { + assert(x != 0); + const log2_val = std.math.log2_int(T, x); + if (@as(T, 1) << log2_val == x) + return log2_val; + return log2_val + 1; +} diff --git a/lib/std/std.zig b/lib/std/std.zig index 831a82cbbe..32e16b40c2 100644 --- a/lib/std/std.zig +++ b/lib/std/std.zig @@ -29,6 +29,7 @@ pub const PackedIntSliceEndian = @import("packed_int_array.zig").PackedIntSliceE pub const PriorityQueue = @import("priority_queue.zig").PriorityQueue; pub const PriorityDequeue = @import("priority_dequeue.zig").PriorityDequeue; pub const Progress = @import("Progress.zig"); +pub const SegmentedList = @import("segmented_list.zig").SegmentedList; pub const SemanticVersion = @import("SemanticVersion.zig"); pub const SinglyLinkedList = @import("linked_list.zig").SinglyLinkedList; pub const StaticBitSet = bit_set.StaticBitSet; From 99112b63bdeec512e164f289556b24fa1554a775 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 18 Apr 2022 08:28:43 -0700 Subject: [PATCH 2/5] std.SegmentedList: breaking API changes * Remove the Allocator field; instead it must be passed in as a parameter to any function that needs it. * Rename `push` to `append` and `pushMany` to `appendSlice` to match the conventions set by ArrayList. --- lib/std/segmented_list.zig | 106 +++++++++++++++++-------------------- 1 file changed, 48 insertions(+), 58 deletions(-) diff --git a/lib/std/segmented_list.zig b/lib/std/segmented_list.zig index ae3697904d..8bdfe7dceb 100644 --- a/lib/std/segmented_list.zig +++ b/lib/std/segmented_list.zig @@ -61,16 +61,16 @@ const Allocator = std.mem.Allocator; // shelf_size = prealloc * 2 ** (shelf_index + 1) /// This is a stack data structure where pointers to indexes have the same lifetime as the data structure -/// itself, unlike ArrayList where push() invalidates all existing element pointers. +/// itself, unlike ArrayList where append() invalidates all existing element pointers. /// The tradeoff is that elements are not guaranteed to be contiguous. For that, use ArrayList. /// Note however that most elements are contiguous, making this data structure cache-friendly. /// /// Because it never has to copy elements from an old location to a new location, it does not require /// its elements to be copyable, and it avoids wasting memory when backed by an ArenaAllocator. 
-/// Note that the push() and pop() convenience methods perform a copy, but you can instead use +/// Note that the append() and pop() convenience methods perform a copy, but you can instead use /// addOne(), at(), setCapacity(), and shrinkCapacity() to avoid copying items. /// -/// This data structure has O(1) push and O(1) pop. +/// This data structure has O(1) append and O(1) pop. /// /// It supports preallocated elements, making it especially well suited when the expected maximum /// size is small. `prealloc_item_count` must be 0, or a power of 2. @@ -91,10 +91,9 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type } }; - prealloc_segment: [prealloc_item_count]T, - dynamic_segments: [][*]T, - allocator: Allocator, - len: usize, + prealloc_segment: [prealloc_item_count]T = undefined, + dynamic_segments: [][*]T = &[_][*]T{}, + len: usize = 0, pub const prealloc_count = prealloc_item_count; @@ -106,19 +105,9 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type } } - /// Deinitialize with `deinit` - pub fn init(allocator: Allocator) Self { - return Self{ - .allocator = allocator, - .len = 0, - .prealloc_segment = undefined, - .dynamic_segments = &[_][*]T{}, - }; - } - - pub fn deinit(self: *Self) void { - self.freeShelves(@intCast(ShelfIndex, self.dynamic_segments.len), 0); - self.allocator.free(self.dynamic_segments); + pub fn deinit(self: *Self, allocator: Allocator) void { + self.freeShelves(allocator, @intCast(ShelfIndex, self.dynamic_segments.len), 0); + allocator.free(self.dynamic_segments); self.* = undefined; } @@ -131,14 +120,14 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type return self.len; } - pub fn push(self: *Self, item: T) !void { - const new_item_ptr = try self.addOne(); + pub fn append(self: *Self, allocator: Allocator, item: T) Allocator.Error!void { + const new_item_ptr = try self.addOne(allocator); new_item_ptr.* = item; } - pub fn pushMany(self: *Self, items: []const T) !void { + pub fn appendSlice(self: *Self, allocator: Allocator, items: []const T) Allocator.Error!void { for (items) |item| { - try self.push(item); + try self.append(allocator, item); } } @@ -151,47 +140,48 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type return result; } - pub fn addOne(self: *Self) !*T { + pub fn addOne(self: *Self, allocator: Allocator) Allocator.Error!*T { const new_length = self.len + 1; - try self.growCapacity(new_length); + try self.growCapacity(allocator, new_length); const result = uncheckedAt(self, self.len); self.len = new_length; return result; } /// Grows or shrinks capacity to match usage. 
- pub fn setCapacity(self: *Self, new_capacity: usize) !void { + /// TODO update this and related methods to match the conventions set by ArrayList + pub fn setCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void { if (prealloc_item_count != 0) { if (new_capacity <= @as(usize, 1) << (prealloc_exp + @intCast(ShelfIndex, self.dynamic_segments.len))) { - return self.shrinkCapacity(new_capacity); + return self.shrinkCapacity(allocator, new_capacity); } } - return self.growCapacity(new_capacity); + return self.growCapacity(allocator, new_capacity); } /// Only grows capacity, or retains current capacity - pub fn growCapacity(self: *Self, new_capacity: usize) !void { + pub fn growCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void { const new_cap_shelf_count = shelfCount(new_capacity); const old_shelf_count = @intCast(ShelfIndex, self.dynamic_segments.len); if (new_cap_shelf_count > old_shelf_count) { - self.dynamic_segments = try self.allocator.realloc(self.dynamic_segments, new_cap_shelf_count); + self.dynamic_segments = try allocator.realloc(self.dynamic_segments, new_cap_shelf_count); var i = old_shelf_count; errdefer { - self.freeShelves(i, old_shelf_count); - self.dynamic_segments = self.allocator.shrink(self.dynamic_segments, old_shelf_count); + self.freeShelves(allocator, i, old_shelf_count); + self.dynamic_segments = allocator.shrink(self.dynamic_segments, old_shelf_count); } while (i < new_cap_shelf_count) : (i += 1) { - self.dynamic_segments[i] = (try self.allocator.alloc(T, shelfSize(i))).ptr; + self.dynamic_segments[i] = (try allocator.alloc(T, shelfSize(i))).ptr; } } } /// Only shrinks capacity or retains current capacity - pub fn shrinkCapacity(self: *Self, new_capacity: usize) void { + pub fn shrinkCapacity(self: *Self, allocator: Allocator, new_capacity: usize) void { if (new_capacity <= prealloc_item_count) { const len = @intCast(ShelfIndex, self.dynamic_segments.len); - self.freeShelves(len, 0); - self.allocator.free(self.dynamic_segments); + self.freeShelves(allocator, len, 0); + allocator.free(self.dynamic_segments); self.dynamic_segments = &[_][*]T{}; return; } @@ -203,8 +193,8 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type return; } - self.freeShelves(old_shelf_count, new_cap_shelf_count); - self.dynamic_segments = self.allocator.shrink(self.dynamic_segments, new_cap_shelf_count); + self.freeShelves(allocator, old_shelf_count, new_cap_shelf_count); + self.dynamic_segments = allocator.shrink(self.dynamic_segments, new_cap_shelf_count); } pub fn shrink(self: *Self, new_len: usize) void { @@ -278,11 +268,11 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type return list_index + prealloc_item_count - (@as(usize, 1) << ((prealloc_exp + 1) + shelf_index)); } - fn freeShelves(self: *Self, from_count: ShelfIndex, to_count: ShelfIndex) void { + fn freeShelves(self: *Self, allocator: Allocator, from_count: ShelfIndex, to_count: ShelfIndex) void { var i = from_count; while (i != to_count) { i -= 1; - self.allocator.free(self.dynamic_segments[i][0..shelfSize(i)]); + allocator.free(self.dynamic_segments[i][0..shelfSize(i)]); } } @@ -367,24 +357,24 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type } test "basic usage" { - const a = std.testing.allocator; - - try testSegmentedList(0, a); - try testSegmentedList(1, a); - try testSegmentedList(2, a); - try testSegmentedList(4, a); - try testSegmentedList(8, a); - try 
testSegmentedList(16, a); + try testSegmentedList(0); + try testSegmentedList(1); + try testSegmentedList(2); + try testSegmentedList(4); + try testSegmentedList(8); + try testSegmentedList(16); } -fn testSegmentedList(comptime prealloc: usize, allocator: Allocator) !void { - var list = SegmentedList(i32, prealloc).init(allocator); - defer list.deinit(); +fn testSegmentedList(comptime prealloc: usize) !void { + const gpa = std.testing.allocator; + + var list: SegmentedList(i32, prealloc) = .{}; + defer list.deinit(gpa); { var i: usize = 0; while (i < 100) : (i += 1) { - try list.push(@intCast(i32, i + 1)); + try list.append(gpa, @intCast(i32, i + 1)); try testing.expect(list.len == i + 1); } } @@ -413,21 +403,21 @@ fn testSegmentedList(comptime prealloc: usize, allocator: Allocator) !void { try testing.expect(list.pop().? == 100); try testing.expect(list.len == 99); - try list.pushMany(&[_]i32{ 1, 2, 3 }); + try list.appendSlice(gpa, &[_]i32{ 1, 2, 3 }); try testing.expect(list.len == 102); try testing.expect(list.pop().? == 3); try testing.expect(list.pop().? == 2); try testing.expect(list.pop().? == 1); try testing.expect(list.len == 99); - try list.pushMany(&[_]i32{}); + try list.appendSlice(gpa, &[_]i32{}); try testing.expect(list.len == 99); { var i: i32 = 99; while (list.pop()) |item| : (i -= 1) { try testing.expect(item == i); - list.shrinkCapacity(list.len); + list.shrinkCapacity(gpa, list.len); } } @@ -437,7 +427,7 @@ fn testSegmentedList(comptime prealloc: usize, allocator: Allocator) !void { var i: i32 = 0; while (i < 100) : (i += 1) { - try list.push(i + 1); + try list.append(gpa, i + 1); control[@intCast(usize, i)] = i + 1; } @@ -450,7 +440,7 @@ fn testSegmentedList(comptime prealloc: usize, allocator: Allocator) !void { try testing.expect(std.mem.eql(i32, control[50..], dest[50..])); } - try list.setCapacity(0); + try list.setCapacity(gpa, 0); } /// TODO look into why this std.math function was changed in From 4f527e5d36f66a83ff6a263a03f16e2c4d049f1e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 20 Apr 2022 17:16:32 -0700 Subject: [PATCH 3/5] std: fix missing hash map safety There was a missing compile error for calling ensureUnusedCapacity without a Context in the case that the Context is non-void. 
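
For example (a hypothetical sketch, not code from this patch; `gpa` is assumed
to be an Allocator), a hash map declared with a stateful context type would
previously slip past the check and reach the context-taking path with an
undefined context:

    const std = @import("std");

    const SaltedContext = struct {
        salt: u64,
        pub fn hash(self: @This(), key: u32) u64 {
            return std.hash.Wyhash.hash(self.salt, std.mem.asBytes(&key));
        }
        pub fn eql(self: @This(), a: u32, b: u32) bool {
            _ = self;
            return a == b;
        }
    };

    var map: std.HashMapUnmanaged(u32, void, SaltedContext, 80) = .{};
    defer map.deinit(gpa);
    // SaltedContext is non-void, so this must not compile; it now reports
    // "Cannot infer context ..., call ensureUnusedCapacityContext instead."
    //try map.ensureUnusedCapacity(gpa, 16);
    try map.ensureUnusedCapacityContext(gpa, 16, .{ .salt = 0 });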
--- lib/std/array_hash_map.zig | 2 +- lib/std/hash_map.zig | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index 31860963af..304c98a2a9 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -798,7 +798,7 @@ pub fn ArrayHashMapUnmanaged( allocator: Allocator, additional_capacity: usize, ) !void { - if (@sizeOf(ByIndexContext) != 0) + if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead."); return self.ensureUnusedCapacityContext(allocator, additional_capacity, undefined); } diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index 96df243f6e..eb24ef591b 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -913,6 +913,8 @@ pub fn HashMapUnmanaged( } pub fn ensureUnusedCapacity(self: *Self, allocator: Allocator, additional_size: Size) Allocator.Error!void { + if (@sizeOf(Context) != 0) + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureUnusedCapacityContext instead."); return ensureUnusedCapacityContext(self, allocator, additional_size, undefined); } pub fn ensureUnusedCapacityContext(self: *Self, allocator: Allocator, additional_size: Size, ctx: Context) Allocator.Error!void { From f7596ae9423e9de8276629803147e1a243f2177b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 19 Apr 2022 21:51:08 -0700 Subject: [PATCH 4/5] stage2: use indexes for Decl objects Rather than allocating Decl objects with an Allocator, we instead allocate them with a SegmentedList. This provides four advantages: * Stable memory so that one thread can access a Decl object while another thread allocates additional Decl objects from this list. * It allows us to use u32 indexes to reference Decl objects rather than pointers, saving memory in Type, Value, and dependency sets. * Using integers to reference Decl objects rather than pointers makes serialization trivial. * It provides a unique integer to be used for anonymous symbol names, avoiding multi-threaded contention on an atomic counter. --- lib/std/segmented_list.zig | 18 + src/Compilation.zig | 219 ++--- src/Module.zig | 910 +++++++++++--------- src/RangeSet.zig | 32 +- src/Sema.zig | 1564 +++++++++++++++++----------------- src/TypedValue.zig | 52 +- src/arch/aarch64/CodeGen.zig | 58 +- src/arch/arm/CodeGen.zig | 53 +- src/arch/riscv64/CodeGen.zig | 42 +- src/arch/sparcv9/CodeGen.zig | 26 +- src/arch/wasm/CodeGen.zig | 87 +- src/arch/x86_64/CodeGen.zig | 53 +- src/codegen.zig | 24 +- src/codegen/c.zig | 87 +- src/codegen/llvm.zig | 247 +++--- src/codegen/spirv.zig | 16 +- src/crash_report.zig | 15 +- src/link.zig | 98 ++- src/link/C.zig | 63 +- src/link/Coff.zig | 49 +- src/link/Dwarf.zig | 46 +- src/link/Elf.zig | 98 ++- src/link/MachO.zig | 112 ++- src/link/NvPtx.zig | 12 +- src/link/Plan9.zig | 94 +- src/link/SpirV.zig | 24 +- src/link/Wasm.zig | 112 ++- src/main.zig | 8 +- src/print_air.zig | 11 +- src/type.zig | 274 +++--- src/value.zig | 261 +++--- 31 files changed, 2584 insertions(+), 2181 deletions(-) diff --git a/lib/std/segmented_list.zig b/lib/std/segmented_list.zig index 8bdfe7dceb..27667353ef 100644 --- a/lib/std/segmented_list.zig +++ b/lib/std/segmented_list.zig @@ -148,6 +148,24 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type return result; } + /// Reduce length to `new_len`. + /// Invalidates pointers for the elements at index new_len and beyond. 
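+        /// Keeps capacity: the shelves backing the removed elements stay
+        /// allocated and are reused by subsequent appends.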
+        pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
+            assert(new_len <= self.len);
+            self.len = new_len;
+        }
+
+        /// Invalidates all element pointers.
+        pub fn clearRetainingCapacity(self: *Self) void {
+            self.len = 0;
+        }
+
+        /// Invalidates all element pointers.
+        pub fn clearAndFree(self: *Self, allocator: Allocator) void {
+            self.setCapacity(allocator, 0) catch unreachable;
+            self.len = 0;
+        }
+
         /// Grows or shrinks capacity to match usage.
         /// TODO update this and related methods to match the conventions set by ArrayList
         pub fn setCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void {
diff --git a/src/Compilation.zig b/src/Compilation.zig
index f0e490c67d..6019fc0856 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -191,22 +191,22 @@ pub const CSourceFile = struct {
 
 const Job = union(enum) {
     /// Write the constant value for a Decl to the output file.
-    codegen_decl: *Module.Decl,
+    codegen_decl: Module.Decl.Index,
     /// Write the machine code for a function to the output file.
     codegen_func: *Module.Fn,
     /// Render the .h file snippet for the Decl.
-    emit_h_decl: *Module.Decl,
+    emit_h_decl: Module.Decl.Index,
     /// The Decl needs to be analyzed and possibly export itself.
     /// It may have already been analyzed, or it may have been determined
     /// to be outdated; in this case perform semantic analysis again.
-    analyze_decl: *Module.Decl,
+    analyze_decl: Module.Decl.Index,
     /// The file that was loaded with `@embedFile` has changed on disk
     /// and has been re-loaded into memory. All Decls that depend on it
     /// need to be re-analyzed.
     update_embed_file: *Module.EmbedFile,
     /// The source file containing the Decl has been updated, and so the
     /// Decl may need its line number information updated in the debug info.
-    update_line_number: *Module.Decl,
+    update_line_number: Module.Decl.Index,
     /// The main source file for the package needs to be analyzed.
     analyze_pkg: *Package,
 
@@ -2105,17 +2105,18 @@ pub fn update(comp: *Compilation) !void {
     // deletion set may grow as we call `clearDecl` within this loop,
     // and more unreferenced Decls are revealed.
     while (module.deletion_set.count() != 0) {
-        const decl = module.deletion_set.keys()[0];
+        const decl_index = module.deletion_set.keys()[0];
+        const decl = module.declPtr(decl_index);
         assert(decl.deletion_flag);
         assert(decl.dependants.count() == 0);
         const is_anon = if (decl.zir_decl_index == 0) blk: {
-            break :blk decl.src_namespace.anon_decls.swapRemove(decl);
+            break :blk decl.src_namespace.anon_decls.swapRemove(decl_index);
         } else false;
 
-        try module.clearDecl(decl, null);
+        try module.clearDecl(decl_index, null);
 
         if (is_anon) {
-            decl.destroy(module);
+            module.destroyDecl(decl_index);
         }
     }
 
@@ -2444,13 +2445,15 @@ pub fn totalErrorCount(self: *Compilation) usize {
         // the previous parse success, including compile errors, but we cannot
        // emit them until the file succeeds parsing.
for (module.failed_decls.keys()) |key| { - if (key.getFileScope().okToReportErrors()) { + const decl = module.declPtr(key); + if (decl.getFileScope().okToReportErrors()) { total += 1; } } if (module.emit_h) |emit_h| { for (emit_h.failed_decls.keys()) |key| { - if (key.getFileScope().okToReportErrors()) { + const decl = module.declPtr(key); + if (decl.getFileScope().okToReportErrors()) { total += 1; } } @@ -2529,9 +2532,10 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors { { var it = module.failed_decls.iterator(); while (it.next()) |entry| { + const decl = module.declPtr(entry.key_ptr.*); // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. - if (entry.key_ptr.*.getFileScope().okToReportErrors()) { + if (decl.getFileScope().okToReportErrors()) { try AllErrors.add(module, &arena, &errors, entry.value_ptr.*.*); } } @@ -2539,9 +2543,10 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors { if (module.emit_h) |emit_h| { var it = emit_h.failed_decls.iterator(); while (it.next()) |entry| { + const decl = module.declPtr(entry.key_ptr.*); // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. - if (entry.key_ptr.*.getFileScope().okToReportErrors()) { + if (decl.getFileScope().okToReportErrors()) { try AllErrors.add(module, &arena, &errors, entry.value_ptr.*.*); } } @@ -2564,7 +2569,8 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors { const keys = module.compile_log_decls.keys(); const values = module.compile_log_decls.values(); // First one will be the error; subsequent ones will be notes. - const src_loc = keys[0].nodeOffsetSrcLoc(values[0]); + const err_decl = module.declPtr(keys[0]); + const src_loc = err_decl.nodeOffsetSrcLoc(values[0]); const err_msg = Module.ErrorMsg{ .src_loc = src_loc, .msg = "found compile log statement", @@ -2573,8 +2579,9 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors { defer self.gpa.free(err_msg.notes); for (keys[1..]) |key, i| { + const note_decl = module.declPtr(key); err_msg.notes[i] = .{ - .src_loc = key.nodeOffsetSrcLoc(values[i + 1]), + .src_loc = note_decl.nodeOffsetSrcLoc(values[i + 1]), .msg = "also here", }; } @@ -2708,38 +2715,42 @@ pub fn performAllTheWork( fn processOneJob(comp: *Compilation, job: Job) !void { switch (job) { - .codegen_decl => |decl| switch (decl.analysis) { - .unreferenced => unreachable, - .in_progress => unreachable, - .outdated => unreachable, + .codegen_decl => |decl_index| { + if (build_options.omit_stage2) + @panic("sadly stage2 is omitted from this build to save memory on the CI server"); - .file_failure, - .sema_failure, - .codegen_failure, - .dependency_failure, - .sema_failure_retryable, - => return, + const module = comp.bin_file.options.module.?; + const decl = module.declPtr(decl_index); - .complete, .codegen_failure_retryable => { - if (build_options.omit_stage2) - @panic("sadly stage2 is omitted from this build to save memory on the CI server"); + switch (decl.analysis) { + .unreferenced => unreachable, + .in_progress => unreachable, + .outdated => unreachable, - const named_frame = tracy.namedFrame("codegen_decl"); - defer named_frame.end(); + .file_failure, + .sema_failure, + .codegen_failure, + .dependency_failure, + .sema_failure_retryable, + => return, - const module = comp.bin_file.options.module.?; - assert(decl.has_tv); + .complete, .codegen_failure_retryable => { + const named_frame = tracy.namedFrame("codegen_decl"); + defer named_frame.end(); - if (decl.alive) { - try 
module.linkerUpdateDecl(decl); + assert(decl.has_tv); + + if (decl.alive) { + try module.linkerUpdateDecl(decl_index); + return; + } + + // Instead of sending this decl to the linker, we actually will delete it + // because we found out that it in fact was never referenced. + module.deleteUnusedDecl(decl_index); return; - } - - // Instead of sending this decl to the linker, we actually will delete it - // because we found out that it in fact was never referenced. - module.deleteUnusedDecl(decl); - return; - }, + }, + } }, .codegen_func => |func| { if (build_options.omit_stage2) @@ -2754,68 +2765,73 @@ fn processOneJob(comp: *Compilation, job: Job) !void { error.AnalysisFail => return, }; }, - .emit_h_decl => |decl| switch (decl.analysis) { - .unreferenced => unreachable, - .in_progress => unreachable, - .outdated => unreachable, - - .file_failure, - .sema_failure, - .dependency_failure, - .sema_failure_retryable, - => return, - - // emit-h only requires semantic analysis of the Decl to be complete, - // it does not depend on machine code generation to succeed. - .codegen_failure, .codegen_failure_retryable, .complete => { - if (build_options.omit_stage2) - @panic("sadly stage2 is omitted from this build to save memory on the CI server"); - - const named_frame = tracy.namedFrame("emit_h_decl"); - defer named_frame.end(); - - const gpa = comp.gpa; - const module = comp.bin_file.options.module.?; - const emit_h = module.emit_h.?; - _ = try emit_h.decl_table.getOrPut(gpa, decl); - const decl_emit_h = decl.getEmitH(module); - const fwd_decl = &decl_emit_h.fwd_decl; - fwd_decl.shrinkRetainingCapacity(0); - var typedefs_arena = std.heap.ArenaAllocator.init(gpa); - defer typedefs_arena.deinit(); - - var dg: c_codegen.DeclGen = .{ - .gpa = gpa, - .module = module, - .error_msg = null, - .decl = decl, - .fwd_decl = fwd_decl.toManaged(gpa), - .typedefs = c_codegen.TypedefMap.initContext(gpa, .{ - .target = comp.getTarget(), - }), - .typedefs_arena = typedefs_arena.allocator(), - }; - defer dg.fwd_decl.deinit(); - defer dg.typedefs.deinit(); - - c_codegen.genHeader(&dg) catch |err| switch (err) { - error.AnalysisFail => { - try emit_h.failed_decls.put(gpa, decl, dg.error_msg.?); - return; - }, - else => |e| return e, - }; - - fwd_decl.* = dg.fwd_decl.moveToUnmanaged(); - fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len); - }, - }, - .analyze_decl => |decl| { + .emit_h_decl => |decl_index| { if (build_options.omit_stage2) @panic("sadly stage2 is omitted from this build to save memory on the CI server"); const module = comp.bin_file.options.module.?; - module.ensureDeclAnalyzed(decl) catch |err| switch (err) { + const decl = module.declPtr(decl_index); + + switch (decl.analysis) { + .unreferenced => unreachable, + .in_progress => unreachable, + .outdated => unreachable, + + .file_failure, + .sema_failure, + .dependency_failure, + .sema_failure_retryable, + => return, + + // emit-h only requires semantic analysis of the Decl to be complete, + // it does not depend on machine code generation to succeed. 
+ .codegen_failure, .codegen_failure_retryable, .complete => { + const named_frame = tracy.namedFrame("emit_h_decl"); + defer named_frame.end(); + + const gpa = comp.gpa; + const emit_h = module.emit_h.?; + _ = try emit_h.decl_table.getOrPut(gpa, decl_index); + const decl_emit_h = emit_h.declPtr(decl_index); + const fwd_decl = &decl_emit_h.fwd_decl; + fwd_decl.shrinkRetainingCapacity(0); + var typedefs_arena = std.heap.ArenaAllocator.init(gpa); + defer typedefs_arena.deinit(); + + var dg: c_codegen.DeclGen = .{ + .gpa = gpa, + .module = module, + .error_msg = null, + .decl_index = decl_index, + .decl = decl, + .fwd_decl = fwd_decl.toManaged(gpa), + .typedefs = c_codegen.TypedefMap.initContext(gpa, .{ + .mod = module, + }), + .typedefs_arena = typedefs_arena.allocator(), + }; + defer dg.fwd_decl.deinit(); + defer dg.typedefs.deinit(); + + c_codegen.genHeader(&dg) catch |err| switch (err) { + error.AnalysisFail => { + try emit_h.failed_decls.put(gpa, decl_index, dg.error_msg.?); + return; + }, + else => |e| return e, + }; + + fwd_decl.* = dg.fwd_decl.moveToUnmanaged(); + fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len); + }, + } + }, + .analyze_decl => |decl_index| { + if (build_options.omit_stage2) + @panic("sadly stage2 is omitted from this build to save memory on the CI server"); + + const module = comp.bin_file.options.module.?; + module.ensureDeclAnalyzed(decl_index) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => return, }; @@ -2833,7 +2849,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void { error.AnalysisFail => return, }; }, - .update_line_number => |decl| { + .update_line_number => |decl_index| { if (build_options.omit_stage2) @panic("sadly stage2 is omitted from this build to save memory on the CI server"); @@ -2842,9 +2858,10 @@ fn processOneJob(comp: *Compilation, job: Job) !void { const gpa = comp.gpa; const module = comp.bin_file.options.module.?; + const decl = module.declPtr(decl_index); comp.bin_file.updateDeclLineNumber(module, decl) catch |err| { try module.failed_decls.ensureUnusedCapacity(gpa, 1); - module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( + module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( gpa, decl.srcLoc(), "unable to update line number: {s}", @@ -3472,7 +3489,7 @@ fn reportRetryableEmbedFileError( const mod = comp.bin_file.options.module.?; const gpa = mod.gpa; - const src_loc: Module.SrcLoc = embed_file.owner_decl.srcLoc(); + const src_loc: Module.SrcLoc = mod.declPtr(embed_file.owner_decl).srcLoc(); const err_msg = if (embed_file.pkg.root_src_directory.path) |dir_path| try Module.ErrorMsg.create( diff --git a/src/Module.zig b/src/Module.zig index 95ae55feb8..1119d73ab0 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -49,15 +49,15 @@ global_zir_cache: Compilation.Directory, /// Used by AstGen worker to load and store ZIR cache. local_zir_cache: Compilation.Directory, /// It's rare for a decl to be exported, so we save memory by having a sparse -/// map of Decl pointers to details about them being exported. +/// map of Decl indexes to details about them being exported. /// The Export memory is owned by the `export_owners` table; the slice itself /// is owned by this table. The slice is guaranteed to not be empty. 
-decl_exports: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{}, +decl_exports: std.AutoArrayHashMapUnmanaged(Decl.Index, []*Export) = .{}, /// This models the Decls that perform exports, so that `decl_exports` can be updated when a Decl /// is modified. Note that the key of this table is not the Decl being exported, but the Decl that /// is performing the export of another Decl. /// This table owns the Export memory. -export_owners: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{}, +export_owners: std.AutoArrayHashMapUnmanaged(Decl.Index, []*Export) = .{}, /// The set of all the Zig source files in the Module. We keep track of this in order /// to iterate over it and check which source files have been modified on the file system when /// an update is requested, as well as to cache `@import` results. @@ -89,10 +89,10 @@ align_stack_fns: std.AutoHashMapUnmanaged(*const Fn, SetAlignStack) = .{}, /// The ErrorMsg memory is owned by the decl, using Module's general purpose allocator. /// Note that a Decl can succeed but the Fn it represents can fail. In this case, /// a Decl can have a failed_decls entry but have analysis status of success. -failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{}, +failed_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, *ErrorMsg) = .{}, /// Keep track of one `@compileLog` callsite per owner Decl. /// The value is the AST node index offset from the Decl. -compile_log_decls: std.AutoArrayHashMapUnmanaged(*Decl, i32) = .{}, +compile_log_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, i32) = .{}, /// Using a map here for consistency with the other fields here. /// The ErrorMsg memory is owned by the `File`, using Module's general purpose allocator. failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .{}, @@ -102,11 +102,9 @@ failed_embed_files: std.AutoArrayHashMapUnmanaged(*EmbedFile, *ErrorMsg) = .{}, /// The ErrorMsg memory is owned by the `Export`, using Module's general purpose allocator. failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *ErrorMsg) = .{}, -next_anon_name_index: usize = 0, - /// Candidates for deletion. After a semantic analysis update completes, this list /// contains Decls that need to be deleted if they end up having no references to them. -deletion_set: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{}, +deletion_set: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, /// Error tags and their values, tag names are duped with mod.gpa. /// Corresponds with `error_name_list`. @@ -137,7 +135,21 @@ compile_log_text: ArrayListUnmanaged(u8) = .{}, emit_h: ?*GlobalEmitH, -test_functions: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{}, +test_functions: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, + +/// Rather than allocating Decl objects with an Allocator, we instead allocate +/// them with this SegmentedList. This provides four advantages: +/// * Stable memory so that one thread can access a Decl object while another +/// thread allocates additional Decl objects from this list. +/// * It allows us to use u32 indexes to reference Decl objects rather than +/// pointers, saving memory in Type, Value, and dependency sets. +/// * Using integers to reference Decl objects rather than pointers makes +/// serialization trivial. +/// * It provides a unique integer to be used for anonymous symbol names, avoiding +/// multi-threaded contention on an atomic counter. +allocated_decls: std.SegmentedList(Decl, 0) = .{}, +/// When a Decl object is freed from `allocated_decls`, it is pushed into this stack. 
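+/// A later Decl allocation pops from here first, reusing the slot and its index
+/// instead of growing `allocated_decls`.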
+decls_free_list: std.ArrayListUnmanaged(Decl.Index) = .{}, const MonomorphedFuncsSet = std.HashMapUnmanaged( *Fn, @@ -173,7 +185,7 @@ pub const MemoizedCallSet = std.HashMapUnmanaged( ); pub const MemoizedCall = struct { - target: std.Target, + module: *Module, pub const Key = struct { func: *Fn, @@ -191,7 +203,7 @@ pub const MemoizedCall = struct { assert(a.args.len == b.args.len); for (a.args) |a_arg, arg_i| { const b_arg = b.args[arg_i]; - if (!a_arg.eql(b_arg, ctx.target)) { + if (!a_arg.eql(b_arg, ctx.module)) { return false; } } @@ -210,7 +222,7 @@ pub const MemoizedCall = struct { // This logic must be kept in sync with the logic in `analyzeCall` that // computes the hash. for (key.args) |arg| { - arg.hash(&hasher, ctx.target); + arg.hash(&hasher, ctx.module); } return hasher.final(); @@ -231,9 +243,17 @@ pub const GlobalEmitH = struct { /// When emit_h is non-null, each Decl gets one more compile error slot for /// emit-h failing for that Decl. This table is also how we tell if a Decl has /// failed emit-h or succeeded. - failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{}, + failed_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, *ErrorMsg) = .{}, /// Tracks all decls in order to iterate over them and emit .h code for them. - decl_table: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{}, + decl_table: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, + /// Similar to the allocated_decls field of Module, this is where `EmitH` objects + /// are allocated. There will be exactly one EmitH object per Decl object, with + /// identical indexes. + allocated_emit_h: std.SegmentedList(EmitH, 0) = .{}, + + pub fn declPtr(global_emit_h: *GlobalEmitH, decl_index: Decl.Index) *EmitH { + return global_emit_h.allocated_emit_h.at(@enumToInt(decl_index)); + } }; pub const ErrorInt = u32; @@ -244,12 +264,12 @@ pub const Export = struct { /// Represents the position of the export, if any, in the output file. link: link.File.Export, /// The Decl that performs the export. Note that this is *not* the Decl being exported. - owner_decl: *Decl, + owner_decl: Decl.Index, /// The Decl containing the export statement. Inline function calls /// may cause this to be different from the owner_decl. - src_decl: *Decl, + src_decl: Decl.Index, /// The Decl being exported. Note this is *not* the Decl performing the export. - exported_decl: *Decl, + exported_decl: Decl.Index, status: enum { in_progress, failed, @@ -259,22 +279,16 @@ pub const Export = struct { complete, }, - pub fn getSrcLoc(exp: Export) SrcLoc { + pub fn getSrcLoc(exp: Export, mod: *Module) SrcLoc { + const src_decl = mod.declPtr(exp.src_decl); return .{ - .file_scope = exp.src_decl.getFileScope(), - .parent_decl_node = exp.src_decl.src_node, + .file_scope = src_decl.getFileScope(), + .parent_decl_node = src_decl.src_node, .lazy = exp.src, }; } }; -/// When Module emit_h field is non-null, each Decl is allocated via this struct, so that -/// there can be EmitH state attached to each Decl. -pub const DeclPlusEmitH = struct { - decl: Decl, - emit_h: EmitH, -}; - pub const CaptureScope = struct { parent: ?*CaptureScope, @@ -458,38 +472,35 @@ pub const Decl = struct { /// typed_value may need to be regenerated. 
dependencies: DepsTable = .{}, - pub const DepsTable = std.AutoArrayHashMapUnmanaged(*Decl, void); + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?Index) OptionalIndex { + return oi orelse .none; + } + + pub fn unwrap(oi: OptionalIndex) ?Index { + if (oi == .none) return null; + return @intToEnum(Index, @enumToInt(oi)); + } + }; + + pub const DepsTable = std.AutoArrayHashMapUnmanaged(Decl.Index, void); pub fn clearName(decl: *Decl, gpa: Allocator) void { gpa.free(mem.sliceTo(decl.name, 0)); decl.name = undefined; } - pub fn destroy(decl: *Decl, module: *Module) void { - const gpa = module.gpa; - log.debug("destroy {*} ({s})", .{ decl, decl.name }); - _ = module.test_functions.swapRemove(decl); - if (decl.deletion_flag) { - assert(module.deletion_set.swapRemove(decl)); - } - if (decl.has_tv) { - if (decl.getInnerNamespace()) |namespace| { - namespace.destroyDecls(module); - } - decl.clearValues(gpa); - } - decl.dependants.deinit(gpa); - decl.dependencies.deinit(gpa); - decl.clearName(gpa); - if (module.emit_h != null) { - const decl_plus_emit_h = @fieldParentPtr(DeclPlusEmitH, "decl", decl); - decl_plus_emit_h.emit_h.fwd_decl.deinit(gpa); - gpa.destroy(decl_plus_emit_h); - } else { - gpa.destroy(decl); - } - } - pub fn clearValues(decl: *Decl, gpa: Allocator) void { if (decl.getExternFn()) |extern_fn| { extern_fn.deinit(gpa); @@ -573,13 +584,6 @@ pub const Decl = struct { return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); } - /// Returns true if and only if the Decl is the top level struct associated with a File. - pub fn isRoot(decl: *const Decl) bool { - if (decl.src_namespace.parent != null) - return false; - return decl == decl.src_namespace.getDecl(); - } - pub fn relativeToLine(decl: Decl, offset: u32) u32 { return decl.src_line + offset; } @@ -622,20 +626,20 @@ pub const Decl = struct { return tree.tokens.items(.start)[decl.srcToken()]; } - pub fn renderFullyQualifiedName(decl: Decl, writer: anytype) !void { + pub fn renderFullyQualifiedName(decl: Decl, mod: *Module, writer: anytype) !void { const unqualified_name = mem.sliceTo(decl.name, 0); - return decl.src_namespace.renderFullyQualifiedName(unqualified_name, writer); + return decl.src_namespace.renderFullyQualifiedName(mod, unqualified_name, writer); } - pub fn renderFullyQualifiedDebugName(decl: Decl, writer: anytype) !void { + pub fn renderFullyQualifiedDebugName(decl: Decl, mod: *Module, writer: anytype) !void { const unqualified_name = mem.sliceTo(decl.name, 0); - return decl.src_namespace.renderFullyQualifiedDebugName(unqualified_name, writer); + return decl.src_namespace.renderFullyQualifiedDebugName(mod, unqualified_name, writer); } - pub fn getFullyQualifiedName(decl: Decl, gpa: Allocator) ![:0]u8 { - var buffer = std.ArrayList(u8).init(gpa); + pub fn getFullyQualifiedName(decl: Decl, mod: *Module) ![:0]u8 { + var buffer = std.ArrayList(u8).init(mod.gpa); defer buffer.deinit(); - try decl.renderFullyQualifiedName(buffer.writer()); + try decl.renderFullyQualifiedName(mod, buffer.writer()); return buffer.toOwnedSliceSentinel(0); } @@ -662,7 +666,6 @@ pub const Decl = struct { if (!decl.owns_tv) return null; const ty = (decl.val.castTag(.ty) orelse return null).data; const struct_obj = (ty.castTag(.@"struct") orelse return null).data; - assert(struct_obj.owner_decl == decl); return struct_obj; } @@ -672,7 +675,6 @@ pub const 
Decl = struct { if (!decl.owns_tv) return null; const ty = (decl.val.castTag(.ty) orelse return null).data; const union_obj = (ty.cast(Type.Payload.Union) orelse return null).data; - assert(union_obj.owner_decl == decl); return union_obj; } @@ -681,7 +683,6 @@ pub const Decl = struct { pub fn getFunction(decl: *const Decl) ?*Fn { if (!decl.owns_tv) return null; const func = (decl.val.castTag(.function) orelse return null).data; - assert(func.owner_decl == decl); return func; } @@ -690,16 +691,14 @@ pub const Decl = struct { pub fn getExternFn(decl: *const Decl) ?*ExternFn { if (!decl.owns_tv) return null; const extern_fn = (decl.val.castTag(.extern_fn) orelse return null).data; - assert(extern_fn.owner_decl == decl); return extern_fn; } /// If the Decl has a value and it is a variable, returns it, /// otherwise null. - pub fn getVariable(decl: *Decl) ?*Var { + pub fn getVariable(decl: *const Decl) ?*Var { if (!decl.owns_tv) return null; const variable = (decl.val.castTag(.variable) orelse return null).data; - assert(variable.owner_decl == decl); return variable; } @@ -712,12 +711,10 @@ pub const Decl = struct { switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.owner_decl == decl); return &struct_obj.namespace; }, .enum_full, .enum_nonexhaustive => { const enum_obj = ty.cast(Type.Payload.EnumFull).?.data; - assert(enum_obj.owner_decl == decl); return &enum_obj.namespace; }, .empty_struct => { @@ -725,12 +722,10 @@ pub const Decl = struct { }, .@"opaque" => { const opaque_obj = ty.cast(Type.Payload.Opaque).?.data; - assert(opaque_obj.owner_decl == decl); return &opaque_obj.namespace; }, .@"union", .union_tagged => { const union_obj = ty.cast(Type.Payload.Union).?.data; - assert(union_obj.owner_decl == decl); return &union_obj.namespace; }, @@ -757,17 +752,11 @@ pub const Decl = struct { return decl.src_namespace.file_scope; } - pub fn getEmitH(decl: *Decl, module: *Module) *EmitH { - assert(module.emit_h != null); - const decl_plus_emit_h = @fieldParentPtr(DeclPlusEmitH, "decl", decl); - return &decl_plus_emit_h.emit_h; - } - - pub fn removeDependant(decl: *Decl, other: *Decl) void { + pub fn removeDependant(decl: *Decl, other: Decl.Index) void { assert(decl.dependants.swapRemove(other)); } - pub fn removeDependency(decl: *Decl, other: *Decl) void { + pub fn removeDependency(decl: *Decl, other: Decl.Index) void { assert(decl.dependencies.swapRemove(other)); } @@ -790,16 +779,6 @@ pub const Decl = struct { return decl.ty.abiAlignment(target); } } - - pub fn markAlive(decl: *Decl) void { - if (decl.alive) return; - decl.alive = true; - - // This is the first time we are marking this Decl alive. We must - // therefore recurse into its value and mark any Decl it references - // as also alive, so that any Decl referenced does not get garbage collected. - decl.val.markReferencedDeclsAlive(); - } }; /// This state is attached to every Decl when Module emit_h is non-null. @@ -810,7 +789,7 @@ pub const EmitH = struct { /// Represents the data that an explicit error set syntax provides. pub const ErrorSet = struct { /// The Decl that corresponds to the error set itself. - owner_decl: *Decl, + owner_decl: Decl.Index, /// Offset from Decl node index, points to the error set AST node. node_offset: i32, /// The string bytes are stored in the owner Decl arena. 
@@ -819,10 +798,11 @@ pub const ErrorSet = struct { pub const NameMap = std.StringArrayHashMapUnmanaged(void); - pub fn srcLoc(self: ErrorSet) SrcLoc { + pub fn srcLoc(self: ErrorSet, mod: *Module) SrcLoc { + const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = self.owner_decl.getFileScope(), - .parent_decl_node = self.owner_decl.src_node, + .file_scope = owner_decl.getFileScope(), + .parent_decl_node = owner_decl.src_node, .lazy = .{ .node_offset = self.node_offset }, }; } @@ -844,12 +824,12 @@ pub const PropertyBoolean = enum { no, yes, unknown, wip }; /// Represents the data that a struct declaration provides. pub const Struct = struct { - /// The Decl that corresponds to the struct itself. - owner_decl: *Decl, /// Set of field names in declaration order. fields: Fields, /// Represents the declarations inside this struct. namespace: Namespace, + /// The Decl that corresponds to the struct itself. + owner_decl: Decl.Index, /// Offset from `owner_decl`, points to the struct AST node. node_offset: i32, /// Index of the struct_decl ZIR instruction. @@ -900,30 +880,32 @@ pub const Struct = struct { } }; - pub fn getFullyQualifiedName(s: *Struct, gpa: Allocator) ![:0]u8 { - return s.owner_decl.getFullyQualifiedName(gpa); + pub fn getFullyQualifiedName(s: *Struct, mod: *Module) ![:0]u8 { + return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod); } - pub fn srcLoc(s: Struct) SrcLoc { + pub fn srcLoc(s: Struct, mod: *Module) SrcLoc { + const owner_decl = mod.declPtr(s.owner_decl); return .{ - .file_scope = s.owner_decl.getFileScope(), - .parent_decl_node = s.owner_decl.src_node, + .file_scope = owner_decl.getFileScope(), + .parent_decl_node = owner_decl.src_node, .lazy = .{ .node_offset = s.node_offset }, }; } - pub fn fieldSrcLoc(s: Struct, gpa: Allocator, query: FieldSrcQuery) SrcLoc { + pub fn fieldSrcLoc(s: Struct, mod: *Module, query: FieldSrcQuery) SrcLoc { @setCold(true); - const tree = s.owner_decl.getFileScope().getTree(gpa) catch |err| { + const owner_decl = mod.declPtr(s.owner_decl); + const file = owner_decl.getFileScope(); + const tree = file.getTree(mod.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - s.owner_decl.getFileScope().sub_file_path, @errorName(err), + file.sub_file_path, @errorName(err), }); - return s.srcLoc(); + return s.srcLoc(mod); }; - const node = s.owner_decl.relativeToNodeIndex(s.node_offset); + const node = owner_decl.relativeToNodeIndex(s.node_offset); const node_tags = tree.nodes.items(.tag); - const file = s.owner_decl.getFileScope(); switch (node_tags[node]) { .container_decl, .container_decl_trailing, @@ -1013,18 +995,19 @@ pub const Struct = struct { /// the number of fields. pub const EnumSimple = struct { /// The Decl that corresponds to the enum itself. - owner_decl: *Decl, - /// Set of field names in declaration order. - fields: NameMap, + owner_decl: Decl.Index, /// Offset from `owner_decl`, points to the enum decl AST node. node_offset: i32, + /// Set of field names in declaration order. 
+ fields: NameMap, pub const NameMap = EnumFull.NameMap; - pub fn srcLoc(self: EnumSimple) SrcLoc { + pub fn srcLoc(self: EnumSimple, mod: *Module) SrcLoc { + const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = self.owner_decl.getFileScope(), - .parent_decl_node = self.owner_decl.src_node, + .file_scope = owner_decl.getFileScope(), + .parent_decl_node = owner_decl.src_node, .lazy = .{ .node_offset = self.node_offset }, }; } @@ -1035,7 +1018,9 @@ pub const EnumSimple = struct { /// are explicitly provided. pub const EnumNumbered = struct { /// The Decl that corresponds to the enum itself. - owner_decl: *Decl, + owner_decl: Decl.Index, + /// Offset from `owner_decl`, points to the enum decl AST node. + node_offset: i32, /// An integer type which is used for the numerical value of the enum. /// Whether zig chooses this type or the user specifies it, it is stored here. tag_ty: Type, @@ -1045,16 +1030,15 @@ pub const EnumNumbered = struct { /// Entries are in declaration order, same as `fields`. /// If this hash map is empty, it means the enum tags are auto-numbered. values: ValueMap, - /// Offset from `owner_decl`, points to the enum decl AST node. - node_offset: i32, pub const NameMap = EnumFull.NameMap; pub const ValueMap = EnumFull.ValueMap; - pub fn srcLoc(self: EnumNumbered) SrcLoc { + pub fn srcLoc(self: EnumNumbered, mod: *Module) SrcLoc { + const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = self.owner_decl.getFileScope(), - .parent_decl_node = self.owner_decl.src_node, + .file_scope = owner_decl.getFileScope(), + .parent_decl_node = owner_decl.src_node, .lazy = .{ .node_offset = self.node_offset }, }; } @@ -1064,7 +1048,9 @@ pub const EnumNumbered = struct { /// at least one tag value explicitly specified, or at least one declaration. pub const EnumFull = struct { /// The Decl that corresponds to the enum itself. - owner_decl: *Decl, + owner_decl: Decl.Index, + /// Offset from `owner_decl`, points to the enum decl AST node. + node_offset: i32, /// An integer type which is used for the numerical value of the enum. /// Whether zig chooses this type or the user specifies it, it is stored here. tag_ty: Type, @@ -1076,26 +1062,23 @@ pub const EnumFull = struct { values: ValueMap, /// Represents the declarations inside this enum. namespace: Namespace, - /// Offset from `owner_decl`, points to the enum decl AST node. - node_offset: i32, /// true if zig inferred this tag type, false if user specified it tag_ty_inferred: bool, pub const NameMap = std.StringArrayHashMapUnmanaged(void); pub const ValueMap = std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false); - pub fn srcLoc(self: EnumFull) SrcLoc { + pub fn srcLoc(self: EnumFull, mod: *Module) SrcLoc { + const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = self.owner_decl.getFileScope(), - .parent_decl_node = self.owner_decl.src_node, + .file_scope = owner_decl.getFileScope(), + .parent_decl_node = owner_decl.src_node, .lazy = .{ .node_offset = self.node_offset }, }; } }; pub const Union = struct { - /// The Decl that corresponds to the union itself. - owner_decl: *Decl, /// An enum type which is used for the tag of the union. /// This type is created even for untagged unions, even when the memory /// layout does not store the tag. @@ -1106,6 +1089,8 @@ pub const Union = struct { fields: Fields, /// Represents the declarations inside this union. namespace: Namespace, + /// The Decl that corresponds to the union itself. 
+ owner_decl: Decl.Index, /// Offset from `owner_decl`, points to the union decl AST node. node_offset: i32, /// Index of the union_decl ZIR instruction. @@ -1145,30 +1130,32 @@ pub const Union = struct { pub const Fields = std.StringArrayHashMapUnmanaged(Field); - pub fn getFullyQualifiedName(s: *Union, gpa: Allocator) ![:0]u8 { - return s.owner_decl.getFullyQualifiedName(gpa); + pub fn getFullyQualifiedName(s: *Union, mod: *Module) ![:0]u8 { + return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod); } - pub fn srcLoc(self: Union) SrcLoc { + pub fn srcLoc(self: Union, mod: *Module) SrcLoc { + const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = self.owner_decl.getFileScope(), - .parent_decl_node = self.owner_decl.src_node, + .file_scope = owner_decl.getFileScope(), + .parent_decl_node = owner_decl.src_node, .lazy = .{ .node_offset = self.node_offset }, }; } - pub fn fieldSrcLoc(u: Union, gpa: Allocator, query: FieldSrcQuery) SrcLoc { + pub fn fieldSrcLoc(u: Union, mod: *Module, query: FieldSrcQuery) SrcLoc { @setCold(true); - const tree = u.owner_decl.getFileScope().getTree(gpa) catch |err| { + const owner_decl = mod.declPtr(u.owner_decl); + const file = owner_decl.getFileScope(); + const tree = file.getTree(mod.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - u.owner_decl.getFileScope().sub_file_path, @errorName(err), + file.sub_file_path, @errorName(err), }); - return u.srcLoc(); + return u.srcLoc(mod); }; - const node = u.owner_decl.relativeToNodeIndex(u.node_offset); + const node = owner_decl.relativeToNodeIndex(u.node_offset); const node_tags = tree.nodes.items(.tag); - const file = u.owner_decl.getFileScope(); switch (node_tags[node]) { .container_decl, .container_decl_trailing, @@ -1348,22 +1335,23 @@ pub const Union = struct { pub const Opaque = struct { /// The Decl that corresponds to the opaque itself. - owner_decl: *Decl, - /// Represents the declarations inside this opaque. - namespace: Namespace, + owner_decl: Decl.Index, /// Offset from `owner_decl`, points to the opaque decl AST node. node_offset: i32, + /// Represents the declarations inside this opaque. + namespace: Namespace, - pub fn srcLoc(self: Opaque) SrcLoc { + pub fn srcLoc(self: Opaque, mod: *Module) SrcLoc { + const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = self.owner_decl.getFileScope(), - .parent_decl_node = self.owner_decl.src_node, + .file_scope = owner_decl.getFileScope(), + .parent_decl_node = owner_decl.src_node, .lazy = .{ .node_offset = self.node_offset }, }; } - pub fn getFullyQualifiedName(s: *Opaque, gpa: Allocator) ![:0]u8 { - return s.owner_decl.getFullyQualifiedName(gpa); + pub fn getFullyQualifiedName(s: *Opaque, mod: *Module) ![:0]u8 { + return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod); } }; @@ -1371,7 +1359,7 @@ pub const Opaque = struct { /// arena allocator. pub const ExternFn = struct { /// The Decl that corresponds to the function itself. - owner_decl: *Decl, + owner_decl: Decl.Index, /// Library name if specified. /// For example `extern "c" fn write(...) usize` would have 'c' as library name. /// Allocated with Module's allocator; outlives the ZIR code. @@ -1389,7 +1377,12 @@ pub const ExternFn = struct { /// instead. pub const Fn = struct { /// The Decl that corresponds to the function itself. - owner_decl: *Decl, + owner_decl: Decl.Index, + /// The ZIR instruction that is a function instruction. Use this to find + /// the body. 
We store this rather than the body directly so that when ZIR + /// is regenerated on update(), we can map this to the new corresponding + /// ZIR instruction. + zir_body_inst: Zir.Inst.Index, /// If this is not null, this function is a generic function instantiation, and /// there is a `TypedValue` here for each parameter of the function. /// Non-comptime parameters are marked with a `generic_poison` for the value. @@ -1403,11 +1396,6 @@ pub const Fn = struct { /// parameter and tells whether it is anytype. /// TODO apply the same enhancement for param_names below to this field. anytype_args: [*]bool, - /// The ZIR instruction that is a function instruction. Use this to find - /// the body. We store this rather than the body directly so that when ZIR - /// is regenerated on update(), we can map this to the new corresponding - /// ZIR instruction. - zir_body_inst: Zir.Inst.Index, /// Prefer to use `getParamName` to access this because of the future improvement /// we want to do mentioned in the TODO below. @@ -1537,8 +1525,9 @@ pub const Fn = struct { return func.param_names[index]; } - pub fn hasInferredErrorSet(func: Fn) bool { - const zir = func.owner_decl.getFileScope().zir; + pub fn hasInferredErrorSet(func: Fn, mod: *Module) bool { + const owner_decl = mod.declPtr(func.owner_decl); + const zir = owner_decl.getFileScope().zir; const zir_tags = zir.instructions.items(.tag); switch (zir_tags[func.zir_body_inst]) { .func => return false, @@ -1556,7 +1545,7 @@ pub const Fn = struct { pub const Var = struct { /// if is_extern == true this is undefined init: Value, - owner_decl: *Decl, + owner_decl: Decl.Index, /// Library name if specified. /// For example `extern "c" var stderrp = ...` would have 'c' as library name. @@ -1576,14 +1565,16 @@ pub const Var = struct { }; pub const DeclAdapter = struct { + mod: *Module, + pub fn hash(self: @This(), s: []const u8) u32 { _ = self; return @truncate(u32, std.hash.Wyhash.hash(0, s)); } - pub fn eql(self: @This(), a: []const u8, b_decl: *Decl, b_index: usize) bool { - _ = self; + pub fn eql(self: @This(), a: []const u8, b_decl_index: Decl.Index, b_index: usize) bool { _ = b_index; + const b_decl = self.mod.declPtr(b_decl_index); return mem.eql(u8, a, mem.sliceTo(b_decl.name, 0)); } }; @@ -1599,25 +1590,30 @@ pub const Namespace = struct { /// Declaration order is preserved via entry order. /// Key memory is owned by `decl.name`. /// Anonymous decls are not stored here; they are kept in `anon_decls` instead. - decls: std.ArrayHashMapUnmanaged(*Decl, void, DeclContext, true) = .{}, + decls: std.ArrayHashMapUnmanaged(Decl.Index, void, DeclContext, true) = .{}, - anon_decls: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{}, + anon_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, /// Key is usingnamespace Decl itself. To find the namespace being included, /// the Decl Value has to be resolved as a Type which has a Namespace. /// Value is whether the usingnamespace decl is marked `pub`. 
- usingnamespace_set: std.AutoHashMapUnmanaged(*Decl, bool) = .{}, + usingnamespace_set: std.AutoHashMapUnmanaged(Decl.Index, bool) = .{}, const DeclContext = struct { - pub fn hash(self: @This(), decl: *Decl) u32 { - _ = self; + module: *Module, + + pub fn hash(ctx: @This(), decl_index: Decl.Index) u32 { + const decl = ctx.module.declPtr(decl_index); return @truncate(u32, std.hash.Wyhash.hash(0, mem.sliceTo(decl.name, 0))); } - pub fn eql(self: @This(), a: *Decl, b: *Decl, b_index: usize) bool { - _ = self; + pub fn eql(ctx: @This(), a_decl_index: Decl.Index, b_decl_index: Decl.Index, b_index: usize) bool { _ = b_index; - return mem.eql(u8, mem.sliceTo(a.name, 0), mem.sliceTo(b.name, 0)); + const a_decl = ctx.module.declPtr(a_decl_index); + const b_decl = ctx.module.declPtr(b_decl_index); + const a_name = mem.sliceTo(a_decl.name, 0); + const b_name = mem.sliceTo(b_decl.name, 0); + return mem.eql(u8, a_name, b_name); } }; @@ -1637,13 +1633,13 @@ pub const Namespace = struct { var anon_decls = ns.anon_decls; ns.anon_decls = .{}; - for (decls.keys()) |decl| { - decl.destroy(mod); + for (decls.keys()) |decl_index| { + mod.destroyDecl(decl_index); } decls.deinit(gpa); for (anon_decls.keys()) |key| { - key.destroy(mod); + mod.destroyDecl(key); } anon_decls.deinit(gpa); ns.usingnamespace_set.deinit(gpa); @@ -1652,7 +1648,7 @@ pub const Namespace = struct { pub fn deleteAllDecls( ns: *Namespace, mod: *Module, - outdated_decls: ?*std.AutoArrayHashMap(*Decl, void), + outdated_decls: ?*std.AutoArrayHashMap(Decl.Index, void), ) !void { const gpa = mod.gpa; @@ -1669,13 +1665,13 @@ pub const Namespace = struct { for (decls.keys()) |child_decl| { mod.clearDecl(child_decl, outdated_decls) catch @panic("out of memory"); - child_decl.destroy(mod); + mod.destroyDecl(child_decl); } decls.deinit(gpa); for (anon_decls.keys()) |child_decl| { mod.clearDecl(child_decl, outdated_decls) catch @panic("out of memory"); - child_decl.destroy(mod); + mod.destroyDecl(child_decl); } anon_decls.deinit(gpa); @@ -1685,12 +1681,14 @@ pub const Namespace = struct { // This renders e.g. "std.fs.Dir.OpenOptions" pub fn renderFullyQualifiedName( ns: Namespace, + mod: *Module, name: []const u8, writer: anytype, ) @TypeOf(writer).Error!void { if (ns.parent) |parent| { - const decl = ns.getDecl(); - try parent.renderFullyQualifiedName(mem.sliceTo(decl.name, 0), writer); + const decl_index = ns.getDeclIndex(); + const decl = mod.declPtr(decl_index); + try parent.renderFullyQualifiedName(mod, mem.sliceTo(decl.name, 0), writer); } else { try ns.file_scope.renderFullyQualifiedName(writer); } @@ -1703,13 +1701,15 @@ pub const Namespace = struct { /// This renders e.g. "std/fs.zig:Dir.OpenOptions" pub fn renderFullyQualifiedDebugName( ns: Namespace, + mod: *Module, name: []const u8, writer: anytype, ) @TypeOf(writer).Error!void { var separator_char: u8 = '.'; if (ns.parent) |parent| { - const decl = ns.getDecl(); - try parent.renderFullyQualifiedDebugName(mem.sliceTo(decl.name, 0), writer); + const decl_index = ns.getDeclIndex(); + const decl = mod.declPtr(decl_index); + try parent.renderFullyQualifiedDebugName(mod, mem.sliceTo(decl.name, 0), writer); } else { try ns.file_scope.renderFullyQualifiedDebugName(writer); separator_char = ':'; @@ -1720,12 +1720,14 @@ pub const Namespace = struct { } } - pub fn getDecl(ns: Namespace) *Decl { + pub fn getDeclIndex(ns: Namespace) Decl.Index { return ns.ty.getOwnerDecl(); } }; pub const File = struct { + /// The Decl of the struct that represents this File. 
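Because `Namespace.decls` now stores only `Decl.Index` keys, the hash map can no longer reach a decl's name on its own; both `DeclAdapter` (for lookups by name) and `DeclContext` (for hashing and comparing stored keys) must carry a `*Module` and go through `declPtr`. Adapted lookups need only the adapter, while insertions also need the context, which is why `scanDecl` below switches to `getOrPutContextAdapted`. A small usage sketch, modeled on the `getKeyAdapted` call in `populateTestFunctions` later in this diff; the name "foo" is a stand-in and the fragment assumes an enclosing function returning an optional:

    // Sketch: find a decl by name, then resolve the index to a pointer.
    const decl_index = namespace.decls.getKeyAdapted(
        @as([]const u8, "foo"),
        DeclAdapter{ .mod = mod },
    ) orelse return null;
    const decl = mod.declPtr(decl_index);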
+ root_decl: Decl.OptionalIndex, status: enum { never_loaded, retryable_failure, @@ -1749,16 +1751,14 @@ pub const File = struct { zir: Zir, /// Package that this file is a part of, managed externally. pkg: *Package, - /// The Decl of the struct that represents this File. - root_decl: ?*Decl, /// Used by change detection algorithm, after astgen, contains the /// set of decls that existed in the previous ZIR but not in the new one. - deleted_decls: std.ArrayListUnmanaged(*Decl) = .{}, + deleted_decls: std.ArrayListUnmanaged(Decl.Index) = .{}, /// Used by change detection algorithm, after astgen, contains the /// set of decls that existed both in the previous ZIR and in the new one, /// but their source code has been modified. - outdated_decls: std.ArrayListUnmanaged(*Decl) = .{}, + outdated_decls: std.ArrayListUnmanaged(Decl.Index) = .{}, /// The most recent successful ZIR for this file, with no errors. /// This is only populated when a previously successful ZIR @@ -1798,8 +1798,8 @@ pub const File = struct { log.debug("deinit File {s}", .{file.sub_file_path}); file.deleted_decls.deinit(gpa); file.outdated_decls.deinit(gpa); - if (file.root_decl) |root_decl| { - root_decl.destroy(mod); + if (file.root_decl.unwrap()) |root_decl| { + mod.destroyDecl(root_decl); } gpa.free(file.sub_file_path); file.unload(gpa); @@ -1932,7 +1932,7 @@ pub const EmbedFile = struct { /// The Decl that was created from the `@embedFile` to own this resource. /// This is how zig knows what other Decl objects to invalidate if the file /// changes on disk. - owner_decl: *Decl, + owner_decl: Decl.Index, fn destroy(embed_file: *EmbedFile, mod: *Module) void { const gpa = mod.gpa; @@ -2776,6 +2776,7 @@ pub fn deinit(mod: *Module) void { } emit_h.failed_decls.deinit(gpa); emit_h.decl_table.deinit(gpa); + emit_h.allocated_emit_h.deinit(gpa); gpa.destroy(emit_h); } @@ -2827,6 +2828,52 @@ pub fn deinit(mod: *Module) void { } mod.memoized_calls.deinit(gpa); } + + mod.decls_free_list.deinit(gpa); + mod.allocated_decls.deinit(gpa); +} + +pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { + const gpa = mod.gpa; + { + const decl = mod.declPtr(decl_index); + log.debug("destroy {*} ({s})", .{ decl, decl.name }); + _ = mod.test_functions.swapRemove(decl_index); + if (decl.deletion_flag) { + assert(mod.deletion_set.swapRemove(decl_index)); + } + if (decl.has_tv) { + if (decl.getInnerNamespace()) |namespace| { + namespace.destroyDecls(mod); + } + decl.clearValues(gpa); + } + decl.dependants.deinit(gpa); + decl.dependencies.deinit(gpa); + decl.clearName(gpa); + decl.* = undefined; + } + mod.decls_free_list.append(gpa, decl_index) catch { + // In order to keep `destroyDecl` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Decl until garbage collection. + }; + if (mod.emit_h) |mod_emit_h| { + const decl_emit_h = mod_emit_h.declPtr(decl_index); + decl_emit_h.fwd_decl.deinit(gpa); + decl_emit_h.* = undefined; + } +} + +pub fn declPtr(mod: *Module, decl_index: Decl.Index) *Decl { + return mod.allocated_decls.at(@enumToInt(decl_index)); +} + +/// Returns true if and only if the Decl is the top level struct associated with a File. 
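`declPtr` just above is the single point where an index turns back into a pointer. Its `.at()` call is consistent with `allocated_decls` being backed by the SegmentedList reintroduced at the start of this series, which fits this use case well: appending new decls never moves existing ones, so a `*Decl` obtained from `declPtr` stays valid while the index remains the compact, table-friendly handle. The fields this implies on `Module` would look roughly like the following (a sketch; the actual declarations land elsewhere in this patch):

    // Sketch: storage behind declPtr. SegmentedList(T, 0).at(i) returns a
    // pointer that stays stable across later appends; indices of destroyed
    // decls are parked in a free list for reuse.
    allocated_decls: std.SegmentedList(Decl, 0) = .{},
    decls_free_list: std.ArrayListUnmanaged(Decl.Index) = .{},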
+pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool { + const decl = mod.declPtr(decl_index); + if (decl.src_namespace.parent != null) + return false; + return decl_index == decl.src_namespace.getDeclIndex(); } fn freeExportList(gpa: Allocator, export_list: []*Export) void { @@ -3230,14 +3277,14 @@ pub fn astGenFile(mod: *Module, file: *File) !void { // We do not need to hold any locks at this time because all the Decl and Namespace // objects being touched are specific to this File, and the only other concurrent // tasks are touching other File objects. - try updateZirRefs(gpa, file, prev_zir.*); + try updateZirRefs(mod, file, prev_zir.*); // At this point, `file.outdated_decls` and `file.deleted_decls` are populated, // and semantic analysis will deal with them properly. // No need to keep previous ZIR. prev_zir.deinit(gpa); gpa.destroy(prev_zir); file.prev_zir = null; - } else if (file.root_decl) |root_decl| { + } else if (file.root_decl.unwrap()) |root_decl| { // This is an update, but it is the first time the File has succeeded // ZIR. We must mark it outdated since we have already tried to // semantically analyze it. @@ -3251,7 +3298,8 @@ pub fn astGenFile(mod: *Module, file: *File) !void { /// * Decl.zir_index /// * Fn.zir_body_inst /// * Decl.zir_decl_index -fn updateZirRefs(gpa: Allocator, file: *File, old_zir: Zir) !void { +fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { + const gpa = mod.gpa; const new_zir = file.zir; // Maps from old ZIR to new ZIR, struct_decl, enum_decl, etc. Any instruction which @@ -3268,10 +3316,10 @@ fn updateZirRefs(gpa: Allocator, file: *File, old_zir: Zir) !void { // Walk the Decl graph, updating ZIR indexes, strings, and populating // the deleted and outdated lists. - var decl_stack: std.ArrayListUnmanaged(*Decl) = .{}; + var decl_stack: std.ArrayListUnmanaged(Decl.Index) = .{}; defer decl_stack.deinit(gpa); - const root_decl = file.root_decl.?; + const root_decl = file.root_decl.unwrap().?; try decl_stack.append(gpa, root_decl); file.deleted_decls.clearRetainingCapacity(); @@ -3281,7 +3329,8 @@ fn updateZirRefs(gpa: Allocator, file: *File, old_zir: Zir) !void { // to re-generate ZIR for the File. try file.outdated_decls.append(gpa, root_decl); - while (decl_stack.popOrNull()) |decl| { + while (decl_stack.popOrNull()) |decl_index| { + const decl = mod.declPtr(decl_index); // Anonymous decls and the root decl have this set to 0. We still need // to walk them but we do not need to modify this value. // Anonymous decls should not be marked outdated. 
They will be re-generated @@ -3292,7 +3341,7 @@ fn updateZirRefs(gpa: Allocator, file: *File, old_zir: Zir) !void { log.debug("updateZirRefs {s}: delete {*} ({s})", .{ file.sub_file_path, decl, decl.name, }); - try file.deleted_decls.append(gpa, decl); + try file.deleted_decls.append(gpa, decl_index); continue; }; const old_hash = decl.contentsHashZir(old_zir); @@ -3302,7 +3351,7 @@ fn updateZirRefs(gpa: Allocator, file: *File, old_zir: Zir) !void { log.debug("updateZirRefs {s}: outdated {*} ({s}) {d} => {d}", .{ file.sub_file_path, decl, decl.name, old_zir_decl_index, new_zir_decl_index, }); - try file.outdated_decls.append(gpa, decl); + try file.outdated_decls.append(gpa, decl_index); } else { log.debug("updateZirRefs {s}: unchanged {*} ({s}) {d} => {d}", .{ file.sub_file_path, decl, decl.name, old_zir_decl_index, new_zir_decl_index, @@ -3314,21 +3363,21 @@ fn updateZirRefs(gpa: Allocator, file: *File, old_zir: Zir) !void { if (decl.getStruct()) |struct_obj| { struct_obj.zir_index = inst_map.get(struct_obj.zir_index) orelse { - try file.deleted_decls.append(gpa, decl); + try file.deleted_decls.append(gpa, decl_index); continue; }; } if (decl.getUnion()) |union_obj| { union_obj.zir_index = inst_map.get(union_obj.zir_index) orelse { - try file.deleted_decls.append(gpa, decl); + try file.deleted_decls.append(gpa, decl_index); continue; }; } if (decl.getFunction()) |func| { func.zir_body_inst = inst_map.get(func.zir_body_inst) orelse { - try file.deleted_decls.append(gpa, decl); + try file.deleted_decls.append(gpa, decl_index); continue; }; } @@ -3485,10 +3534,12 @@ pub fn mapOldZirToNew( /// However the resolution status of the Type may not be fully resolved. /// For example an inferred error set is not resolved until after `analyzeFnBody`. /// is called. -pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void { +pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { const tracy = trace(@src()); defer tracy.end(); + const decl = mod.declPtr(decl_index); + const subsequent_analysis = switch (decl.analysis) { .in_progress => unreachable, @@ -3507,15 +3558,16 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void { // The exports this Decl performs will be re-discovered, so we remove them here // prior to re-analysis. - mod.deleteDeclExports(decl); + mod.deleteDeclExports(decl_index); // Dependencies will be re-discovered, so we remove them here prior to re-analysis. 
- for (decl.dependencies.keys()) |dep| { - dep.removeDependant(decl); + for (decl.dependencies.keys()) |dep_index| { + const dep = mod.declPtr(dep_index); + dep.removeDependant(decl_index); if (dep.dependants.count() == 0 and !dep.deletion_flag) { log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{ decl, decl.name, dep, dep.name, }); - try mod.markDeclForDeletion(dep); + try mod.markDeclForDeletion(dep_index); } } decl.dependencies.clearRetainingCapacity(); @@ -3530,7 +3582,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void { decl_prog_node.activate(); defer decl_prog_node.end(); - const type_changed = mod.semaDecl(decl) catch |err| switch (err) { + const type_changed = mod.semaDecl(decl_index) catch |err| switch (err) { error.AnalysisFail => { if (decl.analysis == .in_progress) { // If this decl caused the compile error, the analysis field would @@ -3545,7 +3597,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void { else => |e| { decl.analysis = .sema_failure_retryable; try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1); - mod.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create( + mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( mod.gpa, decl.srcLoc(), "unable to analyze: {s}", @@ -3559,7 +3611,8 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void { // We may need to chase the dependants and re-analyze them. // However, if the decl is a function, and the type is the same, we do not need to. if (type_changed or decl.ty.zigTypeTag() != .Fn) { - for (decl.dependants.keys()) |dep| { + for (decl.dependants.keys()) |dep_index| { + const dep = mod.declPtr(dep_index); switch (dep.analysis) { .unreferenced => unreachable, .in_progress => continue, // already doing analysis, ok @@ -3573,7 +3626,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void { .codegen_failure_retryable, .complete, => if (dep.generation != mod.generation) { - try mod.markOutdatedDecl(dep); + try mod.markOutdatedDecl(dep_index); }, } } @@ -3585,7 +3638,10 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { const tracy = trace(@src()); defer tracy.end(); - switch (func.owner_decl.analysis) { + const decl_index = func.owner_decl; + const decl = mod.declPtr(decl_index); + + switch (decl.analysis) { .unreferenced => unreachable, .in_progress => unreachable, .outdated => unreachable, @@ -3607,13 +3663,12 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { } const gpa = mod.gpa; - const decl = func.owner_decl; var tmp_arena = std.heap.ArenaAllocator.init(gpa); defer tmp_arena.deinit(); const sema_arena = tmp_arena.allocator(); - var air = mod.analyzeFnBody(decl, func, sema_arena) catch |err| switch (err) { + var air = mod.analyzeFnBody(func, sema_arena) catch |err| switch (err) { error.AnalysisFail => { if (func.state == .in_progress) { // If this decl caused the compile error, the analysis field would @@ -3635,7 +3690,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { if (builtin.mode == .Debug and mod.comp.verbose_air) { std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name}); - @import("print_air.zig").dump(gpa, air, liveness); + @import("print_air.zig").dump(mod, air, liveness); std.debug.print("# End Function AIR: {s}\n\n", .{decl.name}); } @@ -3647,7 +3702,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { }, else => { try mod.failed_decls.ensureUnusedCapacity(gpa, 1); - 
mod.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( + mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( gpa, decl.srcLoc(), "unable to codegen: {s}", @@ -3668,7 +3723,9 @@ pub fn updateEmbedFile(mod: *Module, embed_file: *EmbedFile) SemaError!void { // TODO we can potentially relax this if we store some more information along // with decl dependency edges - for (embed_file.owner_decl.dependants.keys()) |dep| { + const owner_decl = mod.declPtr(embed_file.owner_decl); + for (owner_decl.dependants.keys()) |dep_index| { + const dep = mod.declPtr(dep_index); switch (dep.analysis) { .unreferenced => unreachable, .in_progress => continue, // already doing analysis, ok @@ -3682,7 +3739,7 @@ pub fn updateEmbedFile(mod: *Module, embed_file: *EmbedFile) SemaError!void { .codegen_failure_retryable, .complete, => if (dep.generation != mod.generation) { - try mod.markOutdatedDecl(dep); + try mod.markOutdatedDecl(dep_index); }, } } @@ -3699,7 +3756,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { const tracy = trace(@src()); defer tracy.end(); - if (file.root_decl != null) return; + if (file.root_decl != .none) return; const gpa = mod.gpa; var new_decl_arena = std.heap.ArenaAllocator.init(gpa); @@ -3724,10 +3781,11 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { .file_scope = file, }, }; - const decl_name = try file.fullyQualifiedNameZ(gpa); - const new_decl = try mod.allocateNewDecl(decl_name, &struct_obj.namespace, 0, null); - file.root_decl = new_decl; - struct_obj.owner_decl = new_decl; + const new_decl_index = try mod.allocateNewDecl(&struct_obj.namespace, 0, null); + const new_decl = mod.declPtr(new_decl_index); + file.root_decl = new_decl_index.toOptional(); + struct_obj.owner_decl = new_decl_index; + new_decl.name = try file.fullyQualifiedNameZ(gpa); new_decl.src_line = 0; new_decl.is_pub = true; new_decl.is_exported = false; @@ -3757,6 +3815,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { .perm_arena = new_decl_arena_allocator, .code = file.zir, .owner_decl = new_decl, + .owner_decl_index = new_decl_index, .func = null, .fn_ret_ty = Type.void, .owner_func = null, @@ -3769,7 +3828,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { var block_scope: Sema.Block = .{ .parent = null, .sema = &sema, - .src_decl = new_decl, + .src_decl = new_decl_index, .namespace = &struct_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, @@ -3808,10 +3867,12 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { /// Returns `true` if the Decl type changed. /// Returns `true` if this is the first time analyzing the Decl. /// Returns `false` otherwise. 
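The conversion of `ensureDeclAnalyzed` above, and of `semaDecl` below, follows the calling convention the whole patch converges on: identity travels between functions and lives in long-lived sets as a `Decl.Index`, and each function resolves a pointer once at the top for local use. A sketch of the shape (the function name is hypothetical; the work-queue call mirrors one used elsewhere in this diff):

    // Sketch: pass indices across function boundaries, resolve once locally.
    fn exampleAnalyze(mod: *Module, decl_index: Decl.Index) !void {
        const decl = mod.declPtr(decl_index);
        // Use `decl` for field access within this call...
        log.debug("analyzing {s}", .{decl.name});
        // ...but store only `decl_index` in maps, queues, and work items.
        try mod.comp.work_queue.writeItem(.{ .analyze_decl = decl_index });
    }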
-fn semaDecl(mod: *Module, decl: *Decl) !bool { +fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { const tracy = trace(@src()); defer tracy.end(); + const decl = mod.declPtr(decl_index); + if (decl.getFileScope().status != .success_zir) { return error.AnalysisFail; } @@ -3838,13 +3899,14 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, + .owner_decl_index = decl_index, .func = null, .fn_ret_ty = Type.void, .owner_func = null, }; defer sema.deinit(); - if (decl.isRoot()) { + if (mod.declIsRoot(decl_index)) { log.debug("semaDecl root {*} ({s})", .{ decl, decl.name }); const main_struct_inst = Zir.main_struct_inst; const struct_obj = decl.getStruct().?; @@ -3864,7 +3926,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { var block_scope: Sema.Block = .{ .parent = null, .sema = &sema, - .src_decl = decl, + .src_decl = decl_index, .namespace = decl.src_namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, @@ -3922,15 +3984,15 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { const decl_arena_state = try decl_arena_allocator.create(std.heap.ArenaAllocator.State); if (decl.is_usingnamespace) { - if (!decl_tv.ty.eql(Type.type, target)) { + if (!decl_tv.ty.eql(Type.type, mod)) { return sema.fail(&block_scope, src, "expected type, found {}", .{ - decl_tv.ty.fmt(target), + decl_tv.ty.fmt(mod), }); } var buffer: Value.ToTypeBuffer = undefined; const ty = try decl_tv.val.toType(&buffer).copy(decl_arena_allocator); if (ty.getNamespace() == null) { - return sema.fail(&block_scope, src, "type {} has no namespace", .{ty.fmt(target)}); + return sema.fail(&block_scope, src, "type {} has no namespace", .{ty.fmt(mod)}); } decl.ty = Type.type; @@ -3949,7 +4011,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { if (decl_tv.val.castTag(.function)) |fn_payload| { const func = fn_payload.data; - const owns_tv = func.owner_decl == decl; + const owns_tv = func.owner_decl == decl_index; if (owns_tv) { var prev_type_has_bits = false; var prev_is_inline = false; @@ -3957,7 +4019,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { if (decl.has_tv) { prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(); - type_changed = !decl.ty.eql(decl_tv.ty, target); + type_changed = !decl.ty.eql(decl_tv.ty, mod); if (decl.getFunction()) |prev_func| { prev_is_inline = prev_func.state == .inline_only; } @@ -3982,13 +4044,13 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { // We don't fully codegen the decl until later, but we do need to reserve a global // offset table index for it. This allows us to codegen decls out of dependency // order, increasing how many computations can be done in parallel. - try mod.comp.bin_file.allocateDeclIndexes(decl); + try mod.comp.bin_file.allocateDeclIndexes(decl_index); try mod.comp.work_queue.writeItem(.{ .codegen_func = func }); if (type_changed and mod.emit_h != null) { - try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); + try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index }); } } else if (!prev_is_inline and prev_type_has_bits) { - mod.comp.bin_file.freeDecl(decl); + mod.comp.bin_file.freeDecl(decl_index); } const is_inline = decl.ty.fnCallingConvention() == .Inline; @@ -3999,14 +4061,14 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { } // The scope needs to have the decl in it. 
const options: std.builtin.ExportOptions = .{ .name = mem.sliceTo(decl.name, 0) }; - try sema.analyzeExport(&block_scope, export_src, options, decl); + try sema.analyzeExport(&block_scope, export_src, options, decl_index); } return type_changed or is_inline != prev_is_inline; } } var type_changed = true; if (decl.has_tv) { - type_changed = !decl.ty.eql(decl_tv.ty, target); + type_changed = !decl.ty.eql(decl_tv.ty, mod); decl.clearValues(gpa); } @@ -4016,7 +4078,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { switch (decl_tv.val.tag()) { .variable => { const variable = decl_tv.val.castTag(.variable).?.data; - if (variable.owner_decl == decl) { + if (variable.owner_decl == decl_index) { decl.owns_tv = true; queue_linker_work = true; @@ -4026,7 +4088,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { }, .extern_fn => { const extern_fn = decl_tv.val.castTag(.extern_fn).?.data; - if (extern_fn.owner_decl == decl) { + if (extern_fn.owner_decl == decl_index) { decl.owns_tv = true; queue_linker_work = true; is_extern = true; @@ -4065,11 +4127,11 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { // codegen backend wants full access to the Decl Type. try sema.resolveTypeFully(&block_scope, src, decl.ty); - try mod.comp.bin_file.allocateDeclIndexes(decl); - try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl }); + try mod.comp.bin_file.allocateDeclIndexes(decl_index); + try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); if (type_changed and mod.emit_h != null) { - try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); + try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index }); } } @@ -4077,15 +4139,18 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { const export_src = src; // TODO point to the export token // The scope needs to have the decl in it. const options: std.builtin.ExportOptions = .{ .name = mem.sliceTo(decl.name, 0) }; - try sema.analyzeExport(&block_scope, export_src, options, decl); + try sema.analyzeExport(&block_scope, export_src, options, decl_index); } return type_changed; } /// Returns the depender's index of the dependee. 
-pub fn declareDeclDependency(mod: *Module, depender: *Decl, dependee: *Decl) !void { - if (depender == dependee) return; +pub fn declareDeclDependency(mod: *Module, depender_index: Decl.Index, dependee_index: Decl.Index) !void { + if (depender_index == dependee_index) return; + + const depender = mod.declPtr(depender_index); + const dependee = mod.declPtr(dependee_index); log.debug("{*} ({s}) depends on {*} ({s})", .{ depender, depender.name, dependee, dependee.name, @@ -4096,11 +4161,11 @@ pub fn declareDeclDependency(mod: *Module, depender: *Decl, dependee: *Decl) !vo if (dependee.deletion_flag) { dependee.deletion_flag = false; - assert(mod.deletion_set.swapRemove(dependee)); + assert(mod.deletion_set.swapRemove(dependee_index)); } - dependee.dependants.putAssumeCapacity(depender, {}); - depender.dependencies.putAssumeCapacity(dependee, {}); + dependee.dependants.putAssumeCapacity(depender_index, {}); + depender.dependencies.putAssumeCapacity(dependee_index, {}); } pub const ImportFileResult = struct { @@ -4146,7 +4211,7 @@ pub fn importPkg(mod: *Module, pkg: *Package) !ImportFileResult { .zir = undefined, .status = .never_loaded, .pkg = pkg, - .root_decl = null, + .root_decl = .none, }; return ImportFileResult{ .file = new_file, @@ -4214,7 +4279,7 @@ pub fn importFile( .zir = undefined, .status = .never_loaded, .pkg = cur_file.pkg, - .root_decl = null, + .root_decl = .none, }; return ImportFileResult{ .file = new_file, @@ -4388,8 +4453,8 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi const line = iter.parent_decl.relativeToLine(line_off); const decl_name_index = zir.extra[decl_sub_index + 5]; const decl_doccomment_index = zir.extra[decl_sub_index + 7]; - const decl_index = zir.extra[decl_sub_index + 6]; - const decl_block_inst_data = zir.instructions.items(.data)[decl_index].pl_node; + const decl_zir_index = zir.extra[decl_sub_index + 6]; + const decl_block_inst_data = zir.instructions.items(.data)[decl_zir_index].pl_node; const decl_node = iter.parent_decl.relativeToNodeIndex(decl_block_inst_data.src_node); // Every Decl needs a name. @@ -4432,15 +4497,22 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi if (is_usingnamespace) try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1); // We create a Decl for it regardless of analysis status. - const gop = try namespace.decls.getOrPutAdapted(gpa, @as([]const u8, mem.sliceTo(decl_name, 0)), DeclAdapter{}); + const gop = try namespace.decls.getOrPutContextAdapted( + gpa, + @as([]const u8, mem.sliceTo(decl_name, 0)), + DeclAdapter{ .mod = mod }, + Namespace.DeclContext{ .module = mod }, + ); if (!gop.found_existing) { - const new_decl = try mod.allocateNewDecl(decl_name, namespace, decl_node, iter.parent_decl.src_scope); + const new_decl_index = try mod.allocateNewDecl(namespace, decl_node, iter.parent_decl.src_scope); + const new_decl = mod.declPtr(new_decl_index); + new_decl.name = decl_name; if (is_usingnamespace) { - namespace.usingnamespace_set.putAssumeCapacity(new_decl, is_pub); + namespace.usingnamespace_set.putAssumeCapacity(new_decl_index, is_pub); } log.debug("scan new {*} ({s}) into {*}", .{ new_decl, decl_name, namespace }); new_decl.src_line = line; - gop.key_ptr.* = new_decl; + gop.key_ptr.* = new_decl_index; // Exported decls, comptime decls, usingnamespace decls, and // test decls if in test mode, get analyzed. 
const decl_pkg = namespace.file_scope.pkg; @@ -4451,7 +4523,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi // the test name filter. if (!mod.comp.bin_file.options.is_test) break :blk false; if (decl_pkg != mod.main_pkg) break :blk false; - try mod.test_functions.put(gpa, new_decl, {}); + try mod.test_functions.put(gpa, new_decl_index, {}); break :blk true; }, else => blk: { @@ -4459,12 +4531,12 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi if (!mod.comp.bin_file.options.is_test) break :blk false; if (decl_pkg != mod.main_pkg) break :blk false; // TODO check the name against --test-filter - try mod.test_functions.put(gpa, new_decl, {}); + try mod.test_functions.put(gpa, new_decl_index, {}); break :blk true; }, }; if (want_analysis) { - mod.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl }); + mod.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl_index }); } new_decl.is_pub = is_pub; new_decl.is_exported = is_exported; @@ -4476,7 +4548,8 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi return; } gpa.free(decl_name); - const decl = gop.key_ptr.*; + const decl_index = gop.key_ptr.*; + const decl = mod.declPtr(decl_index); log.debug("scan existing {*} ({s}) of {*}", .{ decl, decl.name, namespace }); // Update the AST node of the decl; even if its contents are unchanged, it may // have been re-ordered. @@ -4497,17 +4570,17 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi .elf => if (decl.fn_link.elf.len != 0) { // TODO Look into detecting when this would be unnecessary by storing enough state // in `Decl` to notice that the line number did not change. - mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl }); + mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index }); }, .macho => if (decl.fn_link.macho.len != 0) { // TODO Look into detecting when this would be unnecessary by storing enough state // in `Decl` to notice that the line number did not change. - mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl }); + mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index }); }, .plan9 => { // TODO Look into detecting when this would be unnecessary by storing enough state // in `Decl` to notice that the line number did not change. - mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl }); + mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index }); }, .c, .wasm, .spirv, .nvptx => {}, } @@ -4517,25 +4590,27 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi /// Make it as if the semantic analysis for this Decl never happened. pub fn clearDecl( mod: *Module, - decl: *Decl, - outdated_decls: ?*std.AutoArrayHashMap(*Decl, void), + decl_index: Decl.Index, + outdated_decls: ?*std.AutoArrayHashMap(Decl.Index, void), ) Allocator.Error!void { const tracy = trace(@src()); defer tracy.end(); + const decl = mod.declPtr(decl_index); log.debug("clearing {*} ({s})", .{ decl, decl.name }); const gpa = mod.gpa; try mod.deletion_set.ensureUnusedCapacity(gpa, decl.dependencies.count()); if (outdated_decls) |map| { - _ = map.swapRemove(decl); + _ = map.swapRemove(decl_index); try map.ensureUnusedCapacity(decl.dependants.count()); } // Remove itself from its dependencies. 
- for (decl.dependencies.keys()) |dep| { - dep.removeDependant(decl); + for (decl.dependencies.keys()) |dep_index| { + const dep = mod.declPtr(dep_index); + dep.removeDependant(decl_index); if (dep.dependants.count() == 0 and !dep.deletion_flag) { log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{ decl, decl.name, dep, dep.name, @@ -4543,35 +4618,36 @@ pub fn clearDecl( // We don't recursively perform a deletion here, because during the update, // another reference to it may turn up. dep.deletion_flag = true; - mod.deletion_set.putAssumeCapacity(dep, {}); + mod.deletion_set.putAssumeCapacity(dep_index, {}); } } decl.dependencies.clearRetainingCapacity(); // Anything that depends on this deleted decl needs to be re-analyzed. - for (decl.dependants.keys()) |dep| { - dep.removeDependency(decl); + for (decl.dependants.keys()) |dep_index| { + const dep = mod.declPtr(dep_index); + dep.removeDependency(decl_index); if (outdated_decls) |map| { - map.putAssumeCapacity(dep, {}); + map.putAssumeCapacity(dep_index, {}); } } decl.dependants.clearRetainingCapacity(); - if (mod.failed_decls.fetchSwapRemove(decl)) |kv| { + if (mod.failed_decls.fetchSwapRemove(decl_index)) |kv| { kv.value.destroy(gpa); } if (mod.emit_h) |emit_h| { - if (emit_h.failed_decls.fetchSwapRemove(decl)) |kv| { + if (emit_h.failed_decls.fetchSwapRemove(decl_index)) |kv| { kv.value.destroy(gpa); } - assert(emit_h.decl_table.swapRemove(decl)); + assert(emit_h.decl_table.swapRemove(decl_index)); } - _ = mod.compile_log_decls.swapRemove(decl); - mod.deleteDeclExports(decl); + _ = mod.compile_log_decls.swapRemove(decl_index); + mod.deleteDeclExports(decl_index); if (decl.has_tv) { if (decl.ty.isFnOrHasRuntimeBits()) { - mod.comp.bin_file.freeDecl(decl); + mod.comp.bin_file.freeDecl(decl_index); // TODO instead of a union, put this memory trailing Decl objects, // and allow it to be variably sized. @@ -4604,15 +4680,16 @@ pub fn clearDecl( if (decl.deletion_flag) { decl.deletion_flag = false; - assert(mod.deletion_set.swapRemove(decl)); + assert(mod.deletion_set.swapRemove(decl_index)); } decl.analysis = .unreferenced; } /// This function is exclusively called for anonymous decls. 
-pub fn deleteUnusedDecl(mod: *Module, decl: *Decl) void { - log.debug("deleteUnusedDecl {*} ({s})", .{ decl, decl.name }); +pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void { + const decl = mod.declPtr(decl_index); + log.debug("deleteUnusedDecl {d} ({s})", .{ decl_index, decl.name }); // TODO: remove `allocateDeclIndexes` and make the API that the linker backends // are required to notice the first time `updateDecl` happens and keep track @@ -4626,55 +4703,58 @@ pub fn deleteUnusedDecl(mod: *Module, decl: *Decl) void { .c => {}, // this linker backend has already migrated to the new API else => if (decl.has_tv) { if (decl.ty.isFnOrHasRuntimeBits()) { - mod.comp.bin_file.freeDecl(decl); + mod.comp.bin_file.freeDecl(decl_index); } }, } - assert(!decl.isRoot()); - assert(decl.src_namespace.anon_decls.swapRemove(decl)); + assert(!mod.declIsRoot(decl_index)); + assert(decl.src_namespace.anon_decls.swapRemove(decl_index)); const dependants = decl.dependants.keys(); for (dependants) |dep| { - dep.removeDependency(decl); + mod.declPtr(dep).removeDependency(decl_index); } for (decl.dependencies.keys()) |dep| { - dep.removeDependant(decl); + mod.declPtr(dep).removeDependant(decl_index); } - decl.destroy(mod); + mod.destroyDecl(decl_index); } /// We don't perform a deletion here, because this Decl or another one /// may end up referencing it before the update is complete. -fn markDeclForDeletion(mod: *Module, decl: *Decl) !void { +fn markDeclForDeletion(mod: *Module, decl_index: Decl.Index) !void { + const decl = mod.declPtr(decl_index); decl.deletion_flag = true; - try mod.deletion_set.put(mod.gpa, decl, {}); + try mod.deletion_set.put(mod.gpa, decl_index, {}); } /// Cancel the creation of an anon decl and delete any references to it. /// If other decls depend on this decl, they must be aborted first. -pub fn abortAnonDecl(mod: *Module, decl: *Decl) void { +pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { + const decl = mod.declPtr(decl_index); log.debug("abortAnonDecl {*} ({s})", .{ decl, decl.name }); - assert(!decl.isRoot()); - assert(decl.src_namespace.anon_decls.swapRemove(decl)); + assert(!mod.declIsRoot(decl_index)); + assert(decl.src_namespace.anon_decls.swapRemove(decl_index)); // An aborted decl must not have dependants -- they must have // been aborted first and removed from this list. assert(decl.dependants.count() == 0); - for (decl.dependencies.keys()) |dep| { - dep.removeDependant(decl); + for (decl.dependencies.keys()) |dep_index| { + const dep = mod.declPtr(dep_index); + dep.removeDependant(decl_index); } - decl.destroy(mod); + mod.destroyDecl(decl_index); } /// Delete all the Export objects that are caused by this Decl. Re-analysis of /// this Decl will cause them to be re-created (or not). -fn deleteDeclExports(mod: *Module, decl: *Decl) void { - const kv = mod.export_owners.fetchSwapRemove(decl) orelse return; +fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void { + const kv = mod.export_owners.fetchSwapRemove(decl_index) orelse return; for (kv.value) |exp| { if (mod.decl_exports.getPtr(exp.exported_decl)) |value_ptr| { @@ -4683,7 +4763,7 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void { var i: usize = 0; var new_len = list.len; while (i < new_len) { - if (list[i].owner_decl == decl) { + if (list[i].owner_decl == decl_index) { mem.copyBackwards(*Export, list[i..], list[i + 1 .. 
new_len]); new_len -= 1; } else { @@ -4713,11 +4793,13 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void { mod.gpa.free(kv.value); } -pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) SemaError!Air { +pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { const tracy = trace(@src()); defer tracy.end(); const gpa = mod.gpa; + const decl_index = func.owner_decl; + const decl = mod.declPtr(decl_index); // Use the Decl's arena for captured values. var decl_arena = decl.value_arena.?.promote(gpa); @@ -4731,8 +4813,9 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem .perm_arena = decl_arena_allocator, .code = decl.getFileScope().zir, .owner_decl = decl, + .owner_decl_index = decl_index, .func = func, - .fn_ret_ty = func.owner_decl.ty.fnReturnType(), + .fn_ret_ty = decl.ty.fnReturnType(), .owner_func = func, }; defer sema.deinit(); @@ -4748,7 +4831,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem var inner_block: Sema.Block = .{ .parent = null, .sema = &sema, - .src_decl = decl, + .src_decl = decl_index, .namespace = decl.src_namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, @@ -4903,10 +4986,11 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem }; } -fn markOutdatedDecl(mod: *Module, decl: *Decl) !void { +fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { + const decl = mod.declPtr(decl_index); log.debug("mark outdated {*} ({s})", .{ decl, decl.name }); - try mod.comp.work_queue.writeItem(.{ .analyze_decl = decl }); - if (mod.failed_decls.fetchSwapRemove(decl)) |kv| { + try mod.comp.work_queue.writeItem(.{ .analyze_decl = decl_index }); + if (mod.failed_decls.fetchSwapRemove(decl_index)) |kv| { kv.value.destroy(mod.gpa); } if (decl.has_tv and decl.owns_tv) { @@ -4916,33 +5000,43 @@ fn markOutdatedDecl(mod: *Module, decl: *Decl) !void { } } if (mod.emit_h) |emit_h| { - if (emit_h.failed_decls.fetchSwapRemove(decl)) |kv| { + if (emit_h.failed_decls.fetchSwapRemove(decl_index)) |kv| { kv.value.destroy(mod.gpa); } } - _ = mod.compile_log_decls.swapRemove(decl); + _ = mod.compile_log_decls.swapRemove(decl_index); decl.analysis = .outdated; } pub fn allocateNewDecl( mod: *Module, - name: [:0]const u8, namespace: *Namespace, src_node: Ast.Node.Index, src_scope: ?*CaptureScope, -) !*Decl { - // If we have emit-h then we must allocate a bigger structure to store the emit-h state. 
- const new_decl: *Decl = if (mod.emit_h != null) blk: { - const parent_struct = try mod.gpa.create(DeclPlusEmitH); - parent_struct.* = .{ - .emit_h = .{}, - .decl = undefined, +) !Decl.Index { + const decl_and_index: struct { + new_decl: *Decl, + decl_index: Decl.Index, + } = if (mod.decls_free_list.popOrNull()) |decl_index| d: { + break :d .{ + .new_decl = mod.declPtr(decl_index), + .decl_index = decl_index, }; - break :blk &parent_struct.decl; - } else try mod.gpa.create(Decl); + } else d: { + const decl = try mod.allocated_decls.addOne(mod.gpa); + errdefer mod.allocated_decls.shrinkRetainingCapacity(mod.allocated_decls.len - 1); + if (mod.emit_h) |mod_emit_h| { + const decl_emit_h = try mod_emit_h.allocated_emit_h.addOne(mod.gpa); + decl_emit_h.* = .{}; + } + break :d .{ + .new_decl = decl, + .decl_index = @intToEnum(Decl.Index, mod.allocated_decls.len - 1), + }; + }; - new_decl.* = .{ - .name = name, + decl_and_index.new_decl.* = .{ + .name = undefined, .src_namespace = namespace, .src_node = src_node, .src_line = undefined, @@ -4986,7 +5080,7 @@ pub fn allocateNewDecl( .is_usingnamespace = false, }; - return new_decl; + return decl_and_index.decl_index; } /// Get error value for error tag `name`. @@ -5010,18 +5104,9 @@ pub fn getErrorValue(mod: *Module, name: []const u8) !std.StringHashMapUnmanaged }; } -/// Takes ownership of `name` even if it returns an error. -pub fn createAnonymousDeclNamed( - mod: *Module, - block: *Sema.Block, - typed_value: TypedValue, - name: [:0]u8, -) !*Decl { - return mod.createAnonymousDeclFromDeclNamed(block.src_decl, block.namespace, block.wip_capture_scope, typed_value, name); -} - -pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedValue) !*Decl { - return mod.createAnonymousDeclFromDecl(block.src_decl, block.namespace, block.wip_capture_scope, typed_value); +pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedValue) !Decl.Index { + const src_decl = mod.declPtr(block.src_decl); + return mod.createAnonymousDeclFromDecl(src_decl, block.namespace, block.wip_capture_scope, typed_value); } pub fn createAnonymousDeclFromDecl( @@ -5030,30 +5115,31 @@ pub fn createAnonymousDeclFromDecl( namespace: *Namespace, src_scope: ?*CaptureScope, tv: TypedValue, -) !*Decl { - const name_index = mod.getNextAnonNameIndex(); +) !Decl.Index { + const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node, src_scope); + errdefer mod.destroyDecl(new_decl_index); const name = try std.fmt.allocPrintZ(mod.gpa, "{s}__anon_{d}", .{ - src_decl.name, name_index, + src_decl.name, @enumToInt(new_decl_index), }); - return mod.createAnonymousDeclFromDeclNamed(src_decl, namespace, src_scope, tv, name); + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, tv, name); + return new_decl_index; } /// Takes ownership of `name` even if it returns an error. 
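`allocateNewDecl` above now recycles indices: it pops from `decls_free_list` when possible and only grows `allocated_decls` (and, when emit-h is enabled, the parallel `allocated_emit_h` array, which replaces the old `DeclPlusEmitH` plus `@fieldParentPtr` layout trick) when the free list is empty. Together with `destroyDecl`, that gives the round trip sketched here to make the aliasing hazard explicit; `ns` and `node` are stand-ins:

    // Sketch: index recycling. A Decl.Index is only meaningful while its
    // decl is alive; after destroyDecl, the same index may name a new decl.
    const a = try mod.allocateNewDecl(ns, node, null);
    mod.destroyDecl(a); // parks `a` in decls_free_list
    const b = try mod.allocateNewDecl(ns, node, null); // may yield b == a

One side effect worth noting: anonymous decl names are now derived from the index itself ("{s}__anon_{d}" formatted with `@enumToInt(new_decl_index)`), which is what lets the atomic `getNextAnonNameIndex` counter be deleted below.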
-pub fn createAnonymousDeclFromDeclNamed( +pub fn initNewAnonDecl( mod: *Module, - src_decl: *Decl, + new_decl_index: Decl.Index, + src_line: u32, namespace: *Namespace, - src_scope: ?*CaptureScope, typed_value: TypedValue, name: [:0]u8, -) !*Decl { +) !void { errdefer mod.gpa.free(name); - try namespace.anon_decls.ensureUnusedCapacity(mod.gpa, 1); + const new_decl = mod.declPtr(new_decl_index); - const new_decl = try mod.allocateNewDecl(name, namespace, src_decl.src_node, src_scope); - - new_decl.src_line = src_decl.src_line; + new_decl.name = name; + new_decl.src_line = src_line; new_decl.ty = typed_value.ty; new_decl.val = typed_value.val; new_decl.@"align" = 0; @@ -5062,22 +5148,16 @@ pub fn createAnonymousDeclFromDeclNamed( new_decl.analysis = .complete; new_decl.generation = mod.generation; - namespace.anon_decls.putAssumeCapacityNoClobber(new_decl, {}); + try namespace.anon_decls.putNoClobber(mod.gpa, new_decl_index, {}); // The Decl starts off with alive=false and the codegen backend will set alive=true // if the Decl is referenced by an instruction or another constant. Otherwise, // the Decl will be garbage collected by the `codegen_decl` task instead of sent // to the linker. if (typed_value.ty.isFnOrHasRuntimeBits()) { - try mod.comp.bin_file.allocateDeclIndexes(new_decl); - try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl }); + try mod.comp.bin_file.allocateDeclIndexes(new_decl_index); + try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index }); } - - return new_decl; -} - -pub fn getNextAnonNameIndex(mod: *Module) usize { - return @atomicRmw(usize, &mod.next_anon_name_index, .Add, 1, .Monotonic); } pub fn makeIntType(arena: Allocator, signedness: std.builtin.Signedness, bits: u16) !Type { @@ -5339,12 +5419,12 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void { // for the outdated decls, but we cannot queue up the tasks until after // we find out which ones have been deleted, otherwise there would be // deleted Decl pointers in the work queue. - var outdated_decls = std.AutoArrayHashMap(*Decl, void).init(mod.gpa); + var outdated_decls = std.AutoArrayHashMap(Decl.Index, void).init(mod.gpa); defer outdated_decls.deinit(); for (mod.import_table.values()) |file| { try outdated_decls.ensureUnusedCapacity(file.outdated_decls.items.len); - for (file.outdated_decls.items) |decl| { - outdated_decls.putAssumeCapacity(decl, {}); + for (file.outdated_decls.items) |decl_index| { + outdated_decls.putAssumeCapacity(decl_index, {}); } file.outdated_decls.clearRetainingCapacity(); @@ -5356,15 +5436,16 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void { // it may be both in this `deleted_decls` set, as well as in the // `Module.deletion_set`. To avoid deleting it twice, we remove it from the // deletion set at this time. - for (file.deleted_decls.items) |decl| { + for (file.deleted_decls.items) |decl_index| { + const decl = mod.declPtr(decl_index); log.debug("deleted from source: {*} ({s})", .{ decl, decl.name }); // Remove from the namespace it resides in, preserving declaration order. 
assert(decl.zir_decl_index != 0); - _ = decl.src_namespace.decls.orderedRemoveAdapted(@as([]const u8, mem.sliceTo(decl.name, 0)), DeclAdapter{}); + _ = decl.src_namespace.decls.orderedRemoveAdapted(@as([]const u8, mem.sliceTo(decl.name, 0)), DeclAdapter{ .mod = mod }); - try mod.clearDecl(decl, &outdated_decls); - decl.destroy(mod); + try mod.clearDecl(decl_index, &outdated_decls); + mod.destroyDecl(decl_index); } file.deleted_decls.clearRetainingCapacity(); } @@ -5393,13 +5474,13 @@ pub fn processExports(mod: *Module) !void { if (gop.found_existing) { new_export.status = .failed_retryable; try mod.failed_exports.ensureUnusedCapacity(gpa, 1); - const src_loc = new_export.getSrcLoc(); + const src_loc = new_export.getSrcLoc(mod); const msg = try ErrorMsg.create(gpa, src_loc, "exported symbol collision: {s}", .{ new_export.options.name, }); errdefer msg.destroy(gpa); const other_export = gop.value_ptr.*; - const other_src_loc = other_export.getSrcLoc(); + const other_src_loc = other_export.getSrcLoc(mod); try mod.errNoteNonLazy(other_src_loc, msg, "other symbol here", .{}); mod.failed_exports.putAssumeCapacityNoClobber(new_export, msg); new_export.status = .failed; @@ -5413,7 +5494,7 @@ pub fn processExports(mod: *Module) !void { const new_export = exports[0]; new_export.status = .failed_retryable; try mod.failed_exports.ensureUnusedCapacity(gpa, 1); - const src_loc = new_export.getSrcLoc(); + const src_loc = new_export.getSrcLoc(mod); const msg = try ErrorMsg.create(gpa, src_loc, "unable to export: {s}", .{ @errorName(err), }); @@ -5427,12 +5508,14 @@ pub fn populateTestFunctions(mod: *Module) !void { const gpa = mod.gpa; const builtin_pkg = mod.main_pkg.table.get("builtin").?; const builtin_file = (mod.importPkg(builtin_pkg) catch unreachable).file; - const builtin_namespace = builtin_file.root_decl.?.src_namespace; - const decl = builtin_namespace.decls.getKeyAdapted(@as([]const u8, "test_functions"), DeclAdapter{}).?; + const root_decl = mod.declPtr(builtin_file.root_decl.unwrap().?); + const builtin_namespace = root_decl.src_namespace; + const decl_index = builtin_namespace.decls.getKeyAdapted(@as([]const u8, "test_functions"), DeclAdapter{ .mod = mod }).?; + const decl = mod.declPtr(decl_index); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).elemType(); - const array_decl = d: { + const array_decl_index = d: { // Add mod.test_functions to an array decl then make the test_functions // decl reference it as a slice. var new_decl_arena = std.heap.ArenaAllocator.init(gpa); @@ -5440,50 +5523,52 @@ pub fn populateTestFunctions(mod: *Module) !void { const arena = new_decl_arena.allocator(); const test_fn_vals = try arena.alloc(Value, mod.test_functions.count()); - const array_decl = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ + const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ .ty = try Type.Tag.array.create(arena, .{ .len = test_fn_vals.len, .elem_type = try tmp_test_fn_ty.copy(arena), }), .val = try Value.Tag.aggregate.create(arena, test_fn_vals), }); + const array_decl = mod.declPtr(array_decl_index); // Add a dependency on each test name and function pointer. 
try array_decl.dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2); - for (mod.test_functions.keys()) |test_decl, i| { + for (mod.test_functions.keys()) |test_decl_index, i| { + const test_decl = mod.declPtr(test_decl_index); const test_name_slice = mem.sliceTo(test_decl.name, 0); - const test_name_decl = n: { + const test_name_decl_index = n: { var name_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer name_decl_arena.deinit(); const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice); - const test_name_decl = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{ + const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{ .ty = try Type.Tag.array_u8.create(name_decl_arena.allocator(), bytes.len), .val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes), }); - try test_name_decl.finalizeNewArena(&name_decl_arena); - break :n test_name_decl; + try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena); + break :n test_name_decl_index; }; - array_decl.dependencies.putAssumeCapacityNoClobber(test_decl, {}); - array_decl.dependencies.putAssumeCapacityNoClobber(test_name_decl, {}); - try mod.linkerUpdateDecl(test_name_decl); + array_decl.dependencies.putAssumeCapacityNoClobber(test_decl_index, {}); + array_decl.dependencies.putAssumeCapacityNoClobber(test_name_decl_index, {}); + try mod.linkerUpdateDecl(test_name_decl_index); const field_vals = try arena.create([3]Value); field_vals.* = .{ try Value.Tag.slice.create(arena, .{ - .ptr = try Value.Tag.decl_ref.create(arena, test_name_decl), + .ptr = try Value.Tag.decl_ref.create(arena, test_name_decl_index), .len = try Value.Tag.int_u64.create(arena, test_name_slice.len), }), // name - try Value.Tag.decl_ref.create(arena, test_decl), // func + try Value.Tag.decl_ref.create(arena, test_decl_index), // func Value.initTag(.null_value), // async_frame_size }; test_fn_vals[i] = try Value.Tag.aggregate.create(arena, field_vals); } try array_decl.finalizeNewArena(&new_decl_arena); - break :d array_decl; + break :d array_decl_index; }; - try mod.linkerUpdateDecl(array_decl); + try mod.linkerUpdateDecl(array_decl_index); { var new_decl_arena = std.heap.ArenaAllocator.init(gpa); @@ -5493,7 +5578,7 @@ pub fn populateTestFunctions(mod: *Module) !void { // This copy accesses the old Decl Type/Value so it must be done before `clearValues`. 
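
Each `field_vals` triple assembled above matches the element type that `test_functions` is declared with in the `builtin` package, which at this stage of the compiler looks roughly like this (a recalled sketch, not quoted from this patch):

pub const TestFn = struct {
    name: []const u8,
    func: fn () anyerror!void,
    async_frame_size: ?usize,
};

The name slice, the function `decl_ref`, and `.null_value` land in `name`, `func`, and `async_frame_size` respectively, as the inline comments note.
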
const new_ty = try Type.Tag.const_slice.create(arena, try tmp_test_fn_ty.copy(arena)); const new_val = try Value.Tag.slice.create(arena, .{ - .ptr = try Value.Tag.decl_ref.create(arena, array_decl), + .ptr = try Value.Tag.decl_ref.create(arena, array_decl_index), .len = try Value.Tag.int_u64.create(arena, mod.test_functions.count()), }); @@ -5506,15 +5591,17 @@ pub fn populateTestFunctions(mod: *Module) !void { try decl.finalizeNewArena(&new_decl_arena); } - try mod.linkerUpdateDecl(decl); + try mod.linkerUpdateDecl(decl_index); } -pub fn linkerUpdateDecl(mod: *Module, decl: *Decl) !void { +pub fn linkerUpdateDecl(mod: *Module, decl_index: Decl.Index) !void { const comp = mod.comp; if (comp.bin_file.options.emit == null) return; - comp.bin_file.updateDecl(mod, decl) catch |err| switch (err) { + const decl = mod.declPtr(decl_index); + + comp.bin_file.updateDecl(mod, decl_index) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { decl.analysis = .codegen_failure; @@ -5523,7 +5610,7 @@ pub fn linkerUpdateDecl(mod: *Module, decl: *Decl) !void { else => { const gpa = mod.gpa; try mod.failed_decls.ensureUnusedCapacity(gpa, 1); - mod.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create( + mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( gpa, decl.srcLoc(), "unable to codegen: {s}", @@ -5566,3 +5653,64 @@ fn reportRetryableFileError( } gop.value_ptr.* = err_msg; } + +pub fn markReferencedDeclsAlive(mod: *Module, val: Value) void { + switch (val.tag()) { + .decl_ref_mut => return mod.markDeclIndexAlive(val.castTag(.decl_ref_mut).?.data.decl_index), + .extern_fn => return mod.markDeclIndexAlive(val.castTag(.extern_fn).?.data.owner_decl), + .function => return mod.markDeclIndexAlive(val.castTag(.function).?.data.owner_decl), + .variable => return mod.markDeclIndexAlive(val.castTag(.variable).?.data.owner_decl), + .decl_ref => return mod.markDeclIndexAlive(val.cast(Value.Payload.Decl).?.data), + + .repeated, + .eu_payload, + .opt_payload, + .empty_array_sentinel, + => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.SubValue).?.data), + + .eu_payload_ptr, + .opt_payload_ptr, + => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.PayloadPtr).?.data.container_ptr), + + .slice => { + const slice = val.cast(Value.Payload.Slice).?.data; + mod.markReferencedDeclsAlive(slice.ptr); + mod.markReferencedDeclsAlive(slice.len); + }, + + .elem_ptr => { + const elem_ptr = val.cast(Value.Payload.ElemPtr).?.data; + return mod.markReferencedDeclsAlive(elem_ptr.array_ptr); + }, + .field_ptr => { + const field_ptr = val.cast(Value.Payload.FieldPtr).?.data; + return mod.markReferencedDeclsAlive(field_ptr.container_ptr); + }, + .aggregate => { + for (val.castTag(.aggregate).?.data) |field_val| { + mod.markReferencedDeclsAlive(field_val); + } + }, + .@"union" => { + const data = val.cast(Value.Payload.Union).?.data; + mod.markReferencedDeclsAlive(data.tag); + mod.markReferencedDeclsAlive(data.val); + }, + + else => {}, + } +} + +pub fn markDeclAlive(mod: *Module, decl: *Decl) void { + if (decl.alive) return; + decl.alive = true; + + // This is the first time we are marking this Decl alive. We must + // therefore recurse into its value and mark any Decl it references + // as also alive, so that any Decl referenced does not get garbage collected. 
+ mod.markReferencedDeclsAlive(decl.val); +} + +fn markDeclIndexAlive(mod: *Module, decl_index: Decl.Index) void { + return mod.markDeclAlive(mod.declPtr(decl_index)); +} diff --git a/src/RangeSet.zig b/src/RangeSet.zig index 79bd22fd7f..5b4c654529 100644 --- a/src/RangeSet.zig +++ b/src/RangeSet.zig @@ -1,12 +1,14 @@ const std = @import("std"); const Order = std.math.Order; + +const RangeSet = @This(); +const Module = @import("Module.zig"); +const SwitchProngSrc = @import("Module.zig").SwitchProngSrc; const Type = @import("type.zig").Type; const Value = @import("value.zig").Value; -const RangeSet = @This(); -const SwitchProngSrc = @import("Module.zig").SwitchProngSrc; ranges: std.ArrayList(Range), -target: std.Target, +module: *Module, pub const Range = struct { first: Value, @@ -14,10 +16,10 @@ pub const Range = struct { src: SwitchProngSrc, }; -pub fn init(allocator: std.mem.Allocator, target: std.Target) RangeSet { +pub fn init(allocator: std.mem.Allocator, module: *Module) RangeSet { return .{ .ranges = std.ArrayList(Range).init(allocator), - .target = target, + .module = module, }; } @@ -32,11 +34,9 @@ pub fn add( ty: Type, src: SwitchProngSrc, ) !?SwitchProngSrc { - const target = self.target; - for (self.ranges.items) |range| { - if (last.compare(.gte, range.first, ty, target) and - first.compare(.lte, range.last, ty, target)) + if (last.compare(.gte, range.first, ty, self.module) and + first.compare(.lte, range.last, ty, self.module)) { return range.src; // They overlap. } @@ -49,26 +49,24 @@ pub fn add( return null; } -const LessThanContext = struct { ty: Type, target: std.Target }; +const LessThanContext = struct { ty: Type, module: *Module }; /// Assumes a and b do not overlap fn lessThan(ctx: LessThanContext, a: Range, b: Range) bool { - return a.first.compare(.lt, b.first, ctx.ty, ctx.target); + return a.first.compare(.lt, b.first, ctx.ty, ctx.module); } pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool { if (self.ranges.items.len == 0) return false; - const target = self.target; - std.sort.sort(Range, self.ranges.items, LessThanContext{ .ty = ty, - .target = target, + .module = self.module, }, lessThan); - if (!self.ranges.items[0].first.eql(first, ty, target) or - !self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, target)) + if (!self.ranges.items[0].first.eql(first, ty, self.module) or + !self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, self.module)) { return false; } @@ -78,6 +76,8 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool { var counter = try std.math.big.int.Managed.init(self.ranges.allocator); defer counter.deinit(); + const target = self.module.getTarget(); + // look for gaps for (self.ranges.items[1..]) |cur, i| { // i starts counting from the second item. diff --git a/src/Sema.zig b/src/Sema.zig index a3eef2dc86..14511fe82d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -24,6 +24,7 @@ inst_map: InstMap = .{}, /// and `src_decl` of `Block` is the `Decl` of the callee. /// This `Decl` owns the arena memory of this `Sema`. owner_decl: *Decl, +owner_decl_index: Decl.Index, /// For an inline or comptime function call, this will be the root parent function /// which contains the callsite. Corresponds to `owner_decl`. owner_func: ?*Module.Fn, @@ -47,7 +48,7 @@ comptime_break_inst: Zir.Inst.Index = undefined, /// access to the source location set by the previous instruction which did /// contain a mapped source location. 
src: LazySrcLoc = .{ .token_offset = 0 },
-decl_val_table: std.AutoHashMapUnmanaged(*Decl, Air.Inst.Ref) = .{},
+decl_val_table: std.AutoHashMapUnmanaged(Decl.Index, Air.Inst.Ref) = .{},
/// When doing a generic function instantiation, this array collects a
/// `Value` object for each parameter that is comptime known and thus elided
/// from the generated function. This memory is allocated by a parent `Sema` and
@@ -111,10 +112,6 @@ pub const Block = struct {
    parent: ?*Block,
    /// Shared among all child blocks.
    sema: *Sema,
-    /// This Decl is the Decl according to the Zig source code corresponding to this Block.
-    /// This can vary during inline or comptime function calls. See `Sema.owner_decl`
-    /// for the one that will be the same for all Block instances.
-    src_decl: *Decl,
    /// The namespace to use for lookups from this source block.
    /// When analyzing fields, this is different from src_decl.src_namespace.
    namespace: *Namespace,
@@ -130,6 +127,10 @@ pub const Block = struct {
    /// If runtime_index is not 0 then one of these is guaranteed to be non null.
    runtime_cond: ?LazySrcLoc = null,
    runtime_loop: ?LazySrcLoc = null,
+    /// This Decl is the Decl according to the Zig source code corresponding to this Block.
+    /// This can vary during inline or comptime function calls. See `Sema.owner_decl`
+    /// for the one that will be the same for all Block instances.
+    src_decl: Decl.Index,
    /// Non-zero if a non-inline loop or a runtime conditional has been encountered.
    /// Stores to comptime variables are only allowed when var.runtime_index <= runtime_index.
    runtime_index: u32 = 0,
@@ -512,20 +513,21 @@ pub const Block = struct {
    }

    /// `alignment` value of 0 means to use ABI alignment.
-    pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value, alignment: u32) !*Decl {
+    pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value, alignment: u32) !Decl.Index {
        const sema = wad.block.sema;
        // Do this ahead of time because `createAnonymousDecl` depends on calling
        // `type.hasRuntimeBits()`.
_ = try sema.typeHasRuntimeBits(wad.block, wad.src, ty); - const new_decl = try sema.mod.createAnonymousDecl(wad.block, .{ + const new_decl_index = try sema.mod.createAnonymousDecl(wad.block, .{ .ty = ty, .val = val, }); + const new_decl = sema.mod.declPtr(new_decl_index); new_decl.@"align" = alignment; - errdefer sema.mod.abortAnonDecl(new_decl); + errdefer sema.mod.abortAnonDecl(new_decl_index); try new_decl.finalizeNewArena(&wad.new_decl_arena); wad.finished = true; - return new_decl; + return new_decl_index; } }; }; @@ -676,7 +678,7 @@ fn analyzeBodyInner( crash_info.setBodyIndex(i); const inst = body[i]; std.log.scoped(.sema_zir).debug("sema ZIR {s} %{d}", .{ - block.src_decl.src_namespace.file_scope.sub_file_path, inst, + sema.mod.declPtr(block.src_decl).src_namespace.file_scope.sub_file_path, inst, }); const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off @@ -1383,8 +1385,7 @@ pub fn resolveConstString( const wanted_type = Type.initTag(.const_slice_u8); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); - const target = sema.mod.getTarget(); - return val.toAllocatedBytes(wanted_type, sema.arena, target); + return val.toAllocatedBytes(wanted_type, sema.arena, sema.mod); } pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { @@ -1538,28 +1539,24 @@ fn failWithDivideByZero(sema: *Sema, block: *Block, src: LazySrcLoc) CompileErro } fn failWithModRemNegative(sema: *Sema, block: *Block, src: LazySrcLoc, lhs_ty: Type, rhs_ty: Type) CompileError { - const target = sema.mod.getTarget(); return sema.fail(block, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{ - lhs_ty.fmt(target), rhs_ty.fmt(target), + lhs_ty.fmt(sema.mod), rhs_ty.fmt(sema.mod), }); } fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, optional_ty: Type) CompileError { - const target = sema.mod.getTarget(); - return sema.fail(block, src, "expected optional type, found {}", .{optional_ty.fmt(target)}); + return sema.fail(block, src, "expected optional type, found {}", .{optional_ty.fmt(sema.mod)}); } fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError { - const target = sema.mod.getTarget(); return sema.fail(block, src, "type '{}' does not support array initialization syntax", .{ - ty.fmt(target), + ty.fmt(sema.mod), }); } fn failWithStructInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError { - const target = sema.mod.getTarget(); return sema.fail(block, src, "type '{}' does not support struct initialization syntax", .{ - ty.fmt(target), + ty.fmt(sema.mod), }); } @@ -1570,9 +1567,8 @@ fn failWithErrorSetCodeMissing( dest_err_set_ty: Type, src_err_set_ty: Type, ) CompileError { - const target = sema.mod.getTarget(); return sema.fail(block, src, "expected type '{}', found type '{}'", .{ - dest_err_set_ty.fmt(target), src_err_set_ty.fmt(target), + dest_err_set_ty.fmt(sema.mod), src_err_set_ty.fmt(sema.mod), }); } @@ -1586,7 +1582,9 @@ fn errNote( comptime format: []const u8, args: anytype, ) error{OutOfMemory}!void { - return sema.mod.errNoteNonLazy(src.toSrcLoc(block.src_decl), parent, format, args); + const mod = sema.mod; + const src_decl = mod.declPtr(block.src_decl); + return mod.errNoteNonLazy(src.toSrcLoc(src_decl), parent, format, args); } fn addFieldErrNote( @@ -1598,10 +1596,12 @@ fn addFieldErrNote( comptime format: 
[]const u8, args: anytype, ) !void { - const decl = container_ty.getOwnerDecl(); + const mod = sema.mod; + const decl_index = container_ty.getOwnerDecl(); + const decl = mod.declPtr(decl_index); const tree = try sema.getAstTree(block); const field_src = enumFieldSrcLoc(decl, tree.*, container_ty.getNodeOffset(), field_index); - try sema.mod.errNoteNonLazy(field_src.toSrcLoc(decl), parent, format, args); + try mod.errNoteNonLazy(field_src.toSrcLoc(decl), parent, format, args); } fn errMsg( @@ -1611,7 +1611,9 @@ fn errMsg( comptime format: []const u8, args: anytype, ) error{OutOfMemory}!*Module.ErrorMsg { - return Module.ErrorMsg.create(sema.gpa, src.toSrcLoc(block.src_decl), format, args); + const mod = sema.mod; + const src_decl = mod.declPtr(block.src_decl); + return Module.ErrorMsg.create(sema.gpa, src.toSrcLoc(src_decl), format, args); } pub fn fail( @@ -1654,7 +1656,7 @@ fn failWithOwnedErrorMsg(sema: *Sema, block: *Block, err_msg: *Module.ErrorMsg) sema.owner_decl.analysis = .sema_failure; sema.owner_decl.generation = mod.generation; } - const gop = mod.failed_decls.getOrPutAssumeCapacity(sema.owner_decl); + const gop = mod.failed_decls.getOrPutAssumeCapacity(sema.owner_decl_index); if (gop.found_existing) { // If there are multiple errors for the same Decl, prefer the first one added. err_msg.destroy(mod.gpa); @@ -1756,7 +1758,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE try inferred_alloc.stored_inst_list.append(sema.arena, operand); try sema.requireRuntimeBlock(block, src); - const ptr_ty = try Type.ptr(sema.arena, target, .{ + const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = pointee_ty, .@"align" = inferred_alloc.alignment, .@"addrspace" = addr_space, @@ -1770,7 +1772,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE // The alloc will turn into a Decl. 
var anon_decl = try block.startAnonDecl(src); defer anon_decl.deinit(); - iac.data.decl = try anon_decl.finish( + iac.data.decl_index = try anon_decl.finish( try pointee_ty.copy(anon_decl.arena()), Value.undef, iac.data.alignment, @@ -1778,7 +1780,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE if (iac.data.alignment != 0) { try sema.resolveTypeLayout(block, src, pointee_ty); } - const ptr_ty = try Type.ptr(sema.arena, target, .{ + const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = pointee_ty, .@"align" = iac.data.alignment, .@"addrspace" = addr_space, @@ -1786,7 +1788,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE return sema.addConstant( ptr_ty, try Value.Tag.decl_ref_mut.create(sema.arena, .{ - .decl = iac.data.decl, + .decl_index = iac.data.decl_index, .runtime_index = block.runtime_index, }), ); @@ -1827,7 +1829,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE } } - const ptr_ty = try Type.ptr(sema.arena, target, .{ + const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = pointee_ty, .@"addrspace" = addr_space, }); @@ -1848,7 +1850,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE } const ty_op = air_datas[trash_inst].ty_op; const operand_ty = sema.typeOf(ty_op.operand); - const ptr_operand_ty = try Type.ptr(sema.arena, target, .{ + const ptr_operand_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = operand_ty, .@"addrspace" = addr_space, }); @@ -1924,18 +1926,19 @@ fn zirStructDecl( errdefer new_decl_arena.deinit(); const new_decl_arena_allocator = new_decl_arena.allocator(); + const mod = sema.mod; const struct_obj = try new_decl_arena_allocator.create(Module.Struct); const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty); - const type_name = try sema.createTypeName(block, small.name_strategy, "struct"); - const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{ + const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{ .ty = Type.type, .val = struct_val, - }, type_name); + }, small.name_strategy, "struct"); + const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; - errdefer sema.mod.abortAnonDecl(new_decl); + errdefer mod.abortAnonDecl(new_decl_index); struct_obj.* = .{ - .owner_decl = new_decl, + .owner_decl = new_decl_index, .fields = .{}, .node_offset = src.node_offset, .zir_index = inst, @@ -1953,15 +1956,23 @@ fn zirStructDecl( }); try sema.analyzeStructDecl(new_decl, inst, struct_obj); try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl); + return sema.analyzeDeclVal(block, src, new_decl_index); } -fn createTypeName( +fn createAnonymousDeclTypeNamed( sema: *Sema, block: *Block, + typed_value: TypedValue, name_strategy: Zir.Inst.NameStrategy, anon_prefix: []const u8, -) ![:0]u8 { +) !Decl.Index { + const mod = sema.mod; + const namespace = block.namespace; + const src_scope = block.wip_capture_scope; + const src_decl = mod.declPtr(block.src_decl); + const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node, src_scope); + errdefer mod.destroyDecl(new_decl_index); + switch (name_strategy) { .anon => { // It would be neat to have "struct:line:column" but this name has @@ -1970,20 +1981,24 @@ fn createTypeName( // semantically analyzed. 
// This name is also used as the key in the parent namespace so it cannot be // renamed. - const name_index = sema.mod.getNextAnonNameIndex(); - return std.fmt.allocPrintZ(sema.gpa, "{s}__{s}_{d}", .{ - block.src_decl.name, anon_prefix, name_index, + const name = try std.fmt.allocPrintZ(sema.gpa, "{s}__{s}_{d}", .{ + src_decl.name, anon_prefix, @enumToInt(new_decl_index), }); + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); + return new_decl_index; + }, + .parent => { + const name = try sema.gpa.dupeZ(u8, mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0)); + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); + return new_decl_index; }, - .parent => return sema.gpa.dupeZ(u8, mem.sliceTo(block.src_decl.name, 0)), .func => { - const target = sema.mod.getTarget(); const fn_info = sema.code.getFnInfo(sema.func.?.zir_body_inst); const zir_tags = sema.code.instructions.items(.tag); var buf = std.ArrayList(u8).init(sema.gpa); defer buf.deinit(); - try buf.appendSlice(mem.sliceTo(block.src_decl.name, 0)); + try buf.appendSlice(mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0)); try buf.appendSlice("("); var arg_i: usize = 0; @@ -1995,7 +2010,7 @@ fn createTypeName( const arg_val = sema.resolveConstMaybeUndefVal(block, .unneeded, arg) catch unreachable; if (arg_i != 0) try buf.appendSlice(","); - try buf.writer().print("{}", .{arg_val.fmtValue(sema.typeOf(arg), target)}); + try buf.writer().print("{}", .{arg_val.fmtValue(sema.typeOf(arg), sema.mod)}); arg_i += 1; continue; @@ -2004,7 +2019,9 @@ fn createTypeName( }; try buf.appendSlice(")"); - return buf.toOwnedSliceSentinel(0); + const name = try buf.toOwnedSliceSentinel(0); + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); + return new_decl_index; }, } } @@ -2064,16 +2081,16 @@ fn zirEnumDecl( }; const enum_ty = Type.initPayload(&enum_ty_payload.base); const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); - const type_name = try sema.createTypeName(block, small.name_strategy, "enum"); - const new_decl = try mod.createAnonymousDeclNamed(block, .{ + const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{ .ty = Type.type, .val = enum_val, - }, type_name); + }, small.name_strategy, "enum"); + const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; - errdefer mod.abortAnonDecl(new_decl); + errdefer mod.abortAnonDecl(new_decl_index); enum_obj.* = .{ - .owner_decl = new_decl, + .owner_decl = new_decl_index, .tag_ty = Type.@"null", .tag_ty_inferred = true, .fields = .{}, @@ -2101,7 +2118,7 @@ fn zirEnumDecl( enum_obj.tag_ty_inferred = false; } try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl); + return sema.analyzeDeclVal(block, src, new_decl_index); } extra_index += body.len; @@ -2116,8 +2133,13 @@ fn zirEnumDecl( // should be the enum itself. 
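
One consequence worth spelling out: because the Decl is now allocated before its name is chosen, `@enumToInt(new_decl_index)` supplies the uniqueness that the deleted atomic `getNextAnonNameIndex` counter used to provide, here and in the generic instantiation further below. Illustrative outputs of the three strategies for a parent decl named `Foo` (hypothetical values):

// .anon   => "Foo__struct_123"  // 123 == @enumToInt(new_decl_index)
// .parent => "Foo"
// .func   => "List(u8,4)"       // comptime-known call args printed into the name
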
const prev_owner_decl = sema.owner_decl; + const prev_owner_decl_index = sema.owner_decl_index; sema.owner_decl = new_decl; - defer sema.owner_decl = prev_owner_decl; + sema.owner_decl_index = new_decl_index; + defer { + sema.owner_decl = prev_owner_decl; + sema.owner_decl_index = prev_owner_decl_index; + } const prev_owner_func = sema.owner_func; sema.owner_func = null; @@ -2133,7 +2155,7 @@ fn zirEnumDecl( var enum_block: Block = .{ .parent = null, .sema = sema, - .src_decl = new_decl, + .src_decl = new_decl_index, .namespace = &enum_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, @@ -2168,7 +2190,7 @@ fn zirEnumDecl( if (any_values) { try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{ .ty = enum_obj.tag_ty, - .target = target, + .mod = mod, }); } @@ -2196,8 +2218,8 @@ fn zirEnumDecl( const gop = enum_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { const tree = try sema.getAstTree(block); - const field_src = enumFieldSrcLoc(block.src_decl, tree.*, src.node_offset, field_i); - const other_tag_src = enumFieldSrcLoc(block.src_decl, tree.*, src.node_offset, gop.index); + const field_src = enumFieldSrcLoc(sema.mod.declPtr(block.src_decl), tree.*, src.node_offset, field_i); + const other_tag_src = enumFieldSrcLoc(sema.mod.declPtr(block.src_decl), tree.*, src.node_offset, gop.index); const msg = msg: { const msg = try sema.errMsg(block, field_src, "duplicate enum tag", .{}); errdefer msg.destroy(gpa); @@ -2218,7 +2240,7 @@ fn zirEnumDecl( const copied_tag_val = try tag_val.copy(new_decl_arena_allocator); enum_obj.values.putAssumeCapacityNoClobberContext(copied_tag_val, {}, .{ .ty = enum_obj.tag_ty, - .target = target, + .mod = mod, }); } else if (any_values) { const tag_val = if (last_tag_val) |val| @@ -2229,13 +2251,13 @@ fn zirEnumDecl( const copied_tag_val = try tag_val.copy(new_decl_arena_allocator); enum_obj.values.putAssumeCapacityNoClobberContext(copied_tag_val, {}, .{ .ty = enum_obj.tag_ty, - .target = target, + .mod = mod, }); } } try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl); + return sema.analyzeDeclVal(block, src, new_decl_index); } fn zirUnionDecl( @@ -2279,15 +2301,16 @@ fn zirUnionDecl( }; const union_ty = Type.initPayload(&union_payload.base); const union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty); - const type_name = try sema.createTypeName(block, small.name_strategy, "union"); - const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{ + const mod = sema.mod; + const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{ .ty = Type.type, .val = union_val, - }, type_name); + }, small.name_strategy, "union"); + const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; - errdefer sema.mod.abortAnonDecl(new_decl); + errdefer mod.abortAnonDecl(new_decl_index); union_obj.* = .{ - .owner_decl = new_decl, + .owner_decl = new_decl_index, .tag_ty = Type.initTag(.@"null"), .fields = .{}, .node_offset = src.node_offset, @@ -2304,10 +2327,10 @@ fn zirUnionDecl( &union_obj.namespace, new_decl, new_decl.name, }); - _ = try sema.mod.scanNamespace(&union_obj.namespace, extra_index, decls_len, new_decl); + _ = try mod.scanNamespace(&union_obj.namespace, extra_index, decls_len, new_decl); try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl); + return sema.analyzeDeclVal(block, src, new_decl_index); } fn zirOpaqueDecl( @@ -2347,16 +2370,16 @@ fn 
zirOpaqueDecl( }; const opaque_ty = Type.initPayload(&opaque_ty_payload.base); const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty); - const type_name = try sema.createTypeName(block, small.name_strategy, "opaque"); - const new_decl = try mod.createAnonymousDeclNamed(block, .{ + const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{ .ty = Type.type, .val = opaque_val, - }, type_name); + }, small.name_strategy, "opaque"); + const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; - errdefer mod.abortAnonDecl(new_decl); + errdefer mod.abortAnonDecl(new_decl_index); opaque_obj.* = .{ - .owner_decl = new_decl, + .owner_decl = new_decl_index, .node_offset = src.node_offset, .namespace = .{ .parent = block.namespace, @@ -2371,7 +2394,7 @@ fn zirOpaqueDecl( extra_index = try mod.scanNamespace(&opaque_obj.namespace, extra_index, decls_len, new_decl); try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl); + return sema.analyzeDeclVal(block, src, new_decl_index); } fn zirErrorSetDecl( @@ -2395,13 +2418,14 @@ fn zirErrorSetDecl( const error_set = try new_decl_arena_allocator.create(Module.ErrorSet); const error_set_ty = try Type.Tag.error_set.create(new_decl_arena_allocator, error_set); const error_set_val = try Value.Tag.ty.create(new_decl_arena_allocator, error_set_ty); - const type_name = try sema.createTypeName(block, name_strategy, "error"); - const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{ + const mod = sema.mod; + const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{ .ty = Type.type, .val = error_set_val, - }, type_name); + }, name_strategy, "error"); + const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; - errdefer sema.mod.abortAnonDecl(new_decl); + errdefer mod.abortAnonDecl(new_decl_index); var names = Module.ErrorSet.NameMap{}; try names.ensureUnusedCapacity(new_decl_arena_allocator, extra.data.fields_len); @@ -2410,7 +2434,7 @@ fn zirErrorSetDecl( const extra_index_end = extra_index + (extra.data.fields_len * 2); while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string const str_index = sema.code.extra[extra_index]; - const kv = try sema.mod.getErrorValue(sema.code.nullTerminatedString(str_index)); + const kv = try mod.getErrorValue(sema.code.nullTerminatedString(str_index)); const result = names.getOrPutAssumeCapacity(kv.key); assert(!result.found_existing); // verified in AstGen } @@ -2419,12 +2443,12 @@ fn zirErrorSetDecl( Module.ErrorSet.sortNames(&names); error_set.* = .{ - .owner_decl = new_decl, + .owner_decl = new_decl_index, .node_offset = inst_data.src_node, .names = names, }; try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl); + return sema.analyzeDeclVal(block, src, new_decl_index); } fn zirRetPtr( @@ -2444,7 +2468,7 @@ fn zirRetPtr( } const target = sema.mod.getTarget(); - const ptr_type = try Type.ptr(sema.arena, target, .{ + const ptr_type = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = sema.fn_ret_ty, .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); @@ -2535,14 +2559,13 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE else object_ty; - const target = sema.mod.getTarget(); if (!array_ty.isIndexable()) { const msg = msg: { const msg = try sema.errMsg( block, src, "type '{}' does not support indexing", - .{array_ty.fmt(target)}, + .{array_ty.fmt(sema.mod)}, ); errdefer 
msg.destroy(sema.gpa); try sema.errNote( @@ -2598,7 +2621,7 @@ fn zirAllocExtended( return sema.addConstant( inferred_alloc_ty, try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ - .decl = undefined, + .decl_index = undefined, .alignment = alignment, }), ); @@ -2612,7 +2635,7 @@ fn zirAllocExtended( const target = sema.mod.getTarget(); try sema.requireRuntimeBlock(block, src); try sema.resolveTypeLayout(block, src, var_ty); - const ptr_type = try Type.ptr(sema.arena, target, .{ + const ptr_type = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = var_ty, .@"align" = alignment, .@"addrspace" = target_util.defaultAddressSpace(target, .local), @@ -2649,7 +2672,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro const ptr_ty = sema.typeOf(ptr); var ptr_info = ptr_ty.ptrInfo().data; ptr_info.mutable = false; - const const_ptr_ty = try Type.ptr(sema.arena, sema.mod.getTarget(), ptr_info); + const const_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); if (try sema.resolveMaybeUndefVal(block, inst_data.src(), ptr)) |val| { return sema.addConstant(const_ptr_ty, val); @@ -2669,7 +2692,7 @@ fn zirAllocInferredComptime( return sema.addConstant( inferred_alloc_ty, try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ - .decl = undefined, + .decl_index = undefined, .alignment = 0, }), ); @@ -2687,7 +2710,7 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I return sema.analyzeComptimeAlloc(block, var_ty, 0, ty_src); } const target = sema.mod.getTarget(); - const ptr_type = try Type.ptr(sema.arena, target, .{ + const ptr_type = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = var_ty, .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); @@ -2709,7 +2732,7 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } try sema.validateVarType(block, ty_src, var_ty, false); const target = sema.mod.getTarget(); - const ptr_type = try Type.ptr(sema.arena, target, .{ + const ptr_type = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = var_ty, .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); @@ -2735,7 +2758,7 @@ fn zirAllocInferred( return sema.addConstant( inferred_alloc_ty, try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ - .decl = undefined, + .decl_index = undefined, .alignment = 0, }), ); @@ -2776,11 +2799,12 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com switch (ptr_val.tag()) { .inferred_alloc_comptime => { const iac = ptr_val.castTag(.inferred_alloc_comptime).?; - const decl = iac.data.decl; - try sema.mod.declareDeclDependency(sema.owner_decl, decl); + const decl_index = iac.data.decl_index; + try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); + const decl = sema.mod.declPtr(decl_index); const final_elem_ty = try decl.ty.copy(sema.arena); - const final_ptr_ty = try Type.ptr(sema.arena, target, .{ + const final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = final_elem_ty, .mutable = var_is_mut, .@"align" = iac.data.alignment, @@ -2791,11 +2815,11 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com if (var_is_mut) { sema.air_values.items[value_index] = try Value.Tag.decl_ref_mut.create(sema.arena, .{ - .decl = decl, + .decl_index = decl_index, .runtime_index = block.runtime_index, }); } else { - sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, decl); + sema.air_values.items[value_index] = try 
Value.Tag.decl_ref.create(sema.arena, decl_index); } }, .inferred_alloc => { @@ -2803,7 +2827,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const peer_inst_list = inferred_alloc.data.stored_inst_list.items; const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none); - const final_ptr_ty = try Type.ptr(sema.arena, target, .{ + const final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = final_elem_ty, .mutable = var_is_mut, .@"align" = inferred_alloc.data.alignment, @@ -2873,22 +2897,22 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com if (store_op.lhs != Air.indexToRef(bitcast_inst)) break :ct; if (air_datas[bitcast_inst].ty_op.operand != Air.indexToRef(const_inst)) break :ct; - const new_decl = d: { + const new_decl_index = d: { var anon_decl = try block.startAnonDecl(src); defer anon_decl.deinit(); - const new_decl = try anon_decl.finish( + const new_decl_index = try anon_decl.finish( try final_elem_ty.copy(anon_decl.arena()), try store_val.copy(anon_decl.arena()), inferred_alloc.data.alignment, ); - break :d new_decl; + break :d new_decl_index; }; - try sema.mod.declareDeclDependency(sema.owner_decl, new_decl); + try sema.mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); // Even though we reuse the constant instruction, we still remove it from the // block so that codegen does not see it. block.instructions.shrinkRetainingCapacity(block.instructions.items.len - 3); - sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, new_decl); + sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, new_decl_index); // if bitcast ty ref needs to be made const, make_ptr_const // ZIR handles it later, so we can just use the ty ref here. 
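
The net effect of the surgery above: a single comptime-known store into an inferred allocation never reaches codegen. Conceptually (a sketch of intent, not literal lowering rules):

// const ptr = <inferred alloc>; ptr.* = <comptime-known value>;
// ...is rewritten so the value lives in a fresh anonymous Decl and the
// pointer is a decl_ref to it; the alloc/bitcast/store trio is then
// dropped from the block, with the dependency edge recorded by index.
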
air_datas[ptr_inst].ty_pl.ty = air_datas[bitcast_inst].ty_op.ty; @@ -3218,10 +3242,11 @@ fn validateStructInit( } if (root_msg) |msg| { - const fqn = try struct_obj.getFullyQualifiedName(gpa); + const mod = sema.mod; + const fqn = try struct_obj.getFullyQualifiedName(mod); defer gpa.free(fqn); - try sema.mod.errNoteNonLazy( - struct_obj.srcLoc(), + try mod.errNoteNonLazy( + struct_obj.srcLoc(mod), msg, "struct '{s}' declared here", .{fqn}, @@ -3325,10 +3350,10 @@ fn validateStructInit( } if (root_msg) |msg| { - const fqn = try struct_obj.getFullyQualifiedName(gpa); + const fqn = try struct_obj.getFullyQualifiedName(sema.mod); defer gpa.free(fqn); try sema.mod.errNoteNonLazy( - struct_obj.srcLoc(), + struct_obj.srcLoc(sema.mod), msg, "struct '{s}' declared here", .{fqn}, @@ -3497,9 +3522,8 @@ fn failWithBadMemberAccess( else => unreachable, }; const msg = msg: { - const target = sema.mod.getTarget(); const msg = try sema.errMsg(block, field_src, "{s} '{}' has no member named '{s}'", .{ - kw_name, agg_ty.fmt(target), field_name, + kw_name, agg_ty.fmt(sema.mod), field_name, }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, agg_ty); @@ -3517,7 +3541,7 @@ fn failWithBadStructFieldAccess( ) CompileError { const gpa = sema.gpa; - const fqn = try struct_obj.getFullyQualifiedName(gpa); + const fqn = try struct_obj.getFullyQualifiedName(sema.mod); defer gpa.free(fqn); const msg = msg: { @@ -3528,7 +3552,7 @@ fn failWithBadStructFieldAccess( .{ field_name, fqn }, ); errdefer msg.destroy(gpa); - try sema.mod.errNoteNonLazy(struct_obj.srcLoc(), msg, "struct declared here", .{}); + try sema.mod.errNoteNonLazy(struct_obj.srcLoc(sema.mod), msg, "struct declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); @@ -3543,7 +3567,7 @@ fn failWithBadUnionFieldAccess( ) CompileError { const gpa = sema.gpa; - const fqn = try union_obj.getFullyQualifiedName(gpa); + const fqn = try union_obj.getFullyQualifiedName(sema.mod); defer gpa.free(fqn); const msg = msg: { @@ -3554,14 +3578,14 @@ fn failWithBadUnionFieldAccess( .{ field_name, fqn }, ); errdefer msg.destroy(gpa); - try sema.mod.errNoteNonLazy(union_obj.srcLoc(), msg, "union declared here", .{}); + try sema.mod.errNoteNonLazy(union_obj.srcLoc(sema.mod), msg, "union declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void { - const src_loc = decl_ty.declSrcLocOrNull() orelse return; + const src_loc = decl_ty.declSrcLocOrNull(sema.mod) orelse return; const category = switch (decl_ty.zigTypeTag()) { .Union => "union", .Struct => "struct", @@ -3645,7 +3669,7 @@ fn storeToInferredAlloc( try inferred_alloc.data.stored_inst_list.append(sema.arena, operand); // Create a runtime bitcast instruction with exactly the type the pointer wants. 
const target = sema.mod.getTarget(); - const ptr_ty = try Type.ptr(sema.arena, target, .{ + const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = operand_ty, .@"align" = inferred_alloc.data.alignment, .@"addrspace" = target_util.defaultAddressSpace(target, .local), @@ -3670,7 +3694,7 @@ fn storeToInferredAllocComptime( } var anon_decl = try block.startAnonDecl(src); defer anon_decl.deinit(); - iac.data.decl = try anon_decl.finish( + iac.data.decl_index = try anon_decl.finish( try operand_ty.copy(anon_decl.arena()), try operand_val.copy(anon_decl.arena()), iac.data.alignment, @@ -3869,7 +3893,6 @@ fn zirCompileLog( const src_node = extra.data.src_node; const src: LazySrcLoc = .{ .node_offset = src_node }; const args = sema.code.refSlice(extra.end, extended.small); - const target = sema.mod.getTarget(); for (args) |arg_ref, i| { if (i != 0) try writer.print(", ", .{}); @@ -3878,15 +3901,15 @@ fn zirCompileLog( const arg_ty = sema.typeOf(arg); if (try sema.resolveMaybeUndefVal(block, src, arg)) |val| { try writer.print("@as({}, {})", .{ - arg_ty.fmt(target), val.fmtValue(arg_ty, target), + arg_ty.fmt(sema.mod), val.fmtValue(arg_ty, sema.mod), }); } else { - try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(target)}); + try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(sema.mod)}); } } try writer.print("\n", .{}); - const gop = try sema.mod.compile_log_decls.getOrPut(sema.gpa, sema.owner_decl); + const gop = try sema.mod.compile_log_decls.getOrPut(sema.gpa, sema.owner_decl_index); if (!gop.found_existing) { gop.value_ptr.* = src_node; } @@ -3996,7 +4019,8 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr // Ignore the result, all the relevant operations have written to c_import_buf already. 
_ = try sema.analyzeBodyBreak(&child_block, body); - const c_import_res = sema.mod.comp.cImport(c_import_buf.items) catch |err| + const mod = sema.mod; + const c_import_res = mod.comp.cImport(c_import_buf.items) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); if (c_import_res.errors.len != 0) { @@ -4004,12 +4028,12 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr const msg = try sema.errMsg(&child_block, src, "C import failed", .{}); errdefer msg.destroy(sema.gpa); - if (!sema.mod.comp.bin_file.options.link_libc) + if (!mod.comp.bin_file.options.link_libc) try sema.errNote(&child_block, src, msg, "libc headers not available; compilation does not link against libc", .{}); for (c_import_res.errors) |_| { // TODO integrate with LazySrcLoc - // try sema.mod.errNoteNonLazy(.{}, msg, "{s}", .{clang_err.msg_ptr[0..clang_err.msg_len]}); + // try mod.errNoteNonLazy(.{}, msg, "{s}", .{clang_err.msg_ptr[0..clang_err.msg_len]}); // if (clang_err.filename_ptr) |p| p[0..clang_err.filename_len] else "(no file)", // clang_err.line + 1, // clang_err.column + 1, @@ -4027,20 +4051,21 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr error.OutOfMemory => return error.OutOfMemory, else => unreachable, // we pass null for root_src_dir_path }; - const std_pkg = sema.mod.main_pkg.table.get("std").?; - const builtin_pkg = sema.mod.main_pkg.table.get("builtin").?; + const std_pkg = mod.main_pkg.table.get("std").?; + const builtin_pkg = mod.main_pkg.table.get("builtin").?; try c_import_pkg.add(sema.gpa, "builtin", builtin_pkg); try c_import_pkg.add(sema.gpa, "std", std_pkg); - const result = sema.mod.importPkg(c_import_pkg) catch |err| + const result = mod.importPkg(c_import_pkg) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); - sema.mod.astGenFile(result.file) catch |err| + mod.astGenFile(result.file) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); - try sema.mod.semaFile(result.file); - const file_root_decl = result.file.root_decl.?; - try sema.mod.declareDeclDependency(sema.owner_decl, file_root_decl); + try mod.semaFile(result.file); + const file_root_decl_index = result.file.root_decl.unwrap().?; + const file_root_decl = mod.declPtr(file_root_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, file_root_decl_index); return sema.addConstant(file_root_decl.ty, file_root_decl.val); } @@ -4139,6 +4164,7 @@ fn analyzeBlockBody( defer tracy.end(); const gpa = sema.gpa; + const mod = sema.mod; // Blocks must terminate with noreturn instruction. 
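
For orientation, the `@cImport` path this hunk re-threads through a local `mod` runs these stages in order (a condensed reading of the code above, no new behavior):

// 1. comp.cImport(c_import_buf.items)        translate the collected C code
// 2. build c_import_pkg; add "builtin" and "std" as its dependencies
// 3. mod.importPkg(c_import_pkg)              register the translated file
// 4. mod.astGenFile(result.file)              parse + AstGen => ZIR
// 5. mod.semaFile(result.file)                analyze; yields the file's root Decl
// 6. mod.declareDeclDependency(owner, root)   dependency edge recorded by Decl.Index
// 7. sema.addConstant(root.ty, root.val)      the import's result value
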
assert(child_block.instructions.items.len != 0); @@ -4173,16 +4199,16 @@ fn analyzeBlockBody( const type_src = src; // TODO: better source location const valid_rt = try sema.validateRunTimeType(child_block, type_src, resolved_ty, false); - const target = sema.mod.getTarget(); if (!valid_rt) { const msg = msg: { - const msg = try sema.errMsg(child_block, type_src, "value with comptime only type '{}' depends on runtime control flow", .{resolved_ty.fmt(target)}); + const msg = try sema.errMsg(child_block, type_src, "value with comptime only type '{}' depends on runtime control flow", .{resolved_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); const runtime_src = child_block.runtime_cond orelse child_block.runtime_loop.?; try sema.errNote(child_block, runtime_src, msg, "runtime control flow here", .{}); - try sema.explainWhyTypeIsComptime(child_block, type_src, msg, type_src.toSrcLoc(child_block.src_decl), resolved_ty); + const child_src_decl = mod.declPtr(child_block.src_decl); + try sema.explainWhyTypeIsComptime(child_block, type_src, msg, type_src.toSrcLoc(child_src_decl), resolved_ty); break :msg msg; }; @@ -4204,7 +4230,7 @@ fn analyzeBlockBody( const br_operand = sema.air_instructions.items(.data)[br].br.operand; const br_operand_src = src; const br_operand_ty = sema.typeOf(br_operand); - if (br_operand_ty.eql(resolved_ty, target)) { + if (br_operand_ty.eql(resolved_ty, mod)) { // No type coercion needed. continue; } @@ -4262,9 +4288,9 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void if (extra.namespace != .none) { return sema.fail(block, src, "TODO: implement exporting with field access", .{}); } - const decl = try sema.lookupIdentifier(block, operand_src, decl_name); + const decl_index = try sema.lookupIdentifier(block, operand_src, decl_name); const options = try sema.resolveExportOptions(block, options_src, extra.options); - try sema.analyzeExport(block, src, options, decl); + try sema.analyzeExport(block, src, options, decl_index); } fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { @@ -4278,11 +4304,11 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const operand = try sema.resolveInstConst(block, operand_src, extra.operand); const options = try sema.resolveExportOptions(block, options_src, extra.options); - const decl = switch (operand.val.tag()) { + const decl_index = switch (operand.val.tag()) { .function => operand.val.castTag(.function).?.data.owner_decl, else => return sema.fail(block, operand_src, "TODO implement exporting arbitrary Value objects", .{}), // TODO put this Value into an anonymous Decl and then export it. 
}; - try sema.analyzeExport(block, src, options, decl); + try sema.analyzeExport(block, src, options, decl_index); } pub fn analyzeExport( @@ -4290,18 +4316,18 @@ pub fn analyzeExport( block: *Block, src: LazySrcLoc, borrowed_options: std.builtin.ExportOptions, - exported_decl: *Decl, + exported_decl_index: Decl.Index, ) !void { const Export = Module.Export; const mod = sema.mod; - const target = mod.getTarget(); - try mod.ensureDeclAnalyzed(exported_decl); + try mod.ensureDeclAnalyzed(exported_decl_index); + const exported_decl = mod.declPtr(exported_decl_index); // TODO run the same checks as we do for C ABI struct fields switch (exported_decl.ty.zigTypeTag()) { .Fn, .Int, .Enum, .Struct, .Union, .Array, .Float => {}, else => return sema.fail(block, src, "unable to export type '{}'", .{ - exported_decl.ty.fmt(target), + exported_decl.ty.fmt(sema.mod), }), } @@ -4319,13 +4345,6 @@ pub fn analyzeExport( const section: ?[]const u8 = if (borrowed_options.section) |s| try gpa.dupe(u8, s) else null; errdefer if (section) |s| gpa.free(s); - const src_decl = block.src_decl; - const owner_decl = sema.owner_decl; - - log.debug("exporting Decl '{s}' as symbol '{s}' from Decl '{s}'", .{ - exported_decl.name, symbol_name, owner_decl.name, - }); - new_export.* = .{ .options = .{ .name = symbol_name, @@ -4343,14 +4362,14 @@ pub fn analyzeExport( .spirv => .{ .spirv = {} }, .nvptx => .{ .nvptx = {} }, }, - .owner_decl = owner_decl, - .src_decl = src_decl, - .exported_decl = exported_decl, + .owner_decl = sema.owner_decl_index, + .src_decl = block.src_decl, + .exported_decl = exported_decl_index, .status = .in_progress, }; // Add to export_owners table. - const eo_gop = mod.export_owners.getOrPutAssumeCapacity(owner_decl); + const eo_gop = mod.export_owners.getOrPutAssumeCapacity(sema.owner_decl_index); if (!eo_gop.found_existing) { eo_gop.value_ptr.* = &[0]*Export{}; } @@ -4359,7 +4378,7 @@ pub fn analyzeExport( errdefer eo_gop.value_ptr.* = gpa.shrink(eo_gop.value_ptr.*, eo_gop.value_ptr.len - 1); // Add to exported_decl table. 
- const de_gop = mod.decl_exports.getOrPutAssumeCapacity(exported_decl); + const de_gop = mod.decl_exports.getOrPutAssumeCapacity(exported_decl_index); if (!de_gop.found_existing) { de_gop.value_ptr.* = &[0]*Export{}; } @@ -4381,7 +4400,8 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const func = sema.owner_func orelse return sema.fail(block, src, "@setAlignStack outside function body", .{}); - switch (func.owner_decl.ty.fnCallingConvention()) { + const fn_owner_decl = sema.mod.declPtr(func.owner_decl); + switch (fn_owner_decl.ty.fnCallingConvention()) { .Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}), .Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}), else => {}, @@ -4561,8 +4581,8 @@ fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); - const decl = try sema.lookupIdentifier(block, src, decl_name); - return sema.analyzeDeclRef(decl); + const decl_index = try sema.lookupIdentifier(block, src, decl_name); + return sema.analyzeDeclRef(decl_index); } fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -4573,11 +4593,11 @@ fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.analyzeDeclVal(block, src, decl); } -fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: []const u8) !*Decl { +fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: []const u8) !Decl.Index { var namespace = block.namespace; while (true) { - if (try sema.lookupInNamespace(block, src, namespace, name, false)) |decl| { - return decl; + if (try sema.lookupInNamespace(block, src, namespace, name, false)) |decl_index| { + return decl_index; } namespace = namespace.parent orelse break; } @@ -4593,12 +4613,13 @@ fn lookupInNamespace( namespace: *Namespace, ident_name: []const u8, observe_usingnamespace: bool, -) CompileError!?*Decl { +) CompileError!?Decl.Index { const mod = sema.mod; - const namespace_decl = namespace.getDecl(); + const namespace_decl_index = namespace.getDeclIndex(); + const namespace_decl = sema.mod.declPtr(namespace_decl_index); if (namespace_decl.analysis == .file_failure) { - try mod.declareDeclDependency(sema.owner_decl, namespace_decl); + try mod.declareDeclDependency(sema.owner_decl_index, namespace_decl_index); return error.AnalysisFail; } @@ -4610,7 +4631,7 @@ fn lookupInNamespace( defer checked_namespaces.deinit(gpa); // Keep track of name conflicts for error notes. - var candidates: std.ArrayListUnmanaged(*Decl) = .{}; + var candidates: std.ArrayListUnmanaged(Decl.Index) = .{}; defer candidates.deinit(gpa); try checked_namespaces.put(gpa, namespace, {}); @@ -4618,23 +4639,25 @@ fn lookupInNamespace( while (check_i < checked_namespaces.count()) : (check_i += 1) { const check_ns = checked_namespaces.keys()[check_i]; - if (check_ns.decls.getKeyAdapted(ident_name, Module.DeclAdapter{})) |decl| { + if (check_ns.decls.getKeyAdapted(ident_name, Module.DeclAdapter{ .mod = mod })) |decl_index| { // Skip decls which are not marked pub, which are in a different // file than the `a.b`/`@hasDecl` syntax. 
+ const decl = mod.declPtr(decl_index); if (decl.is_pub or src_file == decl.getFileScope()) { - try candidates.append(gpa, decl); + try candidates.append(gpa, decl_index); } } var it = check_ns.usingnamespace_set.iterator(); while (it.next()) |entry| { - const sub_usingnamespace_decl = entry.key_ptr.*; + const sub_usingnamespace_decl_index = entry.key_ptr.*; + const sub_usingnamespace_decl = mod.declPtr(sub_usingnamespace_decl_index); const sub_is_pub = entry.value_ptr.*; if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScope()) { // Skip usingnamespace decls which are not marked pub, which are in // a different file than the `a.b`/`@hasDecl` syntax. continue; } - try sema.ensureDeclAnalyzed(sub_usingnamespace_decl); + try sema.ensureDeclAnalyzed(sub_usingnamespace_decl_index); const ns_ty = sub_usingnamespace_decl.val.castTag(.ty).?.data; const sub_ns = ns_ty.getNamespace().?; try checked_namespaces.put(gpa, sub_ns, {}); @@ -4644,15 +4667,16 @@ fn lookupInNamespace( switch (candidates.items.len) { 0 => {}, 1 => { - const decl = candidates.items[0]; - try mod.declareDeclDependency(sema.owner_decl, decl); - return decl; + const decl_index = candidates.items[0]; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); + return decl_index; }, else => { const msg = msg: { const msg = try sema.errMsg(block, src, "ambiguous reference", .{}); errdefer msg.destroy(gpa); - for (candidates.items) |candidate| { + for (candidates.items) |candidate_index| { + const candidate = mod.declPtr(candidate_index); const src_loc = candidate.srcLoc(); try mod.errNoteNonLazy(src_loc, msg, "declared here", .{}); } @@ -4661,9 +4685,9 @@ fn lookupInNamespace( return sema.failWithOwnedErrorMsg(block, msg); }, } - } else if (namespace.decls.getKeyAdapted(ident_name, Module.DeclAdapter{})) |decl| { - try mod.declareDeclDependency(sema.owner_decl, decl); - return decl; + } else if (namespace.decls.getKeyAdapted(ident_name, Module.DeclAdapter{ .mod = mod })) |decl_index| { + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); + return decl_index; } log.debug("{*} ({s}) depends on non-existence of '{s}' in {*} ({s})", .{ @@ -4672,7 +4696,7 @@ fn lookupInNamespace( // TODO This dependency is too strong. Really, it should only be a dependency // on the non-existence of `ident_name` in the namespace. We can lessen the number of // outdated declarations by making this dependency more sophisticated. - try mod.declareDeclDependency(sema.owner_decl, namespace_decl); + try mod.declareDeclDependency(sema.owner_decl_index, namespace_decl_index); return null; } @@ -4725,13 +4749,14 @@ const GenericCallAdapter = struct { /// Unlike comptime_args, the Type here is not always present. /// .generic_poison is used to communicate non-anytype parameters. comptime_tvs: []const TypedValue, - target: std.Target, + module: *Module, pub fn eql(ctx: @This(), adapted_key: void, other_key: *Module.Fn) bool { _ = adapted_key; // The generic function Decl is guaranteed to be the first dependency // of each of its instantiations. - const generic_owner_decl = other_key.owner_decl.dependencies.keys()[0]; + const other_owner_decl = ctx.module.declPtr(other_key.owner_decl); + const generic_owner_decl = other_owner_decl.dependencies.keys()[0]; if (ctx.generic_fn.owner_decl != generic_owner_decl) return false; const other_comptime_args = other_key.comptime_args.?; @@ -4747,18 +4772,18 @@ const GenericCallAdapter = struct { if (this_is_anytype) { // Both are anytype parameters. 
- if (!this_arg.ty.eql(other_arg.ty, ctx.target)) { + if (!this_arg.ty.eql(other_arg.ty, ctx.module)) { return false; } if (this_is_comptime) { // Both are comptime and anytype parameters with matching types. - if (!this_arg.val.eql(other_arg.val, other_arg.ty, ctx.target)) { + if (!this_arg.val.eql(other_arg.val, other_arg.ty, ctx.module)) { return false; } } } else if (this_is_comptime) { // Both are comptime parameters but not anytype parameters. - if (!this_arg.val.eql(other_arg.val, other_arg.ty, ctx.target)) { + if (!this_arg.val.eql(other_arg.val, other_arg.ty, ctx.module)) { return false; } } @@ -4787,7 +4812,6 @@ fn analyzeCall( const mod = sema.mod; const callee_ty = sema.typeOf(func); - const target = sema.mod.getTarget(); const func_ty = func_ty: { switch (callee_ty.zigTypeTag()) { .Fn => break :func_ty callee_ty, @@ -4799,7 +4823,7 @@ fn analyzeCall( }, else => {}, } - return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(target)}); + return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(sema.mod)}); }; const func_ty_info = func_ty.fnInfo(); @@ -4891,7 +4915,7 @@ fn analyzeCall( const result: Air.Inst.Ref = if (is_inline_call) res: { const func_val = try sema.resolveConstValue(block, func_src, func); const module_fn = switch (func_val.tag()) { - .decl_ref => func_val.castTag(.decl_ref).?.data.val.castTag(.function).?.data, + .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data, .function => func_val.castTag(.function).?.data, .extern_fn => return sema.fail(block, call_src, "{s} call of extern function", .{ @as([]const u8, if (is_comptime_call) "comptime" else "inline"), @@ -4922,7 +4946,8 @@ fn analyzeCall( // In order to save a bit of stack space, directly modify Sema rather // than create a child one. const parent_zir = sema.code; - sema.code = module_fn.owner_decl.getFileScope().zir; + const fn_owner_decl = mod.declPtr(module_fn.owner_decl); + sema.code = fn_owner_decl.getFileScope().zir; defer sema.code = parent_zir; const parent_inst_map = sema.inst_map; @@ -4936,14 +4961,14 @@ fn analyzeCall( sema.func = module_fn; defer sema.func = parent_func; - var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, module_fn.owner_decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, fn_owner_decl.src_scope); defer wip_captures.deinit(); var child_block: Block = .{ .parent = null, .sema = sema, .src_decl = module_fn.owner_decl, - .namespace = module_fn.owner_decl.src_namespace, + .namespace = fn_owner_decl.src_namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .label = null, @@ -4976,7 +5001,7 @@ fn analyzeCall( // comptime state. var should_memoize = true; - var new_fn_info = module_fn.owner_decl.ty.fnInfo(); + var new_fn_info = fn_owner_decl.ty.fnInfo(); new_fn_info.param_types = try sema.arena.alloc(Type, new_fn_info.param_types.len); new_fn_info.comptime_params = (try sema.arena.alloc(bool, new_fn_info.param_types.len)).ptr; @@ -5073,7 +5098,7 @@ fn analyzeCall( const bare_return_type = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst); // Create a fresh inferred error set type for inline/comptime calls. const fn_ret_ty = blk: { - if (module_fn.hasInferredErrorSet()) { + if (module_fn.hasInferredErrorSet(mod)) { const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode); node.data = .{ .func = module_fn }; if (parent_func) |some| { @@ -5097,7 +5122,7 @@ fn analyzeCall( // bug generating invalid LLVM IR. 
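
All of the `target` to `mod`/`module` substitutions in these hash, eql, and fmt call sites share one rationale: once a Type or Value can carry a `Decl.Index`, comparing or printing it may require resolving that index through the Module. The wider handle subsumes the narrower one: `mod.getTarget()` recovers the target wherever it is still needed (as the RangeSet change above does), while a bare `std.Target` cannot get back to the Module. Sketched:

// Before: arg_val.hash(arg_ty, &hasher, target);
// After:  arg_val.hash(arg_ty, &hasher, mod); // can fetch mod.getTarget() internally when needed
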
const res2: Air.Inst.Ref = res2: { if (should_memoize and is_comptime_call) { - if (mod.memoized_calls.getContext(memoized_call_key, .{ .target = target })) |result| { + if (mod.memoized_calls.getContext(memoized_call_key, .{ .module = mod })) |result| { const ty_inst = try sema.addType(fn_ret_ty); try sema.air_values.append(gpa, result.val); sema.air_instructions.set(block_inst, .{ @@ -5150,7 +5175,13 @@ fn analyzeCall( }; if (!is_comptime_call) { - try sema.emitDbgInline(block, module_fn, parent_func.?, parent_func.?.owner_decl.ty, .dbg_inline_end); + try sema.emitDbgInline( + block, + module_fn, + parent_func.?, + mod.declPtr(parent_func.?.owner_decl).ty, + .dbg_inline_end, + ); } if (should_memoize and is_comptime_call) { @@ -5172,7 +5203,7 @@ fn analyzeCall( try mod.memoized_calls.putContext(gpa, memoized_call_key, .{ .val = try result_val.copy(arena), .arena = arena_allocator.state, - }, .{ .target = sema.mod.getTarget() }); + }, .{ .module = mod }); delete_memoized_call_key = false; } } @@ -5239,13 +5270,14 @@ fn instantiateGenericCall( const func_val = try sema.resolveConstValue(block, func_src, func); const module_fn = switch (func_val.tag()) { .function => func_val.castTag(.function).?.data, - .decl_ref => func_val.castTag(.decl_ref).?.data.val.castTag(.function).?.data, + .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data, else => unreachable, }; // Check the Module's generic function map with an adapted context, so that we // can match against `uncasted_args` rather than doing the work below to create a // generic Scope only to junk it if it matches an existing instantiation. - const namespace = module_fn.owner_decl.src_namespace; + const fn_owner_decl = mod.declPtr(module_fn.owner_decl); + const namespace = fn_owner_decl.src_namespace; const fn_zir = namespace.file_scope.zir; const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst); const zir_tags = fn_zir.instructions.items(.tag); @@ -5261,7 +5293,6 @@ fn instantiateGenericCall( std.hash.autoHash(&hasher, @ptrToInt(module_fn)); const comptime_tvs = try sema.arena.alloc(TypedValue, func_ty_info.param_types.len); - const target = sema.mod.getTarget(); { var i: usize = 0; @@ -5290,9 +5321,9 @@ fn instantiateGenericCall( const arg_src = call_src; // TODO better source location const arg_ty = sema.typeOf(uncasted_args[i]); const arg_val = try sema.resolveValue(block, arg_src, uncasted_args[i]); - arg_val.hash(arg_ty, &hasher, target); + arg_val.hash(arg_ty, &hasher, mod); if (is_anytype) { - arg_ty.hashWithHasher(&hasher, target); + arg_ty.hashWithHasher(&hasher, mod); comptime_tvs[i] = .{ .ty = arg_ty, .val = arg_val, @@ -5305,7 +5336,7 @@ fn instantiateGenericCall( } } else if (is_anytype) { const arg_ty = sema.typeOf(uncasted_args[i]); - arg_ty.hashWithHasher(&hasher, target); + arg_ty.hashWithHasher(&hasher, mod); comptime_tvs[i] = .{ .ty = arg_ty, .val = Value.initTag(.generic_poison), @@ -5328,7 +5359,7 @@ fn instantiateGenericCall( .precomputed_hash = precomputed_hash, .func_ty_info = func_ty_info, .comptime_tvs = comptime_tvs, - .target = target, + .module = mod, }; const gop = try mod.monomorphed_funcs.getOrPutAdapted(gpa, {}, adapter); const callee = if (!gop.found_existing) callee: { @@ -5343,37 +5374,40 @@ fn instantiateGenericCall( try namespace.anon_decls.ensureUnusedCapacity(gpa, 1); // Create a Decl for the new function. 
- const src_decl = namespace.getDecl(); + const src_decl_index = namespace.getDeclIndex(); + const src_decl = mod.declPtr(src_decl_index); + const new_decl_index = try mod.allocateNewDecl(namespace, fn_owner_decl.src_node, src_decl.src_scope); + errdefer mod.destroyDecl(new_decl_index); + const new_decl = mod.declPtr(new_decl_index); // TODO better names for generic function instantiations - const name_index = mod.getNextAnonNameIndex(); const decl_name = try std.fmt.allocPrintZ(gpa, "{s}__anon_{d}", .{ - module_fn.owner_decl.name, name_index, + fn_owner_decl.name, @enumToInt(new_decl_index), }); - const new_decl = try mod.allocateNewDecl(decl_name, namespace, module_fn.owner_decl.src_node, src_decl.src_scope); - errdefer new_decl.destroy(mod); - new_decl.src_line = module_fn.owner_decl.src_line; - new_decl.is_pub = module_fn.owner_decl.is_pub; - new_decl.is_exported = module_fn.owner_decl.is_exported; - new_decl.has_align = module_fn.owner_decl.has_align; - new_decl.has_linksection_or_addrspace = module_fn.owner_decl.has_linksection_or_addrspace; - new_decl.@"addrspace" = module_fn.owner_decl.@"addrspace"; - new_decl.zir_decl_index = module_fn.owner_decl.zir_decl_index; + new_decl.name = decl_name; + new_decl.src_line = fn_owner_decl.src_line; + new_decl.is_pub = fn_owner_decl.is_pub; + new_decl.is_exported = fn_owner_decl.is_exported; + new_decl.has_align = fn_owner_decl.has_align; + new_decl.has_linksection_or_addrspace = fn_owner_decl.has_linksection_or_addrspace; + new_decl.@"addrspace" = fn_owner_decl.@"addrspace"; + new_decl.zir_decl_index = fn_owner_decl.zir_decl_index; new_decl.alive = true; // This Decl is called at runtime. new_decl.analysis = .in_progress; new_decl.generation = mod.generation; - namespace.anon_decls.putAssumeCapacityNoClobber(new_decl, {}); - errdefer assert(namespace.anon_decls.orderedRemove(new_decl)); + namespace.anon_decls.putAssumeCapacityNoClobber(new_decl_index, {}); + errdefer assert(namespace.anon_decls.orderedRemove(new_decl_index)); // The generic function Decl is guaranteed to be the first dependency // of each of its instantiations. assert(new_decl.dependencies.keys().len == 0); - try mod.declareDeclDependency(new_decl, module_fn.owner_decl); + try mod.declareDeclDependency(new_decl_index, module_fn.owner_decl); // Resolving the new function type below will possibly declare more decl dependencies // and so we remove them all here in case of error. errdefer { - for (new_decl.dependencies.keys()) |dep| { - dep.removeDependant(new_decl); + for (new_decl.dependencies.keys()) |dep_index| { + const dep = mod.declPtr(dep_index); + dep.removeDependant(new_decl_index); } } @@ -5392,6 +5426,7 @@ fn instantiateGenericCall( .perm_arena = new_decl_arena_allocator, .code = fn_zir, .owner_decl = new_decl, + .owner_decl_index = new_decl_index, .func = null, .fn_ret_ty = Type.void, .owner_func = null, @@ -5407,7 +5442,7 @@ fn instantiateGenericCall( var child_block: Block = .{ .parent = null, .sema = &child_sema, - .src_decl = new_decl, + .src_decl = new_decl_index, .namespace = namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, @@ -5564,7 +5599,7 @@ fn instantiateGenericCall( // Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field // will be populated, ensuring it will have `analyzeBody` called with the ZIR // parameters mapped appropriately. 
- try mod.comp.bin_file.allocateDeclIndexes(new_decl); + try mod.comp.bin_file.allocateDeclIndexes(new_decl_index); try mod.comp.work_queue.writeItem(.{ .codegen_func = new_func }); try new_decl.finalizeNewArena(&new_decl_arena); @@ -5577,7 +5612,7 @@ fn instantiateGenericCall( try sema.requireRuntimeBlock(block, call_src); const comptime_args = callee.comptime_args.?; - const new_fn_info = callee.owner_decl.ty.fnInfo(); + const new_fn_info = mod.declPtr(callee.owner_decl).ty.fnInfo(); const runtime_args_len = @intCast(u32, new_fn_info.param_types.len); const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len); { @@ -5700,8 +5735,7 @@ fn zirArrayType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const bin_inst = sema.code.instructions.items(.data)[inst].bin; const len = try sema.resolveInt(block, .unneeded, bin_inst.lhs, Type.usize); const elem_type = try sema.resolveType(block, .unneeded, bin_inst.rhs); - const target = sema.mod.getTarget(); - const array_ty = try Type.array(sema.arena, len, null, elem_type, target); + const array_ty = try Type.array(sema.arena, len, null, elem_type, sema.mod); return sema.addType(array_ty); } @@ -5720,8 +5754,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil const uncasted_sentinel = sema.resolveInst(extra.sentinel); const sentinel = try sema.coerce(block, elem_type, uncasted_sentinel, sentinel_src); const sentinel_val = try sema.resolveConstValue(block, sentinel_src, sentinel); - const target = sema.mod.getTarget(); - const array_ty = try Type.array(sema.arena, len, sentinel_val, elem_type, target); + const array_ty = try Type.array(sema.arena, len, sentinel_val, elem_type, sema.mod); return sema.addType(array_ty); } @@ -5748,14 +5781,13 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const error_set = try sema.resolveType(block, lhs_src, extra.lhs); const payload = try sema.resolveType(block, rhs_src, extra.rhs); - const target = sema.mod.getTarget(); if (error_set.zigTypeTag() != .ErrorSet) { return sema.fail(block, lhs_src, "expected error set type, found {}", .{ - error_set.fmt(target), + error_set.fmt(sema.mod), }); } - const err_union_ty = try Type.errorUnion(sema.arena, error_set, payload, target); + const err_union_ty = try Type.errorUnion(sema.arena, error_set, payload, sema.mod); return sema.addType(err_union_ty); } @@ -5862,11 +5894,10 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr } const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs); const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs); - const target = sema.mod.getTarget(); if (lhs_ty.zigTypeTag() != .ErrorSet) - return sema.fail(block, lhs_src, "expected error set type, found {}", .{lhs_ty.fmt(target)}); + return sema.fail(block, lhs_src, "expected error set type, found {}", .{lhs_ty.fmt(sema.mod)}); if (rhs_ty.zigTypeTag() != .ErrorSet) - return sema.fail(block, rhs_src, "expected error set type, found {}", .{rhs_ty.fmt(target)}); + return sema.fail(block, rhs_src, "expected error set type, found {}", .{rhs_ty.fmt(sema.mod)}); // Anything merged with anyerror is anyerror. 
if (lhs_ty.tag() == .anyerror or rhs_ty.tag() == .anyerror) { @@ -5912,7 +5943,6 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - const target = sema.mod.getTarget(); const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag()) { .Enum => operand, @@ -5929,7 +5959,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => { return sema.fail(block, operand_src, "expected enum or tagged union, found {}", .{ - operand_ty.fmt(target), + operand_ty.fmt(sema.mod), }); }, }; @@ -5953,7 +5983,6 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const target = sema.mod.getTarget(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -5963,7 +5992,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand = sema.resolveInst(extra.rhs); if (dest_ty.zigTypeTag() != .Enum) { - return sema.fail(block, dest_ty_src, "expected enum, found {}", .{dest_ty.fmt(target)}); + return sema.fail(block, dest_ty_src, "expected enum, found {}", .{dest_ty.fmt(sema.mod)}); } if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |int_val| { @@ -5973,17 +6002,17 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (int_val.isUndef()) { return sema.failWithUseOfUndef(block, operand_src); } - if (!dest_ty.enumHasInt(int_val, target)) { + if (!dest_ty.enumHasInt(int_val, sema.mod)) { const msg = msg: { const msg = try sema.errMsg( block, src, "enum '{}' has no tag with value {}", - .{ dest_ty.fmt(target), int_val.fmtValue(sema.typeOf(operand), target) }, + .{ dest_ty.fmt(sema.mod), int_val.fmtValue(sema.typeOf(operand), sema.mod) }, ); errdefer msg.destroy(sema.gpa); try sema.mod.errNoteNonLazy( - dest_ty.declSrcLoc(), + dest_ty.declSrcLoc(sema.mod), msg, "enum declared here", .{}, @@ -6028,14 +6057,13 @@ fn analyzeOptionalPayloadPtr( const optional_ptr_ty = sema.typeOf(optional_ptr); assert(optional_ptr_ty.zigTypeTag() == .Pointer); - const target = sema.mod.getTarget(); const opt_type = optional_ptr_ty.elemType(); if (opt_type.zigTypeTag() != .Optional) { - return sema.fail(block, src, "expected optional type, found {}", .{opt_type.fmt(target)}); + return sema.fail(block, src, "expected optional type, found {}", .{opt_type.fmt(sema.mod)}); } const child_type = try opt_type.optionalChildAlloc(sema.arena); - const child_pointer = try Type.ptr(sema.arena, target, .{ + const child_pointer = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = child_type, .mutable = !optional_ptr_ty.isConstPtr(), .@"addrspace" = optional_ptr_ty.ptrAddressSpace(), @@ -6106,8 +6134,7 @@ fn zirOptionalPayload( return sema.failWithExpectedOptionalType(block, src, operand_ty); } const ptr_info = operand_ty.ptrInfo().data; - const target = sema.mod.getTarget(); - break :t try Type.ptr(sema.arena, target, .{ + break :t try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = try ptr_info.pointee_type.copy(sema.arena), .@"align" = ptr_info.@"align", .@"addrspace" = ptr_info.@"addrspace", @@ -6154,9 +6181,8 @@ fn zirErrUnionPayload( const operand_src = src; 
const operand_ty = sema.typeOf(operand); if (operand_ty.zigTypeTag() != .ErrorUnion) { - const target = sema.mod.getTarget(); return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ - operand_ty.fmt(target), + operand_ty.fmt(sema.mod), }); } @@ -6205,15 +6231,14 @@ fn analyzeErrUnionPayloadPtr( const operand_ty = sema.typeOf(operand); assert(operand_ty.zigTypeTag() == .Pointer); - const target = sema.mod.getTarget(); if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found {}", .{ - operand_ty.elemType().fmt(target), + operand_ty.elemType().fmt(sema.mod), }); } const payload_ty = operand_ty.elemType().errorUnionPayload(); - const operand_pointer_ty = try Type.ptr(sema.arena, target, .{ + const operand_pointer_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = payload_ty, .mutable = !operand_ty.isConstPtr(), .@"addrspace" = operand_ty.ptrAddressSpace(), @@ -6272,10 +6297,9 @@ fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - const target = sema.mod.getTarget(); if (operand_ty.zigTypeTag() != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.fmt(target), + operand_ty.fmt(sema.mod), }); } @@ -6302,9 +6326,8 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE assert(operand_ty.zigTypeTag() == .Pointer); if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) { - const target = sema.mod.getTarget(); return sema.fail(block, src, "expected error union type, found {}", .{ - operand_ty.elemType().fmt(target), + operand_ty.elemType().fmt(sema.mod), }); } @@ -6329,10 +6352,9 @@ fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - const target = sema.mod.getTarget(); if (operand_ty.zigTypeTag() != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.fmt(target), + operand_ty.fmt(sema.mod), }); } if (operand_ty.errorUnionPayload().zigTypeTag() != .Void) { @@ -6606,7 +6628,7 @@ fn funcCommon( errdefer sema.gpa.destroy(new_extern_fn); new_extern_fn.* = Module.ExternFn{ - .owner_decl = sema.owner_decl, + .owner_decl = sema.owner_decl_index, .lib_name = null, }; @@ -6645,7 +6667,7 @@ fn funcCommon( new_func.* = .{ .state = anal_state, .zir_body_inst = func_inst, - .owner_decl = sema.owner_decl, + .owner_decl = sema.owner_decl_index, .comptime_args = comptime_args, .anytype_args = undefined, .hash = hash, @@ -6838,8 +6860,7 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const ptr = sema.resolveInst(inst_data.operand); const ptr_ty = sema.typeOf(ptr); if (!ptr_ty.isPtrAtRuntime()) { - const target = sema.mod.getTarget(); - return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(target)}); + return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}); } if (try sema.resolveMaybeUndefVal(block, ptr_src, ptr)) |ptr_val| { return sema.addConstant(Type.usize, ptr_val); @@ -7018,7 +7039,6 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ 
.node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const target = sema.mod.getTarget(); const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); switch (dest_ty.zigTypeTag()) { @@ -7038,10 +7058,10 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Type, .Undefined, .Void, - => return sema.fail(block, dest_ty_src, "invalid type '{}' for @bitCast", .{dest_ty.fmt(target)}), + => return sema.fail(block, dest_ty_src, "invalid type '{}' for @bitCast", .{dest_ty.fmt(sema.mod)}), .Pointer => return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}', use @ptrCast to cast to a pointer", .{ - dest_ty.fmt(target), + dest_ty.fmt(sema.mod), }), .Struct, .Union => if (dest_ty.containerLayout() == .Auto) { const container = switch (dest_ty.zigTypeTag()) { @@ -7050,7 +7070,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air else => unreachable, }; return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}', {s} does not have a guaranteed in-memory layout", .{ - dest_ty.fmt(target), container, + dest_ty.fmt(sema.mod), container, }); }, .BoundFn => @panic("TODO remove this type from the language and compiler"), @@ -7088,7 +7108,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A block, dest_ty_src, "expected float type, found '{}'", - .{dest_ty.fmt(target)}, + .{dest_ty.fmt(sema.mod)}, ), }; @@ -7099,7 +7119,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A block, operand_src, "expected float type, found '{}'", - .{operand_ty.fmt(target)}, + .{operand_ty.fmt(sema.mod)}, ), } @@ -7241,7 +7261,6 @@ fn zirSwitchCapture( const operand_ptr = sema.resolveInst(cond_info.operand); const operand_ptr_ty = sema.typeOf(operand_ptr); const operand_ty = if (operand_is_ref) operand_ptr_ty.childType() else operand_ptr_ty; - const target = sema.mod.getTarget(); const operand = if (operand_is_ref) try sema.analyzeLoad(block, operand_src, operand_ptr, operand_src) @@ -7277,7 +7296,7 @@ fn zirSwitchCapture( // Previous switch validation ensured this will succeed const first_item_val = sema.resolveConstValue(block, .unneeded, first_item) catch unreachable; - const first_field_index = @intCast(u32, enum_ty.enumTagFieldIndex(first_item_val, target).?); + const first_field_index = @intCast(u32, enum_ty.enumTagFieldIndex(first_item_val, sema.mod).?); const first_field = union_obj.fields.values()[first_field_index]; for (items[1..]) |item| { @@ -7285,16 +7304,16 @@ fn zirSwitchCapture( // Previous switch validation ensured this will succeed const item_val = sema.resolveConstValue(block, .unneeded, item_ref) catch unreachable; - const field_index = enum_ty.enumTagFieldIndex(item_val, target).?; + const field_index = enum_ty.enumTagFieldIndex(item_val, sema.mod).?; const field = union_obj.fields.values()[field_index]; - if (!field.ty.eql(first_field.ty, target)) { + if (!field.ty.eql(first_field.ty, sema.mod)) { const first_item_src = switch_src; // TODO better source location const item_src = switch_src; const msg = msg: { const msg = try sema.errMsg(block, switch_src, "capture group with incompatible types", .{}); errdefer msg.destroy(sema.gpa); - try sema.errNote(block, first_item_src, msg, "type '{}' here", .{first_field.ty.fmt(target)}); - try sema.errNote(block, item_src, msg, "type '{}' here", .{field.ty.fmt(target)}); + try sema.errNote(block, first_item_src, msg, "type '{}' here", 
.{first_field.ty.fmt(sema.mod)}); + try sema.errNote(block, item_src, msg, "type '{}' here", .{field.ty.fmt(sema.mod)}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); @@ -7304,7 +7323,7 @@ fn zirSwitchCapture( if (is_ref) { assert(operand_is_ref); - const field_ty_ptr = try Type.ptr(sema.arena, target, .{ + const field_ty_ptr = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = first_field.ty, .@"addrspace" = .generic, .mutable = operand_ptr_ty.ptrIsMutable(), @@ -7388,7 +7407,6 @@ fn zirSwitchCond( else operand_ptr; const operand_ty = sema.typeOf(operand); - const target = sema.mod.getTarget(); switch (operand_ty.zigTypeTag()) { .Type, @@ -7436,7 +7454,7 @@ fn zirSwitchCond( .Vector, .Frame, .AnyFrame, - => return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(target)}), + => return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(sema.mod)}), } } @@ -7588,10 +7606,10 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError ); } try sema.mod.errNoteNonLazy( - operand_ty.declSrcLoc(), + operand_ty.declSrcLoc(sema.mod), msg, "enum '{}' declared here", - .{operand_ty.fmt(target)}, + .{operand_ty.fmt(sema.mod)}, ); break :msg msg; }; @@ -7705,10 +7723,10 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (maybe_msg) |msg| { try sema.mod.errNoteNonLazy( - operand_ty.declSrcLoc(), + operand_ty.declSrcLoc(sema.mod), msg, "error set '{}' declared here", - .{operand_ty.fmt(target)}, + .{operand_ty.fmt(sema.mod)}, ); return sema.failWithOwnedErrorMsg(block, msg); } @@ -7738,7 +7756,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError }, .Union => return sema.fail(block, src, "TODO validate switch .Union", .{}), .Int, .ComptimeInt => { - var range_set = RangeSet.init(gpa, target); + var range_set = RangeSet.init(gpa, sema.mod); defer range_set.deinit(); var extra_index: usize = special.end; @@ -7914,13 +7932,13 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError block, src, "else prong required when switching on type '{}'", - .{operand_ty.fmt(target)}, + .{operand_ty.fmt(sema.mod)}, ); } var seen_values = ValueSrcMap.initContext(gpa, .{ .ty = operand_ty, - .target = target, + .mod = sema.mod, }); defer seen_values.deinit(); @@ -7985,7 +8003,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError .ComptimeFloat, .Float, => return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{ - operand_ty.fmt(target), + operand_ty.fmt(sema.mod), }), } @@ -8035,7 +8053,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const item = sema.resolveInst(item_ref); // Validation above ensured these will succeed. const item_val = sema.resolveConstValue(&child_block, .unneeded, item) catch unreachable; - if (operand_val.eql(item_val, operand_ty, target)) { + if (operand_val.eql(item_val, operand_ty, sema.mod)) { return sema.resolveBlockBody(block, src, &child_block, body, inst, merges); } } @@ -8057,7 +8075,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const item = sema.resolveInst(item_ref); // Validation above ensured these will succeed. 
const item_val = sema.resolveConstValue(&child_block, .unneeded, item) catch unreachable; - if (operand_val.eql(item_val, operand_ty, target)) { + if (operand_val.eql(item_val, operand_ty, sema.mod)) { return sema.resolveBlockBody(block, src, &child_block, body, inst, merges); } } @@ -8072,8 +8090,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError // Validation above ensured these will succeed. const first_tv = sema.resolveInstConst(&child_block, .unneeded, item_first) catch unreachable; const last_tv = sema.resolveInstConst(&child_block, .unneeded, item_last) catch unreachable; - if (Value.compare(operand_val, .gte, first_tv.val, operand_ty, target) and - Value.compare(operand_val, .lte, last_tv.val, operand_ty, target)) + if (Value.compare(operand_val, .gte, first_tv.val, operand_ty, sema.mod) and + Value.compare(operand_val, .lte, last_tv.val, operand_ty, sema.mod)) { return sema.resolveBlockBody(block, src, &child_block, body, inst, merges); } @@ -8385,7 +8403,7 @@ fn resolveSwitchItemVal( return TypedValue{ .ty = item_ty, .val = val }; } else |err| switch (err) { error.NeededSourceLocation => { - const src = switch_prong_src.resolve(sema.gpa, block.src_decl, switch_node_offset, range_expand); + const src = switch_prong_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_node_offset, range_expand); return TypedValue{ .ty = item_ty, .val = try sema.resolveConstValue(block, src, item), @@ -8434,19 +8452,18 @@ fn validateSwitchItemEnum( switch_prong_src: Module.SwitchProngSrc, ) CompileError!void { const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); - const target = sema.mod.getTarget(); - const field_index = item_tv.ty.enumTagFieldIndex(item_tv.val, target) orelse { + const field_index = item_tv.ty.enumTagFieldIndex(item_tv.val, sema.mod) orelse { const msg = msg: { - const src = switch_prong_src.resolve(sema.gpa, block.src_decl, src_node_offset, .none); + const src = switch_prong_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), src_node_offset, .none); const msg = try sema.errMsg( block, src, "enum '{}' has no tag with value '{}'", - .{ item_tv.ty.fmt(target), item_tv.val.fmtValue(item_tv.ty, target) }, + .{ item_tv.ty.fmt(sema.mod), item_tv.val.fmtValue(item_tv.ty, sema.mod) }, ); errdefer msg.destroy(sema.gpa); try sema.mod.errNoteNonLazy( - item_tv.ty.declSrcLoc(), + item_tv.ty.declSrcLoc(sema.mod), msg, "enum declared here", .{}, @@ -8487,8 +8504,9 @@ fn validateSwitchDupe( ) CompileError!void { const prev_prong_src = maybe_prev_src orelse return; const gpa = sema.gpa; - const src = switch_prong_src.resolve(gpa, block.src_decl, src_node_offset, .none); - const prev_src = prev_prong_src.resolve(gpa, block.src_decl, src_node_offset, .none); + const block_src_decl = sema.mod.declPtr(block.src_decl); + const src = switch_prong_src.resolve(gpa, block_src_decl, src_node_offset, .none); + const prev_src = prev_prong_src.resolve(gpa, block_src_decl, src_node_offset, .none); const msg = msg: { const msg = try sema.errMsg( block, @@ -8525,7 +8543,8 @@ fn validateSwitchItemBool( false_count.* += 1; } if (true_count.* + false_count.* > 2) { - const src = switch_prong_src.resolve(sema.gpa, block.src_decl, src_node_offset, .none); + const block_src_decl = sema.mod.declPtr(block.src_decl); + const src = switch_prong_src.resolve(sema.gpa, block_src_decl, src_node_offset, .none); return sema.fail(block, src, "duplicate switch value", .{}); } } @@ -8558,13 +8577,12 @@ fn validateSwitchNoRange( 
const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset }; const range_src: LazySrcLoc = .{ .node_offset_switch_range = src_node_offset }; - const target = sema.mod.getTarget(); const msg = msg: { const msg = try sema.errMsg( block, operand_src, "ranges not allowed when switching on type '{}'", - .{operand_ty.fmt(target)}, + .{operand_ty.fmt(sema.mod)}, ); errdefer msg.destroy(sema.gpa); try sema.errNote( @@ -8587,7 +8605,6 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const unresolved_ty = try sema.resolveType(block, ty_src, extra.lhs); const field_name = try sema.resolveConstString(block, name_src, extra.rhs); const ty = try sema.resolveTypeFields(block, ty_src, unresolved_ty); - const target = sema.mod.getTarget(); const has_field = hf: { if (ty.isSlice()) { @@ -8610,7 +8627,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .Enum => ty.enumFields().contains(field_name), .Array => mem.eql(u8, field_name, "len"), else => return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{ - ty.fmt(target), + ty.fmt(sema.mod), }), }; }; @@ -8633,7 +8650,8 @@ fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air try checkNamespaceType(sema, block, lhs_src, container_type); const namespace = container_type.getNamespace() orelse return Air.Inst.Ref.bool_false; - if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl| { + if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl_index| { + const decl = sema.mod.declPtr(decl_index); if (decl.is_pub or decl.getFileScope() == block.getFileScope()) { return Air.Inst.Ref.bool_true; } @@ -8661,8 +8679,9 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. }, }; try mod.semaFile(result.file); - const file_root_decl = result.file.root_decl.?; - try mod.declareDeclDependency(sema.owner_decl, file_root_decl); + const file_root_decl_index = result.file.root_decl.unwrap().?; + const file_root_decl = mod.declPtr(file_root_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, file_root_decl_index); return sema.addConstant(file_root_decl.ty, file_root_decl.val); } @@ -8763,7 +8782,7 @@ fn zirShl( } const int_info = scalar_ty.intInfo(target); const truncated = try shifted.intTrunc(lhs_ty, sema.arena, int_info.signedness, int_info.bits, target); - if (truncated.compare(.eq, shifted, lhs_ty, target)) { + if (truncated.compare(.eq, shifted, lhs_ty, sema.mod)) { break :val shifted; } return sema.addConstUndef(lhs_ty); @@ -8927,7 +8946,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (scalar_type.zigTypeTag() != .Int) { return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{ - operand_type.fmt(target), + operand_type.fmt(sema.mod), }); } @@ -8939,7 +8958,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
var elem_val_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems) |*elem, i| { - const elem_val = val.elemValueBuffer(i, &elem_val_buf); + const elem_val = val.elemValueBuffer(sema.mod, i, &elem_val_buf); elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, target); } return sema.addConstant( @@ -9047,14 +9066,13 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - const target = sema.mod.getTarget(); const lhs_info = (try sema.getArrayCatInfo(block, lhs_src, lhs)) orelse - return sema.fail(block, lhs_src, "expected array, found '{}'", .{lhs_ty.fmt(target)}); + return sema.fail(block, lhs_src, "expected array, found '{}'", .{lhs_ty.fmt(sema.mod)}); const rhs_info = (try sema.getArrayCatInfo(block, rhs_src, rhs)) orelse - return sema.fail(block, rhs_src, "expected array, found '{}'", .{rhs_ty.fmt(target)}); - if (!lhs_info.elem_type.eql(rhs_info.elem_type, target)) { + return sema.fail(block, rhs_src, "expected array, found '{}'", .{rhs_ty.fmt(sema.mod)}); + if (!lhs_info.elem_type.eql(rhs_info.elem_type, sema.mod)) { return sema.fail(block, rhs_src, "expected array of type '{}', found '{}'", .{ - lhs_info.elem_type.fmt(target), rhs_ty.fmt(target), + lhs_info.elem_type.fmt(sema.mod), rhs_ty.fmt(sema.mod), }); } @@ -9062,7 +9080,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // will catch this if it is a problem. var res_sent: ?Value = null; if (rhs_info.sentinel != null and lhs_info.sentinel != null) { - if (rhs_info.sentinel.?.eql(lhs_info.sentinel.?, lhs_info.elem_type, target)) { + if (rhs_info.sentinel.?.eql(lhs_info.sentinel.?, lhs_info.elem_type, sema.mod)) { res_sent = lhs_info.sentinel.?; } } @@ -9084,14 +9102,14 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai { var i: usize = 0; while (i < lhs_len) : (i += 1) { - const val = try lhs_sub_val.elemValue(sema.arena, i); + const val = try lhs_sub_val.elemValue(sema.mod, sema.arena, i); buf[i] = try val.copy(anon_decl.arena()); } } { var i: usize = 0; while (i < rhs_len) : (i += 1) { - const val = try rhs_sub_val.elemValue(sema.arena, i); + const val = try rhs_sub_val.elemValue(sema.mod, sema.arena, i); buf[lhs_len + i] = try val.copy(anon_decl.arena()); } } @@ -9123,7 +9141,6 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, inst: Air.Inst.Ref) !?Type.ArrayInfo { const t = sema.typeOf(inst); - const target = sema.mod.getTarget(); return switch (t.zigTypeTag()) { .Array => t.arrayInfo(), .Pointer => blk: { @@ -9133,7 +9150,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, inst: Air.Inst.R return Type.ArrayInfo{ .elem_type = t.childType(), .sentinel = t.sentinel(), - .len = val.sliceLen(target), + .len = val.sliceLen(sema.mod), }; } if (ptrinfo.pointee_type.zigTypeTag() != .Array) return null; @@ -9229,10 +9246,9 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_ty.isTuple()) { return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor); } - const target = sema.mod.getTarget(); const mulinfo = (try sema.getArrayCatInfo(block, lhs_src, lhs)) orelse - return sema.fail(block, lhs_src, "expected array, found '{}'", .{lhs_ty.fmt(target)}); + return sema.fail(block, 
lhs_src, "expected array, found '{}'", .{lhs_ty.fmt(sema.mod)}); const final_len_u64 = std.math.mul(u64, mulinfo.len, factor) catch return sema.fail(block, rhs_src, "operation results in overflow", .{}); @@ -9264,7 +9280,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Optimization for the common pattern of a single element repeated N times, such // as zero-filling a byte array. const val = if (lhs_len == 1) blk: { - const elem_val = try lhs_sub_val.elemValue(sema.arena, 0); + const elem_val = try lhs_sub_val.elemValue(sema.mod, sema.arena, 0); const copied_val = try elem_val.copy(anon_decl.arena()); break :blk try Value.Tag.repeated.create(anon_decl.arena(), copied_val); } else blk: { @@ -9273,7 +9289,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai while (i < factor) : (i += 1) { var j: usize = 0; while (j < lhs_len) : (j += 1) { - const val = try lhs_sub_val.elemValue(sema.arena, j); + const val = try lhs_sub_val.elemValue(sema.mod, sema.arena, j); buf[lhs_len * i + j] = try val.copy(anon_decl.arena()); } } @@ -9310,9 +9326,8 @@ fn zirNegate( const rhs_ty = sema.typeOf(rhs); const rhs_scalar_ty = rhs_ty.scalarType(); - const target = sema.mod.getTarget(); if (tag_override == .sub and rhs_scalar_ty.isUnsignedInt()) { - return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(target)}); + return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)}); } const lhs = if (rhs_ty.zigTypeTag() == .Vector) @@ -9364,12 +9379,13 @@ fn zirOverflowArithmetic( const ptr = sema.resolveInst(extra.ptr); const lhs_ty = sema.typeOf(lhs); - const target = sema.mod.getTarget(); + const mod = sema.mod; + const target = mod.getTarget(); // Note, the types of lhs/rhs (also for shifting)/ptr are already correct as ensured by astgen. 
const dest_ty = lhs_ty; if (dest_ty.zigTypeTag() != .Int) { - return sema.fail(block, src, "expected integer type, found '{}'", .{dest_ty.fmt(target)}); + return sema.fail(block, src, "expected integer type, found '{}'", .{dest_ty.fmt(mod)}); } const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs); @@ -9445,7 +9461,7 @@ fn zirOverflowArithmetic( if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { break :result .{ .overflowed = .no, .wrapped = lhs }; - } else if (lhs_val.compare(.eq, Value.one, dest_ty, target)) { + } else if (lhs_val.compare(.eq, Value.one, dest_ty, mod)) { break :result .{ .overflowed = .no, .wrapped = rhs }; } } @@ -9455,7 +9471,7 @@ fn zirOverflowArithmetic( if (!rhs_val.isUndef()) { if (rhs_val.compareWithZero(.eq)) { break :result .{ .overflowed = .no, .wrapped = rhs }; - } else if (rhs_val.compare(.eq, Value.one, dest_ty, target)) { + } else if (rhs_val.compare(.eq, Value.one, dest_ty, mod)) { break :result .{ .overflowed = .no, .wrapped = lhs }; } } @@ -9596,7 +9612,8 @@ fn analyzeArithmetic( }); } - const target = sema.mod.getTarget(); + const mod = sema.mod; + const target = mod.getTarget(); const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs); const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: { @@ -9834,7 +9851,7 @@ fn analyzeArithmetic( if (lhs_val.isUndef()) { if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) { if (maybe_rhs_val) |rhs_val| { - if (rhs_val.compare(.neq, Value.negative_one, resolved_type, target)) { + if (rhs_val.compare(.neq, Value.negative_one, resolved_type, mod)) { return sema.addConstUndef(resolved_type); } } @@ -9909,7 +9926,7 @@ fn analyzeArithmetic( if (lhs_val.isUndef()) { if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) { if (maybe_rhs_val) |rhs_val| { - if (rhs_val.compare(.neq, Value.negative_one, resolved_type, target)) { + if (rhs_val.compare(.neq, Value.negative_one, resolved_type, mod)) { return sema.addConstUndef(resolved_type); } } @@ -9972,7 +9989,7 @@ fn analyzeArithmetic( if (lhs_val.isUndef()) { if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) { if (maybe_rhs_val) |rhs_val| { - if (rhs_val.compare(.neq, Value.negative_one, resolved_type, target)) { + if (rhs_val.compare(.neq, Value.negative_one, resolved_type, mod)) { return sema.addConstUndef(resolved_type); } } @@ -10062,7 +10079,7 @@ fn analyzeArithmetic( if (lhs_val.compareWithZero(.eq)) { return sema.addConstant(resolved_type, Value.zero); } - if (lhs_val.compare(.eq, Value.one, resolved_type, target)) { + if (lhs_val.compare(.eq, Value.one, resolved_type, mod)) { return casted_rhs; } } @@ -10078,7 +10095,7 @@ fn analyzeArithmetic( if (rhs_val.compareWithZero(.eq)) { return sema.addConstant(resolved_type, Value.zero); } - if (rhs_val.compare(.eq, Value.one, resolved_type, target)) { + if (rhs_val.compare(.eq, Value.one, resolved_type, mod)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -10113,7 +10130,7 @@ fn analyzeArithmetic( if (lhs_val.compareWithZero(.eq)) { return sema.addConstant(resolved_type, Value.zero); } - if (lhs_val.compare(.eq, Value.one, resolved_type, target)) { + if (lhs_val.compare(.eq, Value.one, resolved_type, mod)) { return casted_rhs; } } @@ -10125,7 +10142,7 @@ fn analyzeArithmetic( if (rhs_val.compareWithZero(.eq)) { return sema.addConstant(resolved_type, Value.zero); } - if (rhs_val.compare(.eq, Value.one, resolved_type, target)) { + if 
(rhs_val.compare(.eq, Value.one, resolved_type, mod)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -10149,7 +10166,7 @@ fn analyzeArithmetic( if (lhs_val.compareWithZero(.eq)) { return sema.addConstant(resolved_type, Value.zero); } - if (lhs_val.compare(.eq, Value.one, resolved_type, target)) { + if (lhs_val.compare(.eq, Value.one, resolved_type, mod)) { return casted_rhs; } } @@ -10161,7 +10178,7 @@ fn analyzeArithmetic( if (rhs_val.compareWithZero(.eq)) { return sema.addConstant(resolved_type, Value.zero); } - if (rhs_val.compare(.eq, Value.one, resolved_type, target)) { + if (rhs_val.compare(.eq, Value.one, resolved_type, mod)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -10431,7 +10448,7 @@ fn analyzePtrArithmetic( if (air_tag == .ptr_sub) { return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{}); } - const new_ptr_val = try ptr_val.elemPtr(ptr_ty, sema.arena, offset_int, target); + const new_ptr_val = try ptr_val.elemPtr(ptr_ty, sema.arena, offset_int, sema.mod); return sema.addConstant(new_ptr_ty, new_ptr_val); } else break :rs offset_src; } else break :rs ptr_src; @@ -10605,7 +10622,6 @@ fn zirCmpEq( const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); - const target = sema.mod.getTarget(); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); @@ -10630,7 +10646,7 @@ fn zirCmpEq( if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) { const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty; - return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(target)}); + return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(sema.mod)}); } if (lhs_ty_tag == .Union and (rhs_ty_tag == .EnumLiteral or rhs_ty_tag == .Enum)) { @@ -10670,7 +10686,7 @@ fn zirCmpEq( if (lhs_ty_tag == .Type and rhs_ty_tag == .Type) { const lhs_as_type = try sema.analyzeAsType(block, lhs_src, lhs); const rhs_as_type = try sema.analyzeAsType(block, rhs_src, rhs); - if (lhs_as_type.eql(rhs_as_type, target) == (op == .eq)) { + if (lhs_as_type.eql(rhs_as_type, sema.mod) == (op == .eq)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; @@ -10747,10 +10763,9 @@ fn analyzeCmp( } const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]LazySrcLoc{ lhs_src, rhs_src } }); - const target = sema.mod.getTarget(); if (!resolved_type.isSelfComparable(is_equality_cmp)) { return sema.fail(block, src, "{s} operator not allowed for type '{}'", .{ - @tagName(op), resolved_type.fmt(target), + @tagName(op), resolved_type.fmt(sema.mod), }); } const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); @@ -10768,7 +10783,6 @@ fn cmpSelf( rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const resolved_type = sema.typeOf(casted_lhs); - const target = sema.mod.getTarget(); const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| { if (lhs_val.isUndef()) return sema.addConstUndef(Type.bool); @@ -10777,11 +10791,11 @@ fn cmpSelf( if (resolved_type.zigTypeTag() == .Vector) { const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.@"bool"); - const cmp_val = try lhs_val.compareVector(op, rhs_val, resolved_type, sema.arena, target); + const cmp_val = try lhs_val.compareVector(op, rhs_val, resolved_type, sema.arena, sema.mod); 
return sema.addConstant(result_ty, cmp_val); } - if (lhs_val.compare(op, rhs_val, resolved_type, target)) { + if (lhs_val.compare(op, rhs_val, resolved_type, sema.mod)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; @@ -10849,7 +10863,7 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .Null, .BoundFn, .Opaque, - => return sema.fail(block, src, "no size available for type '{}'", .{operand_ty.fmt(target)}), + => return sema.fail(block, src, "no size available for type '{}'", .{operand_ty.fmt(sema.mod)}), .Type, .EnumLiteral, @@ -10892,9 +10906,9 @@ fn zirThis( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - const this_decl = block.namespace.getDecl(); + const this_decl_index = block.namespace.getDeclIndex(); const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; - return sema.analyzeDeclVal(block, src, this_decl); + return sema.analyzeDeclVal(block, src, this_decl_index); } fn zirClosureCapture( @@ -10927,7 +10941,7 @@ fn zirClosureGet( ) CompileError!Air.Inst.Ref { // TODO CLOSURE: Test this with inline functions const inst_data = sema.code.instructions.items(.data)[inst].inst_node; - var scope: *CaptureScope = block.src_decl.src_scope.?; + var scope: *CaptureScope = sema.mod.declPtr(block.src_decl).src_scope.?; // Note: The target closure must be in this scope list. // If it's not here, the zir is invalid, or the list is broken. const tv = while (true) { @@ -10973,11 +10987,12 @@ fn zirBuiltinSrc( const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; const extra = sema.code.extraData(Zir.Inst.LineColumn, extended.operand).data; const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{}); + const fn_owner_decl = sema.mod.declPtr(func.owner_decl); const func_name_val = blk: { var anon_decl = try block.startAnonDecl(src); defer anon_decl.deinit(); - const name = std.mem.span(func.owner_decl.name); + const name = std.mem.span(fn_owner_decl.name); const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]); const new_decl = try anon_decl.finish( try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len - 1), @@ -10990,7 +11005,7 @@ fn zirBuiltinSrc( const file_name_val = blk: { var anon_decl = try block.startAnonDecl(src); defer anon_decl.deinit(); - const name = try func.owner_decl.getFileScope().fullPathZ(anon_decl.arena()); + const name = try fn_owner_decl.getFileScope().fullPathZ(anon_decl.arena()); const new_decl = try anon_decl.finish( try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), name.len), try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. 
name.len + 1]),
@@ -11118,24 +11133,26 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             }
 
             const args_val = v: {
-                const fn_info_decl = (try sema.namespaceLookup(
+                const fn_info_decl_index = (try sema.namespaceLookup(
                     block,
                     src,
                     type_info_ty.getNamespace().?,
                     "Fn",
                 )).?;
-                try sema.mod.declareDeclDependency(sema.owner_decl, fn_info_decl);
-                try sema.ensureDeclAnalyzed(fn_info_decl);
+                try sema.mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index);
+                try sema.ensureDeclAnalyzed(fn_info_decl_index);
+                const fn_info_decl = sema.mod.declPtr(fn_info_decl_index);
                 var fn_ty_buffer: Value.ToTypeBuffer = undefined;
                 const fn_ty = fn_info_decl.val.toType(&fn_ty_buffer);
-                const param_info_decl = (try sema.namespaceLookup(
+                const param_info_decl_index = (try sema.namespaceLookup(
                     block,
                     src,
                     fn_ty.getNamespace().?,
                     "Param",
                 )).?;
-                try sema.mod.declareDeclDependency(sema.owner_decl, param_info_decl);
-                try sema.ensureDeclAnalyzed(param_info_decl);
+                try sema.mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index);
+                try sema.ensureDeclAnalyzed(param_info_decl_index);
+                const param_info_decl = sema.mod.declPtr(param_info_decl_index);
                 var param_buffer: Value.ToTypeBuffer = undefined;
                 const param_ty = param_info_decl.val.toType(&param_buffer);
                 const new_decl = try params_anon_decl.finish(
@@ -11307,14 +11324,15 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
         // Get the Error type
         const error_field_ty = t: {
-            const set_field_ty_decl = (try sema.namespaceLookup(
+            const set_field_ty_decl_index = (try sema.namespaceLookup(
                 block,
                 src,
                 type_info_ty.getNamespace().?,
                 "Error",
             )).?;
-            try sema.mod.declareDeclDependency(sema.owner_decl, set_field_ty_decl);
-            try sema.ensureDeclAnalyzed(set_field_ty_decl);
+            try sema.mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index);
+            try sema.ensureDeclAnalyzed(set_field_ty_decl_index);
+            const set_field_ty_decl = sema.mod.declPtr(set_field_ty_decl_index);
             var buffer: Value.ToTypeBuffer = undefined;
             break :t try set_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena());
         };
@@ -11416,14 +11434,15 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         defer fields_anon_decl.deinit();
 
         const enum_field_ty = t: {
-            const enum_field_ty_decl = (try sema.namespaceLookup(
+            const enum_field_ty_decl_index = (try sema.namespaceLookup(
                 block,
                 src,
                 type_info_ty.getNamespace().?,
                 "EnumField",
            )).?;
-            try sema.mod.declareDeclDependency(sema.owner_decl, enum_field_ty_decl);
-            try sema.ensureDeclAnalyzed(enum_field_ty_decl);
+            try sema.mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index);
+            try sema.ensureDeclAnalyzed(enum_field_ty_decl_index);
+            const enum_field_ty_decl = sema.mod.declPtr(enum_field_ty_decl_index);
             var buffer: Value.ToTypeBuffer = undefined;
             break :t try enum_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena());
         };
@@ -11514,14 +11533,15 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         defer fields_anon_decl.deinit();
 
         const union_field_ty = t: {
-            const union_field_ty_decl = (try sema.namespaceLookup(
+            const union_field_ty_decl_index = (try sema.namespaceLookup(
                 block,
                 src,
                 type_info_ty.getNamespace().?,
                 "UnionField",
            )).?;
-            try sema.mod.declareDeclDependency(sema.owner_decl, union_field_ty_decl);
-            try sema.ensureDeclAnalyzed(union_field_ty_decl);
+            try sema.mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index);
+            try 
sema.ensureDeclAnalyzed(union_field_ty_decl_index); + const union_field_ty_decl = sema.mod.declPtr(union_field_ty_decl_index); var buffer: Value.ToTypeBuffer = undefined; break :t try union_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); }; @@ -11621,14 +11641,15 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer fields_anon_decl.deinit(); const struct_field_ty = t: { - const struct_field_ty_decl = (try sema.namespaceLookup( + const struct_field_ty_decl_index = (try sema.namespaceLookup( block, src, type_info_ty.getNamespace().?, "StructField", )).?; - try sema.mod.declareDeclDependency(sema.owner_decl, struct_field_ty_decl); - try sema.ensureDeclAnalyzed(struct_field_ty_decl); + try sema.mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index); + try sema.ensureDeclAnalyzed(struct_field_ty_decl_index); + const struct_field_ty_decl = sema.mod.declPtr(struct_field_ty_decl_index); var buffer: Value.ToTypeBuffer = undefined; break :t try struct_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); }; @@ -11811,14 +11832,15 @@ fn typeInfoDecls( defer decls_anon_decl.deinit(); const declaration_ty = t: { - const declaration_ty_decl = (try sema.namespaceLookup( + const declaration_ty_decl_index = (try sema.namespaceLookup( block, src, type_info_ty.getNamespace().?, "Declaration", )).?; - try sema.mod.declareDeclDependency(sema.owner_decl, declaration_ty_decl); - try sema.ensureDeclAnalyzed(declaration_ty_decl); + try sema.mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index); + try sema.ensureDeclAnalyzed(declaration_ty_decl_index); + const declaration_ty_decl = sema.mod.declPtr(declaration_ty_decl_index); var buffer: Value.ToTypeBuffer = undefined; break :t try declaration_ty_decl.val.toType(&buffer).copy(decls_anon_decl.arena()); }; @@ -11827,7 +11849,8 @@ fn typeInfoDecls( const decls_len = if (opt_namespace) |ns| ns.decls.count() else 0; const decls_vals = try decls_anon_decl.arena().alloc(Value, decls_len); for (decls_vals) |*decls_val, i| { - const decl = opt_namespace.?.decls.keys()[i]; + const decl_index = opt_namespace.?.decls.keys()[i]; + const decl = sema.mod.declPtr(decl_index); const name_val = v: { var anon_decl = try block.startAnonDecl(src); defer anon_decl.deinit(); @@ -11947,12 +11970,11 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi }, else => {}, } - const target = sema.mod.getTarget(); return sema.fail( block, src, "bit shifting operation expected integer type, found '{}'", - .{operand.fmt(target)}, + .{operand.fmt(sema.mod)}, ); } @@ -12426,8 +12448,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const inst_data = sema.code.instructions.items(.data)[inst].ptr_type_simple; const elem_type = try sema.resolveType(block, .unneeded, inst_data.elem_type); - const target = sema.mod.getTarget(); - const ty = try Type.ptr(sema.arena, target, .{ + const ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = elem_type, .@"addrspace" = .generic, .mutable = inst_data.is_mutable, @@ -12466,7 +12487,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air // Check if this happens to be the lazy alignment of our element type, in // which case we can make this 0 without resolving it. 
             if (val.castTag(.lazy_align)) |payload| {
-                if (payload.data.eql(unresolved_elem_ty, target)) {
+                if (payload.data.eql(unresolved_elem_ty, sema.mod)) {
                     break :blk 0;
                 }
             }
@@ -12505,7 +12526,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         try sema.resolveTypeLayout(block, elem_ty_src, elem_ty);
         break :t elem_ty;
     };
-    const ty = try Type.ptr(sema.arena, target, .{
+    const ty = try Type.ptr(sema.arena, sema.mod, .{
         .pointee_type = elem_ty,
         .sentinel = sentinel,
         .@"align" = abi_align,
@@ -12754,10 +12775,10 @@ fn finishStructInit(
     const gpa = sema.gpa;
 
     if (root_msg) |msg| {
-        const fqn = try struct_obj.getFullyQualifiedName(gpa);
+        const fqn = try struct_obj.getFullyQualifiedName(sema.mod);
         defer gpa.free(fqn);
         try sema.mod.errNoteNonLazy(
-            struct_obj.srcLoc(),
+            struct_obj.srcLoc(sema.mod),
             msg,
             "struct '{s}' declared here",
             .{fqn},
@@ -12782,7 +12803,7 @@ fn finishStructInit(
     if (is_ref) {
         const target = sema.mod.getTarget();
-        const alloc_ty = try Type.ptr(sema.arena, target, .{
+        const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
             .pointee_type = struct_ty,
             .@"addrspace" = target_util.defaultAddressSpace(target, .local),
         });
@@ -12851,7 +12872,7 @@ fn zirStructInitAnon(
     if (is_ref) {
         const target = sema.mod.getTarget();
-        const alloc_ty = try Type.ptr(sema.arena, target, .{
+        const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
             .pointee_type = tuple_ty,
             .@"addrspace" = target_util.defaultAddressSpace(target, .local),
         });
@@ -12862,7 +12883,7 @@ fn zirStructInitAnon(
             const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
             extra_index = item.end;
-            const field_ptr_ty = try Type.ptr(sema.arena, target, .{
+            const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
                 .mutable = true,
                 .@"addrspace" = target_util.defaultAddressSpace(target, .local),
                 .pointee_type = field_ty,
@@ -12949,13 +12970,13 @@ fn zirArrayInit(
     if (is_ref) {
         const target = sema.mod.getTarget();
-        const alloc_ty = try Type.ptr(sema.arena, target, .{
+        const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
             .pointee_type = array_ty,
             .@"addrspace" = target_util.defaultAddressSpace(target, .local),
         });
         const alloc = try block.addTy(.alloc, alloc_ty);
-        const elem_ptr_ty = try Type.ptr(sema.arena, target, .{
+        const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
             .mutable = true,
             .@"addrspace" = target_util.defaultAddressSpace(target, .local),
             .pointee_type = elem_ty,
@@ -13017,14 +13038,14 @@ fn zirArrayInitAnon(
     if (is_ref) {
         const target = sema.mod.getTarget();
-        const alloc_ty = try Type.ptr(sema.arena, target, .{
+        const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
             .pointee_type = tuple_ty,
             .@"addrspace" = target_util.defaultAddressSpace(target, .local),
         });
         const alloc = try block.addTy(.alloc, alloc_ty);
         for (operands) |operand, i_usize| {
             const i = @intCast(u32, i_usize);
-            const field_ptr_ty = try Type.ptr(sema.arena, target, .{
+            const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
                 .mutable = true,
                 .@"addrspace" = target_util.defaultAddressSpace(target, .local),
                 .pointee_type = types[i],
@@ -13096,7 +13117,6 @@ fn fieldType(
     ty_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
     const resolved_ty = try sema.resolveTypeFields(block, ty_src, aggregate_ty);
-    const target = sema.mod.getTarget();
     var cur_ty = resolved_ty;
     while (true) {
         switch (cur_ty.zigTypeTag()) {
@@ -13127,7 +13147,7 @@ fn fieldType(
             else => {},
         }
         return sema.fail(block, ty_src, "expected struct or union; found '{}'", .{
-            resolved_ty.fmt(target),
+            resolved_ty.fmt(sema.mod),
         });
    }
}
@@ -13216,10 +13236,10 @@ fn zirUnaryMath(
             const scalar_ty = operand_ty.scalarType();
             switch (scalar_ty.zigTypeTag()) {
                 .ComptimeFloat, .Float => {},
-                else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{scalar_ty.fmt(target)}),
+                else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{scalar_ty.fmt(sema.mod)}),
             }
         },
-        else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{operand_ty.fmt(target)}),
+        else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{operand_ty.fmt(sema.mod)}),
     }
 
     switch (operand_ty.zigTypeTag()) {
@@ -13234,7 +13254,7 @@ fn zirUnaryMath(
             var elem_buf: Value.ElemValueBuffer = undefined;
             const elems = try sema.arena.alloc(Value, vec_len);
             for (elems) |*elem, i| {
-                const elem_val = val.elemValueBuffer(i, &elem_buf);
+                const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
                 elem.* = try eval(elem_val, scalar_ty, sema.arena, target);
             }
             return sema.addConstant(
@@ -13267,7 +13287,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     const src = inst_data.src();
     const operand = sema.resolveInst(inst_data.operand);
     const operand_ty = sema.typeOf(operand);
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;
     try sema.resolveTypeLayout(block, operand_src, operand_ty);
     const enum_ty = switch (operand_ty.zigTypeTag()) {
@@ -13278,31 +13298,33 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         },
         .Enum => operand_ty,
         .Union => operand_ty.unionTagType() orelse {
-            const decl = operand_ty.getOwnerDecl();
+            const decl_index = operand_ty.getOwnerDecl();
+            const decl = mod.declPtr(decl_index);
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "union '{s}' is untagged", .{
                     decl.name,
                 });
                 errdefer msg.destroy(sema.gpa);
-                try sema.mod.errNoteNonLazy(decl.srcLoc(), msg, "declared here", .{});
+                try mod.errNoteNonLazy(decl.srcLoc(), msg, "declared here", .{});
                 break :msg msg;
             };
             return sema.failWithOwnedErrorMsg(block, msg);
         },
         else => return sema.fail(block, operand_src, "expected enum or union; found {}", .{
-            operand_ty.fmt(target),
+            operand_ty.fmt(mod),
         }),
     };
-    const enum_decl = enum_ty.getOwnerDecl();
+    const enum_decl_index = enum_ty.getOwnerDecl();
     const casted_operand = try sema.coerce(block, enum_ty, operand, operand_src);
     if (try sema.resolveDefinedValue(block, operand_src, casted_operand)) |val| {
-        const field_index = enum_ty.enumTagFieldIndex(val, target) orelse {
+        const field_index = enum_ty.enumTagFieldIndex(val, mod) orelse {
+            const enum_decl = mod.declPtr(enum_decl_index);
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "no field with value {} in enum '{s}'", .{
                     casted_operand, enum_decl.name,
                 });
                 errdefer msg.destroy(sema.gpa);
-                try sema.mod.errNoteNonLazy(enum_decl.srcLoc(), msg, "declared here", .{});
+                try mod.errNoteNonLazy(enum_decl.srcLoc(), msg, "declared here", .{});
                 break :msg msg;
             };
             return sema.failWithOwnedErrorMsg(block, msg);
@@ -13317,6 +13339,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
 }
 
 fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const type_info_ty = try sema.resolveBuiltinTypeFields(block, src, "Type");
@@ -13326,8 +13349,8 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
     const val = try sema.resolveConstValue(block, operand_src, type_info);
     const union_val = val.cast(Value.Payload.Union).?.data;
     const tag_ty = type_info_ty.unionTagType().?;
-    const target = sema.mod.getTarget();
-    const tag_index = tag_ty.enumTagFieldIndex(union_val.tag, target).?;
+    const target = mod.getTarget();
+    const tag_index = tag_ty.enumTagFieldIndex(union_val.tag, mod).?;
     switch (@intToEnum(std.builtin.TypeId, tag_index)) {
         .Type => return Air.Inst.Ref.type_type,
         .Void => return Air.Inst.Ref.void_type,
@@ -13406,14 +13429,14 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
                     return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{});
                 }
                 const sentinel_ptr_val = sentinel_val.castTag(.opt_payload).?.data;
-                const ptr_ty = try Type.ptr(sema.arena, target, .{
+                const ptr_ty = try Type.ptr(sema.arena, mod, .{
                     .@"addrspace" = .generic,
                     .pointee_type = child_ty,
                 });
                 actual_sentinel = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?;
             }
 
-            const ty = try Type.ptr(sema.arena, target, .{
+            const ty = try Type.ptr(sema.arena, mod, .{
                 .size = ptr_size,
                 .mutable = !is_const_val.toBool(),
                 .@"volatile" = is_volatile_val.toBool(),
@@ -13439,14 +13462,14 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
             var buffer: Value.ToTypeBuffer = undefined;
             const child_ty = try child_val.toType(&buffer).copy(sema.arena);
             const sentinel = if (sentinel_val.castTag(.opt_payload)) |p| blk: {
-                const ptr_ty = try Type.ptr(sema.arena, target, .{
+                const ptr_ty = try Type.ptr(sema.arena, mod, .{
                     .@"addrspace" = .generic,
                     .pointee_type = child_ty,
                 });
                 break :blk (try sema.pointerDeref(block, src, p.data, ptr_ty)).?;
             } else null;
 
-            const ty = try Type.array(sema.arena, len, sentinel, child_ty, target);
+            const ty = try Type.array(sema.arena, len, sentinel, child_ty, sema.mod);
             return sema.addType(ty);
         },
         .Optional => {
@@ -13483,8 +13506,9 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
             const payload_val = union_val.val.optionalValue() orelse
                 return sema.addType(Type.initTag(.anyerror));
             const slice_val = payload_val.castTag(.slice).?.data;
-            const decl = slice_val.ptr.pointerDecl().?;
-            try sema.ensureDeclAnalyzed(decl);
+            const decl_index = slice_val.ptr.pointerDecl().?;
+            try sema.ensureDeclAnalyzed(decl_index);
+            const decl = mod.declPtr(decl_index);
             const array_val = decl.val.castTag(.aggregate).?.data;
 
             var names: Module.ErrorSet.NameMap = .{};
@@ -13494,9 +13518,9 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
                 // TODO use reflection instead of magic numbers here
                 // error_set: type,
                 const name_val = struct_val[0];
-                const name_str = try name_val.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, target);
+                const name_str = try name_val.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, sema.mod);
 
-                const kv = try sema.mod.getErrorValue(name_str);
+                const kv = try mod.getErrorValue(name_str);
                 names.putAssumeCapacityNoClobber(kv.key, {});
             }
@@ -13518,7 +13542,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
             const is_tuple_val = struct_val[3];
 
             // Decls
-            if (decls_val.sliceLen(target) > 0) {
+            if (decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified structs must have no decls", .{});
             }
@@ -13548,11 +13572,10 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
             }
 
             // Decls
-            if (decls_val.sliceLen(target) > 0) {
+            if (decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified enums must have no decls", .{});
             }
 
-            const mod = sema.mod;
             const gpa = sema.gpa;
             var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
             errdefer new_decl_arena.deinit();
@@ -13572,20 +13595,20 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
             };
             const enum_ty = Type.initPayload(&enum_ty_payload.base);
             const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
-            const type_name = try sema.createTypeName(block, .anon, "enum");
-            const new_decl = try mod.createAnonymousDeclNamed(block, .{
+            const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
                 .ty = Type.type,
                 .val = enum_val,
-            }, type_name);
+            }, .anon, "enum");
+            const new_decl = mod.declPtr(new_decl_index);
             new_decl.owns_tv = true;
-            errdefer mod.abortAnonDecl(new_decl);
+            errdefer mod.abortAnonDecl(new_decl_index);
 
             // Enum tag type
             var buffer: Value.ToTypeBuffer = undefined;
             const int_tag_ty = try tag_type_val.toType(&buffer).copy(new_decl_arena_allocator);
 
             enum_obj.* = .{
-                .owner_decl = new_decl,
+                .owner_decl = new_decl_index,
                 .tag_ty = int_tag_ty,
                 .tag_ty_inferred = false,
                 .fields = .{},
@@ -13599,17 +13622,17 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
             };
 
             // Fields
-            const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(target));
+            const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
             if (fields_len > 0) {
                 try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
                 try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{
                     .ty = enum_obj.tag_ty,
-                    .target = target,
+                    .mod = mod,
                 });
 
                 var i: usize = 0;
                 while (i < fields_len) : (i += 1) {
-                    const elem_val = try fields_val.elemValue(sema.arena, i);
+                    const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i);
                     const field_struct_val = elem_val.castTag(.aggregate).?.data;
                     // TODO use reflection instead of magic numbers here
                     // name: []const u8
@@ -13620,7 +13643,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
                     const field_name = try name_val.toAllocatedBytes(
                         Type.initTag(.const_slice_u8),
                         new_decl_arena_allocator,
-                        target,
+                        sema.mod,
                     );
 
                     const gop = enum_obj.fields.getOrPutAssumeCapacity(field_name);
@@ -13632,13 +13655,13 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
                     const copied_tag_val = try value_val.copy(new_decl_arena_allocator);
                     enum_obj.values.putAssumeCapacityNoClobberContext(copied_tag_val, {}, .{
                         .ty = enum_obj.tag_ty,
-                        .target = target,
+                        .mod = mod,
                     });
                 }
             }
 
             try new_decl.finalizeNewArena(&new_decl_arena);
-            return sema.analyzeDeclVal(block, src, new_decl);
+            return sema.analyzeDeclVal(block, src, new_decl_index);
         },
         .Opaque => {
             const struct_val = union_val.val.castTag(.aggregate).?.data;
@@ -13646,11 +13669,10 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
             const decls_val = struct_val[0];
 
             // Decls
-            if (decls_val.sliceLen(target) > 0) {
+            if (decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified opaque must have no decls", .{});
             }
 
-            const mod = sema.mod;
             var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
             errdefer new_decl_arena.deinit();
             const new_decl_arena_allocator = new_decl_arena.allocator();
@@ -13663,16 +13685,16 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
             };
             const opaque_ty = Type.initPayload(&opaque_ty_payload.base);
             const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty);
-            const type_name = try sema.createTypeName(block, .anon, "opaque");
-            const new_decl = try mod.createAnonymousDeclNamed(block, .{
+            const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
                 .ty = Type.type,
                 .val = opaque_val,
-            }, type_name);
+            }, .anon, "opaque");
+            const new_decl = mod.declPtr(new_decl_index);
             new_decl.owns_tv = true;
-            errdefer mod.abortAnonDecl(new_decl);
+            errdefer mod.abortAnonDecl(new_decl_index);
 
             opaque_obj.* = .{
-                .owner_decl = new_decl,
+                .owner_decl = new_decl_index,
                 .node_offset = src.node_offset,
                 .namespace = .{
                     .parent = block.namespace,
@@ -13682,7 +13704,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
             };
 
             try new_decl.finalizeNewArena(&new_decl_arena);
-            return sema.analyzeDeclVal(block, src, new_decl);
+            return sema.analyzeDeclVal(block, src, new_decl_index);
         },
         .Union => {
             // TODO use reflection instead of magic numbers here
@@ -13697,7 +13719,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
             const decls_val = struct_val[3];
 
             // Decls
-            if (decls_val.sliceLen(target) > 0) {
+            if (decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified unions must have no decls", .{});
             }
@@ -13714,15 +13736,15 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
             };
             const union_ty = Type.initPayload(&union_payload.base);
             const new_union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty);
-            const type_name = try sema.createTypeName(block, .anon, "union");
-            const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{
+            const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
                 .ty = Type.type,
                 .val = new_union_val,
-            }, type_name);
+            }, .anon, "union");
+            const new_decl = mod.declPtr(new_decl_index);
             new_decl.owns_tv = true;
-            errdefer sema.mod.abortAnonDecl(new_decl);
+            errdefer mod.abortAnonDecl(new_decl_index);
             union_obj.* = .{
-                .owner_decl = new_decl,
+                .owner_decl = new_decl_index,
                 .tag_ty = Type.initTag(.@"null"),
                 .fields = .{},
                 .node_offset = src.node_offset,
@@ -13737,7 +13759,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
             };
 
             // Tag type
-            const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(target));
+            const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
             union_obj.tag_ty = if (tag_type_val.optionalValue()) |payload_val| blk: {
                 var buffer: Value.ToTypeBuffer = undefined;
                 break :blk try payload_val.toType(&buffer).copy(new_decl_arena_allocator);
@@ -13749,7 +13771,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
 
             var i: usize = 0;
             while (i < fields_len) : (i += 1) {
-                const elem_val = try fields_val.elemValue(sema.arena, i);
+                const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i);
                 const field_struct_val = elem_val.castTag(.aggregate).?.data;
                 // TODO use reflection instead of magic numbers here
                 // name: []const u8
@@ -13762,7 +13784,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
                 const field_name = try name_val.toAllocatedBytes(
                     Type.initTag(.const_slice_u8),
                     new_decl_arena_allocator,
-                    target,
+                    sema.mod,
                 );
 
                 const gop = union_obj.fields.getOrPutAssumeCapacity(field_name);
@@ -13780,7 +13802,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
             }
 
             try new_decl.finalizeNewArena(&new_decl_arena);
-            return sema.analyzeDeclVal(block, src, new_decl);
+            return sema.analyzeDeclVal(block, src, new_decl_index);
         },
         .Fn => return sema.fail(block, src, "TODO: Sema.zirReify for Fn", .{}),
         .BoundFn => @panic("TODO delete BoundFn from the language"),
@@ -13794,9 +13816,7 @@ fn reifyTuple(
     src: LazySrcLoc,
     fields_val: Value,
 ) CompileError!Air.Inst.Ref {
-    const target = sema.mod.getTarget();
-
-    const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(target));
+    const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(sema.mod));
     if (fields_len == 0) return sema.addType(Type.initTag(.empty_struct_literal));
 
     const types = try sema.arena.alloc(Type, fields_len);
@@ -13808,7 +13828,7 @@ fn reifyTuple(
 
     var i: usize = 0;
     while (i < fields_len) : (i += 1) {
-        const elem_val = try fields_val.elemValue(sema.arena, i);
+        const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i);
         const field_struct_val = elem_val.castTag(.aggregate).?.data;
         // TODO use reflection instead of magic numbers here
         // name: []const u8
@@ -13821,7 +13841,7 @@ fn reifyTuple(
         const field_name = try name_val.toAllocatedBytes(
             Type.initTag(.const_slice_u8),
             sema.arena,
-            target,
+            sema.mod,
         );
 
         const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch |err| {
@@ -13850,7 +13870,7 @@ fn reifyTuple(
         const default_val = if (default_value_val.optionalValue()) |opt_val| blk: {
             const payload_val = if (opt_val.pointerDecl()) |opt_decl|
-                opt_decl.val
+                sema.mod.declPtr(opt_decl).val
             else
                 opt_val;
             break :blk try payload_val.copy(sema.arena);
@@ -13883,15 +13903,16 @@ fn reifyStruct(
     const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
     const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
     const new_struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
-    const type_name = try sema.createTypeName(block, .anon, "struct");
-    const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{
+    const mod = sema.mod;
+    const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
         .ty = Type.type,
        .val = new_struct_val,
-    }, type_name);
+    }, .anon, "struct");
+    const new_decl = mod.declPtr(new_decl_index);
     new_decl.owns_tv = true;
-    errdefer sema.mod.abortAnonDecl(new_decl);
+    errdefer mod.abortAnonDecl(new_decl_index);
     struct_obj.* = .{
-        .owner_decl = new_decl,
+        .owner_decl = new_decl_index,
         .fields = .{},
         .node_offset = src.node_offset,
         .zir_index = inst,
@@ -13905,14 +13926,14 @@ fn reifyStruct(
         },
     };
 
-    const target = sema.mod.getTarget();
+    const target = mod.getTarget();
 
     // Fields
-    const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(target));
+    const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
     try struct_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
     var i: usize = 0;
     while (i < fields_len) : (i += 1) {
-        const elem_val = try fields_val.elemValue(sema.arena, i);
+        const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i);
         const field_struct_val = elem_val.castTag(.aggregate).?.data;
         // TODO use reflection instead of magic numbers here
         // name: []const u8
@@ -13929,7 +13950,7 @@ fn reifyStruct(
         const field_name = try name_val.toAllocatedBytes(
             Type.initTag(.const_slice_u8),
             new_decl_arena_allocator,
-            target,
+            mod,
         );
 
         const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name);
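[Editor's note, not part of the patch] The zirReify hunks above all apply one refactor: `createTypeName` plus `createAnonymousDeclNamed` are folded into `createAnonymousDeclTypeNamed`, which hands back a `Decl.Index` rather than a `*Decl`, and the pointer is recovered on demand with `mod.declPtr`. A minimal sketch of the new shape, reusing the names the diff introduces (`some_val` is a hypothetical stand-in for the enum/opaque/union/struct value being created):

    // Sketch only: mirrors the replacement pattern used throughout zirReify.
    const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
        .ty = Type.type,
        .val = some_val, // hypothetical; the value being wrapped in a decl
    }, .anon, "enum");
    const new_decl = mod.declPtr(new_decl_index); // *Decl derived from the index
    new_decl.owns_tv = true;
    errdefer mod.abortAnonDecl(new_decl_index); // cleanup APIs now take the index

Note that the stable identity is the index; the pointer is only borrowed to mutate fields, which is what lets the Module store decls in a relocatable table.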
@@ -13940,7 +13961,7 @@ fn reifyStruct(
         const default_val = if (default_value_val.optionalValue()) |opt_val| blk: {
             const payload_val = if (opt_val.pointerDecl()) |opt_decl|
-                opt_decl.val
+                mod.declPtr(opt_decl).val
             else
                 opt_val;
             break :blk try payload_val.copy(new_decl_arena_allocator);
@@ -13957,7 +13978,7 @@ fn reifyStruct(
     }
 
     try new_decl.finalizeNewArena(&new_decl_arena);
-    return sema.analyzeDeclVal(block, src, new_decl);
+    return sema.analyzeDeclVal(block, src, new_decl_index);
 }
 
 fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -13968,8 +13989,7 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     var anon_decl = try block.startAnonDecl(LazySrcLoc.unneeded);
     defer anon_decl.deinit();
 
-    const target = sema.mod.getTarget();
-    const bytes = try ty.nameAllocArena(anon_decl.arena(), target);
+    const bytes = try ty.nameAllocArena(anon_decl.arena(), sema.mod);
 
     const new_decl = try anon_decl.finish(
         try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
@@ -14010,7 +14030,7 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
             error.FloatCannotFit => {
                 return sema.fail(block, operand_src, "integer value {d} cannot be stored in type '{}'", .{
                     std.math.floor(val.toFloat(f64)),
-                    dest_ty.fmt(target),
+                    dest_ty.fmt(sema.mod),
                 });
             },
             else => |e| return e,
@@ -14064,9 +14084,9 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     if (try sema.resolveDefinedValue(block, operand_src, operand_coerced)) |val| {
         const addr = val.toUnsignedInt(target);
         if (!type_res.isAllowzeroPtr() and addr == 0)
-            return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{type_res.fmt(target)});
+            return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{type_res.fmt(sema.mod)});
         if (addr != 0 and addr % ptr_align != 0)
-            return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{type_res.fmt(target)});
+            return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{type_res.fmt(sema.mod)});
 
         const val_payload = try sema.arena.create(Value.Payload.U64);
         val_payload.* = .{
@@ -14110,7 +14130,6 @@ fn zirErrSetCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
     const operand = sema.resolveInst(extra.rhs);
     const operand_ty = sema.typeOf(operand);
-    const target = sema.mod.getTarget();
 
     try sema.checkErrorSetType(block, dest_ty_src, dest_ty);
     try sema.checkErrorSetType(block, operand_src, operand_ty);
@@ -14124,7 +14143,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
                     block,
                     src,
                     "error.{s} not a member of error set '{}'",
-                    .{ error_name, dest_ty.fmt(target) },
+                    .{ error_name, dest_ty.fmt(sema.mod) },
                 );
             }
         }
@@ -14178,11 +14197,11 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             var buf: Type.Payload.ElemType = undefined;
             var dest_ptr_info = dest_ty.optionalChild(&buf).ptrInfo().data;
             dest_ptr_info.@"align" = operand_align;
-            break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, target, dest_ptr_info));
+            break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, sema.mod, dest_ptr_info));
         } else {
             var dest_ptr_info = dest_ty.ptrInfo().data;
             dest_ptr_info.@"align" = operand_align;
-            break :blk try Type.ptr(sema.arena, target, dest_ptr_info);
+            break :blk try Type.ptr(sema.arena, sema.mod, dest_ptr_info);
         }
     };
@@ -14235,7 +14254,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     if (operand_info.signedness != dest_info.signedness) {
         return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{
-            @tagName(dest_info.signedness), operand_ty.fmt(target),
+            @tagName(dest_info.signedness), operand_ty.fmt(sema.mod),
         });
     }
     if (operand_info.bits < dest_info.bits) {
@@ -14244,7 +14263,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 block,
                 src,
                 "destination type '{}' has more bits than source type '{}'",
-                .{ dest_ty.fmt(target), operand_ty.fmt(target) },
+                .{ dest_ty.fmt(sema.mod), operand_ty.fmt(sema.mod) },
             );
             errdefer msg.destroy(sema.gpa);
             try sema.errNote(block, dest_ty_src, msg, "destination type has {d} bits", .{
@@ -14270,7 +14289,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             var elem_buf: Value.ElemValueBuffer = undefined;
             const elems = try sema.arena.alloc(Value, operand_ty.vectorLen());
             for (elems) |*elem, i| {
-                const elem_val = val.elemValueBuffer(i, &elem_buf);
+                const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
                 elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, target);
             }
             return sema.addConstant(
@@ -14302,8 +14321,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     // TODO insert safety check that the alignment is correct
 
     const ptr_info = ptr_ty.ptrInfo().data;
-    const target = sema.mod.getTarget();
-    const dest_ty = try Type.ptr(sema.arena, target, .{
+    const dest_ty = try Type.ptr(sema.arena, sema.mod, .{
         .pointee_type = ptr_info.pointee_type,
         .@"align" = dest_align,
         .@"addrspace" = ptr_info.@"addrspace",
@@ -14346,7 +14364,7 @@ fn zirBitCount(
             const elems = try sema.arena.alloc(Value, vec_len);
             const scalar_ty = operand_ty.scalarType();
             for (elems) |*elem, i| {
-                const elem_val = val.elemValueBuffer(i, &elem_buf);
+                const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
                 const count = comptimeOp(elem_val, scalar_ty, target);
                 elem.* = try Value.Tag.int_u64.create(sema.arena, count);
             }
@@ -14386,7 +14404,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             block,
             ty_src,
             "@byteSwap requires the number of bits to be evenly divisible by 8, but {} has {} bits",
-            .{ scalar_ty.fmt(target), bits },
+            .{ scalar_ty.fmt(sema.mod), bits },
         );
     }
@@ -14414,7 +14432,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             var elem_buf: Value.ElemValueBuffer = undefined;
             const elems = try sema.arena.alloc(Value, vec_len);
             for (elems) |*elem, i| {
-                const elem_val = val.elemValueBuffer(i, &elem_buf);
+                const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
                 elem.* = try elem_val.byteSwap(operand_ty, target, sema.arena);
             }
             return sema.addConstant(
@@ -14462,7 +14480,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
             var elem_buf: Value.ElemValueBuffer = undefined;
             const elems = try sema.arena.alloc(Value, vec_len);
             for (elems) |*elem, i| {
-                const elem_val = val.elemValueBuffer(i, &elem_buf);
+                const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
                 elem.* = try elem_val.bitReverse(operand_ty, target, sema.arena);
             }
             return sema.addConstant(
@@ -14506,7 +14524,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
             block,
             lhs_src,
             "expected struct type, found '{}'",
-            .{ty.fmt(target)},
+            .{ty.fmt(sema.mod)},
         );
     }
@@ -14516,7 +14534,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
             block,
             rhs_src,
             "struct '{}' has no field '{s}'",
-            .{ ty.fmt(target), field_name },
+            .{ ty.fmt(sema.mod), field_name },
         );
     };
@@ -14542,20 +14560,18 @@
 }
 
 fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
-    const target = sema.mod.getTarget();
     switch (ty.zigTypeTag()) {
         .Struct, .Enum, .Union, .Opaque => return,
-        else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(target)}),
+        else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(sema.mod)}),
     }
 }
 
 /// Returns `true` if the type was a comptime_int.
 fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
-    const target = sema.mod.getTarget();
     switch (try ty.zigTypeTagOrPoison()) {
         .ComptimeInt => return true,
         .Int => return false,
-        else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(target)}),
+        else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(sema.mod)}),
     }
 }
@@ -14565,7 +14581,6 @@ fn checkPtrOperand(
     ty_src: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
-    const target = sema.mod.getTarget();
     switch (ty.zigTypeTag()) {
         .Pointer => return,
         .Fn => {
@@ -14574,7 +14589,7 @@ fn checkPtrOperand(
                     block,
                     ty_src,
                     "expected pointer, found {}",
-                    .{ty.fmt(target)},
+                    .{ty.fmt(sema.mod)},
                 );
                 errdefer msg.destroy(sema.gpa);
@@ -14587,7 +14602,7 @@ fn checkPtrOperand(
         .Optional => if (ty.isPtrLikeOptional()) return,
         else => {},
     }
-    return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(target)});
+    return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)});
 }
 
 fn checkPtrType(
@@ -14596,7 +14611,6 @@ fn checkPtrType(
     ty_src: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
-    const target = sema.mod.getTarget();
     switch (ty.zigTypeTag()) {
         .Pointer => return,
         .Fn => {
@@ -14605,7 +14619,7 @@ fn checkPtrType(
                     block,
                     ty_src,
                     "expected pointer type, found '{}'",
-                    .{ty.fmt(target)},
+                    .{ty.fmt(sema.mod)},
                 );
                 errdefer msg.destroy(sema.gpa);
@@ -14618,7 +14632,7 @@ fn checkPtrType(
         .Optional => if (ty.isPtrLikeOptional()) return,
         else => {},
     }
-    return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(target)});
+    return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)});
 }
 
 fn checkVectorElemType(
@@ -14631,8 +14645,7 @@ fn checkVectorElemType(
         .Int, .Float, .Bool => return,
         else => if (ty.isPtrAtRuntime()) return,
     }
-    const target = sema.mod.getTarget();
-    return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(target)});
+    return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(sema.mod)});
 }
 
 fn checkFloatType(
@@ -14641,10 +14654,9 @@ fn checkFloatType(
     ty_src: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
-    const target = sema.mod.getTarget();
     switch (ty.zigTypeTag()) {
         .ComptimeInt, .ComptimeFloat, .Float => {},
-        else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(target)}),
+        else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(sema.mod)}),
     }
 }
@@ -14654,14 +14666,13 @@ fn checkNumericType(
     ty_src: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
-    const target = sema.mod.getTarget();
     switch (ty.zigTypeTag()) {
         .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
         .Vector => switch (ty.childType().zigTypeTag()) {
             .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
             else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}),
         },
-        else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(target)}),
+        else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(sema.mod)}),
     }
 }
@@ -14697,7 +14708,7 @@ fn checkAtomicOperandType(
                 block,
                 ty_src,
                 "expected bool, integer, float, enum, or pointer type; found {}",
-                .{ty.fmt(target)},
+                .{ty.fmt(sema.mod)},
             );
         },
     };
@@ -14761,7 +14772,6 @@ fn checkIntOrVector(
     operand_src: LazySrcLoc,
 ) CompileError!Type {
     const operand_ty = sema.typeOf(operand);
-    const target = sema.mod.getTarget();
     switch (try operand_ty.zigTypeTagOrPoison()) {
         .Int => return operand_ty,
         .Vector => {
@@ -14769,12 +14779,12 @@ fn checkIntOrVector(
             switch (try elem_ty.zigTypeTagOrPoison()) {
                 .Int => return elem_ty,
                 else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
-                    elem_ty.fmt(target),
+                    elem_ty.fmt(sema.mod),
                 }),
             }
         },
         else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{
-            operand_ty.fmt(target),
+            operand_ty.fmt(sema.mod),
         }),
     }
 }
@@ -14786,7 +14796,6 @@ fn checkIntOrVectorAllowComptime(
     operand_src: LazySrcLoc,
 ) CompileError!Type {
     const operand_ty = sema.typeOf(operand);
-    const target = sema.mod.getTarget();
     switch (try operand_ty.zigTypeTagOrPoison()) {
         .Int, .ComptimeInt => return operand_ty,
         .Vector => {
@@ -14794,21 +14803,20 @@ fn checkIntOrVectorAllowComptime(
             switch (try elem_ty.zigTypeTagOrPoison()) {
                 .Int, .ComptimeInt => return elem_ty,
                 else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
-                    elem_ty.fmt(target),
+                    elem_ty.fmt(sema.mod),
                 }),
             }
         },
         else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{
-            operand_ty.fmt(target),
+            operand_ty.fmt(sema.mod),
         }),
     }
 }
 
 fn checkErrorSetType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
-    const target = sema.mod.getTarget();
     switch (ty.zigTypeTag()) {
         .ErrorSet => return,
-        else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(target)}),
+        else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(sema.mod)}),
     }
 }
@@ -14892,10 +14900,9 @@ fn checkVectorizableBinaryOperands(
             return sema.failWithOwnedErrorMsg(block, msg);
         }
     } else {
-        const target = sema.mod.getTarget();
         const msg = msg: {
             const msg = try sema.errMsg(block, src, "mixed scalar and vector operands: {} and {}", .{
-                lhs_ty.fmt(target), rhs_ty.fmt(target),
+                lhs_ty.fmt(sema.mod), rhs_ty.fmt(sema.mod),
             });
             errdefer msg.destroy(sema.gpa);
             if (lhs_is_vector) {
@@ -14934,9 +14941,8 @@ fn resolveExportOptions(
         return sema.fail(block, src, "TODO: implement exporting with linksection", .{});
     }
     const name_ty = Type.initTag(.const_slice_u8);
-    const target = sema.mod.getTarget();
     return std.builtin.ExportOptions{
-        .name = try name_val.toAllocatedBytes(name_ty, sema.arena, target),
+        .name = try name_val.toAllocatedBytes(name_ty, sema.arena, sema.mod),
         .linkage = linkage_val.toEnum(std.builtin.GlobalLinkage),
         .section = null, // TODO
     };
@@ -14995,13 +15001,12 @@ fn zirCmpxchg(
     const ptr_ty = sema.typeOf(ptr);
     const elem_ty = ptr_ty.elemType();
     try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty);
-    const target = sema.mod.getTarget();
     if (elem_ty.zigTypeTag() == .Float) {
         return sema.fail(
             block,
             elem_ty_src,
             "expected bool, integer, enum, or pointer type; found '{}'",
-            .{elem_ty.fmt(target)},
+            .{elem_ty.fmt(sema.mod)},
         );
     }
     const expected_value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.expected_value), expected_src);
@@ -15038,7 +15043,7 @@ fn zirCmpxchg(
                 return sema.addConstUndef(result_ty);
             }
             const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
-            const result_val = if (stored_val.eql(expected_val, elem_ty, target)) blk: {
+            const result_val = if (stored_val.eql(expected_val, elem_ty, sema.mod)) blk: {
                 try sema.storePtr(block, src, ptr, new_value);
                 break :blk Value.@"null";
             } else try Value.Tag.opt_payload.create(sema.arena, stored_val);
@@ -15103,7 +15108,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const target = sema.mod.getTarget();
 
     if (operand_ty.zigTypeTag() != .Vector) {
-        return sema.fail(block, operand_src, "expected vector, found {}", .{operand_ty.fmt(target)});
+        return sema.fail(block, operand_src, "expected vector, found {}", .{operand_ty.fmt(sema.mod)});
     }
 
     const scalar_ty = operand_ty.childType();
@@ -15113,13 +15118,13 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         .And, .Or, .Xor => switch (scalar_ty.zigTypeTag()) {
             .Int, .Bool => {},
             else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or boolean operand; found {}", .{
-                @tagName(operation), operand_ty.fmt(target),
+                @tagName(operation), operand_ty.fmt(sema.mod),
            }),
         },
         .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag()) {
             .Int, .Float => {},
             else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or float operand; found {}", .{
-                @tagName(operation), operand_ty.fmt(target),
+                @tagName(operation), operand_ty.fmt(sema.mod),
            }),
         },
     }
@@ -15134,11 +15139,11 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |operand_val| {
         if (operand_val.isUndef()) return sema.addConstUndef(scalar_ty);
 
-        var accum: Value = try operand_val.elemValue(sema.arena, 0);
+        var accum: Value = try operand_val.elemValue(sema.mod, sema.arena, 0);
         var elem_buf: Value.ElemValueBuffer = undefined;
         var i: u32 = 1;
         while (i < vec_len) : (i += 1) {
-            const elem_val = operand_val.elemValueBuffer(i, &elem_buf);
+            const elem_val = operand_val.elemValueBuffer(sema.mod, i, &elem_buf);
             switch (operation) {
                 .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, target),
                 .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, target),
@@ -15174,11 +15179,10 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     var b = sema.resolveInst(extra.b);
     var mask = sema.resolveInst(extra.mask);
     var mask_ty = sema.typeOf(mask);
-    const target = sema.mod.getTarget();
 
     const mask_len = switch (sema.typeOf(mask).zigTypeTag()) {
         .Array, .Vector => sema.typeOf(mask).arrayLen(),
-        else => return sema.fail(block, mask_src, "expected vector or array, found {}", .{sema.typeOf(mask).fmt(target)}),
+        else => return sema.fail(block, mask_src, "expected vector or array, found {}", .{sema.typeOf(mask).fmt(sema.mod)}),
     };
     mask_ty = try Type.Tag.vector.create(sema.arena, .{
         .len = mask_len,
@@ -15210,21 +15214,20 @@ fn analyzeShuffle(
         .elem_type = elem_ty,
     });
 
-    const target = sema.mod.getTarget();
     var maybe_a_len = switch (sema.typeOf(a).zigTypeTag()) {
         .Array, .Vector => sema.typeOf(a).arrayLen(),
         .Undefined => null,
         else => return sema.fail(block, a_src, "expected vector or array with element type {}, found {}", .{
-            elem_ty.fmt(target),
-            sema.typeOf(a).fmt(target),
+            elem_ty.fmt(sema.mod),
+            sema.typeOf(a).fmt(sema.mod),
         }),
     };
     var maybe_b_len = switch (sema.typeOf(b).zigTypeTag()) {
         .Array, .Vector => sema.typeOf(b).arrayLen(),
         .Undefined => null,
         else => return sema.fail(block, b_src, "expected vector or array with element type {}, found {}", .{
-            elem_ty.fmt(target),
-            sema.typeOf(b).fmt(target),
+            elem_ty.fmt(sema.mod),
+            sema.typeOf(b).fmt(sema.mod),
         }),
     };
     if (maybe_a_len == null and maybe_b_len == null) {
@@ -15253,7 +15256,7 @@ fn analyzeShuffle(
     var i: usize = 0;
     while (i < mask_len) : (i += 1) {
         var buf: Value.ElemValueBuffer = undefined;
-        const elem = mask.elemValueBuffer(i, &buf);
+        const elem = mask.elemValueBuffer(sema.mod, i, &buf);
         if (elem.isUndef()) continue;
         const int = elem.toSignedInt();
         var unsigned: u32 = undefined;
@@ -15272,7 +15275,7 @@ fn analyzeShuffle(
 
                 try sema.errNote(block, operand_info[chosen][1], msg, "selected index {d} out of bounds of {}", .{
                     unsigned,
-                    operand_info[chosen][2].fmt(target),
+                    operand_info[chosen][2].fmt(sema.mod),
                 });
 
                 if (chosen == 1) {
@@ -15292,7 +15295,7 @@ fn analyzeShuffle(
        i = 0;
         while (i < mask_len) : (i += 1) {
             var buf: Value.ElemValueBuffer = undefined;
-            const mask_elem_val = mask.elemValueBuffer(i, &buf);
+            const mask_elem_val = mask.elemValueBuffer(sema.mod, i, &buf);
             if (mask_elem_val.isUndef()) {
                 values[i] = Value.undef;
                 continue;
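[Editor's note, not part of the patch] The `Value` query APIs change shape in the same mechanical way across these hunks: `elemValue`, `elemValueBuffer`, `sliceLen`, and `eql` now take the `*Module` (spelled `sema.mod` or `mod`) in place of, or ahead of, the bare `std.Target`, so that values backed by decls can be dereferenced through the decl table. A before/after sketch, with `val`, `vec_len`, `buf`, and `slice_val` assumed from the surrounding function:

    // Before: const elem = val.elemValueBuffer(i, &buf);
    var i: usize = 0;
    while (i < vec_len) : (i += 1) {
        // After: the Module rides along on every element access.
        const elem = val.elemValueBuffer(sema.mod, i, &buf);
        _ = elem;
    }
    const len = slice_val.sliceLen(sema.mod); // likewise for slice lengths
    _ = len;

Where ABI information is genuinely needed (e.g. `intTrunc`, `bitwiseAnd`), the hunks keep a separate `target = mod.getTarget()` rather than removing it.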
@@ -15300,9 +15303,9 @@ fn analyzeShuffle(
             const int = mask_elem_val.toSignedInt();
             const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int);
             if (int >= 0) {
-                values[i] = try a_val.elemValue(sema.arena, unsigned);
+                values[i] = try a_val.elemValue(sema.mod, sema.arena, unsigned);
            } else {
-                values[i] = try b_val.elemValue(sema.arena, unsigned);
+                values[i] = try b_val.elemValue(sema.mod, sema.arena, unsigned);
            }
        }
        const res_val = try Value.Tag.aggregate.create(sema.arena, values);
@@ -15358,7 +15361,6 @@ fn analyzeShuffle(
 fn zirSelect(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Select, inst_data.payload_index).data;
-    const target = sema.mod.getTarget();
 
     const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const pred_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
@@ -15372,7 +15374,7 @@ fn zirSelect(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison()) {
         .Vector, .Array => pred_ty.arrayLen(),
-        else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(target)}),
+        else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(sema.mod)}),
     };
     const vec_len = try sema.usizeCast(block, pred_src, vec_len_u64);
@@ -15399,12 +15401,12 @@ fn zirSelect(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
                 var buf: Value.ElemValueBuffer = undefined;
                 const elems = try sema.gpa.alloc(Value, vec_len);
                 for (elems) |*elem, i| {
-                    const pred_elem_val = pred_val.elemValueBuffer(i, &buf);
+                    const pred_elem_val = pred_val.elemValueBuffer(sema.mod, i, &buf);
                     const should_choose_a = pred_elem_val.toBool();
                     if (should_choose_a) {
-                        elem.* = a_val.elemValueBuffer(i, &buf);
+                        elem.* = a_val.elemValueBuffer(sema.mod, i, &buf);
                     } else {
-                        elem.* = b_val.elemValueBuffer(i, &buf);
+                        elem.* = b_val.elemValueBuffer(sema.mod, i, &buf);
                     }
                 }
@@ -15630,7 +15632,7 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
 
     switch (ty.zigTypeTag()) {
         .ComptimeFloat, .Float, .Vector => {},
-        else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(target)}),
+        else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(sema.mod)}),
     }
 
     const runtime_src = if (maybe_mulend1) |mulend1_val| rs: {
@@ -15704,10 +15706,9 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
         break :modifier modifier_val.toEnum(std.builtin.CallOptions.Modifier);
     };
 
-    const target = sema.mod.getTarget();
     const args_ty = sema.typeOf(args);
     if (!args_ty.isTuple() and args_ty.tag() != .empty_struct_literal) {
-        return sema.fail(block, args_src, "expected a tuple, found {}", .{args_ty.fmt(target)});
+        return sema.fail(block, args_src, "expected a tuple, found {}", .{args_ty.fmt(sema.mod)});
     }
 
     var resolved_args: []Air.Inst.Ref = undefined;
@@ -15744,10 +15745,9 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     const field_name = try sema.resolveConstString(block, name_src, extra.field_name);
     const field_ptr = sema.resolveInst(extra.field_ptr);
     const field_ptr_ty = sema.typeOf(field_ptr);
-    const target = sema.mod.getTarget();
 
     if (struct_ty.zigTypeTag() != .Struct) {
-        return sema.fail(block, ty_src, "expected struct type, found '{}'", .{struct_ty.fmt(target)});
+        return sema.fail(block, ty_src, "expected struct type, found '{}'", .{struct_ty.fmt(sema.mod)});
     }
     try sema.resolveTypeLayout(block, ty_src, struct_ty);
@@ -15756,7 +15756,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
         return sema.failWithBadStructFieldAccess(block, struct_obj, name_src, field_name);
 
     if (field_ptr_ty.zigTypeTag() != .Pointer) {
-        return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{field_ptr_ty.fmt(target)});
+        return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{field_ptr_ty.fmt(sema.mod)});
     }
     const field = struct_obj.fields.values()[field_index];
     const field_ptr_ty_info = field_ptr_ty.ptrInfo().data;
@@ -15773,11 +15773,11 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
         ptr_ty_data.@"align" = field.abi_align;
     }
 
-    const actual_field_ptr_ty = try Type.ptr(sema.arena, target, ptr_ty_data);
+    const actual_field_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_ty_data);
     const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, ptr_src);
 
     ptr_ty_data.pointee_type = struct_ty;
-    const result_ptr = try Type.ptr(sema.arena, target, ptr_ty_data);
+    const result_ptr = try Type.ptr(sema.arena, sema.mod, ptr_ty_data);
 
     if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| {
         const payload = field_ptr_val.castTag(.field_ptr).?.data;
@@ -15850,8 +15850,8 @@ fn analyzeMinMax(
             var rhs_buf: Value.ElemValueBuffer = undefined;
             const elems = try sema.arena.alloc(Value, vec_len);
             for (elems) |*elem, i| {
-                const lhs_elem_val = lhs_val.elemValueBuffer(i, &lhs_buf);
-                const rhs_elem_val = rhs_val.elemValueBuffer(i, &rhs_buf);
+                const lhs_elem_val = lhs_val.elemValueBuffer(sema.mod, i, &lhs_buf);
+                const rhs_elem_val = rhs_val.elemValueBuffer(sema.mod, i, &rhs_buf);
                 elem.* = opFunc(lhs_elem_val, rhs_elem_val, target);
             }
             return sema.addConstant(
@@ -15878,18 +15878,17 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
     const dest_ptr = sema.resolveInst(extra.dest);
     const dest_ptr_ty = sema.typeOf(dest_ptr);
-    const target = sema.mod.getTarget();
 
     try sema.checkPtrOperand(block, dest_src, dest_ptr_ty);
     if (dest_ptr_ty.isConstPtr()) {
-        return sema.fail(block, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty.fmt(target)});
+        return sema.fail(block, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty.fmt(sema.mod)});
     }
 
     const uncasted_src_ptr = sema.resolveInst(extra.source);
     const uncasted_src_ptr_ty = sema.typeOf(uncasted_src_ptr);
     try sema.checkPtrOperand(block, src_src, uncasted_src_ptr_ty);
     const src_ptr_info = uncasted_src_ptr_ty.ptrInfo().data;
-    const wanted_src_ptr_ty = try Type.ptr(sema.arena, target, .{
+    const wanted_src_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
         .pointee_type = dest_ptr_ty.elemType2(),
         .@"align" = src_ptr_info.@"align",
         .@"addrspace" = src_ptr_info.@"addrspace",
@@ -15936,10 +15935,9 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
     const dest_ptr = sema.resolveInst(extra.dest);
     const dest_ptr_ty = sema.typeOf(dest_ptr);
-    const target = sema.mod.getTarget();
     try sema.checkPtrOperand(block, dest_src, dest_ptr_ty);
     if (dest_ptr_ty.isConstPtr()) {
-        return sema.fail(block, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty.fmt(target)});
+        return sema.fail(block, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty.fmt(sema.mod)});
     }
     const elem_ty = dest_ptr_ty.elemType2();
     const value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.byte), value_src);
@@ -16057,7 +16055,7 @@ fn zirVarExtended(
     });
 
     new_var.* = .{
-        .owner_decl = sema.owner_decl,
+        .owner_decl = sema.owner_decl_index,
         .init = init_val,
         .is_extern = small.is_extern,
         .is_mutable = true, // TODO get rid of this unused field
@@ -16294,7 +16292,7 @@ fn zirBuiltinExtern(
 
     var ty = try sema.resolveType(block, ty_src, extra.lhs);
     const options_inst = sema.resolveInst(extra.rhs);
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;
 
     const options = options: {
         const extern_options_ty = try sema.getBuiltinType(block, options_src, "ExternOptions");
@@ -16315,11 +16313,11 @@ fn zirBuiltinExtern(
         var library_name: ?[]const u8 = null;
         if (!library_name_val.isNull()) {
             const payload = library_name_val.castTag(.opt_payload).?.data;
-            library_name = try payload.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, target);
+            library_name = try payload.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod);
         }
 
         break :options std.builtin.ExternOptions{
-            .name = try name_val.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, target),
+            .name = try name_val.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod),
             .library_name = library_name,
             .linkage = linkage_val.toEnum(std.builtin.GlobalLinkage),
             .is_thread_local = is_thread_local_val.toBool(),
@@ -16344,8 +16342,10 @@ fn zirBuiltinExtern(
 
     // TODO check duplicate extern
 
-    const new_decl = try sema.mod.allocateNewDecl(try sema.gpa.dupeZ(u8, options.name), sema.owner_decl.src_namespace, sema.owner_decl.src_node, null);
-    errdefer new_decl.destroy(sema.mod);
+    const new_decl_index = try mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node, null);
+    errdefer mod.destroyDecl(new_decl_index);
+    const new_decl = mod.declPtr(new_decl_index);
+    new_decl.name = try sema.gpa.dupeZ(u8, options.name);
 
     var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
     errdefer new_decl_arena.deinit();
@@ -16355,7 +16355,7 @@ fn zirBuiltinExtern(
     errdefer new_decl_arena_allocator.destroy(new_var);
 
     new_var.* = .{
-        .owner_decl = sema.owner_decl,
+        .owner_decl = sema.owner_decl_index,
         .init = Value.initTag(.unreachable_value),
         .is_extern = true,
         .is_mutable = false, // TODO get rid of this unused field
@@ -16378,13 +16378,13 @@ fn zirBuiltinExtern(
     new_decl.@"linksection" = null;
     new_decl.has_tv = true;
     new_decl.analysis = .complete;
-    new_decl.generation = sema.mod.generation;
+    new_decl.generation = mod.generation;
 
     const arena_state = try new_decl_arena_allocator.create(std.heap.ArenaAllocator.State);
     arena_state.* = new_decl_arena.state;
     new_decl.value_arena = arena_state;
 
-    const ref = try sema.analyzeDeclRef(new_decl);
+    const ref = try sema.analyzeDeclRef(new_decl_index);
     try sema.requireRuntimeBlock(block, src);
     return block.addBitCast(ty, ref);
 }
@@ -16412,12 +16412,14 @@ fn validateVarType(
 ) CompileError!void {
     if (try sema.validateRunTimeType(block, src, var_ty, is_extern)) return;
 
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;
+
     const msg = msg: {
-        const msg = try sema.errMsg(block, src, "variable of type '{}' must be const or comptime", .{var_ty.fmt(target)});
+        const msg = try sema.errMsg(block, src, "variable of type '{}' must be const or comptime", .{var_ty.fmt(mod)});
         errdefer msg.destroy(sema.gpa);
 
-        try sema.explainWhyTypeIsComptime(block, src, msg, src.toSrcLoc(block.src_decl), var_ty);
+        const src_decl = mod.declPtr(block.src_decl);
+        try sema.explainWhyTypeIsComptime(block, src, msg, src.toSrcLoc(src_decl), var_ty);
 
         break :msg msg;
     };
@@ -16489,7 +16491,6 @@ fn explainWhyTypeIsComptime(
     ty: Type,
 ) CompileError!void {
     const mod = sema.mod;
-    const target = mod.getTarget();
     switch (ty.zigTypeTag()) {
         .Bool,
         .Int,
@@ -16503,7 +16504,7 @@ fn explainWhyTypeIsComptime(
 
         .Fn => {
             try mod.errNoteNonLazy(src_loc, msg, "use '*const {}' for a function pointer type", .{
-                ty.fmt(target),
+                ty.fmt(sema.mod),
             });
         },
@@ -16534,7 +16535,7 @@ fn explainWhyTypeIsComptime(
             if (ty.castTag(.@"struct")) |payload| {
                 const struct_obj = payload.data;
                 for (struct_obj.fields.values()) |field, i| {
-                    const field_src_loc = struct_obj.fieldSrcLoc(sema.gpa, .{
+                    const field_src_loc = struct_obj.fieldSrcLoc(sema.mod, .{
                         .index = i,
                         .range = .type,
                     });
@@ -16551,7 +16552,7 @@ fn explainWhyTypeIsComptime(
             if (ty.cast(Type.Payload.Union)) |payload| {
                 const union_obj = payload.data;
                 for (union_obj.fields.values()) |field, i| {
-                    const field_src_loc = union_obj.fieldSrcLoc(sema.gpa, .{
+                    const field_src_loc = union_obj.fieldSrcLoc(sema.mod, .{
                         .index = i,
                         .range = .type,
                     });
@@ -16668,7 +16669,7 @@ fn panicWithMsg(
     const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace");
     const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty);
     const target = mod.getTarget();
-    const ptr_stack_trace_ty = try Type.ptr(arena, target, .{
+    const ptr_stack_trace_ty = try Type.ptr(arena, mod, .{
         .pointee_type = stack_trace_ty,
         .@"addrspace" = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic
     });
@@ -16748,8 +16749,6 @@ fn fieldVal(
     else
         object_ty;
 
-    const target = sema.mod.getTarget();
-
     switch (inner_ty.zigTypeTag()) {
         .Array => {
             if (mem.eql(u8, field_name, "len")) {
@@ -16762,7 +16761,7 @@ fn fieldVal(
                     block,
                     field_name_src,
                     "no member named '{s}' in '{}'",
-                    .{ field_name, object_ty.fmt(target) },
+                    .{ field_name, object_ty.fmt(sema.mod) },
                 );
             }
         },
@@ -16786,7 +16785,7 @@ fn fieldVal(
                         block,
                         field_name_src,
                         "no member named '{s}' in '{}'",
-                        .{ field_name, object_ty.fmt(target) },
+                        .{ field_name, object_ty.fmt(sema.mod) },
                     );
                 }
             } else if (ptr_info.pointee_type.zigTypeTag() == .Array) {
@@ -16800,7 +16799,7 @@ fn fieldVal(
                     block,
                     field_name_src,
                     "no member named '{s}' in '{}'",
-                    .{ field_name, ptr_info.pointee_type.fmt(target) },
+                    .{ field_name, ptr_info.pointee_type.fmt(sema.mod) },
                 );
             }
         }
@@ -16822,7 +16821,7 @@ fn fieldVal(
                             break :blk entry.key_ptr.*;
                         }
                         return sema.fail(block, src, "no error named '{s}' in '{}'", .{
-                            field_name, child_type.fmt(target),
+                            field_name, child_type.fmt(sema.mod),
                         });
                     } else (try sema.mod.getErrorValue(field_name)).key;
@@ -16876,10 +16875,10 @@ fn fieldVal(
                         else => unreachable,
                     };
                     return sema.fail(block, src, "{s} '{}' has no member named '{s}'", .{
-                        kw_name, child_type.fmt(target), field_name,
+                        kw_name, child_type.fmt(sema.mod), field_name,
                     });
                 },
-                else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(target)}),
+                else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(sema.mod)}),
             }
         },
         .Struct => if (is_pointer_to) {
@@ -16898,7 +16897,7 @@ fn fieldVal(
         },
         else => {},
     }
-    return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(target)});
+    return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
 }
 
 fn fieldPtr(
@@ -16912,12 +16911,11 @@ fn fieldPtr(
     // When editing this function, note that there is corresponding logic to be edited
     // in `fieldVal`. This function takes a pointer and returns a pointer.
 
-    const target = sema.mod.getTarget();
     const object_ptr_src = src; // TODO better source location
     const object_ptr_ty = sema.typeOf(object_ptr);
     const object_ty = switch (object_ptr_ty.zigTypeTag()) {
         .Pointer => object_ptr_ty.elemType(),
-        else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(target)}),
+        else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(sema.mod)}),
     };
 
     // Zig allows dereferencing a single pointer during field lookup. Note that
@@ -16945,7 +16943,7 @@ fn fieldPtr(
                     block,
                     field_name_src,
                     "no member named '{s}' in '{}'",
-                    .{ field_name, object_ty.fmt(target) },
+                    .{ field_name, object_ty.fmt(sema.mod) },
                 );
             }
         },
@@ -16971,7 +16969,7 @@ fn fieldPtr(
                 }
 
                 try sema.requireRuntimeBlock(block, src);
-                const result_ty = try Type.ptr(sema.arena, target, .{
+                const result_ty = try Type.ptr(sema.arena, sema.mod, .{
                     .pointee_type = slice_ptr_ty,
                     .mutable = object_ptr_ty.ptrIsMutable(),
                     .@"addrspace" = object_ptr_ty.ptrAddressSpace(),
@@ -16985,13 +16983,13 @@ fn fieldPtr(
 
                    return sema.analyzeDeclRef(try anon_decl.finish(
                         Type.usize,
-                        try Value.Tag.int_u64.create(anon_decl.arena(), val.sliceLen(target)),
+                        try Value.Tag.int_u64.create(anon_decl.arena(), val.sliceLen(sema.mod)),
                         0, // default alignment
                     ));
                 }
 
                 try sema.requireRuntimeBlock(block, src);
-                const result_ty = try Type.ptr(sema.arena, target, .{
+                const result_ty = try Type.ptr(sema.arena, sema.mod, .{
                     .pointee_type = Type.usize,
                     .mutable = object_ptr_ty.ptrIsMutable(),
                     .@"addrspace" = object_ptr_ty.ptrAddressSpace(),
@@ -17003,7 +17001,7 @@ fn fieldPtr(
                     block,
                     field_name_src,
                     "no member named '{s}' in '{}'",
-                    .{ field_name, object_ty.fmt(target) },
+                    .{ field_name, object_ty.fmt(sema.mod) },
                 );
             }
         },
@@ -17027,7 +17025,7 @@ fn fieldPtr(
                             break :blk entry.key_ptr.*;
                         }
                         return sema.fail(block, src, "no error named '{s}' in '{}'", .{
-                            field_name, child_type.fmt(target),
+                            field_name, child_type.fmt(sema.mod),
                         });
                     } else (try sema.mod.getErrorValue(field_name)).key;
@@ -17085,7 +17083,7 @@ fn fieldPtr(
                     }
                     return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
                 },
-                else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(target)}),
+                else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(sema.mod)}),
             }
         },
         .Struct => {
@@ -17104,7 +17102,7 @@ fn fieldPtr(
         },
         else => {},
     }
-    return sema.fail(block, src, "type '{}' does not support field access (fieldPtr, {}.{s})", .{ object_ty.fmt(target), object_ptr_ty.fmt(target), field_name });
+    return sema.fail(block, src, "type '{}' does not support field access (fieldPtr, {}.{s})", .{ object_ty.fmt(sema.mod), object_ptr_ty.fmt(sema.mod), field_name });
 }
 
 fn fieldCallBind(
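[Editor's note, not part of the patch] `namespaceLookup` in the hunks below shows the signature-level consequence of the refactor: functions that used to hand out `?*Decl` now return `?Decl.Index`, and callers dereference only when they need to read fields. A minimal sketch of the caller-side idiom, using the names the patch itself introduces (`namespace` and `decl_name` assumed from context):

    // Sketch only: lookup yields an index; the pointer is derived transiently.
    if (try sema.namespaceLookup(block, src, namespace, decl_name)) |decl_index| {
        const decl = sema.mod.declPtr(decl_index); // borrow *Decl to inspect it
        if (decl.is_pub) return decl_index;        // but propagate the stable index
    }

Storing indexes rather than pointers in long-lived structures (`owner_decl`, error messages, etc.) is what the `.owner_decl = new_decl_index` changes throughout this patch are about.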
const is_double_ptr = inner_ty.zigTypeTag() == .Pointer and inner_ty.ptrSize() == .One; @@ -17184,7 +17181,7 @@ fn fieldCallBind( first_param_type.zigTypeTag() == .Pointer and (first_param_type.ptrSize() == .One or first_param_type.ptrSize() == .C) and - first_param_type.childType().eql(concrete_ty, target))) + first_param_type.childType().eql(concrete_ty, sema.mod))) { // zig fmt: on // TODO: bound fn calls on rvalues should probably @@ -17195,7 +17192,7 @@ fn fieldCallBind( .arg0_inst = object_ptr, }); return sema.addConstant(ty, value); - } else if (first_param_type.eql(concrete_ty, target)) { + } else if (first_param_type.eql(concrete_ty, sema.mod)) { var deref = try sema.analyzeLoad(block, src, object_ptr, src); const ty = Type.Tag.bound_fn.init(); const value = try Value.Tag.bound_fn.create(arena, .{ @@ -17211,7 +17208,7 @@ fn fieldCallBind( else => {}, } - return sema.fail(block, src, "type '{}' has no field or member function named '{s}'", .{ concrete_ty.fmt(target), field_name }); + return sema.fail(block, src, "type '{}' has no field or member function named '{s}'", .{ concrete_ty.fmt(sema.mod), field_name }); } fn finishFieldCallBind( @@ -17224,8 +17221,7 @@ fn finishFieldCallBind( object_ptr: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const arena = sema.arena; - const target = sema.mod.getTarget(); - const ptr_field_ty = try Type.ptr(arena, target, .{ + const ptr_field_ty = try Type.ptr(arena, sema.mod, .{ .pointee_type = field_ty, .mutable = ptr_ty.ptrIsMutable(), .@"addrspace" = ptr_ty.ptrAddressSpace(), @@ -17254,9 +17250,10 @@ fn namespaceLookup( src: LazySrcLoc, namespace: *Namespace, decl_name: []const u8, -) CompileError!?*Decl { +) CompileError!?Decl.Index { const gpa = sema.gpa; - if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl| { + if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl_index| { + const decl = sema.mod.declPtr(decl_index); if (!decl.is_pub and decl.getFileScope() != block.getFileScope()) { const msg = msg: { const msg = try sema.errMsg(block, src, "'{s}' is not marked 'pub'", .{ @@ -17268,7 +17265,7 @@ fn namespaceLookup( }; return sema.failWithOwnedErrorMsg(block, msg); } - return decl; + return decl_index; } return null; } @@ -17377,7 +17374,7 @@ fn structFieldPtrByIndex( ptr_ty_data.@"align" = field.abi_align; } - const ptr_field_ty = try Type.ptr(sema.arena, target, ptr_ty_data); + const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); if (field.is_comptime) { var anon_decl = try block.startAnonDecl(field_src); @@ -17476,15 +17473,14 @@ fn tupleFieldIndex( field_name: []const u8, field_name_src: LazySrcLoc, ) CompileError!u32 { - const target = sema.mod.getTarget(); const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch |err| { return sema.fail(block, field_name_src, "tuple {} has no such field '{s}': {s}", .{ - tuple_ty.fmt(target), field_name, @errorName(err), + tuple_ty.fmt(sema.mod), field_name, @errorName(err), }); }; if (field_index >= tuple_ty.structFieldCount()) { return sema.fail(block, field_name_src, "tuple {} has no such field '{s}'", .{ - tuple_ty.fmt(target), field_name, + tuple_ty.fmt(sema.mod), field_name, }); } return field_index; @@ -17535,8 +17531,7 @@ fn unionFieldPtr( const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field = union_obj.fields.values()[field_index]; - const target = sema.mod.getTarget(); - const ptr_field_ty = try 
Type.ptr(arena, target, .{ + const ptr_field_ty = try Type.ptr(arena, sema.mod, .{ .pointee_type = field.ty, .mutable = union_ptr_ty.ptrIsMutable(), .@"addrspace" = union_ptr_ty.ptrAddressSpace(), @@ -17559,7 +17554,7 @@ fn unionFieldPtr( // .data = field_index, //}; //const field_tag = Value.initPayload(&field_tag_buf.base); - //const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, target); + //const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod); //if (!tag_matches) { // // TODO enhance this saying which one was active // // and which one was accessed, and showing where the union was declared. @@ -17608,8 +17603,7 @@ fn unionFieldVal( .data = field_index, }; const field_tag = Value.initPayload(&field_tag_buf.base); - const target = sema.mod.getTarget(); - const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, target); + const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, sema.mod); switch (union_obj.layout) { .Auto => { if (tag_matches) { @@ -17630,7 +17624,7 @@ fn unionFieldVal( if (tag_matches) { return sema.addConstant(field.ty, tag_and_val.val); } else { - const old_ty = union_ty.unionFieldType(tag_and_val.tag, target); + const old_ty = union_ty.unionFieldType(tag_and_val.tag, sema.mod); const new_val = try sema.bitCastVal(block, src, tag_and_val.val, old_ty, field.ty, 0); return sema.addConstant(field.ty, new_val); } @@ -17655,17 +17649,17 @@ fn elemPtr( const target = sema.mod.getTarget(); const indexable_ty = switch (indexable_ptr_ty.zigTypeTag()) { .Pointer => indexable_ptr_ty.elemType(), - else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(target)}), + else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}), }; if (!indexable_ty.isIndexable()) { - return sema.fail(block, src, "element access of non-indexable type '{}'", .{indexable_ty.fmt(target)}); + return sema.fail(block, src, "element access of non-indexable type '{}'", .{indexable_ty.fmt(sema.mod)}); } switch (indexable_ty.zigTypeTag()) { .Pointer => { // In all below cases, we have to deref the ptr operand to get the actual indexable pointer. 
             const indexable = try sema.analyzeLoad(block, indexable_ptr_src, indexable_ptr, indexable_ptr_src);
-            const result_ty = try indexable_ty.elemPtrType(sema.arena, target);
+            const result_ty = try indexable_ty.elemPtrType(sema.arena, sema.mod);
             switch (indexable_ty.ptrSize()) {
                 .Slice => return sema.elemPtrSlice(block, indexable_ptr_src, indexable, elem_index_src, elem_index),
                 .Many, .C => {
@@ -17676,7 +17670,7 @@ fn elemPtr(
                         const ptr_val = maybe_ptr_val orelse break :rs indexable_ptr_src;
                         const index_val = maybe_index_val orelse break :rs elem_index_src;
                         const index = @intCast(usize, index_val.toUnsignedInt(target));
-                        const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, target);
+                        const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, sema.mod);
                         return sema.addConstant(result_ty, elem_ptr);
                     };
@@ -17713,7 +17707,7 @@ fn elemVal(
     const target = sema.mod.getTarget();

     if (!indexable_ty.isIndexable()) {
-        return sema.fail(block, src, "element access of non-indexable type '{}'", .{indexable_ty.fmt(target)});
+        return sema.fail(block, src, "element access of non-indexable type '{}'", .{indexable_ty.fmt(sema.mod)});
     }

     // TODO in case of a vector of pointers, we need to detect whether the element
@@ -17731,7 +17725,7 @@ fn elemVal(
             const indexable_val = maybe_indexable_val orelse break :rs indexable_src;
             const index_val = maybe_index_val orelse break :rs elem_index_src;
             const index = @intCast(usize, index_val.toUnsignedInt(target));
-            const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, target);
+            const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, sema.mod);
             if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| {
                 return sema.addConstant(indexable_ty.elemType2(), elem_val);
             }
@@ -17785,8 +17779,7 @@ fn tupleFieldPtr(
     }

     const field_ty = tuple_fields.types[field_index];
-    const target = sema.mod.getTarget();
-    const ptr_field_ty = try Type.ptr(sema.arena, target, .{
+    const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{
         .pointee_type = field_ty,
         .mutable = tuple_ptr_ty.ptrIsMutable(),
         .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(),
@@ -17881,7 +17874,7 @@ fn elemValArray(
         }
         if (maybe_index_val) |index_val| {
             const index = @intCast(usize, index_val.toUnsignedInt(target));
-            const elem_val = try array_val.elemValue(sema.arena, index);
+            const elem_val = try array_val.elemValue(sema.mod, sema.arena, index);
             return sema.addConstant(elem_ty, elem_val);
         }
     }
@@ -17914,7 +17907,7 @@ fn elemPtrArray(
     const array_sent = array_ty.sentinel() != null;
     const array_len = array_ty.arrayLen();
     const array_len_s = array_len + @boolToInt(array_sent);
-    const elem_ptr_ty = try array_ptr_ty.elemPtrType(sema.arena, target);
+    const elem_ptr_ty = try array_ptr_ty.elemPtrType(sema.arena, sema.mod);

     if (array_len_s == 0) {
         return sema.fail(block, elem_index_src, "indexing into empty array", .{});
@@ -17937,7 +17930,7 @@ fn elemPtrArray(
         }
         if (maybe_index_val) |index_val| {
             const index = @intCast(usize, index_val.toUnsignedInt(target));
-            const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, sema.arena, index, target);
+            const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, sema.arena, index, sema.mod);
             return sema.addConstant(elem_ptr_ty, elem_ptr);
         }
     }
@@ -17977,7 +17970,7 @@ fn elemValSlice(

     if (maybe_slice_val) |slice_val| {
         runtime_src = elem_index_src;
-        const slice_len = slice_val.sliceLen(target);
+        const slice_len = slice_val.sliceLen(sema.mod);
         const slice_len_s = slice_len + @boolToInt(slice_sent);
         if (slice_len_s == 0) {
             return sema.fail(block, elem_index_src, "indexing into empty slice", .{});
@@ -17988,7 +17981,7 @@ fn elemValSlice(
             const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
             return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
         }
-        const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, target);
+        const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, sema.mod);
         if (try sema.pointerDeref(block, slice_src, elem_ptr_val, slice_ty)) |elem_val| {
             return sema.addConstant(elem_ty, elem_val);
         }
@@ -17999,7 +17992,7 @@ fn elemValSlice(
     try sema.requireRuntimeBlock(block, runtime_src);
     if (block.wantSafety()) {
         const len_inst = if (maybe_slice_val) |slice_val|
-            try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(target))
+            try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod))
         else
             try block.addTyOp(.slice_len, Type.usize, slice);
         const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
@@ -18020,7 +18013,7 @@ fn elemPtrSlice(
     const target = sema.mod.getTarget();
     const slice_ty = sema.typeOf(slice);
     const slice_sent = slice_ty.sentinel() != null;
-    const elem_ptr_ty = try slice_ty.elemPtrType(sema.arena, target);
+    const elem_ptr_ty = try slice_ty.elemPtrType(sema.arena, sema.mod);

     const maybe_undef_slice_val = try sema.resolveMaybeUndefVal(block, slice_src, slice);
     // index must be defined since it can index out of bounds
@@ -18030,7 +18023,7 @@ fn elemPtrSlice(
         if (slice_val.isUndef()) {
             return sema.addConstUndef(elem_ptr_ty);
         }
-        const slice_len = slice_val.sliceLen(target);
+        const slice_len = slice_val.sliceLen(sema.mod);
         const slice_len_s = slice_len + @boolToInt(slice_sent);
         if (slice_len_s == 0) {
             return sema.fail(block, elem_index_src, "indexing into empty slice", .{});
@@ -18041,7 +18034,7 @@ fn elemPtrSlice(
             const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
             return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
         }
-        const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, target);
+        const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, sema.mod);
             return sema.addConstant(elem_ptr_ty, elem_ptr_val);
         }
     }
@@ -18052,7 +18045,7 @@ fn elemPtrSlice(
         const len_inst = len: {
             if (maybe_undef_slice_val) |slice_val|
                 if (!slice_val.isUndef())
-                    break :len try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(target));
+                    break :len try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod));
             break :len try block.addTyOp(.slice_len, Type.usize, slice);
         };
         const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
@@ -18079,7 +18072,7 @@ fn coerce(
     const inst_ty = try sema.resolveTypeFields(block, inst_src, sema.typeOf(inst));
     const target = sema.mod.getTarget();
     // If the types are the same, we can return the operand.
-    if (dest_ty.eql(inst_ty, target))
+    if (dest_ty.eql(inst_ty, sema.mod))
         return inst;

     const arena = sema.arena;
@@ -18185,7 +18178,7 @@ fn coerce(
                         // *[N:s]T to [*]T
                         if (dest_info.sentinel) |dst_sentinel| {
                             if (array_ty.sentinel()) |src_sentinel| {
-                                if (src_sentinel.eql(dst_sentinel, dst_elem_type, target)) {
+                                if (src_sentinel.eql(dst_sentinel, dst_elem_type, sema.mod)) {
                                     return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
                                 }
                             }
@@ -18254,7 +18247,7 @@ fn coerce(
                     }
                     if (inst_info.size == .Slice) {
                         if (dest_info.sentinel == null or inst_info.sentinel == null or
-                            !dest_info.sentinel.?.eql(inst_info.sentinel.?, dest_info.pointee_type, target))
+                            !dest_info.sentinel.?.eql(inst_info.sentinel.?, dest_info.pointee_type, sema.mod))
                             break :p;

                         const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
@@ -18334,7 +18327,7 @@ fn coerce(
                     }

                     if (dest_info.sentinel == null or inst_info.sentinel == null or
-                        !dest_info.sentinel.?.eql(inst_info.sentinel.?, dest_info.pointee_type, target))
+                        !dest_info.sentinel.?.eql(inst_info.sentinel.?, dest_info.pointee_type, sema.mod))
                         break :p;

                     const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
@@ -18347,11 +18340,16 @@ fn coerce(
                 const val = (try sema.resolveDefinedValue(block, inst_src, inst)) orelse break :float;

                 if (val.floatHasFraction()) {
-                    return sema.fail(block, inst_src, "fractional component prevents float value {} from coercion to type '{}'", .{ val.fmtValue(inst_ty, target), dest_ty.fmt(target) });
+                    return sema.fail(
+                        block,
+                        inst_src,
+                        "fractional component prevents float value {} from coercion to type '{}'",
+                        .{ val.fmtValue(inst_ty, sema.mod), dest_ty.fmt(sema.mod) },
+                    );
                 }
                 const result_val = val.floatToInt(sema.arena, inst_ty, dest_ty, target) catch |err| switch (err) {
                     error.FloatCannotFit => {
-                        return sema.fail(block, inst_src, "integer value {d} cannot be stored in type '{}'", .{ std.math.floor(val.toFloat(f64)), dest_ty.fmt(target) });
+                        return sema.fail(block, inst_src, "integer value {d} cannot be stored in type '{}'", .{ std.math.floor(val.toFloat(f64)), dest_ty.fmt(sema.mod) });
                     },
                     else => |e| return e,
                 };
@@ -18361,7 +18359,7 @@ fn coerce(
             if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| {
                 // comptime known integer to other number
                 if (!val.intFitsInType(dest_ty, target)) {
-                    return sema.fail(block, inst_src, "type {} cannot represent integer value {}", .{ dest_ty.fmt(target), val.fmtValue(inst_ty, target) });
+                    return sema.fail(block, inst_src, "type {} cannot represent integer value {}", .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) });
                 }
                 return try sema.addConstant(dest_ty, val);
             }
@@ -18391,12 +18389,12 @@ fn coerce(
             .Float => {
                 if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| {
                     const result_val = try val.floatCast(sema.arena, dest_ty, target);
-                    if (!val.eql(result_val, dest_ty, target)) {
+                    if (!val.eql(result_val, dest_ty, sema.mod)) {
                         return sema.fail(
                             block,
                             inst_src,
                             "type {} cannot represent float value {}",
-                            .{ dest_ty.fmt(target), val.fmtValue(inst_ty, target) },
+                            .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) },
                         );
                     }
                     return try sema.addConstant(dest_ty, result_val);
@@ -18415,12 +18413,12 @@ fn coerce(
                     const result_val = try val.intToFloat(sema.arena, inst_ty, dest_ty, target);
                     // TODO implement this compile error
                     //const int_again_val = try result_val.floatToInt(sema.arena, inst_ty);
-                    //if (!int_again_val.eql(val, inst_ty, target)) {
+                    //if (!int_again_val.eql(val, inst_ty, mod)) {
                     //    return sema.fail(
                     //        block,
                     //        inst_src,
                     //        "type {} cannot represent integer value {}",
-                    //        .{ dest_ty.fmt(target), val },
+                    //        .{ dest_ty.fmt(sema.mod), val },
                     //    );
                     //}
                     return try sema.addConstant(dest_ty, result_val);
@@ -18441,11 +18439,11 @@ fn coerce(
                         block,
                         inst_src,
                         "enum '{}' has no field named '{s}'",
-                        .{ dest_ty.fmt(target), bytes },
+                        .{ dest_ty.fmt(sema.mod), bytes },
                     );
                     errdefer msg.destroy(sema.gpa);
                     try sema.mod.errNoteNonLazy(
-                        dest_ty.declSrcLoc(),
+                        dest_ty.declSrcLoc(sema.mod),
                         msg,
                         "enum declared here",
                         .{},
@@ -18462,7 +18460,7 @@ fn coerce(
             .Union => blk: {
                 // union to its own tag type
                 const union_tag_ty = inst_ty.unionTagType() orelse break :blk;
-                if (union_tag_ty.eql(dest_ty, target)) {
+                if (union_tag_ty.eql(dest_ty, sema.mod)) {
                     return sema.unionToTag(block, dest_ty, inst, inst_src);
                 }
             },
@@ -18557,7 +18555,7 @@ fn coerce(
         return sema.addConstUndef(dest_ty);
     }

-    return sema.fail(block, inst_src, "expected {}, found {}", .{ dest_ty.fmt(target), inst_ty.fmt(target) });
+    return sema.fail(block, inst_src, "expected {}, found {}", .{ dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod) });
 }

 const InMemoryCoercionResult = enum {
@@ -18586,7 +18584,7 @@ fn coerceInMemoryAllowed(
     dest_src: LazySrcLoc,
     src_src: LazySrcLoc,
 ) CompileError!InMemoryCoercionResult {
-    if (dest_ty.eql(src_ty, target))
+    if (dest_ty.eql(src_ty, sema.mod))
         return .ok;

     // Differently-named integers with the same number of bits.
@@ -18650,7 +18648,7 @@ fn coerceInMemoryAllowed(
         }

         const ok_sent = dest_info.sentinel == null or (src_info.sentinel != null and
-            dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.elem_type, target));
+            dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.elem_type, sema.mod));
         if (!ok_sent) {
             return .no_match;
         }
@@ -18893,7 +18891,7 @@ fn coerceInMemoryAllowedPtrs(
     const ok_sent = dest_info.sentinel == null or src_info.size == .C or
         (src_info.sentinel != null and
-        dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.pointee_type, target));
+        dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.pointee_type, sema.mod));
     if (!ok_sent) {
         return .no_match;
     }
@@ -18934,7 +18932,7 @@ fn coerceInMemoryAllowedPtrs(
     // resolved and we compare the alignment numerically.
     alignment: {
         if (src_info.@"align" == 0 and dest_info.@"align" == 0 and
-            dest_info.pointee_type.eql(src_info.pointee_type, target))
+            dest_info.pointee_type.eql(src_info.pointee_type, sema.mod))
         {
             break :alignment;
         }
@@ -19089,8 +19087,7 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
     // We have a pointer-to-array and a pointer-to-vector. If the elements and
     // lengths match, return the result.
     const vector_ty = sema.typeOf(prev_ptr).childType();
-    const target = sema.mod.getTarget();
-    if (array_ty.childType().eql(vector_ty.childType(), target) and
+    if (array_ty.childType().eql(vector_ty.childType(), sema.mod) and
         array_ty.arrayLen() == vector_ty.vectorLen())
     {
         return prev_ptr;
@@ -19114,8 +19111,8 @@ fn storePtrVal(

     const bitcasted_val = try sema.bitCastVal(block, src, operand_val, operand_ty, mut_kit.ty, 0);

-    const arena = mut_kit.beginArena(sema.gpa);
-    defer mut_kit.finishArena();
+    const arena = mut_kit.beginArena(sema.mod);
+    defer mut_kit.finishArena(sema.mod);

     mut_kit.val.* = try bitcasted_val.copy(arena);
 }
@@ -19126,13 +19123,15 @@ const ComptimePtrMutationKit = struct {
     ty: Type,
     decl_arena: std.heap.ArenaAllocator = undefined,

-    fn beginArena(self: *ComptimePtrMutationKit, gpa: Allocator) Allocator {
-        self.decl_arena = self.decl_ref_mut.decl.value_arena.?.promote(gpa);
+    fn beginArena(self: *ComptimePtrMutationKit, mod: *Module) Allocator {
+        const decl = mod.declPtr(self.decl_ref_mut.decl_index);
+        self.decl_arena = decl.value_arena.?.promote(mod.gpa);
         return self.decl_arena.allocator();
     }

-    fn finishArena(self: *ComptimePtrMutationKit) void {
-        self.decl_ref_mut.decl.value_arena.?.* = self.decl_arena.state;
+    fn finishArena(self: *ComptimePtrMutationKit, mod: *Module) void {
+        const decl = mod.declPtr(self.decl_ref_mut.decl_index);
+        decl.value_arena.?.* = self.decl_arena.state;
         self.decl_arena = undefined;
     }
 };
@@ -19154,10 +19153,11 @@ fn beginComptimePtrMutation(
     switch (ptr_val.tag()) {
         .decl_ref_mut => {
             const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data;
+            const decl = sema.mod.declPtr(decl_ref_mut.decl_index);
             return ComptimePtrMutationKit{
                 .decl_ref_mut = decl_ref_mut,
-                .val = &decl_ref_mut.decl.val,
-                .ty = decl_ref_mut.decl.ty,
+                .val = &decl.val,
+                .ty = decl.ty,
             };
         },
         .elem_ptr => {
@@ -19178,8 +19178,8 @@ fn beginComptimePtrMutation(
                     // An array has been initialized to undefined at comptime and now we
                     // are for the first time setting an element. We must change the representation
                     // of the array from `undef` to `array`.
-                    const arena = parent.beginArena(sema.gpa);
-                    defer parent.finishArena();
+                    const arena = parent.beginArena(sema.mod);
+                    defer parent.finishArena(sema.mod);

                     const array_len_including_sentinel =
                         try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel());
@@ -19200,8 +19200,8 @@ fn beginComptimePtrMutation(
                     // If we wanted to avoid this, there would need to be special detection
                     // elsewhere to identify when writing a value to an array element that is stored
                     // using the `bytes` tag, and handle it without making a call to this function.
-                    const arena = parent.beginArena(sema.gpa);
-                    defer parent.finishArena();
+                    const arena = parent.beginArena(sema.mod);
+                    defer parent.finishArena(sema.mod);

                     const bytes = parent.val.castTag(.bytes).?.data;
                     const dest_len = parent.ty.arrayLenIncludingSentinel();
@@ -19229,8 +19229,8 @@ fn beginComptimePtrMutation(
                     // need to be special detection elsewhere to identify when writing a value to an
                     // array element that is stored using the `repeated` tag, and handle it
                     // without making a call to this function.
-                    const arena = parent.beginArena(sema.gpa);
-                    defer parent.finishArena();
+                    const arena = parent.beginArena(sema.mod);
+                    defer parent.finishArena(sema.mod);

                     const repeated_val = try parent.val.castTag(.repeated).?.data.copy(arena);
                     const array_len_including_sentinel =
@@ -19281,8 +19281,8 @@ fn beginComptimePtrMutation(
                     // A struct or union has been initialized to undefined at comptime and now we
                     // are for the first time setting a field. We must change the representation
                     // of the struct/union from `undef` to `struct`/`union`.
-                    const arena = parent.beginArena(sema.gpa);
-                    defer parent.finishArena();
+                    const arena = parent.beginArena(sema.mod);
+                    defer parent.finishArena(sema.mod);

                     switch (parent.ty.zigTypeTag()) {
                         .Struct => {
@@ -19322,8 +19322,8 @@ fn beginComptimePtrMutation(
                 },
                 .@"union" => {
                     // We need to set the active field of the union.
-                    const arena = parent.beginArena(sema.gpa);
-                    defer parent.finishArena();
+                    const arena = parent.beginArena(sema.mod);
+                    defer parent.finishArena(sema.mod);

                     const payload = &parent.val.castTag(.@"union").?.data;
                     payload.tag = try Value.Tag.enum_field_index.create(arena, field_index);
@@ -19347,8 +19347,8 @@ fn beginComptimePtrMutation(
                     // An error union has been initialized to undefined at comptime and now we
                     // are for the first time setting the payload. We must change the
                     // representation of the error union from `undef` to `opt_payload`.
-                    const arena = parent.beginArena(sema.gpa);
-                    defer parent.finishArena();
+                    const arena = parent.beginArena(sema.mod);
+                    defer parent.finishArena(sema.mod);

                     const payload = try arena.create(Value.Payload.SubValue);
                     payload.* = .{
@@ -19380,8 +19380,8 @@ fn beginComptimePtrMutation(
                 // An optional has been initialized to undefined at comptime and now we
                 // are for the first time setting the payload. We must change the
                 // representation of the optional from `undef` to `opt_payload`.
-                const arena = parent.beginArena(sema.gpa);
-                defer parent.finishArena();
+                const arena = parent.beginArena(sema.mod);
+                defer parent.finishArena(sema.mod);

                 const payload = try arena.create(Value.Payload.SubValue);
                 payload.* = .{
@@ -19451,12 +19451,13 @@ fn beginComptimePtrLoad(
         .decl_ref,
         .decl_ref_mut,
         => blk: {
-            const decl = switch (ptr_val.tag()) {
+            const decl_index = switch (ptr_val.tag()) {
                 .decl_ref => ptr_val.castTag(.decl_ref).?.data,
-                .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl,
+                .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index,
                 else => unreachable,
             };
             const is_mutable = ptr_val.tag() == .decl_ref_mut;
+            const decl = sema.mod.declPtr(decl_index);
             const decl_tv = try decl.typedValue();
             if (decl_tv.val.tag() == .variable) return error.RuntimeLoad;
@@ -19477,7 +19478,9 @@ fn beginComptimePtrLoad(
             // This code assumes that elem_ptrs have been "flattened" in order for direct dereference
             // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. Here we check that
             // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened"
-            if (elem_ptr.array_ptr.castTag(.elem_ptr)) |parent_elem_ptr| assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, target)));
+            if (elem_ptr.array_ptr.castTag(.elem_ptr)) |parent_elem_ptr| {
+                assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, sema.mod)));
+            }

             if (elem_ptr.index != 0) {
                 if (elem_ty.hasWellDefinedLayout()) {
@@ -19510,11 +19513,11 @@ fn beginComptimePtrLoad(
                 if (maybe_array_ty) |load_ty| {
                     // It's possible that we're loading a [N]T, in which case we'd like to slice
                     // the pointee array directly from our parent array.
-                    if (load_ty.isArrayOrVector() and load_ty.childType().eql(elem_ty, target)) {
+                    if (load_ty.isArrayOrVector() and load_ty.childType().eql(elem_ty, sema.mod)) {
                         const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel());
                         deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{
-                            .ty = try Type.array(sema.arena, N, null, elem_ty, target),
-                            .val = try array_tv.val.sliceArray(sema.arena, elem_ptr.index, elem_ptr.index + N),
+                            .ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod),
+                            .val = try array_tv.val.sliceArray(sema.mod, sema.arena, elem_ptr.index, elem_ptr.index + N),
                         } else null;
                         break :blk deref;
                     }
                 }
@@ -19522,7 +19525,7 @@
                 deref.pointee = if (elem_ptr.index < check_len) TypedValue{
                     .ty = elem_ty,
-                    .val = try array_tv.val.elemValue(sema.arena, elem_ptr.index),
+                    .val = try array_tv.val.elemValue(sema.mod, sema.arena, elem_ptr.index),
                 } else null;
                 break :blk deref;
             },
@@ -19637,9 +19640,9 @@ fn bitCast(
     if (old_bits != dest_bits) {
         return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{
-            dest_ty.fmt(target),
+            dest_ty.fmt(sema.mod),
             dest_bits,
-            old_ty.fmt(target),
+            old_ty.fmt(sema.mod),
             old_bits,
         });
     }
@@ -19662,7 +19665,7 @@ pub fn bitCastVal(
     buffer_offset: usize,
 ) !Value {
     const target = sema.mod.getTarget();
-    if (old_ty.eql(new_ty, target)) return val;
+    if (old_ty.eql(new_ty, sema.mod)) return val;

     // For types with well-defined memory layouts, we serialize them a byte buffer,
     // then deserialize to the new type.
@@ -19718,12 +19721,11 @@ fn coerceEnumToUnion(
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
     const inst_ty = sema.typeOf(inst);
-    const target = sema.mod.getTarget();

     const tag_ty = union_ty.unionTagType() orelse {
         const msg = msg: {
             const msg = try sema.errMsg(block, inst_src, "expected {}, found {}", .{
-                union_ty.fmt(target), inst_ty.fmt(target),
+                union_ty.fmt(sema.mod), inst_ty.fmt(sema.mod),
             });
             errdefer msg.destroy(sema.gpa);
             try sema.errNote(block, union_ty_src, msg, "cannot coerce enum to untagged union", .{});
@@ -19736,10 +19738,10 @@ fn coerceEnumToUnion(
     const enum_tag = try sema.coerce(block, tag_ty, inst, inst_src);
     if (try sema.resolveDefinedValue(block, inst_src, enum_tag)) |val| {
         const union_obj = union_ty.cast(Type.Payload.Union).?.data;
-        const field_index = union_obj.tag_ty.enumTagFieldIndex(val, target) orelse {
+        const field_index = union_obj.tag_ty.enumTagFieldIndex(val, sema.mod) orelse {
             const msg = msg: {
                 const msg = try sema.errMsg(block, inst_src, "union {} has no tag with value {}", .{
-                    union_ty.fmt(target), val.fmtValue(tag_ty, target),
+                    union_ty.fmt(sema.mod), val.fmtValue(tag_ty, sema.mod),
                 });
                 errdefer msg.destroy(sema.gpa);
                 try sema.addDeclaredHereNote(msg, union_ty);
@@ -19753,7 +19755,7 @@ fn coerceEnumToUnion(
             const msg = msg: {
                 const field_name = union_obj.fields.keys()[field_index];
                 const msg = try sema.errMsg(block, inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{s}'", .{
-                    inst_ty.fmt(target), union_ty.fmt(target), field_ty.fmt(target), field_name,
+                    inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod), field_ty.fmt(sema.mod), field_name,
                 });
                 errdefer msg.destroy(sema.gpa);
@@ -19775,7 +19777,7 @@ fn coerceEnumToUnion(
     if (tag_ty.isNonexhaustiveEnum()) {
         const msg = msg: {
             const msg = try sema.errMsg(block, inst_src, "runtime coercion to union {} from non-exhaustive enum", .{
-                union_ty.fmt(target),
+                union_ty.fmt(sema.mod),
             });
             errdefer msg.destroy(sema.gpa);
             try sema.addDeclaredHereNote(msg, tag_ty);
@@ -19795,7 +19797,7 @@ fn coerceEnumToUnion(
             block,
             inst_src,
             "runtime coercion from enum '{}' to union '{}' which has non-void fields",
-            .{ tag_ty.fmt(target), union_ty.fmt(target) },
+            .{ tag_ty.fmt(sema.mod), union_ty.fmt(sema.mod) },
         );
         errdefer msg.destroy(sema.gpa);
@@ -19804,7 +19806,7 @@ fn coerceEnumToUnion(
         while (it.next()) |field| {
             const field_name = field.key_ptr.*;
             const field_ty = field.value_ptr.ty;
-            try sema.addFieldErrNote(block, union_ty, field_index, msg, "field '{s}' has type '{}'", .{ field_name, field_ty.fmt(target) });
+            try sema.addFieldErrNote(block, union_ty, field_index, msg, "field '{s}' has type '{}'", .{ field_name, field_ty.fmt(sema.mod) });
             field_index += 1;
         }
         try sema.addDeclaredHereNote(msg, union_ty);
@@ -19892,7 +19894,7 @@ fn coerceArrayLike(
     if (dest_len != inst_len) {
         const msg = msg: {
             const msg = try sema.errMsg(block, inst_src, "expected {}, found {}", .{
-                dest_ty.fmt(target), inst_ty.fmt(target),
+                dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod),
             });
             errdefer msg.destroy(sema.gpa);
             try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len});
@@ -19959,12 +19961,11 @@ fn coerceTupleToArray(
     const inst_ty = sema.typeOf(inst);
     const inst_len = inst_ty.arrayLen();
     const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen());
-    const target = sema.mod.getTarget();

     if (dest_len != inst_len) {
         const msg = msg: {
             const msg = try sema.errMsg(block, inst_src, "expected {}, found {}", .{
-                dest_ty.fmt(target), inst_ty.fmt(target),
+                dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod),
             });
             errdefer msg.destroy(sema.gpa);
             try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len});
@@ -20017,8 +20018,7 @@ fn coerceTupleToSlicePtrs(
     const tuple_ty = sema.typeOf(ptr_tuple).childType();
     const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
     const slice_info = slice_ty.ptrInfo().data;
-    const target = sema.mod.getTarget();
-    const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(), slice_info.sentinel, slice_info.pointee_type, target);
+    const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(), slice_info.sentinel, slice_info.pointee_type, sema.mod);
     const array_inst = try sema.coerceTupleToArray(block, array_ty, slice_ty_src, tuple, tuple_src);
     if (slice_info.@"align" != 0) {
         return sema.fail(block, slice_ty_src, "TODO: override the alignment of the array decl we create here", .{});
@@ -20141,23 +20141,23 @@ fn analyzeDeclVal(
     sema: *Sema,
     block: *Block,
     src: LazySrcLoc,
-    decl: *Decl,
+    decl_index: Decl.Index,
 ) CompileError!Air.Inst.Ref {
-    if (sema.decl_val_table.get(decl)) |result| {
+    if (sema.decl_val_table.get(decl_index)) |result| {
         return result;
     }
-    const decl_ref = try sema.analyzeDeclRef(decl);
+    const decl_ref = try sema.analyzeDeclRef(decl_index);
     const result = try sema.analyzeLoad(block, src, decl_ref, src);
     if (Air.refToIndex(result)) |index| {
         if (sema.air_instructions.items(.tag)[index] == .constant) {
-            try sema.decl_val_table.put(sema.gpa, decl, result);
+            try sema.decl_val_table.put(sema.gpa, decl_index, result);
         }
     }
     return result;
 }

-fn ensureDeclAnalyzed(sema: *Sema, decl: *Decl) CompileError!void {
-    sema.mod.ensureDeclAnalyzed(decl) catch |err| {
+fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void {
+    sema.mod.ensureDeclAnalyzed(decl_index) catch |err| {
         if (sema.owner_func) |owner_func| {
             owner_func.state = .dependency_failure;
         } else {
@@ -20186,7 +20186,7 @@ fn refValue(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type, val: Value) !
         try val.copy(anon_decl.arena()),
         0, // default alignment
     );
-    try sema.mod.declareDeclDependency(sema.owner_decl, decl);
+    try sema.mod.declareDeclDependency(sema.owner_decl_index, decl);
     return try Value.Tag.decl_ref.create(sema.arena, decl);
 }

@@ -20197,29 +20197,29 @@ fn optRefValue(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type, opt_val: ?
     return result;
 }

-fn analyzeDeclRef(sema: *Sema, decl: *Decl) CompileError!Air.Inst.Ref {
-    try sema.mod.declareDeclDependency(sema.owner_decl, decl);
-    try sema.ensureDeclAnalyzed(decl);
+fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref {
+    try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index);
+    try sema.ensureDeclAnalyzed(decl_index);

-    const target = sema.mod.getTarget();
+    const decl = sema.mod.declPtr(decl_index);
     const decl_tv = try decl.typedValue();
     if (decl_tv.val.castTag(.variable)) |payload| {
         const variable = payload.data;
-        const ty = try Type.ptr(sema.arena, target, .{
+        const ty = try Type.ptr(sema.arena, sema.mod, .{
             .pointee_type = decl_tv.ty,
             .mutable = variable.is_mutable,
             .@"addrspace" = decl.@"addrspace",
             .@"align" = decl.@"align",
         });
-        return sema.addConstant(ty, try Value.Tag.decl_ref.create(sema.arena, decl));
+        return sema.addConstant(ty, try Value.Tag.decl_ref.create(sema.arena, decl_index));
     }
     return sema.addConstant(
-        try Type.ptr(sema.arena, target, .{
+        try Type.ptr(sema.arena, sema.mod, .{
             .pointee_type = decl_tv.ty,
             .mutable = false,
             .@"addrspace" = decl.@"addrspace",
         }),
-        try Value.Tag.decl_ref.create(sema.arena, decl),
+        try Value.Tag.decl_ref.create(sema.arena, decl_index),
     );
 }

@@ -20243,13 +20243,12 @@ fn analyzeRef(
     try sema.requireRuntimeBlock(block, src);
     const address_space = target_util.defaultAddressSpace(sema.mod.getTarget(), .local);
-    const target = sema.mod.getTarget();
-    const ptr_type = try Type.ptr(sema.arena, target, .{
+    const ptr_type = try Type.ptr(sema.arena, sema.mod, .{
         .pointee_type = operand_ty,
         .mutable = false,
         .@"addrspace" = address_space,
     });
-    const mut_ptr_type = try Type.ptr(sema.arena, target, .{
+    const mut_ptr_type = try Type.ptr(sema.arena, sema.mod, .{
         .pointee_type = operand_ty,
         .@"addrspace" = address_space,
     });
@@ -20267,11 +20266,10 @@ fn analyzeLoad(
     ptr: Air.Inst.Ref,
     ptr_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
-    const target = sema.mod.getTarget();
     const ptr_ty = sema.typeOf(ptr);
     const elem_ty = switch (ptr_ty.zigTypeTag()) {
         .Pointer => ptr_ty.childType(),
-        else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(target)}),
+        else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}),
     };
     if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
         if (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) |elem_val| {
@@ -20310,8 +20308,7 @@ fn analyzeSliceLen(
         if (slice_val.isUndef()) {
             return sema.addConstUndef(Type.usize);
         }
-        const target = sema.mod.getTarget();
-        return sema.addIntUnsigned(Type.usize, slice_val.sliceLen(target));
+        return sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod));
     }
     try sema.requireRuntimeBlock(block, src);
     return block.addTyOp(.slice_len, Type.usize, slice_inst);
@@ -20417,8 +20414,9 @@ fn analyzeSlice(
     const target = sema.mod.getTarget();
     const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag()) {
         .Pointer => ptr_ptr_ty.elemType(),
-        else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(target)}),
+        else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(sema.mod)}),
     };
+    const mod = sema.mod;

     var array_ty = ptr_ptr_child_ty;
     var slice_ty = ptr_ptr_ty;
@@ -20465,7 +20463,7 @@ fn analyzeSlice(
                 elem_ty = ptr_ptr_child_ty.childType();
             },
         },
-        else => return sema.fail(block, ptr_src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(target)}),
+        else => return sema.fail(block, ptr_src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(mod)}),
     }

     const ptr = if (slice_ty.isSlice())
@@ -20492,7 +20490,7 @@ fn analyzeSlice(
                     sema.arena,
                     array_ty.arrayLenIncludingSentinel(),
                 );
-                if (end_val.compare(.gt, len_s_val, Type.usize, target)) {
+                if (end_val.compare(.gt, len_s_val, Type.usize, mod)) {
                     const sentinel_label: []const u8 = if (array_ty.sentinel() != null)
                         " +1 (sentinel)"
                     else
@@ -20503,8 +20501,8 @@ fn analyzeSlice(
                         end_src,
                         "end index {} out of bounds for array of length {}{s}",
                         .{
-                            end_val.fmtValue(Type.usize, target),
-                            len_val.fmtValue(Type.usize, target),
+                            end_val.fmtValue(Type.usize, mod),
+                            len_val.fmtValue(Type.usize, mod),
                             sentinel_label,
                         },
                     );
@@ -20513,7 +20511,7 @@ fn analyzeSlice(
                 // end_is_len is only true if we are NOT using the sentinel
                 // length. For sentinel-length, we don't want the type to
                 // contain the sentinel.
-                if (end_val.eql(len_val, Type.usize, target)) {
+                if (end_val.eql(len_val, Type.usize, mod)) {
                     end_is_len = true;
                 }
             }
@@ -20529,10 +20527,10 @@ fn analyzeSlice(
                 const has_sentinel = slice_ty.sentinel() != null;
                 var int_payload: Value.Payload.U64 = .{
                     .base = .{ .tag = .int_u64 },
-                    .data = slice_val.sliceLen(target) + @boolToInt(has_sentinel),
+                    .data = slice_val.sliceLen(mod) + @boolToInt(has_sentinel),
                 };
                 const slice_len_val = Value.initPayload(&int_payload.base);
-                if (end_val.compare(.gt, slice_len_val, Type.usize, target)) {
+                if (end_val.compare(.gt, slice_len_val, Type.usize, mod)) {
                     const sentinel_label: []const u8 = if (has_sentinel)
                         " +1 (sentinel)"
                     else
@@ -20543,8 +20541,8 @@ fn analyzeSlice(
                         end_src,
                         "end index {} out of bounds for slice of length {d}{s}",
                         .{
-                            end_val.fmtValue(Type.usize, target),
-                            slice_val.sliceLen(target),
+                            end_val.fmtValue(Type.usize, mod),
+                            slice_val.sliceLen(mod),
                             sentinel_label,
                         },
                     );
@@ -20557,7 +20555,7 @@ fn analyzeSlice(
                     int_payload.data -= 1;
                 }

-                if (end_val.eql(slice_len_val, Type.usize, target)) {
+                if (end_val.eql(slice_len_val, Type.usize, mod)) {
                     end_is_len = true;
                 }
             }
@@ -20590,14 +20588,14 @@ fn analyzeSlice(
     // requirement: start <= end
     if (try sema.resolveDefinedValue(block, src, end)) |end_val| {
         if (try sema.resolveDefinedValue(block, src, start)) |start_val| {
-            if (start_val.compare(.gt, end_val, Type.usize, target)) {
+            if (start_val.compare(.gt, end_val, Type.usize, mod)) {
                 return sema.fail(
                     block,
                     start_src,
                     "start index {} is larger than end index {}",
                     .{
-                        start_val.fmtValue(Type.usize, target),
-                        end_val.fmtValue(Type.usize, target),
+                        start_val.fmtValue(Type.usize, mod),
+                        end_val.fmtValue(Type.usize, mod),
                     },
                 );
             }
@@ -20613,8 +20611,8 @@ fn analyzeSlice(
     if (opt_new_len_val) |new_len_val| {
         const new_len_int = new_len_val.toUnsignedInt(target);

-        const return_ty = try Type.ptr(sema.arena, target, .{
-            .pointee_type = try Type.array(sema.arena, new_len_int, sentinel, elem_ty, target),
+        const return_ty = try Type.ptr(sema.arena, mod, .{
+            .pointee_type = try Type.array(sema.arena, new_len_int, sentinel, elem_ty, mod),
             .sentinel = null,
             .@"align" = new_ptr_ty_info.@"align",
             .@"addrspace" = new_ptr_ty_info.@"addrspace",
@@ -20641,7 +20639,7 @@ fn analyzeSlice(
         return sema.fail(block, ptr_src, "non-zero length slice of undefined pointer", .{});
     }

-    const return_ty = try Type.ptr(sema.arena, target, .{
+    const return_ty = try Type.ptr(sema.arena, mod, .{
         .pointee_type = elem_ty,
         .sentinel = sentinel,
         .@"align" = new_ptr_ty_info.@"align",
@@ -20667,7 +20665,7 @@ fn analyzeSlice(
             if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| {
                 // we don't need to add one for sentinels because the
                 // underlying value data includes the sentinel
-                break :blk try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(target));
+                break :blk try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(mod));
             }
             const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
@@ -20920,7 +20918,6 @@ fn cmpVector(
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);

     const result_ty = try Type.vector(sema.arena, lhs_ty.vectorLen(), Type.@"bool");
-    const target = sema.mod.getTarget();

     const runtime_src: LazySrcLoc = src: {
         if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| {
@@ -20928,7 +20925,7 @@ fn cmpVector(
                 if (lhs_val.isUndef() or rhs_val.isUndef()) {
                     return sema.addConstUndef(result_ty);
                 }
-                const cmp_val = try lhs_val.compareVector(op, rhs_val, lhs_ty, sema.arena, target);
+                const cmp_val = try lhs_val.compareVector(op, rhs_val, lhs_ty, sema.arena, sema.mod);
                 return sema.addConstant(result_ty, cmp_val);
             } else {
                 break :src rhs_src;
@@ -21080,7 +21077,7 @@ fn resolvePeerTypes(
         const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison();
         const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison();

-        if (candidate_ty.eql(chosen_ty, target))
+        if (candidate_ty.eql(chosen_ty, sema.mod))
             continue;

         switch (candidate_ty_tag) {
@@ -21496,27 +21493,27 @@ fn resolvePeerTypes(
         // the source locations.
         const chosen_src = candidate_srcs.resolve(
             sema.gpa,
-            block.src_decl,
+            sema.mod.declPtr(block.src_decl),
             chosen_i,
         );
         const candidate_src = candidate_srcs.resolve(
             sema.gpa,
-            block.src_decl,
+            sema.mod.declPtr(block.src_decl),
             candidate_i + 1,
         );

         const msg = msg: {
             const msg = try sema.errMsg(block, src, "incompatible types: '{}' and '{}'", .{
-                chosen_ty.fmt(target),
-                candidate_ty.fmt(target),
+                chosen_ty.fmt(sema.mod),
+                candidate_ty.fmt(sema.mod),
             });
             errdefer msg.destroy(sema.gpa);

             if (chosen_src) |src_loc|
-                try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty.fmt(target)});
+                try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty.fmt(sema.mod)});

             if (candidate_src) |src_loc|
-                try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty.fmt(target)});
+                try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty.fmt(sema.mod)});

             break :msg msg;
         };
@@ -21538,13 +21535,13 @@ fn resolvePeerTypes(
                 else => unreachable,
             };

-            const new_ptr_ty = try Type.ptr(sema.arena, target, info.data);
+            const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data);
             const opt_ptr_ty = if (any_are_null)
                 try Type.optional(sema.arena, new_ptr_ty)
             else
                 new_ptr_ty;
             const set_ty = err_set_ty orelse return opt_ptr_ty;
-            return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, target);
+            return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod);
         }
@@ -21554,24 +21551,24 @@ fn resolvePeerTypes(
     if (seen_const) {
         // turn []T => []const T
         switch (chosen_ty.zigTypeTag()) {
             .ErrorUnion => {
                 const ptr_ty = chosen_ty.errorUnionPayload();
                 var info = ptr_ty.ptrInfo();
                 info.data.mutable = false;
-                const new_ptr_ty = try Type.ptr(sema.arena, target, info.data);
+                const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data);
                 const opt_ptr_ty = if (any_are_null)
                     try Type.optional(sema.arena, new_ptr_ty)
                 else
                     new_ptr_ty;
                 const set_ty = err_set_ty orelse chosen_ty.errorUnionSet();
-                return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, target);
+                return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod);
             },
             .Pointer => {
                 var info = chosen_ty.ptrInfo();
                 info.data.mutable = false;
-                const new_ptr_ty = try Type.ptr(sema.arena, target, info.data);
+                const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data);
                 const opt_ptr_ty = if (any_are_null)
                     try Type.optional(sema.arena, new_ptr_ty)
                 else
                     new_ptr_ty;
                 const set_ty = err_set_ty orelse return opt_ptr_ty;
-                return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, target);
+                return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod);
             },
             else => return chosen_ty,
         }
@@ -21583,16 +21580,16 @@ fn resolvePeerTypes(
             else => try Type.optional(sema.arena, chosen_ty),
         };
         const set_ty = err_set_ty orelse return opt_ty;
-        return try Type.errorUnion(sema.arena, set_ty, opt_ty, target);
+        return try Type.errorUnion(sema.arena, set_ty, opt_ty, sema.mod);
     }

     if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag()) {
         .ErrorSet => return ty,
         .ErrorUnion => {
             const payload_ty = chosen_ty.errorUnionPayload();
-            return try Type.errorUnion(sema.arena, ty, payload_ty, target);
+            return try Type.errorUnion(sema.arena, ty, payload_ty, sema.mod);
         },
-        else => return try Type.errorUnion(sema.arena, ty, chosen_ty, target),
+        else => return try Type.errorUnion(sema.arena, ty, chosen_ty, sema.mod),
     };

     return chosen_ty;
@@ -21670,12 +21667,11 @@ fn resolveStructLayout(
 ) CompileError!void {
     const resolved_ty = try sema.resolveTypeFields(block, src, ty);
     if (resolved_ty.castTag(.@"struct")) |payload| {
-        const target = sema.mod.getTarget();
         const struct_obj = payload.data;
         switch (struct_obj.status) {
             .none, .have_field_types => {},
             .field_types_wip, .layout_wip => {
-                return sema.fail(block, src, "struct {} depends on itself", .{ty.fmt(target)});
+                return sema.fail(block, src, "struct {} depends on itself", .{ty.fmt(sema.mod)});
             },
             .have_layout, .fully_resolved_wip, .fully_resolved => return,
         }
@@ -21703,11 +21699,10 @@ fn resolveUnionLayout(
 ) CompileError!void {
     const resolved_ty = try sema.resolveTypeFields(block, src, ty);
     const union_obj = resolved_ty.cast(Type.Payload.Union).?.data;
-    const target = sema.mod.getTarget();
     switch (union_obj.status) {
         .none, .have_field_types => {},
         .field_types_wip, .layout_wip => {
-            return sema.fail(block, src, "union {} depends on itself", .{ty.fmt(target)});
+            return sema.fail(block, src, "union {} depends on itself", .{ty.fmt(sema.mod)});
         },
         .have_layout, .fully_resolved_wip, .fully_resolved => return,
     }
@@ -21774,10 +21769,6 @@ fn resolveStructFully(
         .fully_resolved_wip, .fully_resolved => return,
     }

-    log.debug("resolveStructFully {*} ('{s}')", .{
-        struct_obj.owner_decl, struct_obj.owner_decl.name,
-    });
-
     {
         // After we have resolve struct layout we have to go over the fields again to
         // make sure pointer fields get their child types resolved as well.
@@ -21866,11 +21857,10 @@ fn resolveTypeFieldsStruct(
     ty: Type,
     struct_obj: *Module.Struct,
 ) CompileError!void {
-    const target = sema.mod.getTarget();
     switch (struct_obj.status) {
         .none => {},
         .field_types_wip => {
-            return sema.fail(block, src, "struct {} depends on itself", .{ty.fmt(target)});
+            return sema.fail(block, src, "struct {} depends on itself", .{ty.fmt(sema.mod)});
         },
         .have_field_types,
         .have_layout,
@@ -21897,11 +21887,10 @@ fn resolveTypeFieldsUnion(
     ty: Type,
     union_obj: *Module.Union,
 ) CompileError!void {
-    const target = sema.mod.getTarget();
     switch (union_obj.status) {
         .none => {},
         .field_types_wip => {
-            return sema.fail(block, src, "union {} depends on itself", .{ty.fmt(target)});
+            return sema.fail(block, src, "union {} depends on itself", .{ty.fmt(sema.mod)});
         },
         .have_field_types,
         .have_layout,
@@ -21945,7 +21934,8 @@ fn resolveInferredErrorSet(
     // `*Module.Fn`. Not only is the function not relevant to the inferred error set
     // in this case, it may be a generic function which would cause an assertion failure
     // if we called `ensureFuncBodyAnalyzed` on it here.
-    if (ies.func.owner_decl.ty.fnInfo().return_type.errorUnionSet().castTag(.error_set_inferred).?.data == ies) {
+    const ies_func_owner_decl = sema.mod.declPtr(ies.func.owner_decl);
+    if (ies_func_owner_decl.ty.fnInfo().return_type.errorUnionSet().castTag(.error_set_inferred).?.data == ies) {
         // In this case we are dealing with the actual InferredErrorSet object that
         // corresponds to the function, not one created to track an inline/comptime call.
         try sema.ensureFuncBodyAnalyzed(ies.func);
@@ -21986,7 +21976,7 @@ fn semaStructFields(
     defer tracy.end();

     const gpa = mod.gpa;
-    const decl = struct_obj.owner_decl;
+    const decl_index = struct_obj.owner_decl;
     const zir = struct_obj.namespace.file_scope.zir;
     const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
     assert(extended.opcode == .struct_decl);
@@ -22026,6 +22016,7 @@ fn semaStructFields(
     }
     extra_index += body.len;

+    const decl = mod.declPtr(decl_index);
     var decl_arena = decl.value_arena.?.promote(gpa);
     defer decl.value_arena.?.* = decl_arena.state;
     const decl_arena_allocator = decl_arena.allocator();
@@ -22040,6 +22031,7 @@ fn semaStructFields(
         .perm_arena = decl_arena_allocator,
         .code = zir,
         .owner_decl = decl,
+        .owner_decl_index = decl_index,
         .func = null,
         .fn_ret_ty = Type.void,
         .owner_func = null,
@@ -22052,7 +22044,7 @@ fn semaStructFields(
     var block_scope: Block = .{
         .parent = null,
         .sema = &sema,
-        .src_decl = decl,
+        .src_decl = decl_index,
         .namespace = &struct_obj.namespace,
         .wip_capture_scope = wip_captures.scope,
         .instructions = .{},
@@ -22171,7 +22163,7 @@ fn semaUnionFields(block: *Block, mod: *Module, union_obj: *Module.Union) Compil
     defer tracy.end();

     const gpa = mod.gpa;
-    const decl = union_obj.owner_decl;
+    const decl_index = union_obj.owner_decl;
     const zir = union_obj.namespace.file_scope.zir;
     const extended = zir.instructions.items(.data)[union_obj.zir_index].extended;
     assert(extended.opcode == .union_decl);
@@ -22217,8 +22209,10 @@ fn semaUnionFields(block: *Block, mod: *Module, union_obj: *Module.Union) Compil
     }
     extra_index += body.len;

-    var decl_arena = union_obj.owner_decl.value_arena.?.promote(gpa);
-    defer union_obj.owner_decl.value_arena.?.* = decl_arena.state;
+    const decl = mod.declPtr(decl_index);
+
+    var decl_arena = decl.value_arena.?.promote(gpa);
+    defer decl.value_arena.?.* = decl_arena.state;
     const decl_arena_allocator = decl_arena.allocator();

     var analysis_arena = std.heap.ArenaAllocator.init(gpa);
@@ -22231,6 +22225,7 @@ fn semaUnionFields(block: *Block, mod: *Module, union_obj: *Module.Union) Compil
         .perm_arena = decl_arena_allocator,
         .code = zir,
         .owner_decl = decl,
+        .owner_decl_index = decl_index,
         .func = null,
         .fn_ret_ty = Type.void,
         .owner_func = null,
@@ -22243,7 +22238,7 @@ fn semaUnionFields(block: *Block, mod: *Module, union_obj: *Module.Union) Compil
     var block_scope: Block = .{
         .parent = null,
         .sema = &sema,
-        .src_decl = decl,
+        .src_decl = decl_index,
         .namespace = &union_obj.namespace,
         .wip_capture_scope = wip_captures.scope,
         .instructions = .{},
@@ -22353,7 +22348,7 @@ fn semaUnionFields(block: *Block, mod: *Module, union_obj: *Module.Union) Compil
             const copied_val = try val.copy(decl_arena_allocator);
             map.putAssumeCapacityContext(copied_val, {}, .{
                 .ty = int_tag_ty,
-                .target = target,
+                .mod = mod,
             });
         } else {
             const val = if (last_tag_val) |val|
@@ -22365,7 +22360,7 @@ fn semaUnionFields(block: *Block, mod: *Module, union_obj: *Module.Union) Compil
             const copied_val = try val.copy(decl_arena_allocator);
             map.putAssumeCapacityContext(copied_val, {}, .{
                 .ty = int_tag_ty,
-                .target = target,
+                .mod = mod,
             });
         }
     }
@@ -22411,7 +22406,7 @@ fn semaUnionFields(block: *Block, mod: *Module, union_obj: *Module.Union) Compil
             const enum_has_field = names.orderedRemove(field_name);
             if (!enum_has_field) {
                 const msg = msg: {
-                    const msg = try sema.errMsg(block, src, "enum '{}' has no field named '{s}'", .{ union_obj.tag_ty.fmt(target), field_name });
+                    const msg = try sema.errMsg(block, src, "enum '{}' has no field named '{s}'", .{ union_obj.tag_ty.fmt(sema.mod), field_name });
                     errdefer msg.destroy(sema.gpa);
                     try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
                     break :msg msg;
                 };
@@ -22475,15 +22470,16 @@ fn generateUnionTagTypeNumbered(
     const enum_ty = Type.initPayload(&enum_ty_payload.base);
     const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
     // TODO better type name
-    const new_decl = try mod.createAnonymousDecl(block, .{
+    const new_decl_index = try mod.createAnonymousDecl(block, .{
         .ty = Type.type,
         .val = enum_val,
     });
+    const new_decl = mod.declPtr(new_decl_index);
     new_decl.owns_tv = true;
-    errdefer mod.abortAnonDecl(new_decl);
+    errdefer mod.abortAnonDecl(new_decl_index);

     enum_obj.* = .{
-        .owner_decl = new_decl,
+        .owner_decl = new_decl_index,
         .tag_ty = int_ty,
         .fields = .{},
         .values = .{},
@@ -22493,7 +22489,7 @@ fn generateUnionTagTypeNumbered(
     try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
     try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{
         .ty = int_ty,
-        .target = sema.mod.getTarget(),
+        .mod = mod,
     });

     try new_decl.finalizeNewArena(&new_decl_arena);
     return enum_ty;
@@ -22515,15 +22511,16 @@ fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: usize) !Ty
     const enum_ty = Type.initPayload(&enum_ty_payload.base);
     const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
     // TODO better type name
-    const new_decl = try mod.createAnonymousDecl(block, .{
+    const new_decl_index = try mod.createAnonymousDecl(block, .{
        .ty = Type.type,
        .val = enum_val,
     });
+    const new_decl = mod.declPtr(new_decl_index);
     new_decl.owns_tv = true;
-    errdefer mod.abortAnonDecl(new_decl);
+    errdefer mod.abortAnonDecl(new_decl_index);

     enum_obj.* = .{
-        .owner_decl = new_decl,
+        .owner_decl = new_decl_index,
         .fields = .{},
         .node_offset = 0,
     };
@@ -22545,7 +22542,7 @@ fn getBuiltin(
     const opt_builtin_inst = try sema.namespaceLookupRef(
         block,
         src,
-        std_file.root_decl.?.src_namespace,
+        mod.declPtr(std_file.root_decl.unwrap().?).src_namespace,
         "builtin",
     );
     const builtin_inst = try sema.analyzeLoad(block, src, opt_builtin_inst.?, src);
@@ -22984,8 +22981,7 @@ fn analyzeComptimeAlloc(
     // Needed to make an anon decl with type `var_type` (the `finish()` call below).
     _ = try sema.typeHasOnePossibleValue(block, src, var_type);

-    const target = sema.mod.getTarget();
-    const ptr_type = try Type.ptr(sema.arena, target, .{
+    const ptr_type = try Type.ptr(sema.arena, sema.mod, .{
         .pointee_type = var_type,
         .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .global_constant),
         .@"align" = alignment,
@@ -22994,7 +22990,7 @@ fn analyzeComptimeAlloc(
     var anon_decl = try block.startAnonDecl(src);
     defer anon_decl.deinit();

-    const decl = try anon_decl.finish(
+    const decl_index = try anon_decl.finish(
         try var_type.copy(anon_decl.arena()),
         // There will be stores before the first load, but they may be to sub-elements or
         // sub-fields. So we need to initialize with undef to allow the mechanism to expand
         Value.undef,
         alignment,
     );
+    const decl = sema.mod.declPtr(decl_index);
     decl.@"align" = alignment;
-    try sema.mod.declareDeclDependency(sema.owner_decl, decl);
+    try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index);

     return sema.addConstant(ptr_type, try Value.Tag.decl_ref_mut.create(sema.arena, .{
         .runtime_index = block.runtime_index,
-        .decl = decl,
+        .decl_index = decl_index,
     }));
 }
@@ -23099,7 +23096,7 @@ fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr
     // The type is not in-memory coercible or the direct dereference failed, so it must
     // be bitcast according to the pointer type we are performing the load through.
     if (!load_ty.hasWellDefinedLayout())
-        return sema.fail(block, src, "comptime dereference requires {} to have a well-defined layout, but it does not.", .{load_ty.fmt(target)});
+        return sema.fail(block, src, "comptime dereference requires {} to have a well-defined layout, but it does not.", .{load_ty.fmt(sema.mod)});

     const load_sz = try sema.typeAbiSize(block, src, load_ty);
@@ -23114,11 +23111,11 @@ fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr
     if (deref.ty_without_well_defined_layout) |bad_ty| {
         // We got no parent for bit-casting, or the parent we got was too small. Either way, the problem
         // is that some type we encountered when de-referencing does not have a well-defined layout.
-        return sema.fail(block, src, "comptime dereference requires {} to have a well-defined layout, but it does not.", .{bad_ty.fmt(target)});
+        return sema.fail(block, src, "comptime dereference requires {} to have a well-defined layout, but it does not.", .{bad_ty.fmt(sema.mod)});
     } else {
         // If all encountered types had well-defined layouts, the parent is the root decl and it just
         // wasn't big enough for the load.
-        return sema.fail(block, src, "dereference of {} exceeds bounds of containing decl of type {}", .{ ptr_ty.fmt(target), deref.parent.?.tv.ty.fmt(target) });
+        return sema.fail(block, src, "dereference of {} exceeds bounds of containing decl of type {}", .{ ptr_ty.fmt(sema.mod), deref.parent.?.tv.ty.fmt(sema.mod) });
     }
 }
@@ -23484,9 +23481,8 @@ fn anonStructFieldIndex(
             return @intCast(u32, i);
         }
     }
-    const target = sema.mod.getTarget();
     return sema.fail(block, field_src, "anonymous struct {} has no such field '{s}'", .{
-        struct_ty.fmt(target), field_name,
+        struct_ty.fmt(sema.mod), field_name,
     });
 }
diff --git a/src/TypedValue.zig b/src/TypedValue.zig
index b4be670e19..b0d5d77010 100644
--- a/src/TypedValue.zig
+++ b/src/TypedValue.zig
@@ -1,6 +1,7 @@
 const std = @import("std");
 const Type = @import("type.zig").Type;
 const Value = @import("value.zig").Value;
+const Module = @import("Module.zig");
 const Allocator = std.mem.Allocator;
 const TypedValue = @This();
 const Target = std.Target;
@@ -31,13 +32,13 @@ pub fn copy(self: TypedValue, arena: Allocator) error{OutOfMemory}!TypedValue {
     };
 }

-pub fn eql(a: TypedValue, b: TypedValue, target: std.Target) bool {
-    if (!a.ty.eql(b.ty, target)) return false;
-    return a.val.eql(b.val, a.ty, target);
+pub fn eql(a: TypedValue, b: TypedValue, mod: *Module) bool {
+    if (!a.ty.eql(b.ty, mod)) return false;
+    return a.val.eql(b.val, a.ty, mod);
 }

-pub fn hash(tv: TypedValue, hasher: *std.hash.Wyhash, target: std.Target) void {
-    return tv.val.hash(tv.ty, hasher, target);
+pub fn hash(tv: TypedValue, hasher: *std.hash.Wyhash, mod: *Module) void {
+    return tv.val.hash(tv.ty, hasher, mod);
 }

 pub fn enumToInt(tv: TypedValue, buffer: *Value.Payload.U64) Value {
@@ -48,7 +49,7 @@ const max_aggregate_items = 100;

 const FormatContext = struct {
     tv: TypedValue,
-    target: Target,
+    mod: *Module,
 };

 pub fn format(
@@ -59,7 +60,7 @@
 ) !void {
     _ = options;
     comptime std.debug.assert(fmt.len == 0);
-    return ctx.tv.print(writer, 3, ctx.target);
+    return ctx.tv.print(writer, 3, ctx.mod);
 }

 /// Prints the Value according to the Type, not according to the Value Tag.
@@ -67,8 +68,9 @@ pub fn print( tv: TypedValue, writer: anytype, level: u8, - target: std.Target, + mod: *Module, ) @TypeOf(writer).Error!void { + const target = mod.getTarget(); var val = tv.val; var ty = tv.ty; while (true) switch (val.tag()) { @@ -156,7 +158,7 @@ pub fn print( try print(.{ .ty = fields[i].ty, .val = vals[i], - }, writer, level - 1, target); + }, writer, level - 1, mod); } return writer.writeAll(" }"); } else { @@ -170,7 +172,7 @@ pub fn print( try print(.{ .ty = elem_ty, .val = vals[i], - }, writer, level - 1, target); + }, writer, level - 1, mod); } return writer.writeAll(" }"); } @@ -185,12 +187,12 @@ pub fn print( try print(.{ .ty = ty.unionTagType().?, .val = union_val.tag, - }, writer, level - 1, target); + }, writer, level - 1, mod); try writer.writeAll(" = "); try print(.{ - .ty = ty.unionFieldType(union_val.tag, target), + .ty = ty.unionFieldType(union_val.tag, mod), .val = union_val.val, - }, writer, level - 1, target); + }, writer, level - 1, mod); return writer.writeAll(" }"); }, @@ -205,7 +207,7 @@ pub fn print( }, .bool_true => return writer.writeAll("true"), .bool_false => return writer.writeAll("false"), - .ty => return val.castTag(.ty).?.data.print(writer, target), + .ty => return val.castTag(.ty).?.data.print(writer, mod), .int_type => { const int_type = val.castTag(.int_type).?.data; return writer.print("{s}{d}", .{ @@ -222,28 +224,32 @@ pub fn print( const x = sub_ty.abiAlignment(target); return writer.print("{d}", .{x}); }, - .function => return writer.print("(function '{s}')", .{val.castTag(.function).?.data.owner_decl.name}), + .function => return writer.print("(function '{s}')", .{ + mod.declPtr(val.castTag(.function).?.data.owner_decl).name, + }), .extern_fn => return writer.writeAll("(extern function)"), .variable => return writer.writeAll("(variable)"), .decl_ref_mut => { - const decl = val.castTag(.decl_ref_mut).?.data.decl; + const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; + const decl = mod.declPtr(decl_index); if (level == 0) { return writer.print("(decl ref mut '{s}')", .{decl.name}); } return print(.{ .ty = decl.ty, .val = decl.val, - }, writer, level - 1, target); + }, writer, level - 1, mod); }, .decl_ref => { - const decl = val.castTag(.decl_ref).?.data; + const decl_index = val.castTag(.decl_ref).?.data; + const decl = mod.declPtr(decl_index); if (level == 0) { return writer.print("(decl ref '{s}')", .{decl.name}); } return print(.{ .ty = decl.ty, .val = decl.val, - }, writer, level - 1, target); + }, writer, level - 1, mod); }, .elem_ptr => { const elem_ptr = val.castTag(.elem_ptr).?.data; @@ -251,7 +257,7 @@ pub fn print( try print(.{ .ty = elem_ptr.elem_ty, .val = elem_ptr.array_ptr, - }, writer, level - 1, target); + }, writer, level - 1, mod); return writer.print("[{}]", .{elem_ptr.index}); }, .field_ptr => { @@ -260,7 +266,7 @@ pub fn print( try print(.{ .ty = field_ptr.container_ty, .val = field_ptr.container_ptr, - }, writer, level - 1, target); + }, writer, level - 1, mod); if (field_ptr.container_ty.zigTypeTag() == .Struct) { const field_name = field_ptr.container_ty.structFields().keys()[field_ptr.field_index]; @@ -288,7 +294,7 @@ pub fn print( }; while (i < max_aggregate_items) : (i += 1) { if (i != 0) try writer.writeAll(", "); - try print(elem_tv, writer, level - 1, target); + try print(elem_tv, writer, level - 1, mod); } return writer.writeAll(" }"); }, @@ -300,7 +306,7 @@ pub fn print( try print(.{ .ty = ty.elemType2(), .val = ty.sentinel().?, - }, writer, level - 1, target); + }, writer, level - 1, 
mod); return writer.writeAll(" }"); }, .slice => return writer.writeAll("(slice)"), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 688f59e804..fc37ae00dd 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -237,8 +237,10 @@ pub fn generate( @panic("Attempted to compile for architecture that was disabled by build configuration"); } - assert(module_fn.owner_decl.has_tv); - const fn_type = module_fn.owner_decl.ty; + const mod = bin_file.options.module.?; + const fn_owner_decl = mod.declPtr(module_fn.owner_decl); + assert(fn_owner_decl.has_tv); + const fn_type = fn_owner_decl.ty; var branch_stack = std.ArrayList(Branch).init(bin_file.allocator); defer { @@ -819,9 +821,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { return @as(u32, 0); } - const target = self.target.*; const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)}); + const mod = self.bin_file.options.module.?; + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; // TODO swap this for inst.ty.ptrAlign const abi_align = elem_ty.abiAlignment(self.target.*); @@ -830,9 +832,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const elem_ty = self.air.typeOfIndex(inst); - const target = self.target.*; const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)}); + const mod = self.bin_file.options.module.?; + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; const abi_align = elem_ty.abiAlignment(self.target.*); if (abi_align > self.stack_align) @@ -1422,7 +1424,7 @@ fn binOp( lhs_ty: Type, rhs_ty: Type, ) InnerError!MCValue { - const target = self.target.*; + const mod = self.bin_file.options.module.?; switch (tag) { .add, .sub, @@ -1432,7 +1434,7 @@ fn binOp( .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - assert(lhs_ty.eql(rhs_ty, target)); + assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits <= 64) { // Only say yes if the operation is @@ -1483,7 +1485,7 @@ fn binOp( switch (lhs_ty.zigTypeTag()) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - assert(lhs_ty.eql(rhs_ty, target)); + assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits <= 64) { // TODO add optimisations for multiplication @@ -1534,7 +1536,7 @@ fn binOp( switch (lhs_ty.zigTypeTag()) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - assert(lhs_ty.eql(rhs_ty, target)); + assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits <= 64) { // TODO implement bitwise operations with immediates @@ -2425,12 +2427,12 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { const ty = self.air.typeOfIndex(inst); const result = self.args[arg_index]; - const target = self.target.*; const mcv = switch (result) { // Copy registers to the stack .register => |reg| blk: { + const mod = self.bin_file.options.module.?; const abi_size = math.cast(u32, ty.abiSize(self.target.*)) catch { - return self.fail("type '{}' too big to fit into stack frame", 
.{ty.fmt(target)}); + return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)}); }; const abi_align = ty.abiAlignment(self.target.*); const stack_offset = try self.allocMem(inst, abi_size, abi_align); @@ -2537,17 +2539,19 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. // Due to incremental compilation, how function calls are generated depends // on linking. + const mod = self.bin_file.options.module.?; if (self.air.value(callee)) |func_value| { if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); + const fn_owner_decl = mod.declPtr(func.owner_decl); const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; - break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes); + break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| - coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes + coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes else unreachable; @@ -2565,8 +2569,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; + const fn_owner_decl = mod.declPtr(func.owner_decl); try self.genSetReg(Type.initTag(.u64), .x30, .{ - .got_load = func.owner_decl.link.macho.local_sym_index, + .got_load = fn_owner_decl.link.macho.local_sym_index, }); // blr x30 _ = try self.addInst(.{ @@ -2575,7 +2580,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. }); } else if (func_value.castTag(.extern_fn)) |func_payload| { const extern_fn = func_payload.data; - const decl_name = extern_fn.owner_decl.name; + const decl_name = mod.declPtr(extern_fn.owner_decl).name; if (extern_fn.lib_name) |lib_name| { log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{ decl_name, @@ -2588,7 +2593,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. .tag = .call_extern, .data = .{ .extern_fn = .{ - .atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index, + .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.local_sym_index, .sym_name = n_strx, }, }, @@ -2602,7 +2607,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. 
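// All of these hunks follow one mechanical pattern: `owner_decl` and friends
// are now `Module.Decl.Index` handles rather than `*Module.Decl`, so every
// access resolves through the Module. A minimal, self-contained model of the
// pattern, assuming Decls are stored in the SegmentedList reintroduced
// earlier in this series (every name below is illustrative, not the
// compiler's actual definition):
const std = @import("std");

const Decl = struct {
    name: []const u8,
    alive: bool = false,

    pub const Index = enum(u32) { _ };
};

const Module = struct {
    allocated_decls: std.SegmentedList(Decl, 0),

    pub fn declPtr(mod: *Module, index: Decl.Index) *Decl {
        // Indexes are stable handles; pointers are only materialized on use.
        return mod.allocated_decls.at(@enumToInt(index));
    }
};

test "resolve a Decl through its index" {
    var mod = Module{
        .allocated_decls = std.SegmentedList(Decl, 0).init(std.testing.allocator),
    };
    defer mod.allocated_decls.deinit();
    try mod.allocated_decls.push(.{ .name = "main" });
    const decl = mod.declPtr(@intToEnum(Decl.Index, 0));
    try std.testing.expectEqualStrings("main", decl.name);
}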
const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); const got_addr = p9.bases.data; - const got_index = func_payload.data.owner_decl.link.plan9.got_index.?; + const got_index = mod.declPtr(func_payload.data.owner_decl).link.plan9.got_index.?; const fn_got_addr = got_addr + got_index * ptr_bytes; try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr }); @@ -3478,12 +3483,13 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro .direct_load => .load_memory_ptr_direct, else => unreachable, }; + const mod = self.bin_file.options.module.?; _ = try self.addInst(.{ .tag = tag, .data = .{ .payload = try self.addExtra(Mir.LoadMemoryPie{ .register = @enumToInt(src_reg), - .atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index, + .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.local_sym_index, .sym_index = sym_index, }), }, @@ -3597,12 +3603,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .direct_load => .load_memory_direct, else => unreachable, }; + const mod = self.bin_file.options.module.?; _ = try self.addInst(.{ .tag = tag, .data = .{ .payload = try self.addExtra(Mir.LoadMemoryPie{ .register = @enumToInt(reg), - .atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index, + .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.local_sym_index, .sym_index = sym_index, }), }, @@ -3860,7 +3867,7 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { } } -fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue { +fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue { const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); @@ -3872,7 +3879,10 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa } } - decl.alive = true; + const mod = self.bin_file.options.module.?; + const decl = mod.declPtr(decl_index); + mod.markDeclAlive(decl); + if (self.bin_file.cast(link.File.Elf)) |elf_file| { const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; @@ -3886,7 +3896,7 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes; return MCValue{ .memory = got_addr }; } else if (self.bin_file.cast(link.File.Plan9)) |p9| { - try p9.seeDecl(decl); + try p9.seeDecl(decl_index); const got_addr = p9.bases.data + decl.link.plan9.got_index.? 
* ptr_bytes; return MCValue{ .memory = got_addr }; } else { @@ -3922,7 +3932,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { return self.lowerDeclRef(typed_value, payload.data); } if (typed_value.val.castTag(.decl_ref_mut)) |payload| { - return self.lowerDeclRef(typed_value, payload.data.decl); + return self.lowerDeclRef(typed_value, payload.data.decl_index); } const target = self.target.*; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index d39c7d9176..54de053475 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -271,8 +271,10 @@ pub fn generate( @panic("Attempted to compile for architecture that was disabled by build configuration"); } - assert(module_fn.owner_decl.has_tv); - const fn_type = module_fn.owner_decl.ty; + const mod = bin_file.options.module.?; + const fn_owner_decl = mod.declPtr(module_fn.owner_decl); + assert(fn_owner_decl.has_tv); + const fn_type = fn_owner_decl.ty; var branch_stack = std.ArrayList(Branch).init(bin_file.allocator); defer { @@ -838,9 +840,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { return @as(u32, 0); } - const target = self.target.*; const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)}); + const mod = self.bin_file.options.module.?; + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; // TODO swap this for inst.ty.ptrAlign const abi_align = elem_ty.abiAlignment(self.target.*); @@ -849,9 +851,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const elem_ty = self.air.typeOfIndex(inst); - const target = self.target.*; const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)}); + const mod = self.bin_file.options.module.?; + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; const abi_align = elem_ty.abiAlignment(self.target.*); if (abi_align > self.stack_align) @@ -1204,7 +1206,8 @@ fn minMax( .Float => return self.fail("TODO ARM min/max on floats", .{}), .Vector => return self.fail("TODO ARM min/max on vectors", .{}), .Int => { - assert(lhs_ty.eql(rhs_ty, self.target.*)); + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits <= 32) { const lhs_is_register = lhs == .register; @@ -1372,7 +1375,8 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { switch (lhs_ty.zigTypeTag()) { .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), .Int => { - assert(lhs_ty.eql(rhs_ty, self.target.*)); + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits < 32) { const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); @@ -1472,7 +1476,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { switch (lhs_ty.zigTypeTag()) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), .Int => { - assert(lhs_ty.eql(rhs_ty, self.target.*)); + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits <= 16) { const stack_offset = try self.allocMem(inst, tuple_size, 
tuple_align); @@ -2682,7 +2687,6 @@ fn binOp( lhs_ty: Type, rhs_ty: Type, ) InnerError!MCValue { - const target = self.target.*; switch (tag) { .add, .sub, @@ -2692,7 +2696,8 @@ fn binOp( .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - assert(lhs_ty.eql(rhs_ty, target)); + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits <= 32) { // Only say yes if the operation is @@ -2740,7 +2745,8 @@ fn binOp( .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - assert(lhs_ty.eql(rhs_ty, target)); + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits <= 32) { // TODO add optimisations for multiplication @@ -2794,7 +2800,8 @@ fn binOp( switch (lhs_ty.zigTypeTag()) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - assert(lhs_ty.eql(rhs_ty, target)); + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits <= 32) { const lhs_immediate_ok = lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null; @@ -3100,8 +3107,9 @@ fn addDbgInfoTypeReloc(self: *Self, ty: Type) error{OutOfMemory}!void { const dbg_info = &dw.dbg_info; const index = dbg_info.items.len; try dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4 + const mod = self.bin_file.options.module.?; const atom = switch (self.bin_file.tag) { - .elf => &self.mod_fn.owner_decl.link.elf.dbg_info_atom, + .elf => &mod.declPtr(self.mod_fn.owner_decl).link.elf.dbg_info_atom, .macho => unreachable, else => unreachable, }; @@ -3318,11 +3326,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. 
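// `ty.fmt(...)` and `Type.eql(...)` now take `*Module` instead of a
// `std.Target`: formatting or comparing a type can require chasing the
// Decl.Index handles it refers to (the TypedValue hunks earlier in this
// patch already rely on `data.print(writer, mod)`). A sketch of the
// formatter shape, mirroring `typeToCIdentifier` later in this patch;
// assumes the surrounding file's `Type` and `Module` are in scope:
const FormatContext = struct { ty: Type, mod: *Module };

pub fn fmt(ty: Type, mod: *Module) std.fmt.Formatter(format) {
    return .{ .data = .{ .ty = ty, .mod = mod } };
}

fn format(
    ctx: FormatContext,
    comptime f: []const u8,
    options: std.fmt.FormatOptions,
    writer: anytype,
) @TypeOf(writer).Error!void {
    _ = f;
    _ = options;
    // Resolving decl-backed type names is exactly why the Module is needed.
    try ctx.ty.print(writer, ctx.mod);
}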
const func = func_payload.data; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); + const mod = self.bin_file.options.module.?; + const fn_owner_decl = mod.declPtr(func.owner_decl); const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; - break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes); + break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| - coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes + coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes else unreachable; @@ -4924,11 +4934,14 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { } } -fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue { +fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue { const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); - decl.alive = true; + const mod = self.bin_file.options.module.?; + const decl = mod.declPtr(decl_index); + mod.markDeclAlive(decl); + if (self.bin_file.cast(link.File.Elf)) |elf_file| { const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; @@ -4939,7 +4952,7 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes; return MCValue{ .memory = got_addr }; } else if (self.bin_file.cast(link.File.Plan9)) |p9| { - try p9.seeDecl(decl); + try p9.seeDecl(decl_index); const got_addr = p9.bases.data + decl.link.plan9.got_index.? 
* ptr_bytes; return MCValue{ .memory = got_addr }; } else { @@ -4976,7 +4989,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { return self.lowerDeclRef(typed_value, payload.data); } if (typed_value.val.castTag(.decl_ref_mut)) |payload| { - return self.lowerDeclRef(typed_value, payload.data.decl); + return self.lowerDeclRef(typed_value, payload.data.decl_index); } const target = self.target.*; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index cf9e5fefcd..15377378cd 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -229,8 +229,10 @@ pub fn generate( @panic("Attempted to compile for architecture that was disabled by build configuration"); } - assert(module_fn.owner_decl.has_tv); - const fn_type = module_fn.owner_decl.ty; + const mod = bin_file.options.module.?; + const fn_owner_decl = mod.declPtr(module_fn.owner_decl); + assert(fn_owner_decl.has_tv); + const fn_type = fn_owner_decl.ty; var branch_stack = std.ArrayList(Branch).init(bin_file.allocator); defer { @@ -738,8 +740,9 @@ fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void { const dbg_info = &dw.dbg_info; const index = dbg_info.items.len; try dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4 + const mod = self.bin_file.options.module.?; const atom = switch (self.bin_file.tag) { - .elf => &self.mod_fn.owner_decl.link.elf.dbg_info_atom, + .elf => &mod.declPtr(self.mod_fn.owner_decl).link.elf.dbg_info_atom, .macho => unreachable, else => unreachable, }; @@ -768,9 +771,9 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const elem_ty = self.air.typeOfIndex(inst).elemType(); - const target = self.target.*; const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)}); + const mod = self.bin_file.options.module.?; + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; // TODO swap this for inst.ty.ptrAlign const abi_align = elem_ty.abiAlignment(self.target.*); @@ -779,9 +782,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const elem_ty = self.air.typeOfIndex(inst); - const target = self.target.*; const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)}); + const mod = self.bin_file.options.module.?; + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; const abi_align = elem_ty.abiAlignment(self.target.*); if (abi_align > self.stack_align) @@ -1037,7 +1040,8 @@ fn binOp( .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - assert(lhs_ty.eql(rhs_ty, self.target.*)); + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits <= 64) { // TODO immediate operands @@ -1679,11 +1683,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. 
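// Each backend's `lowerDeclRef` used to set the flag directly
// (`decl.alive = true;`, or `decl.markAlive();` in the wasm and x86_64
// backends); with decls addressed by index it is now routed through the
// Module. A minimal sketch, under the assumption that the method still only
// sets the flag:
pub fn markDeclAlive(mod: *Module, decl: *Decl) void {
    // Taking the Module leaves room for module-wide liveness bookkeeping
    // later; the one-line body is an assumption, not the patch's code.
    _ = mod;
    decl.alive = true;
}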
const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); + const mod = self.bin_file.options.module.?; + const fn_owner_decl = mod.declPtr(func.owner_decl); const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; - break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes); + break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| - coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes + coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes else unreachable; @@ -1768,7 +1774,8 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); const ty = self.air.typeOf(bin_op.lhs); - assert(ty.eql(self.air.typeOf(bin_op.rhs), self.target.*)); + const mod = self.bin_file.options.module.?; + assert(ty.eql(self.air.typeOf(bin_op.rhs), mod)); if (ty.zigTypeTag() == .ErrorSet) return self.fail("TODO implement cmp for errors", .{}); @@ -2501,10 +2508,12 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { } } -fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue { +fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue { const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); - decl.alive = true; + const mod = self.bin_file.options.module.?; + const decl = mod.declPtr(decl_index); + mod.markDeclAlive(decl); if (self.bin_file.cast(link.File.Elf)) |elf_file| { const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; @@ -2517,7 +2526,7 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes; return MCValue{ .memory = got_addr }; } else if (self.bin_file.cast(link.File.Plan9)) |p9| { - try p9.seeDecl(decl); + try p9.seeDecl(decl_index); const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes; return MCValue{ .memory = got_addr }; } else { @@ -2534,7 +2543,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { return self.lowerDeclRef(typed_value, payload.data); } if (typed_value.val.castTag(.decl_ref_mut)) |payload| { - return self.lowerDeclRef(typed_value, payload.data.decl); + return self.lowerDeclRef(typed_value, payload.data.decl_index); } const target = self.target.*; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); @@ -2544,7 +2553,8 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_type = typed_value.ty.slicePtrFieldType(&buf); const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val }); - const slice_len = typed_value.val.sliceLen(target); + const mod = self.bin_file.options.module.?; + const slice_len = typed_value.val.sliceLen(mod); // Codegen can't handle some kinds of indirection. 
If the wrong union field is accessed here it may mean // the Sema code needs to use anonymous Decls or alloca instructions to store data. const ptr_imm = ptr_mcv.memory; diff --git a/src/arch/sparcv9/CodeGen.zig b/src/arch/sparcv9/CodeGen.zig index 71c41bc67d..7e1ecefbb7 100644 --- a/src/arch/sparcv9/CodeGen.zig +++ b/src/arch/sparcv9/CodeGen.zig @@ -243,8 +243,10 @@ pub fn generate( @panic("Attempted to compile for architecture that was disabled by build configuration"); } - assert(module_fn.owner_decl.has_tv); - const fn_type = module_fn.owner_decl.ty; + const mod = bin_file.options.module.?; + const fn_owner_decl = mod.declPtr(module_fn.owner_decl); + assert(fn_owner_decl.has_tv); + const fn_type = fn_owner_decl.ty; var branch_stack = std.ArrayList(Branch).init(bin_file.allocator); defer { @@ -871,7 +873,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. const ptr_bytes: u64 = @divExact(ptr_bits, 8); const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; - break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes); + const mod = self.bin_file.options.module.?; + break :blk @intCast(u32, got.p_vaddr + mod.declPtr(func.owner_decl).link.elf.offset_table_index * ptr_bytes); } else unreachable; try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr }); @@ -1026,9 +1029,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { return @as(u32, 0); } - const target = self.target.*; const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)}); + const mod = self.bin_file.options.module.?; + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; // TODO swap this for inst.ty.ptrAlign const abi_align = elem_ty.abiAlignment(self.target.*); @@ -1037,9 +1040,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const elem_ty = self.air.typeOfIndex(inst); - const target = self.target.*; const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)}); + const mod = self.bin_file.options.module.?; + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; const abi_align = elem_ty.abiAlignment(self.target.*); if (abi_align > self.stack_align) @@ -1372,7 +1375,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { return self.lowerDeclRef(typed_value, payload.data); } if (typed_value.val.castTag(.decl_ref_mut)) |payload| { - return self.lowerDeclRef(typed_value, payload.data.decl); + return self.lowerDeclRef(typed_value, payload.data.decl_index); } const target = self.target.*; @@ -1422,7 +1425,7 @@ fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigT }; } -fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue { +fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue { const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); @@ -1434,7 +1437,10 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa } } - decl.alive = true; + const mod = self.bin_file.options.module.?; + const decl = 
mod.declPtr(decl_index); + + mod.markDeclAlive(decl); if (self.bin_file.cast(link.File.Elf)) |elf_file| { const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index e58a3a6d65..4586f5624b 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -538,6 +538,10 @@ const Self = @This(); /// Reference to the function declaration the code /// section belongs to decl: *Decl, +decl_index: Decl.Index, +/// Current block depth. Used to calculate the relative difference between a break +/// and block +block_depth: u32 = 0, air: Air, liveness: Liveness, gpa: mem.Allocator, @@ -559,9 +563,6 @@ local_index: u32 = 0, arg_index: u32 = 0, /// If codegen fails, an error messages will be allocated and saved in `err_msg` err_msg: *Module.ErrorMsg, -/// Current block depth. Used to calculate the relative difference between a break -/// and block -block_depth: u32 = 0, /// List of all locals' types generated throughout this declaration /// used to emit locals count at start of 'code' section. locals: std.ArrayListUnmanaged(u8), @@ -644,7 +645,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!WValue { // In the other cases, we will simply lower the constant to a value that fits // into a single local (such as a pointer, integer, bool, etc). const result = if (isByRef(ty, self.target)) blk: { - const sym_index = try self.bin_file.lowerUnnamedConst(self.decl, .{ .ty = ty, .val = val }); + const sym_index = try self.bin_file.lowerUnnamedConst(.{ .ty = ty, .val = val }, self.decl_index); break :blk WValue{ .memory = sym_index }; } else try self.lowerConstant(val, ty); @@ -838,7 +839,8 @@ pub fn generate( .liveness = liveness, .values = .{}, .code = code, - .decl = func.owner_decl, + .decl_index = func.owner_decl, + .decl = bin_file.options.module.?.declPtr(func.owner_decl), .err_msg = undefined, .locals = .{}, .target = bin_file.options.target, @@ -1022,8 +1024,9 @@ fn allocStack(self: *Self, ty: Type) !WValue { } const abi_size = std.math.cast(u32, ty.abiSize(self.target)) catch { + const module = self.bin_file.base.options.module.?; return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ - ty.fmt(self.target), ty.abiSize(self.target), + ty.fmt(module), ty.abiSize(self.target), }); }; const abi_align = ty.abiAlignment(self.target); @@ -1056,8 +1059,9 @@ fn allocStackPtr(self: *Self, inst: Air.Inst.Index) !WValue { const abi_alignment = ptr_ty.ptrAlignment(self.target); const abi_size = std.math.cast(u32, pointee_ty.abiSize(self.target)) catch { + const module = self.bin_file.base.options.module.?; return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ - pointee_ty.fmt(self.target), pointee_ty.abiSize(self.target), + pointee_ty.fmt(module), pointee_ty.abiSize(self.target), }); }; if (abi_alignment > self.stack_alignment) { @@ -1542,20 +1546,21 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. 
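// The wasm backend now keeps both the stable handle and the pointer it
// resolves to, set up once in `generate` (condensed from the hunks above):
//
//     .decl_index = func.owner_decl,
//     .decl = bin_file.options.module.?.declPtr(func.owner_decl),
//
// and `lowerUnnamedConst` now takes the TypedValue first, with the owning
// index second:
//
//     const sym_index = try self.bin_file.lowerUnnamedConst(
//         .{ .ty = ty, .val = val },
//         self.decl_index,
//     );
//
// A plausible rationale (not stated by the patch): hot codegen paths keep
// the cheap resolved pointer, while linker-facing calls get the handle they
// can store safely.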
const ret_ty = fn_ty.fnReturnType(); const first_param_sret = isByRef(ret_ty, self.target); - const target: ?*Decl = blk: { + const callee: ?*Decl = blk: { const func_val = self.air.value(pl_op.operand) orelse break :blk null; + const module = self.bin_file.base.options.module.?; if (func_val.castTag(.function)) |func| { - break :blk func.data.owner_decl; + break :blk module.declPtr(func.data.owner_decl); } else if (func_val.castTag(.extern_fn)) |extern_fn| { - const ext_decl = extern_fn.data.owner_decl; + const ext_decl = module.declPtr(extern_fn.data.owner_decl); var func_type = try genFunctype(self.gpa, ext_decl.ty, self.target); defer func_type.deinit(self.gpa); ext_decl.fn_link.wasm.type_index = try self.bin_file.putOrGetFuncType(func_type); try self.bin_file.addOrUpdateImport(ext_decl); break :blk ext_decl; } else if (func_val.castTag(.decl_ref)) |decl_ref| { - break :blk decl_ref.data; + break :blk module.declPtr(decl_ref.data); } return self.fail("Expected a function, but instead found type '{s}'", .{func_val.tag()}); }; @@ -1580,7 +1585,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. } } - if (target) |direct| { + if (callee) |direct| { try self.addLabel(.call, direct.link.wasm.sym_index); } else { // in this case we call a function pointer @@ -1837,16 +1842,16 @@ fn wrapBinOp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError fn lowerParentPtr(self: *Self, ptr_val: Value, ptr_child_ty: Type) InnerError!WValue { switch (ptr_val.tag()) { .decl_ref_mut => { - const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl; - return self.lowerParentPtrDecl(ptr_val, decl); + const decl_index = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; + return self.lowerParentPtrDecl(ptr_val, decl_index); }, .decl_ref => { - const decl = ptr_val.castTag(.decl_ref).?.data; - return self.lowerParentPtrDecl(ptr_val, decl); + const decl_index = ptr_val.castTag(.decl_ref).?.data; + return self.lowerParentPtrDecl(ptr_val, decl_index); }, .variable => { - const decl = ptr_val.castTag(.variable).?.data.owner_decl; - return self.lowerParentPtrDecl(ptr_val, decl); + const decl_index = ptr_val.castTag(.variable).?.data.owner_decl; + return self.lowerParentPtrDecl(ptr_val, decl_index); }, .field_ptr => { const field_ptr = ptr_val.castTag(.field_ptr).?.data; @@ -1918,24 +1923,31 @@ fn lowerParentPtr(self: *Self, ptr_val: Value, ptr_child_ty: Type) InnerError!WV } } -fn lowerParentPtrDecl(self: *Self, ptr_val: Value, decl: *Module.Decl) InnerError!WValue { - decl.markAlive(); +fn lowerParentPtrDecl(self: *Self, ptr_val: Value, decl_index: Module.Decl.Index) InnerError!WValue { + const module = self.bin_file.base.options.module.?; + const decl = module.declPtr(decl_index); + module.markDeclAlive(decl); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, .data = decl.ty, }; const ptr_ty = Type.initPayload(&ptr_ty_payload.base); - return self.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl); + return self.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index); } -fn lowerDeclRefValue(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!WValue { +fn lowerDeclRefValue(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!WValue { if (tv.ty.isSlice()) { - return WValue{ .memory = try self.bin_file.lowerUnnamedConst(decl, tv) }; - } else if (decl.ty.zigTypeTag() != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime()) { + return WValue{ .memory = try self.bin_file.lowerUnnamedConst(tv, 
decl_index) }; + } + + const module = self.bin_file.base.options.module.?; + const decl = module.declPtr(decl_index); + if (decl.ty.zigTypeTag() != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime()) { return WValue{ .imm32 = 0xaaaaaaaa }; } - decl.markAlive(); + module.markDeclAlive(decl); + const target_sym_index = decl.link.wasm.sym_index; if (decl.ty.zigTypeTag() == .Fn) { try self.bin_file.addTableFunction(target_sym_index); @@ -1946,12 +1958,12 @@ fn lowerDeclRefValue(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue { if (val.isUndefDeep()) return self.emitUndefined(ty); if (val.castTag(.decl_ref)) |decl_ref| { - const decl = decl_ref.data; - return self.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl); + const decl_index = decl_ref.data; + return self.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index); } - if (val.castTag(.decl_ref_mut)) |decl_ref| { - const decl = decl_ref.data.decl; - return self.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl); + if (val.castTag(.decl_ref_mut)) |decl_ref_mut| { + const decl_index = decl_ref_mut.data.decl_index; + return self.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index); } const target = self.target; @@ -2347,8 +2359,9 @@ fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const struct_ptr = try self.resolveInst(extra.data.struct_operand); const struct_ty = self.air.typeOf(extra.data.struct_operand).childType(); const offset = std.math.cast(u32, struct_ty.structFieldOffset(extra.data.field_index, self.target)) catch { + const module = self.bin_file.base.options.module.?; return self.fail("Field type '{}' too big to fit into stack frame", .{ - struct_ty.structFieldType(extra.data.field_index).fmt(self.target), + struct_ty.structFieldType(extra.data.field_index).fmt(module), }); }; return self.structFieldPtr(struct_ptr, offset); @@ -2360,8 +2373,9 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u32) InnerEr const struct_ty = self.air.typeOf(ty_op.operand).childType(); const field_ty = struct_ty.structFieldType(index); const offset = std.math.cast(u32, struct_ty.structFieldOffset(index, self.target)) catch { + const module = self.bin_file.base.options.module.?; return self.fail("Field type '{}' too big to fit into stack frame", .{ - field_ty.fmt(self.target), + field_ty.fmt(module), }); }; return self.structFieldPtr(struct_ptr, offset); @@ -2387,7 +2401,8 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const field_ty = struct_ty.structFieldType(field_index); if (!field_ty.hasRuntimeBitsIgnoreComptime()) return WValue{ .none = {} }; const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, self.target)) catch { - return self.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(self.target)}); + const module = self.bin_file.base.options.module.?; + return self.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(module)}); }; if (isByRef(field_ty, self.target)) { @@ -2782,7 +2797,8 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue } const offset = std.math.cast(u32, opt_ty.abiSize(self.target) - payload_ty.abiSize(self.target)) catch { - return self.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(self.target)}); + const module = self.bin_file.base.options.module.?; + return self.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(module)}); }; try 
self.emitWValue(operand); @@ -2811,7 +2827,8 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!WValue { return operand; } const offset = std.math.cast(u32, op_ty.abiSize(self.target) - payload_ty.abiSize(self.target)) catch { - return self.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(self.target)}); + const module = self.bin_file.base.options.module.?; + return self.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(module)}); }; // Create optional type, set the non-null bit, and store the operand inside the optional type diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index f2eb1e2afd..4097352975 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -309,8 +309,10 @@ pub fn generate( @panic("Attempted to compile for architecture that was disabled by build configuration"); } - assert(module_fn.owner_decl.has_tv); - const fn_type = module_fn.owner_decl.ty; + const mod = bin_file.options.module.?; + const fn_owner_decl = mod.declPtr(module_fn.owner_decl); + assert(fn_owner_decl.has_tv); + const fn_type = fn_owner_decl.ty; var branch_stack = std.ArrayList(Branch).init(bin_file.allocator); defer { @@ -396,14 +398,14 @@ pub fn generate( if (builtin.mode == .Debug and bin_file.options.module.?.comp.verbose_mir) { const w = std.io.getStdErr().writer(); - w.print("# Begin Function MIR: {s}:\n", .{module_fn.owner_decl.name}) catch {}; + w.print("# Begin Function MIR: {s}:\n", .{fn_owner_decl.name}) catch {}; const PrintMir = @import("PrintMir.zig"); const print = PrintMir{ .mir = mir, .bin_file = bin_file, }; print.printMir(w, function.mir_to_air_map, air) catch {}; // we don't care if the debug printing fails - w.print("# End Function MIR: {s}\n\n", .{module_fn.owner_decl.name}) catch {}; + w.print("# End Function MIR: {s}\n\n", .{fn_owner_decl.name}) catch {}; } if (function.err_msg) |em| { @@ -915,9 +917,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { return self.allocMem(inst, @sizeOf(usize), @alignOf(usize)); } - const target = self.target.*; const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)}); + const mod = self.bin_file.options.module.?; + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; // TODO swap this for inst.ty.ptrAlign const abi_align = ptr_ty.ptrAlignment(self.target.*); @@ -926,9 +928,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const elem_ty = self.air.typeOfIndex(inst); - const target = self.target.*; const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)}); + const mod = self.bin_file.options.module.?; + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; const abi_align = elem_ty.abiAlignment(self.target.*); if (abi_align > self.stack_align) @@ -2650,6 +2652,8 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue .direct_load => 0b01, else => unreachable, }; + const mod = self.bin_file.options.module.?; + const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl); _ = try self.addInst(.{ .tag = .lea_pie, .ops = (Mir.Ops{ @@ -2658,7 +2662,7 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue }).encode(), .data = .{ .load_reloc = 
.{ - .atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index, + .atom_index = fn_owner_decl.link.macho.local_sym_index, .sym_index = sym_index, }, }, @@ -3583,17 +3587,19 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. // Due to incremental compilation, how function calls are generated depends // on linking. + const mod = self.bin_file.options.module.?; if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) { if (self.air.value(callee)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); + const fn_owner_decl = mod.declPtr(func.owner_decl); const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; - break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes); + break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| - @intCast(u32, coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes) + @intCast(u32, coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes) else unreachable; _ = try self.addInst(.{ @@ -3625,8 +3631,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. if (self.air.value(callee)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; + const fn_owner_decl = mod.declPtr(func.owner_decl); try self.genSetReg(Type.initTag(.usize), .rax, .{ - .got_load = func.owner_decl.link.macho.local_sym_index, + .got_load = fn_owner_decl.link.macho.local_sym_index, }); // callq *%rax _ = try self.addInst(.{ @@ -3639,7 +3646,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. }); } else if (func_value.castTag(.extern_fn)) |func_payload| { const extern_fn = func_payload.data; - const decl_name = extern_fn.owner_decl.name; + const decl_name = mod.declPtr(extern_fn.owner_decl).name; if (extern_fn.lib_name) |lib_name| { log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{ decl_name, @@ -3652,7 +3659,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. .ops = undefined, .data = .{ .extern_fn = .{ - .atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index, + .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.local_sym_index, .sym_name = n_strx, }, }, @@ -3680,7 +3687,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. 
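// The GOT address arithmetic in these call hunks is untouched; only how the
// owner decl is reached changes. For a concrete feel (all values invented):
// with 8-byte pointers, offset_table_index = 3, and a GOT based at 0x1000,
// the entry sits at 0x1000 + 3 * 8 = 0x1018. Assuming `std` is in scope:
test "GOT entry arithmetic (illustrative)" {
    const p_vaddr: u64 = 0x1000;
    const offset_table_index: u64 = 3;
    const ptr_bytes: u64 = 8;
    try std.testing.expectEqual(@as(u64, 0x1018), p_vaddr + offset_table_index * ptr_bytes);
}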
const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); const got_addr = p9.bases.data; - const got_index = func_payload.data.owner_decl.link.plan9.got_index.?; + const got_index = mod.declPtr(func_payload.data.owner_decl).link.plan9.got_index.?; const fn_got_addr = got_addr + got_index * ptr_bytes; _ = try self.addInst(.{ .tag = .call, @@ -4012,9 +4019,11 @@ fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void { const dbg_info = &dw.dbg_info; const index = dbg_info.items.len; try dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4 + const mod = self.bin_file.options.module.?; + const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl); const atom = switch (self.bin_file.tag) { - .elf => &self.mod_fn.owner_decl.link.elf.dbg_info_atom, - .macho => &self.mod_fn.owner_decl.link.macho.dbg_info_atom, + .elf => &fn_owner_decl.link.elf.dbg_info_atom, + .macho => &fn_owner_decl.link.macho.dbg_info_atom, else => unreachable, }; try dw.addTypeReloc(atom, ty, @intCast(u32, index), null); @@ -6124,7 +6133,7 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV return mcv; } -fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue { +fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue { log.debug("lowerDeclRef: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() }); const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); @@ -6137,7 +6146,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa } } - decl.markAlive(); + const module = self.bin_file.options.module.?; + const decl = module.declPtr(decl_index); + module.markDeclAlive(decl); if (self.bin_file.cast(link.File.Elf)) |elf_file| { const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; @@ -6152,7 +6163,7 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes; return MCValue{ .memory = got_addr }; } else if (self.bin_file.cast(link.File.Plan9)) |p9| { - try p9.seeDecl(decl); + try p9.seeDecl(decl_index); const got_addr = p9.bases.data + decl.link.plan9.got_index.? 
* ptr_bytes; return MCValue{ .memory = got_addr }; } else { @@ -6189,7 +6200,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { return self.lowerDeclRef(typed_value, payload.data); } if (typed_value.val.castTag(.decl_ref_mut)) |payload| { - return self.lowerDeclRef(typed_value, payload.data.decl); + return self.lowerDeclRef(typed_value, payload.data.decl_index); } const target = self.target.*; diff --git a/src/codegen.zig b/src/codegen.zig index 7fa0b2c0f8..debd7b5e9d 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -347,7 +347,9 @@ pub fn generateSymbol( switch (container_ptr.tag()) { .decl_ref => { - const decl = container_ptr.castTag(.decl_ref).?.data; + const decl_index = container_ptr.castTag(.decl_ref).?.data; + const mod = bin_file.options.module.?; + const decl = mod.declPtr(decl_index); const addend = blk: { switch (decl.ty.tag()) { .@"struct" => { @@ -364,7 +366,7 @@ pub fn generateSymbol( }, } }; - return lowerDeclRef(bin_file, src_loc, typed_value, decl, code, debug_output, .{ + return lowerDeclRef(bin_file, src_loc, typed_value, decl_index, code, debug_output, .{ .parent_atom_index = reloc_info.parent_atom_index, .addend = (reloc_info.addend orelse 0) + addend, }); @@ -400,8 +402,8 @@ pub fn generateSymbol( switch (array_ptr.tag()) { .decl_ref => { - const decl = array_ptr.castTag(.decl_ref).?.data; - return lowerDeclRef(bin_file, src_loc, typed_value, decl, code, debug_output, .{ + const decl_index = array_ptr.castTag(.decl_ref).?.data; + return lowerDeclRef(bin_file, src_loc, typed_value, decl_index, code, debug_output, .{ .parent_atom_index = reloc_info.parent_atom_index, .addend = (reloc_info.addend orelse 0) + addend, }); @@ -589,7 +591,8 @@ pub fn generateSymbol( } const union_ty = typed_value.ty.cast(Type.Payload.Union).?.data; - const field_index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag, target).?; + const mod = bin_file.options.module.?; + const field_index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag, mod).?; assert(union_ty.haveFieldTypes()); const field_ty = union_ty.fields.values()[field_index].ty; if (!field_ty.hasRuntimeBits()) { @@ -772,12 +775,13 @@ fn lowerDeclRef( bin_file: *link.File, src_loc: Module.SrcLoc, typed_value: TypedValue, - decl: *Module.Decl, + decl_index: Module.Decl.Index, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, reloc_info: RelocInfo, ) GenerateSymbolError!Result { const target = bin_file.options.target; + const module = bin_file.options.module.?; if (typed_value.ty.isSlice()) { // generate ptr var buf: Type.SlicePtrFieldTypeBuffer = undefined; @@ -796,7 +800,7 @@ fn lowerDeclRef( // generate length var slice_len: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, - .data = typed_value.val.sliceLen(target), + .data = typed_value.val.sliceLen(module), }; switch (try generateSymbol(bin_file, src_loc, .{ .ty = Type.usize, @@ -813,14 +817,16 @@ fn lowerDeclRef( } const ptr_width = target.cpu.arch.ptrBitWidth(); + const decl = module.declPtr(decl_index); const is_fn_body = decl.ty.zigTypeTag() == .Fn; if (!is_fn_body and !decl.ty.hasRuntimeBits()) { try code.writer().writeByteNTimes(0xaa, @divExact(ptr_width, 8)); return Result{ .appended = {} }; } - decl.markAlive(); - const vaddr = try bin_file.getDeclVAddr(decl, .{ + module.markDeclAlive(decl); + + const vaddr = try bin_file.getDeclVAddr(decl_index, .{ .parent_atom_index = reloc_info.parent_atom_index, .offset = code.items.len, .addend = reloc_info.addend orelse 0, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 
54f8285291..a0b1bc30b9 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -32,8 +32,8 @@ pub const CValue = union(enum) { /// Index into the parameters arg: usize, /// By-value - decl: *Decl, - decl_ref: *Decl, + decl: Decl.Index, + decl_ref: Decl.Index, /// An undefined (void *) pointer (cannot be dereferenced) undefined_ptr: void, /// Render the slice as an identifier (using fmtIdent) @@ -58,7 +58,7 @@ pub const TypedefMap = std.ArrayHashMap( const FormatTypeAsCIdentContext = struct { ty: Type, - target: std.Target, + mod: *Module, }; /// TODO make this not cut off at 128 bytes @@ -71,14 +71,14 @@ fn formatTypeAsCIdentifier( _ = fmt; _ = options; var buffer = [1]u8{0} ** 128; - var buf = std.fmt.bufPrint(&buffer, "{}", .{data.ty.fmt(data.target)}) catch &buffer; + var buf = std.fmt.bufPrint(&buffer, "{}", .{data.ty.fmt(data.mod)}) catch &buffer; return formatIdent(buf, "", .{}, writer); } -pub fn typeToCIdentifier(ty: Type, target: std.Target) std.fmt.Formatter(formatTypeAsCIdentifier) { +pub fn typeToCIdentifier(ty: Type, mod: *Module) std.fmt.Formatter(formatTypeAsCIdentifier) { return .{ .data = .{ .ty = ty, - .target = target, + .mod = mod, } }; } @@ -349,6 +349,7 @@ pub const DeclGen = struct { gpa: std.mem.Allocator, module: *Module, decl: *Decl, + decl_index: Decl.Index, fwd_decl: std.ArrayList(u8), error_msg: ?*Module.ErrorMsg, /// The key of this map is Type which has references to typedefs_arena. @@ -376,10 +377,8 @@ pub const DeclGen = struct { writer: anytype, ty: Type, val: Value, - decl: *Decl, + decl_index: Decl.Index, ) error{ OutOfMemory, AnalysisFail }!void { - const target = dg.module.getTarget(); - if (ty.isSlice()) { try writer.writeByte('('); try dg.renderTypecast(writer, ty); @@ -387,11 +386,12 @@ pub const DeclGen = struct { var buf: Type.SlicePtrFieldTypeBuffer = undefined; try dg.renderValue(writer, ty.slicePtrFieldType(&buf), val.slicePtr()); try writer.writeAll(", "); - try writer.print("{d}", .{val.sliceLen(target)}); + try writer.print("{d}", .{val.sliceLen(dg.module)}); try writer.writeAll("}"); return; } + const decl = dg.module.declPtr(decl_index); assert(decl.has_tv); // We shouldn't cast C function pointers as this is UB (when you call // them). The analysis until now should ensure that the C function @@ -399,21 +399,21 @@ pub const DeclGen = struct { // somewhere and we should let the C compiler tell us about it. if (ty.castPtrToFn() == null) { // Determine if we must pointer cast. 
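// For illustration only (identifier and cast type hypothetical), the two
// branches below print C along these lines:
//
//     &some_decl                     /* ty matches decl.ty exactly */
//     ((uint8_t const *)&some_decl)  /* otherwise, cast through ty  */
//
// The cast branch exists because a Zig pointer type can legitimately differ
// from the pointee decl's own type (for example in sentinel or constness)
// while still referring to the same memory.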
- if (ty.eql(decl.ty, target)) { + if (ty.eql(decl.ty, dg.module)) { try writer.writeByte('&'); - try dg.renderDeclName(writer, decl); + try dg.renderDeclName(writer, decl_index); return; } try writer.writeAll("(("); try dg.renderTypecast(writer, ty); try writer.writeAll(")&"); - try dg.renderDeclName(writer, decl); + try dg.renderDeclName(writer, decl_index); try writer.writeByte(')'); return; } - try dg.renderDeclName(writer, decl); + try dg.renderDeclName(writer, decl_index); } fn renderInt128( @@ -471,13 +471,13 @@ pub const DeclGen = struct { try writer.writeByte(')'); switch (ptr_val.tag()) { .decl_ref_mut, .decl_ref, .variable => { - const decl = switch (ptr_val.tag()) { + const decl_index = switch (ptr_val.tag()) { .decl_ref => ptr_val.castTag(.decl_ref).?.data, - .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl, + .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index, .variable => ptr_val.castTag(.variable).?.data.owner_decl, else => unreachable, }; - try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl); + try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index); }, .field_ptr => { const field_ptr = ptr_val.castTag(.field_ptr).?.data; @@ -685,7 +685,7 @@ pub const DeclGen = struct { var index: usize = 0; while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeAll(","); - const elem_val = try val.elemValue(arena_allocator, index); + const elem_val = try val.elemValue(dg.module, arena_allocator, index); try dg.renderValue(writer, ai.elem_type, elem_val); } if (ai.sentinel) |s| { @@ -837,7 +837,7 @@ pub const DeclGen = struct { try writer.writeAll(".payload = {"); } - const index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag, target).?; + const index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag, dg.module).?; const field_ty = ty.unionFields().values()[index].ty; const field_name = ty.unionFields().keys()[index]; if (field_ty.hasRuntimeBits()) { @@ -889,7 +889,7 @@ pub const DeclGen = struct { try w.writeAll("void"); } try w.writeAll(" "); - try dg.renderDeclName(w, dg.decl); + try dg.renderDeclName(w, dg.decl_index); try w.writeAll("("); const param_len = dg.decl.ty.fnParamLen(); @@ -927,8 +927,7 @@ pub const DeclGen = struct { try bw.writeAll(" (*"); const name_start = buffer.items.len; - const target = dg.module.getTarget(); - try bw.print("zig_F_{s})(", .{typeToCIdentifier(t, target)}); + try bw.print("zig_F_{s})(", .{typeToCIdentifier(t, dg.module)}); const name_end = buffer.items.len - 2; const param_len = fn_info.param_types.len; @@ -982,11 +981,10 @@ pub const DeclGen = struct { try bw.writeAll("; size_t len; } "); const name_index = buffer.items.len; - const target = dg.module.getTarget(); if (t.isConstPtr()) { - try bw.print("zig_L_{s}", .{typeToCIdentifier(child_type, target)}); + try bw.print("zig_L_{s}", .{typeToCIdentifier(child_type, dg.module)}); } else { - try bw.print("zig_M_{s}", .{typeToCIdentifier(child_type, target)}); + try bw.print("zig_M_{s}", .{typeToCIdentifier(child_type, dg.module)}); } if (ptr_sentinel) |s| { try bw.writeAll("_s_"); @@ -1009,7 +1007,7 @@ pub const DeclGen = struct { fn renderStructTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { const struct_obj = t.castTag(.@"struct").?.data; // Handle 0 bit types elsewhere. 
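// `getFullyQualifiedName` now takes the Module rather than an allocator: the
// qualified name is assembled by chasing owner/parent Decl.Index handles,
// which only the Module can resolve (allocation presumably comes from the
// Module now). A sketch under those assumptions, with illustrative names:
pub fn getFullyQualifiedName(s: *Struct, mod: *Module) ![]u8 {
    return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod);
}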
- const fqn = try struct_obj.getFullyQualifiedName(dg.typedefs.allocator); + const fqn = try struct_obj.getFullyQualifiedName(dg.module); defer dg.typedefs.allocator.free(fqn); var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); @@ -1072,8 +1070,7 @@ pub const DeclGen = struct { try buffer.appendSlice("} "); const name_start = buffer.items.len; - const target = dg.module.getTarget(); - try writer.print("zig_T_{};\n", .{typeToCIdentifier(t, target)}); + try writer.print("zig_T_{};\n", .{typeToCIdentifier(t, dg.module)}); const rendered = buffer.toOwnedSlice(); errdefer dg.typedefs.allocator.free(rendered); @@ -1090,7 +1087,7 @@ pub const DeclGen = struct { fn renderUnionTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { const union_ty = t.cast(Type.Payload.Union).?.data; - const fqn = try union_ty.getFullyQualifiedName(dg.typedefs.allocator); + const fqn = try union_ty.getFullyQualifiedName(dg.module); defer dg.typedefs.allocator.free(fqn); const target = dg.module.getTarget(); @@ -1157,7 +1154,6 @@ pub const DeclGen = struct { try dg.renderTypeAndName(bw, child_type, payload_name, .Mut, 0); try bw.writeAll("; uint16_t error; } "); const name_index = buffer.items.len; - const target = dg.module.getTarget(); if (err_set_type.castTag(.error_set_inferred)) |inf_err_set_payload| { const func = inf_err_set_payload.data.func; try bw.writeAll("zig_E_"); @@ -1165,7 +1161,7 @@ pub const DeclGen = struct { try bw.writeAll(";\n"); } else { try bw.print("zig_E_{s}_{s};\n", .{ - typeToCIdentifier(err_set_type, target), typeToCIdentifier(child_type, target), + typeToCIdentifier(err_set_type, dg.module), typeToCIdentifier(child_type, dg.module), }); } @@ -1195,8 +1191,7 @@ pub const DeclGen = struct { try dg.renderType(bw, elem_type); const name_start = buffer.items.len + 1; - const target = dg.module.getTarget(); - try bw.print(" zig_A_{s}_{d}", .{ typeToCIdentifier(elem_type, target), c_len }); + try bw.print(" zig_A_{s}_{d}", .{ typeToCIdentifier(elem_type, dg.module), c_len }); const name_end = buffer.items.len; try bw.print("[{d}];\n", .{c_len}); @@ -1224,8 +1219,7 @@ pub const DeclGen = struct { try dg.renderTypeAndName(bw, child_type, payload_name, .Mut, 0); try bw.writeAll("; bool is_null; } "); const name_index = buffer.items.len; - const target = dg.module.getTarget(); - try bw.print("zig_Q_{s};\n", .{typeToCIdentifier(child_type, target)}); + try bw.print("zig_Q_{s};\n", .{typeToCIdentifier(child_type, dg.module)}); const rendered = buffer.toOwnedSlice(); errdefer dg.typedefs.allocator.free(rendered); @@ -1535,16 +1529,17 @@ pub const DeclGen = struct { } } - fn renderDeclName(dg: DeclGen, writer: anytype, decl: *Decl) !void { - decl.markAlive(); + fn renderDeclName(dg: DeclGen, writer: anytype, decl_index: Decl.Index) !void { + const decl = dg.module.declPtr(decl_index); + dg.module.markDeclAlive(decl); - if (dg.module.decl_exports.get(decl)) |exports| { + if (dg.module.decl_exports.get(decl_index)) |exports| { return writer.writeAll(exports[0].options.name); } else if (decl.val.tag() == .extern_fn) { return writer.writeAll(mem.sliceTo(decl.name, 0)); } else { const gpa = dg.module.gpa; - const name = try decl.getFullyQualifiedName(gpa); + const name = try decl.getFullyQualifiedName(dg.module); defer gpa.free(name); return writer.print("{ }", .{fmtIdent(name)}); } @@ -1616,7 +1611,11 @@ pub fn genDecl(o: *Object) !void { try fwd_decl_writer.writeAll("zig_threadlocal "); } - const decl_c_value: CValue = if (is_global) .{ .bytes = mem.span(o.dg.decl.name) } 
else .{ .decl = o.dg.decl }; + const decl_c_value: CValue = if (is_global) .{ + .bytes = mem.span(o.dg.decl.name), + } else .{ + .decl = o.dg.decl_index, + }; try o.dg.renderTypeAndName(fwd_decl_writer, o.dg.decl.ty, decl_c_value, .Mut, o.dg.decl.@"align"); try fwd_decl_writer.writeAll(";\n"); @@ -1641,7 +1640,7 @@ pub fn genDecl(o: *Object) !void { // TODO ask the Decl if it is const // https://github.com/ziglang/zig/issues/7582 - const decl_c_value: CValue = .{ .decl = o.dg.decl }; + const decl_c_value: CValue = .{ .decl = o.dg.decl_index }; try o.dg.renderTypeAndName(writer, tv.ty, decl_c_value, .Mut, o.dg.decl.@"align"); try writer.writeAll(" = "); @@ -2234,13 +2233,12 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue { if (src_val_is_undefined) return try airStoreUndefined(f, dest_ptr); - const target = f.object.dg.module.getTarget(); const writer = f.object.writer(); if (lhs_child_type.zigTypeTag() == .Array) { // For this memcpy to safely work we need the rhs to have the same // underlying type as the lhs (i.e. they must both be arrays of the same underlying type). const rhs_type = f.air.typeOf(bin_op.rhs); - assert(rhs_type.eql(lhs_child_type, target)); + assert(rhs_type.eql(lhs_child_type, f.object.dg.module)); // If the source is a constant, writeCValue will emit a brace initialization // so work around this by initializing into new local. @@ -2780,7 +2778,8 @@ fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const writer = f.object.writer(); const function = f.air.values[ty_pl.payload].castTag(.function).?.data; - try writer.print("/* dbg func:{s} */\n", .{function.owner_decl.name}); + const mod = f.object.dg.module; + try writer.print("/* dbg func:{s} */\n", .{mod.declPtr(function.owner_decl).name}); return CValue.none; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index cdb9addcff..ba637cb99e 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -161,6 +161,7 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 { pub const Object = struct { gpa: Allocator, + module: *Module, llvm_module: *const llvm.Module, di_builder: ?*llvm.DIBuilder, /// One of these mappings: @@ -181,7 +182,7 @@ pub const Object = struct { /// version of the name and incorrectly get function not found in the llvm module. /// * it works for functions not all globals. /// Therefore, this table keeps track of the mapping. - decl_map: std.AutoHashMapUnmanaged(*const Module.Decl, *const llvm.Value), + decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *const llvm.Value), /// Maps Zig types to LLVM types. The table memory itself is backed by the GPA of /// the compiler, but the Type/Value memory here is backed by `type_map_arena`. 
/// TODO we need to remove entries from this map in response to incremental compilation @@ -340,6 +341,7 @@ pub const Object = struct { return Object{ .gpa = gpa, + .module = options.module.?, .llvm_module = llvm_module, .di_map = .{}, .di_builder = opt_di_builder, @@ -568,18 +570,20 @@ pub const Object = struct { air: Air, liveness: Liveness, ) !void { - const decl = func.owner_decl; + const decl_index = func.owner_decl; + const decl = module.declPtr(decl_index); var dg: DeclGen = .{ .context = o.context, .object = o, .module = module, + .decl_index = decl_index, .decl = decl, .err_msg = null, .gpa = module.gpa, }; - const llvm_func = try dg.resolveLlvmFunction(decl); + const llvm_func = try dg.resolveLlvmFunction(decl_index); if (module.align_stack_fns.get(func)) |align_info| { dg.addFnAttrInt(llvm_func, "alignstack", align_info.alignment); @@ -632,7 +636,7 @@ pub const Object = struct { const line_number = decl.src_line + 1; const is_internal_linkage = decl.val.tag() != .extern_fn and - !dg.module.decl_exports.contains(decl); + !dg.module.decl_exports.contains(decl_index); const noret_bit: c_uint = if (fn_info.return_type.isNoReturn()) llvm.DIFlags.NoReturn else @@ -684,48 +688,51 @@ pub const Object = struct { fg.genBody(air.getMainBody()) catch |err| switch (err) { error.CodegenFail => { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, dg.err_msg.?); + try module.failed_decls.put(module.gpa, decl_index, dg.err_msg.?); dg.err_msg = null; return; }, else => |e| return e, }; - const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; - try o.updateDeclExports(module, decl, decl_exports); + const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{}; + try o.updateDeclExports(module, decl_index, decl_exports); } - pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void { + pub fn updateDecl(self: *Object, module: *Module, decl_index: Module.Decl.Index) !void { + const decl = module.declPtr(decl_index); var dg: DeclGen = .{ .context = self.context, .object = self, .module = module, .decl = decl, + .decl_index = decl_index, .err_msg = null, .gpa = module.gpa, }; dg.genDecl() catch |err| switch (err) { error.CodegenFail => { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, dg.err_msg.?); + try module.failed_decls.put(module.gpa, decl_index, dg.err_msg.?); dg.err_msg = null; return; }, else => |e| return e, }; - const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; - try self.updateDeclExports(module, decl, decl_exports); + const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{}; + try self.updateDeclExports(module, decl_index, decl_exports); } pub fn updateDeclExports( self: *Object, - module: *const Module, - decl: *const Module.Decl, + module: *Module, + decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { // If the module does not already have the function, we ignore this function call // because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`. 
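A minimal sketch of the handle pattern these hunks migrate to, with illustrative names rather than the compiler's real API: maps such as `decl_map` key on a stable `Decl.Index`, and code re-derives the `*Decl` through `declPtr` at each use, so the pointer itself never has to outlive a mutation of the decl storage.

const std = @import("std");

const Decl = struct { name: []const u8 };

const Module = struct {
    pub const Index = u32;

    // Backing storage; the compiler's real container differs.
    decls: std.ArrayListUnmanaged(Decl) = .{},

    // The returned pointer is ephemeral; only the Index is a stable key.
    pub fn declPtr(mod: *Module, index: Index) *Decl {
        return &mod.decls.items[index];
    }
};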
- const llvm_global = self.decl_map.get(decl) orelse return; + const llvm_global = self.decl_map.get(decl_index) orelse return; + const decl = module.declPtr(decl_index); if (decl.isExtern()) { llvm_global.setValueName(decl.name); llvm_global.setUnnamedAddr(.False); @@ -798,7 +805,7 @@ pub const Object = struct { } } } else { - const fqn = try decl.getFullyQualifiedName(module.gpa); + const fqn = try decl.getFullyQualifiedName(module); defer module.gpa.free(fqn); llvm_global.setValueName2(fqn.ptr, fqn.len); llvm_global.setLinkage(.Internal); @@ -814,8 +821,8 @@ pub const Object = struct { } } - pub fn freeDecl(self: *Object, decl: *Module.Decl) void { - const llvm_value = self.decl_map.get(decl) orelse return; + pub fn freeDecl(self: *Object, decl_index: Module.Decl.Index) void { + const llvm_value = self.decl_map.get(decl_index) orelse return; llvm_value.deleteGlobal(); } @@ -847,7 +854,7 @@ pub const Object = struct { const gpa = o.gpa; // Be careful not to reference this `gop` variable after any recursive calls // to `lowerDebugType`. - const gop = try o.di_type_map.getOrPutContext(gpa, ty, .{ .target = o.target }); + const gop = try o.di_type_map.getOrPutContext(gpa, ty, .{ .mod = o.module }); if (gop.found_existing) { const annotated = gop.value_ptr.*; const di_type = annotated.toDIType(); @@ -860,7 +867,7 @@ pub const Object = struct { }; return o.lowerDebugTypeImpl(entry, resolve, di_type); } - errdefer assert(o.di_type_map.orderedRemoveContext(ty, .{ .target = o.target })); + errdefer assert(o.di_type_map.orderedRemoveContext(ty, .{ .mod = o.module })); // The Type memory is ephemeral; since we want to store a longer-lived // reference, we need to copy it here. gop.key_ptr.* = try ty.copy(o.type_map_arena.allocator()); @@ -891,7 +898,7 @@ pub const Object = struct { .Int => { const info = ty.intInfo(target); assert(info.bits != 0); - const name = try ty.nameAlloc(gpa, target); + const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); const dwarf_encoding: c_uint = switch (info.signedness) { .signed => DW.ATE.signed, @@ -902,13 +909,14 @@ pub const Object = struct { return di_type; }, .Enum => { - const owner_decl = ty.getOwnerDecl(); + const owner_decl_index = ty.getOwnerDecl(); + const owner_decl = o.module.declPtr(owner_decl_index); if (!ty.hasRuntimeBitsIgnoreComptime()) { - const enum_di_ty = try o.makeEmptyNamespaceDIType(owner_decl); + const enum_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` // means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .mod = o.module }); return enum_di_ty; } @@ -938,7 +946,7 @@ pub const Object = struct { const di_file = try o.getDIFile(gpa, owner_decl.src_namespace.file_scope); const di_scope = try o.namespaceToDebugScope(owner_decl.src_namespace); - const name = try ty.nameAlloc(gpa, target); + const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); var buffer: Type.Payload.Bits = undefined; const int_ty = ty.intTagType(&buffer); @@ -956,12 +964,12 @@ pub const Object = struct { "", ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
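The repeated "can't use `gop` anymore" comments guard against a concrete hazard: `getOrPut` returns interior pointers, and a recursive call that inserts into the same map may grow it and invalidate them, which is why the code re-inserts with `putContext` instead of touching `gop` again. A self-contained illustration of the same rule, assuming nothing beyond the standard library:

const std = @import("std");

// Hypothetical recursive step standing in for `lowerDebugType`: it
// inserts more entries and may force the map to grow and rehash.
fn insertMore(map: *std.AutoHashMap(u32, u32)) !void {
    var i: u32 = 2;
    while (i < 100) : (i += 1) try map.put(i, i);
}

test "getOrPut results do not survive map growth" {
    var map = std.AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    const gop = try map.getOrPut(1);
    gop.value_ptr.* = 10;

    try insertMore(&map); // after this, `gop.value_ptr` may dangle

    // The safe pattern: redo the lookup rather than reuse the stale `gop`.
    map.getPtr(1).?.* += 1;
}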
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .mod = o.module }); return enum_di_ty; }, .Float => { const bits = ty.floatBits(target); - const name = try ty.nameAlloc(gpa, target); + const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); const di_type = dib.createBasicType(name, bits, DW.ATE.float); gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type); @@ -1009,7 +1017,7 @@ pub const Object = struct { const bland_ptr_ty = Type.initPayload(&payload.base); const ptr_di_ty = try o.lowerDebugType(bland_ptr_ty, resolve); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module }); return ptr_di_ty; } @@ -1018,7 +1026,7 @@ pub const Object = struct { const ptr_ty = ty.slicePtrFieldType(&buf); const len_ty = Type.usize; - const name = try ty.nameAlloc(gpa, target); + const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); const di_file: ?*llvm.DIFile = null; const line = 0; @@ -1089,12 +1097,12 @@ pub const Object = struct { ); dib.replaceTemporary(fwd_decl, full_di_ty); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); return full_di_ty; } const elem_di_ty = try o.lowerDebugType(ptr_info.pointee_type, .fwd); - const name = try ty.nameAlloc(gpa, target); + const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); const ptr_di_ty = dib.createPointerType( elem_di_ty, @@ -1103,7 +1111,7 @@ pub const Object = struct { name, ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .mod = o.module }); return ptr_di_ty; }, .Opaque => { @@ -1112,9 +1120,10 @@ pub const Object = struct { gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty); return di_ty; } - const name = try ty.nameAlloc(gpa, target); + const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); - const owner_decl = ty.getOwnerDecl(); + const owner_decl_index = ty.getOwnerDecl(); + const owner_decl = o.module.declPtr(owner_decl_index); const opaque_di_ty = dib.createForwardDeclType( DW.TAG.structure_type, name, @@ -1124,7 +1133,7 @@ pub const Object = struct { ); // The recursive call to `lowerDebugType` via `namespaceToDebugScope` // means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(opaque_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(opaque_di_ty), .{ .mod = o.module }); return opaque_di_ty; }, .Array => { @@ -1135,7 +1144,7 @@ pub const Object = struct { @intCast(c_int, ty.arrayLen()), ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty), .{ .mod = o.module }); return array_di_ty; }, .Vector => { @@ -1146,11 +1155,11 @@ pub const Object = struct { ty.vectorLen(), ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(vector_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(vector_di_ty), .{ .mod = o.module }); return vector_di_ty; }, .Optional => { - const name = try ty.nameAlloc(gpa, target); + const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); var buf: Type.Payload.ElemType = undefined; const child_ty = ty.optionalChild(&buf); @@ -1162,7 +1171,7 @@ pub const Object = struct { if (ty.isPtrLikeOptional()) { const ptr_di_ty = try o.lowerDebugType(child_ty, resolve); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .mod = o.module }); return ptr_di_ty; } @@ -1235,7 +1244,7 @@ pub const Object = struct { ); dib.replaceTemporary(fwd_decl, full_di_ty); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); return full_di_ty; }, .ErrorUnion => { @@ -1244,10 +1253,10 @@ pub const Object = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { const err_set_di_ty = try o.lowerDebugType(err_set_ty, .full); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty), .{ .mod = o.module }); return err_set_di_ty; } - const name = try ty.nameAlloc(gpa, target); + const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); const di_file: ?*llvm.DIFile = null; const line = 0; @@ -1332,7 +1341,7 @@ pub const Object = struct { ); dib.replaceTemporary(fwd_decl, full_di_ty); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); return full_di_ty; }, .ErrorSet => { @@ -1344,7 +1353,7 @@ pub const Object = struct { }, .Struct => { const compile_unit_scope = o.di_compile_unit.?.toScope(); - const name = try ty.nameAlloc(gpa, target); + const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); if (ty.castTag(.@"struct")) |payload| { @@ -1431,7 +1440,7 @@ pub const Object = struct { ); dib.replaceTemporary(fwd_decl, full_di_ty); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); return full_di_ty; } @@ -1445,23 +1454,23 @@ pub const Object = struct { // into. 
Therefore we can satisfy this by making an empty namespace, // rather than changing the frontend to unnecessarily resolve the // struct field types. - const owner_decl = ty.getOwnerDecl(); - const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl); + const owner_decl_index = ty.getOwnerDecl(); + const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); dib.replaceTemporary(fwd_decl, struct_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` // means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module }); return struct_di_ty; } } if (!ty.hasRuntimeBitsIgnoreComptime()) { - const owner_decl = ty.getOwnerDecl(); - const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl); + const owner_decl_index = ty.getOwnerDecl(); + const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); dib.replaceTemporary(fwd_decl, struct_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` // means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module }); return struct_di_ty; } @@ -1516,14 +1525,14 @@ pub const Object = struct { ); dib.replaceTemporary(fwd_decl, full_di_ty); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); return full_di_ty; }, .Union => { const compile_unit_scope = o.di_compile_unit.?.toScope(); - const owner_decl = ty.getOwnerDecl(); + const owner_decl_index = ty.getOwnerDecl(); - const name = try ty.nameAlloc(gpa, target); + const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); const fwd_decl = opt_fwd_decl orelse blk: { @@ -1540,11 +1549,11 @@ pub const Object = struct { }; if (!ty.hasRuntimeBitsIgnoreComptime()) { - const union_di_ty = try o.makeEmptyNamespaceDIType(owner_decl); + const union_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); dib.replaceTemporary(fwd_decl, union_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` // means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .mod = o.module }); return union_di_ty; } @@ -1572,7 +1581,7 @@ pub const Object = struct { dib.replaceTemporary(fwd_decl, full_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` // means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); return full_di_ty; } @@ -1626,7 +1635,7 @@ pub const Object = struct { if (layout.tag_size == 0) { dib.replaceTemporary(fwd_decl, union_di_ty); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .mod = o.module }); return union_di_ty; } @@ -1685,7 +1694,7 @@ pub const Object = struct { ); dib.replaceTemporary(fwd_decl, full_di_ty); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); return full_di_ty; }, .Fn => { @@ -1733,7 +1742,7 @@ pub const Object = struct { 0, ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(fn_di_ty), .{ .target = o.target }); + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(fn_di_ty), .{ .mod = o.module }); return fn_di_ty; }, .ComptimeInt => unreachable, @@ -1762,7 +1771,8 @@ pub const Object = struct { /// This is to be used instead of void for debug info types, to avoid tripping /// Assertion `!isa(Scope) && "shouldn't make a namespace scope for a type"' /// when targeting CodeView (Windows). - fn makeEmptyNamespaceDIType(o: *Object, decl: *const Module.Decl) !*llvm.DIType { + fn makeEmptyNamespaceDIType(o: *Object, decl_index: Module.Decl.Index) !*llvm.DIType { + const decl = o.module.declPtr(decl_index); const fields: [0]*llvm.DIType = .{}; return o.di_builder.?.createStructType( try o.namespaceToDebugScope(decl.src_namespace), @@ -1787,6 +1797,7 @@ pub const DeclGen = struct { object: *Object, module: *Module, decl: *Module.Decl, + decl_index: Module.Decl.Index, gpa: Allocator, err_msg: ?*Module.ErrorMsg, @@ -1804,6 +1815,7 @@ pub const DeclGen = struct { fn genDecl(dg: *DeclGen) !void { const decl = dg.decl; + const decl_index = dg.decl_index; assert(decl.has_tv); log.debug("gen: {s} type: {}, value: {}", .{ @@ -1817,7 +1829,7 @@ pub const DeclGen = struct { _ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl); } else { const target = dg.module.getTarget(); - var global = try dg.resolveGlobalDecl(decl); + var global = try dg.resolveGlobalDecl(decl_index); global.setAlignment(decl.getAlignment(target)); assert(decl.has_tv); const init_val = if (decl.val.castTag(.variable)) |payload| init_val: { @@ -1858,7 +1870,7 @@ pub const DeclGen = struct { // old uses. const new_global_ptr = new_global.constBitCast(global.typeOf()); global.replaceAllUsesWith(new_global_ptr); - dg.object.decl_map.putAssumeCapacity(decl, new_global); + dg.object.decl_map.putAssumeCapacity(decl_index, new_global); new_global.takeName(global); global.deleteGlobal(); global = new_global; @@ -1869,7 +1881,7 @@ pub const DeclGen = struct { const di_file = try dg.object.getDIFile(dg.gpa, decl.src_namespace.file_scope); const line_number = decl.src_line + 1; - const is_internal_linkage = !dg.module.decl_exports.contains(decl); + const is_internal_linkage = !dg.module.decl_exports.contains(decl_index); const di_global = dib.createGlobalVariable( di_file.toScope(), decl.name, @@ -1888,12 +1900,10 @@ pub const DeclGen = struct { /// If the llvm function does not exist, create it. /// Note that this can be called before the function's semantic analysis has /// completed, so if any attributes rely on that, they must be done in updateFunc, not here. 
- fn resolveLlvmFunction(dg: *DeclGen, decl: *Module.Decl) !*const llvm.Value { - return dg.resolveLlvmFunctionExtra(decl, decl.ty); - } - - fn resolveLlvmFunctionExtra(dg: *DeclGen, decl: *Module.Decl, zig_fn_type: Type) !*const llvm.Value { - const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl); + fn resolveLlvmFunction(dg: *DeclGen, decl_index: Module.Decl.Index) !*const llvm.Value { + const decl = dg.module.declPtr(decl_index); + const zig_fn_type = decl.ty; + const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl_index); if (gop.found_existing) return gop.value_ptr.*; assert(decl.has_tv); @@ -1903,7 +1913,7 @@ pub const DeclGen = struct { const fn_type = try dg.llvmType(zig_fn_type); - const fqn = try decl.getFullyQualifiedName(dg.gpa); + const fqn = try decl.getFullyQualifiedName(dg.module); defer dg.gpa.free(fqn); const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace"); @@ -1996,12 +2006,13 @@ pub const DeclGen = struct { // TODO add target-cpu and target-features fn attributes } - fn resolveGlobalDecl(dg: *DeclGen, decl: *Module.Decl) Error!*const llvm.Value { - const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl); + fn resolveGlobalDecl(dg: *DeclGen, decl_index: Module.Decl.Index) Error!*const llvm.Value { + const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl_index); if (gop.found_existing) return gop.value_ptr.*; - errdefer assert(dg.object.decl_map.remove(decl)); + errdefer assert(dg.object.decl_map.remove(decl_index)); - const fqn = try decl.getFullyQualifiedName(dg.gpa); + const decl = dg.module.declPtr(decl_index); + const fqn = try decl.getFullyQualifiedName(dg.module); defer dg.gpa.free(fqn); const llvm_type = try dg.llvmType(decl.ty); @@ -2122,7 +2133,7 @@ pub const DeclGen = struct { }, .Opaque => switch (t.tag()) { .@"opaque" => { - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .target = target }); + const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); if (gop.found_existing) return gop.value_ptr.*; // The Type memory is ephemeral; since we want to store a longer-lived @@ -2130,7 +2141,7 @@ pub const DeclGen = struct { gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); const opaque_obj = t.castTag(.@"opaque").?.data; - const name = try opaque_obj.getFullyQualifiedName(gpa); + const name = try opaque_obj.getFullyQualifiedName(dg.module); defer gpa.free(name); const llvm_struct_ty = dg.context.structCreateNamed(name); @@ -2191,7 +2202,7 @@ pub const DeclGen = struct { return dg.context.intType(16); }, .Struct => { - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .target = target }); + const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); if (gop.found_existing) return gop.value_ptr.*; // The Type memory is ephemeral; since we want to store a longer-lived @@ -2260,7 +2271,7 @@ pub const DeclGen = struct { return int_llvm_ty; } - const name = try struct_obj.getFullyQualifiedName(gpa); + const name = try struct_obj.getFullyQualifiedName(dg.module); defer gpa.free(name); const llvm_struct_ty = dg.context.structCreateNamed(name); @@ -2314,7 +2325,7 @@ pub const DeclGen = struct { return llvm_struct_ty; }, .Union => { - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .target = target }); + const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); if (gop.found_existing) return gop.value_ptr.*; // The Type memory is ephemeral; since we want to store a longer-lived @@ -2330,7 +2341,7 @@ pub const DeclGen = struct { 
return enum_tag_llvm_ty; } - const name = try union_obj.getFullyQualifiedName(gpa); + const name = try union_obj.getFullyQualifiedName(dg.module); defer gpa.free(name); const llvm_union_ty = dg.context.structCreateNamed(name); @@ -2439,7 +2450,7 @@ pub const DeclGen = struct { // TODO this duplicates code with Pointer but they should share the handling // of the tv.val.tag() and then Int should do extra constPtrToInt on top .Int => switch (tv.val.tag()) { - .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl), + .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), else => { var bigint_space: Value.BigIntSpace = undefined; @@ -2524,12 +2535,13 @@ pub const DeclGen = struct { } }, .Pointer => switch (tv.val.tag()) { - .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl), + .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), .variable => { - const decl = tv.val.castTag(.variable).?.data.owner_decl; - decl.markAlive(); - const val = try dg.resolveGlobalDecl(decl); + const decl_index = tv.val.castTag(.variable).?.data.owner_decl; + const decl = dg.module.declPtr(decl_index); + dg.module.markDeclAlive(decl); + const val = try dg.resolveGlobalDecl(decl_index); const llvm_var_type = try dg.llvmType(tv.ty); const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace"); const llvm_type = llvm_var_type.pointerType(llvm_addrspace); @@ -2683,13 +2695,14 @@ pub const DeclGen = struct { return dg.context.constStruct(&fields, fields.len, .False); }, .Fn => { - const fn_decl = switch (tv.val.tag()) { + const fn_decl_index = switch (tv.val.tag()) { .extern_fn => tv.val.castTag(.extern_fn).?.data.owner_decl, .function => tv.val.castTag(.function).?.data.owner_decl, else => unreachable, }; - fn_decl.markAlive(); - return dg.resolveLlvmFunction(fn_decl); + const fn_decl = dg.module.declPtr(fn_decl_index); + dg.module.markDeclAlive(fn_decl); + return dg.resolveLlvmFunction(fn_decl_index); }, .ErrorSet => { const llvm_ty = try dg.llvmType(tv.ty); @@ -2911,7 +2924,7 @@ pub const DeclGen = struct { }); } const union_obj = tv.ty.cast(Type.Payload.Union).?.data; - const field_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, target).?; + const field_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, dg.module).?; assert(union_obj.haveFieldTypes()); const field_ty = union_obj.fields.values()[field_index].ty; const payload = p: { @@ -3049,17 +3062,22 @@ pub const DeclGen = struct { llvm_ptr: *const llvm.Value, }; - fn lowerParentPtrDecl(dg: *DeclGen, ptr_val: Value, decl: *Module.Decl, ptr_child_ty: Type) Error!*const llvm.Value { - decl.markAlive(); + fn lowerParentPtrDecl( + dg: *DeclGen, + ptr_val: Value, + decl_index: Module.Decl.Index, + ptr_child_ty: Type, + ) Error!*const llvm.Value { + const decl = dg.module.declPtr(decl_index); + dg.module.markDeclAlive(decl); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, .data = decl.ty, }; const ptr_ty = Type.initPayload(&ptr_ty_payload.base); - const llvm_ptr = try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl); + const llvm_ptr = try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index); - const target = dg.module.getTarget(); - if 
(ptr_child_ty.eql(decl.ty, target)) { + if (ptr_child_ty.eql(decl.ty, dg.module)) { return llvm_ptr; } else { return llvm_ptr.constBitCast((try dg.llvmType(ptr_child_ty)).pointerType(0)); @@ -3071,7 +3089,7 @@ pub const DeclGen = struct { var bitcast_needed: bool = undefined; const llvm_ptr = switch (ptr_val.tag()) { .decl_ref_mut => { - const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl; + const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; return dg.lowerParentPtrDecl(ptr_val, decl, ptr_child_ty); }, .decl_ref => { @@ -3123,7 +3141,7 @@ pub const DeclGen = struct { }, .Struct => { const field_ty = parent_ty.structFieldType(field_index); - bitcast_needed = !field_ty.eql(ptr_child_ty, target); + bitcast_needed = !field_ty.eql(ptr_child_ty, dg.module); var ty_buf: Type.Payload.Pointer = undefined; const llvm_field_index = llvmFieldIndex(parent_ty, field_index, target, &ty_buf).?; @@ -3139,7 +3157,7 @@ pub const DeclGen = struct { .elem_ptr => blk: { const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr, elem_ptr.elem_ty); - bitcast_needed = !elem_ptr.elem_ty.eql(ptr_child_ty, target); + bitcast_needed = !elem_ptr.elem_ty.eql(ptr_child_ty, dg.module); const llvm_usize = try dg.llvmType(Type.usize); const indices: [1]*const llvm.Value = .{ @@ -3153,7 +3171,7 @@ pub const DeclGen = struct { var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf); - bitcast_needed = !payload_ty.eql(ptr_child_ty, target); + bitcast_needed = !payload_ty.eql(ptr_child_ty, dg.module); if (!payload_ty.hasRuntimeBitsIgnoreComptime() or payload_ty.isPtrLikeOptional()) { // In this case, we represent pointer to optional the same as pointer @@ -3173,7 +3191,7 @@ pub const DeclGen = struct { const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, eu_payload_ptr.container_ty); const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload(); - bitcast_needed = !payload_ty.eql(ptr_child_ty, target); + bitcast_needed = !payload_ty.eql(ptr_child_ty, dg.module); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { // In this case, we represent pointer to error union the same as pointer @@ -3201,15 +3219,14 @@ pub const DeclGen = struct { fn lowerDeclRefValue( self: *DeclGen, tv: TypedValue, - decl: *Module.Decl, + decl_index: Module.Decl.Index, ) Error!*const llvm.Value { - const target = self.module.getTarget(); if (tv.ty.isSlice()) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = tv.ty.slicePtrFieldType(&buf); var slice_len: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, - .data = tv.val.sliceLen(target), + .data = tv.val.sliceLen(self.module), }; const fields: [2]*const llvm.Value = .{ try self.genTypedValue(.{ @@ -3229,8 +3246,9 @@ pub const DeclGen = struct { // const bar = foo; // ... &bar; // `bar` is just an alias and we actually want to lower a reference to `foo`. 
+ const decl = self.module.declPtr(decl_index); if (decl.val.castTag(.function)) |func| { - if (func.data.owner_decl != decl) { + if (func.data.owner_decl != decl_index) { return self.lowerDeclRefValue(tv, func.data.owner_decl); } } @@ -3240,12 +3258,12 @@ pub const DeclGen = struct { return self.lowerPtrToVoid(tv.ty); } - decl.markAlive(); + self.module.markDeclAlive(decl); const llvm_val = if (is_fn_body) - try self.resolveLlvmFunction(decl) + try self.resolveLlvmFunction(decl_index) else - try self.resolveGlobalDecl(decl); + try self.resolveGlobalDecl(decl_index); const llvm_type = try self.llvmType(tv.ty); if (tv.ty.zigTypeTag() == .Int) { @@ -4405,7 +4423,8 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const func = self.air.values[ty_pl.payload].castTag(.function).?.data; - const decl = func.owner_decl; + const decl_index = func.owner_decl; + const decl = self.dg.module.declPtr(decl_index); const di_file = try self.dg.object.getDIFile(self.gpa, decl.src_namespace.file_scope); self.di_file = di_file; const line_number = decl.src_line + 1; @@ -4417,10 +4436,10 @@ pub const FuncGen = struct { .base_line = self.base_line, }); - const fqn = try decl.getFullyQualifiedName(self.gpa); + const fqn = try decl.getFullyQualifiedName(self.dg.module); defer self.gpa.free(fqn); - const is_internal_linkage = !self.dg.module.decl_exports.contains(decl); + const is_internal_linkage = !self.dg.module.decl_exports.contains(decl_index); const subprogram = dib.createFunction( di_file.toScope(), decl.name, @@ -4447,7 +4466,8 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const func = self.air.values[ty_pl.payload].castTag(.function).?.data; - const decl = func.owner_decl; + const mod = self.dg.module; + const decl = mod.declPtr(func.owner_decl); const di_file = try self.dg.object.getDIFile(self.gpa, decl.src_namespace.file_scope); self.di_file = di_file; const old = self.dbg_inlined.pop(); @@ -5887,7 +5907,7 @@ pub const FuncGen = struct { if (self.dg.object.di_builder) |dib| { const src_index = self.getSrcArgIndex(self.arg_index - 1); const func = self.dg.decl.getFunction().?; - const lbrace_line = func.owner_decl.src_line + func.lbrace_line + 1; + const lbrace_line = self.dg.module.declPtr(func.owner_decl).src_line + func.lbrace_line + 1; const lbrace_col = func.lbrace_column + 1; const di_local_var = dib.createParameterVariable( self.di_scope.?, @@ -6430,8 +6450,9 @@ pub const FuncGen = struct { const operand = try self.resolveInst(un_op); const enum_ty = self.air.typeOf(un_op); + const mod = self.dg.module; const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{ - try enum_ty.getOwnerDecl().getFullyQualifiedName(arena), + try mod.declPtr(enum_ty.getOwnerDecl()).getFullyQualifiedName(mod), }); const llvm_fn = try self.getEnumTagNameFunction(enum_ty, llvm_fn_name); @@ -6617,7 +6638,7 @@ pub const FuncGen = struct { for (values) |*val, i| { var buf: Value.ElemValueBuffer = undefined; - const elem = mask.elemValueBuffer(i, &buf); + const elem = mask.elemValueBuffer(self.dg.module, i, &buf); if (elem.isUndef()) { val.* = llvm_i32.getUndef(); } else { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 6072c59845..791c2dc187 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -633,7 +633,13 @@ pub const DeclGen = struct { return result_id.toRef(); } - fn airArithOp(self: *DeclGen, inst: Air.Inst.Index, comptime fop: Opcode, comptime sop: Opcode, comptime uop: Opcode) 
!IdRef { + fn airArithOp( + self: *DeclGen, + inst: Air.Inst.Index, + comptime fop: Opcode, + comptime sop: Opcode, + comptime uop: Opcode, + ) !IdRef { // LHS and RHS are guaranteed to have the same type, and AIR guarantees // the result to be the same as the LHS and RHS, which matches SPIR-V. const ty = self.air.typeOfIndex(inst); @@ -644,10 +650,8 @@ pub const DeclGen = struct { const result_id = self.spv.allocId(); const result_type_id = try self.resolveTypeId(ty); - const target = self.getTarget(); - - assert(self.air.typeOf(bin_op.lhs).eql(ty, target)); - assert(self.air.typeOf(bin_op.rhs).eql(ty, target)); + assert(self.air.typeOf(bin_op.lhs).eql(ty, self.module)); + assert(self.air.typeOf(bin_op.rhs).eql(ty, self.module)); // Binary operations are generally applicable to both scalar and vector operations // in SPIR-V, but int and float versions of operations require different opcodes. @@ -694,7 +698,7 @@ pub const DeclGen = struct { const result_id = self.spv.allocId(); const result_type_id = try self.resolveTypeId(Type.initTag(.bool)); const op_ty = self.air.typeOf(bin_op.lhs); - assert(op_ty.eql(self.air.typeOf(bin_op.rhs), self.getTarget())); + assert(op_ty.eql(self.air.typeOf(bin_op.rhs), self.module)); // Comparisons are generally applicable to both scalar and vector operations in SPIR-V, // but int and float versions of operations require different opcodes. diff --git a/src/crash_report.zig b/src/crash_report.zig index 472e52b04d..e9d4022bba 100644 --- a/src/crash_report.zig +++ b/src/crash_report.zig @@ -90,9 +90,11 @@ fn dumpStatusReport() !void { const stderr = io.getStdErr().writer(); const block: *Sema.Block = anal.block; + const mod = anal.sema.mod; + const block_src_decl = mod.declPtr(block.src_decl); try stderr.writeAll("Analyzing "); - try writeFullyQualifiedDeclWithFile(block.src_decl, stderr); + try writeFullyQualifiedDeclWithFile(mod, block_src_decl, stderr); try stderr.writeAll("\n"); print_zir.renderInstructionContext( @@ -100,7 +102,7 @@ fn dumpStatusReport() !void { anal.body, anal.body_index, block.namespace.file_scope, - block.src_decl.src_node, + block_src_decl.src_node, 6, // indent stderr, ) catch |err| switch (err) { @@ -115,13 +117,14 @@ fn dumpStatusReport() !void { while (parent) |curr| { fba.reset(); try stderr.writeAll(" in "); - try writeFullyQualifiedDeclWithFile(curr.block.src_decl, stderr); + const curr_block_src_decl = mod.declPtr(curr.block.src_decl); + try writeFullyQualifiedDeclWithFile(mod, curr_block_src_decl, stderr); try stderr.writeAll("\n > "); print_zir.renderSingleInstruction( allocator, curr.body[curr.body_index], curr.block.namespace.file_scope, - curr.block.src_decl.src_node, + curr_block_src_decl.src_node, 6, // indent stderr, ) catch |err| switch (err) { @@ -146,10 +149,10 @@ fn writeFilePath(file: *Module.File, stream: anytype) !void { try stream.writeAll(file.sub_file_path); } -fn writeFullyQualifiedDeclWithFile(decl: *Decl, stream: anytype) !void { +fn writeFullyQualifiedDeclWithFile(mod: *Module, decl: *Decl, stream: anytype) !void { try writeFilePath(decl.getFileScope(), stream); try stream.writeAll(": "); - try decl.renderFullyQualifiedDebugName(stream); + try decl.renderFullyQualifiedDebugName(mod, stream); } pub fn compilerPanic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn { diff --git a/src/link.zig b/src/link.zig index c449a37ee6..08c8446219 100644 --- a/src/link.zig +++ b/src/link.zig @@ -417,17 +417,18 @@ pub const File = struct { /// Called from within the CodeGen to lower a local variable 
instantiation as an unnamed /// constant. Returns the symbol index of the lowered constant in the read-only section /// of the final binary. - pub fn lowerUnnamedConst(base: *File, tv: TypedValue, decl: *Module.Decl) UpdateDeclError!u32 { + pub fn lowerUnnamedConst(base: *File, tv: TypedValue, decl_index: Module.Decl.Index) UpdateDeclError!u32 { + const decl = base.options.module.?.declPtr(decl_index); log.debug("lowerUnnamedConst {*} ({s})", .{ decl, decl.name }); switch (base.tag) { // zig fmt: off - .coff => return @fieldParentPtr(Coff, "base", base).lowerUnnamedConst(tv, decl), - .elf => return @fieldParentPtr(Elf, "base", base).lowerUnnamedConst(tv, decl), - .macho => return @fieldParentPtr(MachO, "base", base).lowerUnnamedConst(tv, decl), - .plan9 => return @fieldParentPtr(Plan9, "base", base).lowerUnnamedConst(tv, decl), + .coff => return @fieldParentPtr(Coff, "base", base).lowerUnnamedConst(tv, decl_index), + .elf => return @fieldParentPtr(Elf, "base", base).lowerUnnamedConst(tv, decl_index), + .macho => return @fieldParentPtr(MachO, "base", base).lowerUnnamedConst(tv, decl_index), + .plan9 => return @fieldParentPtr(Plan9, "base", base).lowerUnnamedConst(tv, decl_index), .spirv => unreachable, .c => unreachable, - .wasm => unreachable, + .wasm => return @fieldParentPtr(Wasm, "base", base).lowerUnnamedConst(tv, decl_index), .nvptx => unreachable, // zig fmt: on } @@ -435,19 +436,20 @@ pub const File = struct { /// May be called before or after updateDeclExports but must be called /// after allocateDeclIndexes for any given Decl. - pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) UpdateDeclError!void { + pub fn updateDecl(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void { + const decl = module.declPtr(decl_index); log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty.fmtDebug() }); assert(decl.has_tv); switch (base.tag) { // zig fmt: off - .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl), - .elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl), - .macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl), - .c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl), - .wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl), - .spirv => return @fieldParentPtr(SpirV, "base", base).updateDecl(module, decl), - .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDecl(module, decl), - .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateDecl(module, decl), + .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl_index), + .elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl_index), + .macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl_index), + .c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl_index), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl_index), + .spirv => return @fieldParentPtr(SpirV, "base", base).updateDecl(module, decl_index), + .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDecl(module, decl_index), + .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateDecl(module, decl_index), // zig fmt: on } } @@ -455,8 +457,9 @@ pub const File = struct { /// May be called before or after updateDeclExports but must be called /// after allocateDeclIndexes for any given Decl.
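Every one of the dispatch switches above leans on the same downcast: each backend embeds the common `link.File` as a field named "base", and `@fieldParentPtr` recovers the concrete backend from that base pointer. A reduced sketch of the idiom, with only two hypothetical tags:

const File = struct {
    tag: Tag,

    pub const Tag = enum { coff, elf };

    pub fn freeDecl(base: *File, decl_index: u32) void {
        switch (base.tag) {
            // Recover the containing Coff from its embedded base field.
            .coff => @fieldParentPtr(Coff, "base", base).freeDecl(decl_index),
            .elf => unreachable, // remaining backends elided in this sketch
        }
    }
};

const Coff = struct {
    base: File,

    fn freeDecl(self: *Coff, decl_index: u32) void {
        _ = self;
        _ = decl_index;
    }
};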
pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) UpdateDeclError!void { + const owner_decl = module.declPtr(func.owner_decl); log.debug("updateFunc {*} ({s}), type={}", .{ - func.owner_decl, func.owner_decl.name, func.owner_decl.ty.fmtDebug(), + owner_decl, owner_decl.name, owner_decl.ty.fmtDebug(), }); switch (base.tag) { // zig fmt: off @@ -492,19 +495,20 @@ pub const File = struct { /// TODO we're transitioning to deleting this function and instead having /// each linker backend notice the first time updateDecl or updateFunc is called, or /// a callee referenced from AIR. - pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) error{OutOfMemory}!void { + pub fn allocateDeclIndexes(base: *File, decl_index: Module.Decl.Index) error{OutOfMemory}!void { + const decl = base.options.module.?.declPtr(decl_index); log.debug("allocateDeclIndexes {*} ({s})", .{ decl, decl.name }); switch (base.tag) { - .coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl), - .elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl), - .macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl) catch |err| switch (err) { + .coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl_index), + .elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl_index), + .macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl_index) catch |err| switch (err) { // remap this error code because we are transitioning away from // `allocateDeclIndexes`. error.Overflow => return error.OutOfMemory, error.OutOfMemory => return error.OutOfMemory, }, - .wasm => return @fieldParentPtr(Wasm, "base", base).allocateDeclIndexes(decl), - .plan9 => return @fieldParentPtr(Plan9, "base", base).allocateDeclIndexes(decl), + .wasm => return @fieldParentPtr(Wasm, "base", base).allocateDeclIndexes(decl_index), + .plan9 => return @fieldParentPtr(Plan9, "base", base).allocateDeclIndexes(decl_index), .c, .spirv, .nvptx => {}, } } @@ -621,17 +625,16 @@ pub const File = struct { } /// Called when a Decl is deleted from the Module. 
- pub fn freeDecl(base: *File, decl: *Module.Decl) void { - log.debug("freeDecl {*} ({s})", .{ decl, decl.name }); + pub fn freeDecl(base: *File, decl_index: Module.Decl.Index) void { switch (base.tag) { - .coff => @fieldParentPtr(Coff, "base", base).freeDecl(decl), - .elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl), - .macho => @fieldParentPtr(MachO, "base", base).freeDecl(decl), - .c => @fieldParentPtr(C, "base", base).freeDecl(decl), - .wasm => @fieldParentPtr(Wasm, "base", base).freeDecl(decl), - .spirv => @fieldParentPtr(SpirV, "base", base).freeDecl(decl), - .plan9 => @fieldParentPtr(Plan9, "base", base).freeDecl(decl), - .nvptx => @fieldParentPtr(NvPtx, "base", base).freeDecl(decl), + .coff => @fieldParentPtr(Coff, "base", base).freeDecl(decl_index), + .elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl_index), + .macho => @fieldParentPtr(MachO, "base", base).freeDecl(decl_index), + .c => @fieldParentPtr(C, "base", base).freeDecl(decl_index), + .wasm => @fieldParentPtr(Wasm, "base", base).freeDecl(decl_index), + .spirv => @fieldParentPtr(SpirV, "base", base).freeDecl(decl_index), + .plan9 => @fieldParentPtr(Plan9, "base", base).freeDecl(decl_index), + .nvptx => @fieldParentPtr(NvPtx, "base", base).freeDecl(decl_index), } } @@ -656,20 +659,21 @@ pub const File = struct { pub fn updateDeclExports( base: *File, module: *Module, - decl: *Module.Decl, + decl_index: Module.Decl.Index, exports: []const *Module.Export, ) UpdateDeclExportsError!void { + const decl = module.declPtr(decl_index); log.debug("updateDeclExports {*} ({s})", .{ decl, decl.name }); assert(decl.has_tv); switch (base.tag) { - .coff => return @fieldParentPtr(Coff, "base", base).updateDeclExports(module, decl, exports), - .elf => return @fieldParentPtr(Elf, "base", base).updateDeclExports(module, decl, exports), - .macho => return @fieldParentPtr(MachO, "base", base).updateDeclExports(module, decl, exports), - .c => return @fieldParentPtr(C, "base", base).updateDeclExports(module, decl, exports), - .wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclExports(module, decl, exports), - .spirv => return @fieldParentPtr(SpirV, "base", base).updateDeclExports(module, decl, exports), - .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclExports(module, decl, exports), - .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateDeclExports(module, decl, exports), + .coff => return @fieldParentPtr(Coff, "base", base).updateDeclExports(module, decl_index, exports), + .elf => return @fieldParentPtr(Elf, "base", base).updateDeclExports(module, decl_index, exports), + .macho => return @fieldParentPtr(MachO, "base", base).updateDeclExports(module, decl_index, exports), + .c => return @fieldParentPtr(C, "base", base).updateDeclExports(module, decl_index, exports), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclExports(module, decl_index, exports), + .spirv => return @fieldParentPtr(SpirV, "base", base).updateDeclExports(module, decl_index, exports), + .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclExports(module, decl_index, exports), + .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateDeclExports(module, decl_index, exports), } } @@ -683,14 +687,14 @@ pub const File = struct { /// The linker is passed information about the containing atom, `parent_atom_index`, and offset within its /// memory buffer, `offset`, so that it can make a note of potential relocation sites, should the /// `Decl`'s address not yet be resolved, or the containing atom get moved
in virtual memory. - pub fn getDeclVAddr(base: *File, decl: *const Module.Decl, reloc_info: RelocInfo) !u64 { + pub fn getDeclVAddr(base: *File, decl_index: Module.Decl.Index, reloc_info: RelocInfo) !u64 { switch (base.tag) { - .coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl, reloc_info), - .elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl, reloc_info), - .macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl, reloc_info), - .plan9 => return @fieldParentPtr(Plan9, "base", base).getDeclVAddr(decl, reloc_info), + .coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl_index, reloc_info), + .elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl_index, reloc_info), + .macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl_index, reloc_info), + .plan9 => return @fieldParentPtr(Plan9, "base", base).getDeclVAddr(decl_index, reloc_info), .c => unreachable, - .wasm => return @fieldParentPtr(Wasm, "base", base).getDeclVAddr(decl, reloc_info), + .wasm => return @fieldParentPtr(Wasm, "base", base).getDeclVAddr(decl_index, reloc_info), .spirv => unreachable, .nvptx => unreachable, } diff --git a/src/link/C.zig b/src/link/C.zig index 63aa2b6030..4159a577d2 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -21,7 +21,7 @@ base: link.File, /// This linker backend does not try to incrementally link output C source code. /// Instead, it tracks all declarations in this table, and iterates over it /// in the flush function, stitching pre-rendered pieces of C code together. -decl_table: std.AutoArrayHashMapUnmanaged(*const Module.Decl, DeclBlock) = .{}, +decl_table: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, DeclBlock) = .{}, /// Stores Type/Value data for `typedefs` to reference. /// Accumulates allocations and then there is a periodic garbage collection after flush(). 
arena: std.heap.ArenaAllocator, @@ -87,9 +87,9 @@ pub fn deinit(self: *C) void { self.arena.deinit(); } -pub fn freeDecl(self: *C, decl: *Module.Decl) void { +pub fn freeDecl(self: *C, decl_index: Module.Decl.Index) void { const gpa = self.base.allocator; - if (self.decl_table.fetchSwapRemove(decl)) |kv| { + if (self.decl_table.fetchSwapRemove(decl_index)) |kv| { var decl_block = kv.value; decl_block.deinit(gpa); } @@ -99,8 +99,8 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes const tracy = trace(@src()); defer tracy.end(); - const decl = func.owner_decl; - const gop = try self.decl_table.getOrPut(self.base.allocator, decl); + const decl_index = func.owner_decl; + const gop = try self.decl_table.getOrPut(self.base.allocator, decl_index); if (!gop.found_existing) { gop.value_ptr.* = .{}; } @@ -126,9 +126,10 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes .gpa = module.gpa, .module = module, .error_msg = null, - .decl = decl, + .decl_index = decl_index, + .decl = module.declPtr(decl_index), .fwd_decl = fwd_decl.toManaged(module.gpa), - .typedefs = typedefs.promoteContext(module.gpa, .{ .target = module.getTarget() }), + .typedefs = typedefs.promoteContext(module.gpa, .{ .mod = module }), .typedefs_arena = self.arena.allocator(), }, .code = code.toManaged(module.gpa), @@ -150,7 +151,7 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes codegen.genFunc(&function) catch |err| switch (err) { error.AnalysisFail => { - try module.failed_decls.put(module.gpa, decl, function.object.dg.error_msg.?); + try module.failed_decls.put(module.gpa, decl_index, function.object.dg.error_msg.?); return; }, else => |e| return e, @@ -166,11 +167,11 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes code.shrinkAndFree(module.gpa, code.items.len); } -pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { +pub fn updateDecl(self: *C, module: *Module, decl_index: Module.Decl.Index) !void { const tracy = trace(@src()); defer tracy.end(); - const gop = try self.decl_table.getOrPut(self.base.allocator, decl); + const gop = try self.decl_table.getOrPut(self.base.allocator, decl_index); if (!gop.found_existing) { gop.value_ptr.* = .{}; } @@ -186,14 +187,17 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { typedefs.clearRetainingCapacity(); code.shrinkRetainingCapacity(0); + const decl = module.declPtr(decl_index); + var object: codegen.Object = .{ .dg = .{ .gpa = module.gpa, .module = module, .error_msg = null, + .decl_index = decl_index, .decl = decl, .fwd_decl = fwd_decl.toManaged(module.gpa), - .typedefs = typedefs.promoteContext(module.gpa, .{ .target = module.getTarget() }), + .typedefs = typedefs.promoteContext(module.gpa, .{ .mod = module }), .typedefs_arena = self.arena.allocator(), }, .code = code.toManaged(module.gpa), @@ -211,7 +215,7 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { codegen.genDecl(&object) catch |err| switch (err) { error.AnalysisFail => { - try module.failed_decls.put(module.gpa, decl, object.dg.error_msg.?); + try module.failed_decls.put(module.gpa, decl_index, object.dg.error_msg.?); return; }, else => |e| return e, @@ -287,14 +291,14 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) const decl_keys = self.decl_table.keys(); const decl_values = self.decl_table.values(); - for (decl_keys) |decl| { - assert(decl.has_tv); - 
f.remaining_decls.putAssumeCapacityNoClobber(decl, {}); + for (decl_keys) |decl_index| { + assert(module.declPtr(decl_index).has_tv); + f.remaining_decls.putAssumeCapacityNoClobber(decl_index, {}); } while (f.remaining_decls.popOrNull()) |kv| { - const decl = kv.key; - try flushDecl(self, &f, decl); + const decl_index = kv.key; + try flushDecl(self, &f, decl_index); } f.all_buffers.items[err_typedef_index] = .{ @@ -305,7 +309,8 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) // Now the function bodies. try f.all_buffers.ensureUnusedCapacity(gpa, f.fn_count); - for (decl_keys) |decl, i| { + for (decl_keys) |decl_index, i| { + const decl = module.declPtr(decl_index); if (decl.getFunction() != null) { const decl_block = &decl_values[i]; const buf = decl_block.code.items; @@ -325,7 +330,7 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) } const Flush = struct { - remaining_decls: std.AutoArrayHashMapUnmanaged(*const Module.Decl, void) = .{}, + remaining_decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, void) = .{}, typedefs: Typedefs = .{}, err_typedef_buf: std.ArrayListUnmanaged(u8) = .{}, /// We collect a list of buffers to write, and write them all at once with pwritev 😎 @@ -354,7 +359,9 @@ const FlushDeclError = error{ }; /// Assumes `decl` was in the `remaining_decls` set, and has already been removed. -fn flushDecl(self: *C, f: *Flush, decl: *const Module.Decl) FlushDeclError!void { +fn flushDecl(self: *C, f: *Flush, decl_index: Module.Decl.Index) FlushDeclError!void { + const module = self.base.options.module.?; + const decl = module.declPtr(decl_index); // Before flushing any particular Decl we must ensure its // dependencies are already flushed, so that the order in the .c // file comes out correctly. 
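The comment above is the crux of `flushDecl`: emission is depth-first, so a decl's dependencies land in the output before the decl itself. A reduced model of that ordering, with integer indices standing in for decls and an explicit edge list standing in for `decl.dependencies`:

const std = @import("std");

fn flushOne(
    deps: []const [2]u32, // (decl, dependency) edges
    remaining: *std.AutoArrayHashMapUnmanaged(u32, void),
    out: *std.ArrayListUnmanaged(u32),
    gpa: std.mem.Allocator,
    decl: u32,
) std.mem.Allocator.Error!void {
    for (deps) |edge| {
        // Flush each not-yet-flushed dependency first, so a typedef is
        // always defined before anything that references it.
        if (edge[0] == decl and remaining.swapRemove(edge[1])) {
            try flushOne(deps, remaining, out, gpa, edge[1]);
        }
    }
    try out.append(gpa, decl);
}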
@@ -364,15 +371,17 @@ fn flushDecl(self: *C, f: *Flush, decl: *const Module.Decl) FlushDeclError!void } } - const decl_block = self.decl_table.getPtr(decl).?; + const decl_block = self.decl_table.getPtr(decl_index).?; const gpa = self.base.allocator; if (decl_block.typedefs.count() != 0) { - try f.typedefs.ensureUnusedCapacity(gpa, @intCast(u32, decl_block.typedefs.count())); + try f.typedefs.ensureUnusedCapacityContext(gpa, @intCast(u32, decl_block.typedefs.count()), .{ + .mod = module, + }); var it = decl_block.typedefs.iterator(); while (it.next()) |new| { const gop = f.typedefs.getOrPutAssumeCapacityContext(new.key_ptr.*, .{ - .target = self.base.options.target, + .mod = module, }); if (!gop.found_existing) { try f.err_typedef_buf.appendSlice(gpa, new.value_ptr.rendered); @@ -417,8 +426,8 @@ pub fn flushEmitH(module: *Module) !void { .iov_len = zig_h.len, }); - for (emit_h.decl_table.keys()) |decl| { - const decl_emit_h = decl.getEmitH(module); + for (emit_h.decl_table.keys()) |decl_index| { + const decl_emit_h = emit_h.declPtr(decl_index); const buf = decl_emit_h.fwd_decl.items; all_buffers.appendAssumeCapacity(.{ .iov_base = buf.ptr, @@ -442,11 +451,11 @@ pub fn flushEmitH(module: *Module) !void { pub fn updateDeclExports( self: *C, module: *Module, - decl: *Module.Decl, + decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { _ = exports; - _ = decl; + _ = decl_index; _ = module; _ = self; } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index a91f48dfbd..246918515d 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -418,11 +418,12 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Coff { return self; } -pub fn allocateDeclIndexes(self: *Coff, decl: *Module.Decl) !void { +pub fn allocateDeclIndexes(self: *Coff, decl_index: Module.Decl.Index) !void { if (self.llvm_object) |_| return; try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1); + const decl = self.base.options.module.?.declPtr(decl_index); if (self.offset_table_free_list.popOrNull()) |i| { decl.link.coff.offset_table_index = i; } else { @@ -674,7 +675,8 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - const decl = func.owner_decl; + const decl_index = func.owner_decl; + const decl = module.declPtr(decl_index); const res = try codegen.generateFunction( &self.base, decl.srcLoc(), @@ -688,7 +690,7 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live .appended => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); + try module.failed_decls.put(module.gpa, decl_index, em); return; }, }; @@ -696,24 +698,26 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live return self.finishUpdateDecl(module, func.owner_decl, code); } -pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl: *Module.Decl) !u32 { +pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.Index) !u32 { _ = self; _ = tv; - _ = decl; + _ = decl_index; log.debug("TODO lowerUnnamedConst for Coff", .{}); return error.AnalysisFail; } -pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { +pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !void { if (build_options.skip_non_native and builtin.object_format != .coff) { @panic("Attempted to compile for object format that was disabled by 
build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index); } const tracy = trace(@src()); defer tracy.end(); + const decl = module.declPtr(decl_index); + if (decl.val.tag() == .extern_fn) { return; // TODO Should we do more when front-end analyzed extern decl? } @@ -735,15 +739,16 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { .appended => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); + try module.failed_decls.put(module.gpa, decl_index, em); return; }, }; - return self.finishUpdateDecl(module, decl, code); + return self.finishUpdateDecl(module, decl_index, code); } -fn finishUpdateDecl(self: *Coff, module: *Module, decl: *Module.Decl, code: []const u8) !void { +fn finishUpdateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index, code: []const u8) !void { + const decl = module.declPtr(decl_index); const required_alignment = decl.ty.abiAlignment(self.base.options.target); const curr_size = decl.link.coff.size; if (curr_size != 0) { @@ -778,15 +783,18 @@ fn finishUpdateDecl(self: *Coff, module: *Module, decl: *Module.Decl, code: []co try self.base.file.?.pwriteAll(code, self.section_data_offset + self.offset_table_size + decl.link.coff.text_offset); // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated. - const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; - return self.updateDeclExports(module, decl, decl_exports); + const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{}; + return self.updateDeclExports(module, decl_index, decl_exports); } -pub fn freeDecl(self: *Coff, decl: *Module.Decl) void { +pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void { if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl); + if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl_index); } + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); + // Appending to free lists is allowed to fail because the free lists are heuristics based anyway. 
self.freeTextBlock(&decl.link.coff); self.offset_table_free_list.append(self.base.allocator, decl.link.coff.offset_table_index) catch {}; @@ -795,16 +803,17 @@ pub fn freeDecl(self: *Coff, decl: *Module.Decl) void { pub fn updateDeclExports( self: *Coff, module: *Module, - decl: *Module.Decl, + decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { if (build_options.skip_non_native and builtin.object_format != .coff) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl, exports); + if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl_index, exports); } + const decl = module.declPtr(decl_index); for (exports) |exp| { if (exp.options.section) |section_name| { if (!mem.eql(u8, section_name, ".text")) { @@ -1474,8 +1483,14 @@ fn findLib(self: *Coff, arena: Allocator, name: []const u8) !?[]const u8 { return null; } -pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl, reloc_info: link.File.RelocInfo) !u64 { +pub fn getDeclVAddr( + self: *Coff, + decl_index: Module.Decl.Index, + reloc_info: link.File.RelocInfo, +) !u64 { _ = reloc_info; + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); assert(self.llvm_object == null); return self.text_section_virtual_address + decl.link.coff.text_offset; } diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 248521c544..97fc090b9a 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -67,7 +67,7 @@ pub const Atom = struct { /// Decl's inner Atom is assigned an offset within the DWARF section. pub const DeclState = struct { gpa: Allocator, - target: std.Target, + mod: *Module, dbg_line: std.ArrayList(u8), dbg_info: std.ArrayList(u8), abbrev_type_arena: std.heap.ArenaAllocator, @@ -81,10 +81,10 @@ pub const DeclState = struct { abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{}, exprloc_relocs: std.ArrayListUnmanaged(ExprlocRelocation) = .{}, - fn init(gpa: Allocator, target: std.Target) DeclState { + fn init(gpa: Allocator, mod: *Module) DeclState { return .{ .gpa = gpa, - .target = target, + .mod = mod, .dbg_line = std.ArrayList(u8).init(gpa), .dbg_info = std.ArrayList(u8).init(gpa), .abbrev_type_arena = std.heap.ArenaAllocator.init(gpa), @@ -118,7 +118,7 @@ pub const DeclState = struct { addend: ?u32, ) !void { const resolv = self.abbrev_resolver.getContext(ty, .{ - .target = self.target, + .mod = self.mod, }) orelse blk: { const sym_index = @intCast(u32, self.abbrev_table.items.len); try self.abbrev_table.append(self.gpa, .{ @@ -128,10 +128,10 @@ pub const DeclState = struct { }); log.debug("@{d}: {}", .{ sym_index, ty.fmtDebug() }); try self.abbrev_resolver.putNoClobberContext(self.gpa, ty, sym_index, .{ - .target = self.target, + .mod = self.mod, }); break :blk self.abbrev_resolver.getContext(ty, .{ - .target = self.target, + .mod = self.mod, }).?; }; const add: u32 = addend orelse 0; @@ -153,8 +153,8 @@ pub const DeclState = struct { ) error{OutOfMemory}!void { const arena = self.abbrev_type_arena.allocator(); const dbg_info_buffer = &self.dbg_info; - const target = self.target; - const target_endian = self.target.cpu.arch.endian(); + const target = module.getTarget(); + const target_endian = target.cpu.arch.endian(); switch (ty.zigTypeTag()) { .NoReturn => unreachable, @@ -181,7 +181,7 @@ pub const DeclState = struct { // DW.AT.byte_size, DW.FORM.data1 
dbg_info_buffer.appendAssumeCapacity(@intCast(u8, ty.abiSize(target))); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(target)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); }, .Optional => { if (ty.isPtrLikeOptional()) { @@ -192,7 +192,7 @@ pub const DeclState = struct { // DW.AT.byte_size, DW.FORM.data1 dbg_info_buffer.appendAssumeCapacity(@intCast(u8, ty.abiSize(target))); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(target)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); } else { // Non-pointer optionals are structs: struct { .maybe = *, .val = * } var buf = try arena.create(Type.Payload.ElemType); @@ -203,7 +203,7 @@ pub const DeclState = struct { const abi_size = ty.abiSize(target); try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(target)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(7); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member)); @@ -242,7 +242,7 @@ pub const DeclState = struct { // DW.AT.byte_size, DW.FORM.sdata dbg_info_buffer.appendAssumeCapacity(@sizeOf(usize) * 2); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(target)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(5); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member)); @@ -285,7 +285,7 @@ pub const DeclState = struct { // DW.AT.array_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_type)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(target)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); @@ -312,7 +312,7 @@ pub const DeclState = struct { switch (ty.tag()) { .tuple, .anon_struct => { // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(target)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); const fields = ty.tupleFields(); for (fields.types) |field, field_index| { @@ -331,7 +331,7 @@ pub const DeclState = struct { }, else => { // DW.AT.name, DW.FORM.string - const struct_name = try ty.nameAllocArena(arena, target); + const struct_name = try ty.nameAllocArena(arena, module); try dbg_info_buffer.ensureUnusedCapacity(struct_name.len + 1); dbg_info_buffer.appendSliceAssumeCapacity(struct_name); dbg_info_buffer.appendAssumeCapacity(0); @@ -372,7 +372,7 @@ pub const DeclState = struct { const abi_size = ty.abiSize(target); try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string - const enum_name = try ty.nameAllocArena(arena, target); + const enum_name = try ty.nameAllocArena(arena, module); try dbg_info_buffer.ensureUnusedCapacity(enum_name.len + 1); dbg_info_buffer.appendSliceAssumeCapacity(enum_name); dbg_info_buffer.appendAssumeCapacity(0); @@ -410,7 +410,7 @@ pub const DeclState = struct { const payload_offset = if (layout.tag_align >= layout.payload_align) layout.tag_size else 0; const tag_offset = if (layout.tag_align >= layout.payload_align) 0 else layout.payload_size; const is_tagged = layout.tag_size > 0; - const union_name = try ty.nameAllocArena(arena, target); + const union_name = try 
ty.nameAllocArena(arena, module); // TODO this is temporary to match current state of unions in Zig - we don't yet have // safety checks implemented meaning the implicit tag is not yet stored and generated @@ -491,7 +491,7 @@ pub const DeclState = struct { self.abbrev_type_arena.allocator(), module, ty, - self.target, + target, &self.dbg_info, ); }, @@ -507,7 +507,7 @@ pub const DeclState = struct { // DW.AT.byte_size, DW.FORM.sdata try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string - const name = try ty.nameAllocArena(arena, target); + const name = try ty.nameAllocArena(arena, module); try dbg_info_buffer.writer().print("{s}\x00", .{name}); // DW.AT.member @@ -654,17 +654,17 @@ pub fn deinit(self: *Dwarf) void { /// Initializes Decl's state and its matching output buffers. /// Call this before `commitDeclState`. -pub fn initDeclState(self: *Dwarf, decl: *Module.Decl) !DeclState { +pub fn initDeclState(self: *Dwarf, mod: *Module, decl: *Module.Decl) !DeclState { const tracy = trace(@src()); defer tracy.end(); - const decl_name = try decl.getFullyQualifiedName(self.allocator); + const decl_name = try decl.getFullyQualifiedName(mod); defer self.allocator.free(decl_name); log.debug("initDeclState {s}{*}", .{ decl_name, decl }); const gpa = self.allocator; - var decl_state = DeclState.init(gpa, self.target); + var decl_state = DeclState.init(gpa, mod); errdefer decl_state.deinit(); const dbg_line_buffer = &decl_state.dbg_line; const dbg_info_buffer = &decl_state.dbg_info; @@ -2133,7 +2133,7 @@ fn addDbgInfoErrorSet( const abi_size = ty.abiSize(target); try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string - const name = try ty.nameAllocArena(arena, target); + const name = try ty.nameAllocArena(arena, module); try dbg_info_buffer.writer().print("{s}\x00", .{name}); // DW.AT.enumerator diff --git a/src/link/Elf.zig b/src/link/Elf.zig index a58321c0ec..144ac24b9b 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -134,7 +134,7 @@ atom_free_lists: std.AutoHashMapUnmanaged(u16, std.ArrayListUnmanaged(*TextBlock /// We store them here so that we can properly dispose of any allocated /// memory within the atom in the incremental linker. /// TODO consolidate this. -decls: std.AutoHashMapUnmanaged(*Module.Decl, ?u16) = .{}, +decls: std.AutoHashMapUnmanaged(Module.Decl.Index, ?u16) = .{}, /// List of atoms that are owned directly by the linker. 
/// Currently these are only atoms that are the result of linking @@ -178,7 +178,7 @@ const Reloc = struct { }; const RelocTable = std.AutoHashMapUnmanaged(*TextBlock, std.ArrayListUnmanaged(Reloc)); -const UnnamedConstTable = std.AutoHashMapUnmanaged(*Module.Decl, std.ArrayListUnmanaged(*TextBlock)); +const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*TextBlock)); /// When allocating, the ideal_capacity is calculated by /// actual_capacity + (actual_capacity / ideal_factor) @@ -389,7 +389,10 @@ pub fn deinit(self: *Elf) void { } } -pub fn getDeclVAddr(self: *Elf, decl: *const Module.Decl, reloc_info: File.RelocInfo) !u64 { +pub fn getDeclVAddr(self: *Elf, decl_index: Module.Decl.Index, reloc_info: File.RelocInfo) !u64 { + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); + assert(self.llvm_object == null); assert(decl.link.elf.local_sym_index != 0); @@ -2189,15 +2192,17 @@ fn allocateLocalSymbol(self: *Elf) !u32 { return index; } -pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void { +pub fn allocateDeclIndexes(self: *Elf, decl_index: Module.Decl.Index) !void { if (self.llvm_object) |_| return; + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); if (decl.link.elf.local_sym_index != 0) return; try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1); - try self.decls.putNoClobber(self.base.allocator, decl, null); + try self.decls.putNoClobber(self.base.allocator, decl_index, null); - const decl_name = try decl.getFullyQualifiedName(self.base.allocator); + const decl_name = try decl.getFullyQualifiedName(mod); defer self.base.allocator.free(decl_name); log.debug("allocating symbol indexes for {s}", .{decl_name}); @@ -2214,8 +2219,8 @@ pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void { self.offset_table.items[decl.link.elf.offset_table_index] = 0; } -fn freeUnnamedConsts(self: *Elf, decl: *Module.Decl) void { - const unnamed_consts = self.unnamed_const_atoms.getPtr(decl) orelse return; +fn freeUnnamedConsts(self: *Elf, decl_index: Module.Decl.Index) void { + const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return; for (unnamed_consts.items) |atom| { self.freeTextBlock(atom, self.phdr_load_ro_index.?); self.local_symbol_free_list.append(self.base.allocator, atom.local_sym_index) catch {}; @@ -2225,15 +2230,18 @@ fn freeUnnamedConsts(self: *Elf, decl: *Module.Decl) void { unnamed_consts.clearAndFree(self.base.allocator); } -pub fn freeDecl(self: *Elf, decl: *Module.Decl) void { +pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void { if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl); + if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl_index); } - const kv = self.decls.fetchRemove(decl); + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); + + const kv = self.decls.fetchRemove(decl_index); if (kv.?.value) |index| { self.freeTextBlock(&decl.link.elf, index); - self.freeUnnamedConsts(decl); + self.freeUnnamedConsts(decl_index); } // Appending to free lists is allowed to fail because the free lists are heuristics based anyway. 
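The trailing comment in the hunk above ("Appending to free lists is allowed to fail...") names an idiom that recurs in each backend's `freeDecl`: the free lists only record reuse hints, so an allocation failure while appending is swallowed with `catch {}` rather than propagated. A small illustrative sketch (hypothetical function and parameter names, not code from this patch):

    const std = @import("std");

    // Best-effort bookkeeping: if recording the freed slot fails (e.g. OOM),
    // the slot is simply never reused. That wastes an index but remains
    // correct, so the error is intentionally discarded.
    fn noteFreedSymbol(
        free_list: *std.ArrayListUnmanaged(u32),
        gpa: std.mem.Allocator,
        sym_index: u32,
    ) void {
        free_list.append(gpa, sym_index) catch {};
    }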
@@ -2274,14 +2282,17 @@ fn getDeclPhdrIndex(self: *Elf, decl: *Module.Decl) !u16 { return phdr_index; } -fn updateDeclCode(self: *Elf, decl: *Module.Decl, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym { - const decl_name = try decl.getFullyQualifiedName(self.base.allocator); +fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym { + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); + + const decl_name = try decl.getFullyQualifiedName(mod); defer self.base.allocator.free(decl_name); log.debug("updateDeclCode {s}{*}", .{ decl_name, decl }); const required_alignment = decl.ty.abiAlignment(self.base.options.target); - const decl_ptr = self.decls.getPtr(decl).?; + const decl_ptr = self.decls.getPtr(decl_index).?; if (decl_ptr.* == null) { decl_ptr.* = try self.getDeclPhdrIndex(decl); } @@ -2355,10 +2366,11 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - const decl = func.owner_decl; - self.freeUnnamedConsts(decl); + const decl_index = func.owner_decl; + const decl = module.declPtr(decl_index); + self.freeUnnamedConsts(decl_index); - var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(decl) else null; + var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl) else null; defer if (decl_state) |*ds| ds.deinit(); const res = if (decl_state) |*ds| @@ -2372,11 +2384,11 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven .appended => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); + try module.failed_decls.put(module.gpa, decl_index, em); return; }, }; - const local_sym = try self.updateDeclCode(decl, code, elf.STT_FUNC); + const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_FUNC); if (decl_state) |*ds| { try self.dwarf.?.commitDeclState( &self.base, @@ -2389,21 +2401,23 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven } // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated. - const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; - return self.updateDeclExports(module, decl, decl_exports); + const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{}; + return self.updateDeclExports(module, decl_index, decl_exports); } -pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { +pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index); } const tracy = trace(@src()); defer tracy.end(); + const decl = module.declPtr(decl_index); + if (decl.val.tag() == .extern_fn) { return; // TODO Should we do more when front-end analyzed extern decl? 
} @@ -2414,12 +2428,12 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { } } - assert(!self.unnamed_const_atoms.contains(decl)); + assert(!self.unnamed_const_atoms.contains(decl_index)); var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(decl) else null; + var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl) else null; defer if (decl_state) |*ds| ds.deinit(); // TODO implement .debug_info for global variables @@ -2446,12 +2460,12 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { .appended => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); + try module.failed_decls.put(module.gpa, decl_index, em); return; }, }; - const local_sym = try self.updateDeclCode(decl, code, elf.STT_OBJECT); + const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_OBJECT); if (decl_state) |*ds| { try self.dwarf.?.commitDeclState( &self.base, @@ -2464,16 +2478,18 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { } // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated. - const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; - return self.updateDeclExports(module, decl, decl_exports); + const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{}; + return self.updateDeclExports(module, decl_index, decl_exports); } -pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl: *Module.Decl) !u32 { +pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module.Decl.Index) !u32 { var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - const module = self.base.options.module.?; - const gop = try self.unnamed_const_atoms.getOrPut(self.base.allocator, decl); + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); + + const gop = try self.unnamed_const_atoms.getOrPut(self.base.allocator, decl_index); if (!gop.found_existing) { gop.value_ptr.* = .{}; } @@ -2485,7 +2501,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl: *Module.Decl try self.managed_atoms.append(self.base.allocator, atom); const name_str_index = blk: { - const decl_name = try decl.getFullyQualifiedName(self.base.allocator); + const decl_name = try decl.getFullyQualifiedName(mod); defer self.base.allocator.free(decl_name); const index = unnamed_consts.items.len; @@ -2510,7 +2526,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl: *Module.Decl .appended => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); log.err("{s}", .{em.msg}); return error.AnalysisFail; }, @@ -2547,24 +2563,25 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl: *Module.Decl pub fn updateDeclExports( self: *Elf, module: *Module, - decl: *Module.Decl, + decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, 
decl, exports); + if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl_index, exports); } const tracy = trace(@src()); defer tracy.end(); try self.global_symbols.ensureUnusedCapacity(self.base.allocator, exports.len); + const decl = module.declPtr(decl_index); if (decl.link.elf.local_sym_index == 0) return; const decl_sym = self.local_symbols.items[decl.link.elf.local_sym_index]; - const decl_ptr = self.decls.getPtr(decl).?; + const decl_ptr = self.decls.getPtr(decl_index).?; if (decl_ptr.* == null) { decl_ptr.* = try self.getDeclPhdrIndex(decl); } @@ -2633,12 +2650,11 @@ pub fn updateDeclExports( } /// Must be called only after a successful call to `updateDecl`. -pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Decl) !void { - _ = module; +pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl: *const Module.Decl) !void { const tracy = trace(@src()); defer tracy.end(); - const decl_name = try decl.getFullyQualifiedName(self.base.allocator); + const decl_name = try decl.getFullyQualifiedName(mod); defer self.base.allocator.free(decl_name); log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl }); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index e604029382..52ffba954d 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -247,14 +247,14 @@ unnamed_const_atoms: UnnamedConstTable = .{}, /// We store them here so that we can properly dispose of any allocated /// memory within the atom in the incremental linker. /// TODO consolidate this. -decls: std.AutoArrayHashMapUnmanaged(*Module.Decl, ?MatchingSection) = .{}, +decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, ?MatchingSection) = .{}, const Entry = struct { target: Atom.Relocation.Target, atom: *Atom, }; -const UnnamedConstTable = std.AutoHashMapUnmanaged(*Module.Decl, std.ArrayListUnmanaged(*Atom)); +const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*Atom)); const PendingUpdate = union(enum) { resolve_undef: u32, @@ -3451,10 +3451,15 @@ pub fn deinit(self: *MachO) void { } self.atom_free_lists.deinit(self.base.allocator); } - for (self.decls.keys()) |decl| { - decl.link.macho.deinit(self.base.allocator); + if (self.base.options.module) |mod| { + for (self.decls.keys()) |decl_index| { + const decl = mod.declPtr(decl_index); + decl.link.macho.deinit(self.base.allocator); + } + self.decls.deinit(self.base.allocator); + } else { + assert(self.decls.count() == 0); } - self.decls.deinit(self.base.allocator); { var it = self.unnamed_const_atoms.valueIterator(); @@ -3652,13 +3657,14 @@ pub fn allocateTlvPtrEntry(self: *MachO, target: Atom.Relocation.Target) !u32 { return index; } -pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void { +pub fn allocateDeclIndexes(self: *MachO, decl_index: Module.Decl.Index) !void { if (self.llvm_object) |_| return; + const decl = self.base.options.module.?.declPtr(decl_index); if (decl.link.macho.local_sym_index != 0) return; decl.link.macho.local_sym_index = try self.allocateLocalSymbol(); try self.atom_by_index_table.putNoClobber(self.base.allocator, decl.link.macho.local_sym_index, &decl.link.macho); - try self.decls.putNoClobber(self.base.allocator, decl, null); + try self.decls.putNoClobber(self.base.allocator, decl_index, null); const got_target = .{ .local = decl.link.macho.local_sym_index }; const got_index = try self.allocateGotEntry(got_target); @@ -3676,8 +3682,9 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv const tracy 
= trace(@src()); defer tracy.end(); - const decl = func.owner_decl; - self.freeUnnamedConsts(decl); + const decl_index = func.owner_decl; + const decl = module.declPtr(decl_index); + self.freeUnnamedConsts(decl_index); // TODO clearing the code and relocs buffer should probably be orchestrated // in a different, smarter, more automatic way somewhere else, in a more centralised @@ -3690,7 +3697,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv defer code_buffer.deinit(); var decl_state = if (self.d_sym) |*d_sym| - try d_sym.dwarf.initDeclState(decl) + try d_sym.dwarf.initDeclState(module, decl) else null; defer if (decl_state) |*ds| ds.deinit(); @@ -3708,12 +3715,12 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv }, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); + try module.failed_decls.put(module.gpa, decl_index, em); return; }, } - const symbol = try self.placeDecl(decl, decl.link.macho.code.items.len); + const symbol = try self.placeDecl(decl_index, decl.link.macho.code.items.len); if (decl_state) |*ds| { try self.d_sym.?.dwarf.commitDeclState( @@ -3728,22 +3735,23 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv // Since we updated the vaddr and the size, each corresponding export symbol also // needs to be updated. - const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; - try self.updateDeclExports(module, decl, decl_exports); + const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{}; + try self.updateDeclExports(module, decl_index, decl_exports); } -pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl: *Module.Decl) !u32 { +pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Module.Decl.Index) !u32 { var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); const module = self.base.options.module.?; - const gop = try self.unnamed_const_atoms.getOrPut(self.base.allocator, decl); + const gop = try self.unnamed_const_atoms.getOrPut(self.base.allocator, decl_index); if (!gop.found_existing) { gop.value_ptr.* = .{}; } const unnamed_consts = gop.value_ptr; - const decl_name = try decl.getFullyQualifiedName(self.base.allocator); + const decl = module.declPtr(decl_index); + const decl_name = try decl.getFullyQualifiedName(module); defer self.base.allocator.free(decl_name); const name_str_index = blk: { @@ -3769,7 +3777,7 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl: *Module.De .appended => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); + try module.failed_decls.put(module.gpa, decl_index, em); log.err("{s}", .{em.msg}); return error.AnalysisFail; }, @@ -3800,16 +3808,18 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl: *Module.De return atom.local_sym_index; } -pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { +pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, 
decl_index); } const tracy = trace(@src()); defer tracy.end(); + const decl = module.declPtr(decl_index); + if (decl.val.tag() == .extern_fn) { return; // TODO Should we do more when front-end analyzed extern decl? } @@ -3824,7 +3834,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { defer code_buffer.deinit(); var decl_state: ?Dwarf.DeclState = if (self.d_sym) |*d_sym| - try d_sym.dwarf.initDeclState(decl) + try d_sym.dwarf.initDeclState(module, decl) else null; defer if (decl_state) |*ds| ds.deinit(); @@ -3862,12 +3872,12 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { }, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); + try module.failed_decls.put(module.gpa, decl_index, em); return; }, } }; - const symbol = try self.placeDecl(decl, code.len); + const symbol = try self.placeDecl(decl_index, code.len); if (decl_state) |*ds| { try self.d_sym.?.dwarf.commitDeclState( @@ -3882,13 +3892,13 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { // Since we updated the vaddr and the size, each corresponding export symbol also // needs to be updated. - const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; - try self.updateDeclExports(module, decl, decl_exports); + const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{}; + try self.updateDeclExports(module, decl_index, decl_exports); } /// Checks if the value, or any of its embedded values stores a pointer, and thus requires /// a rebase opcode for the dynamic linker. -fn needsPointerRebase(ty: Type, val: Value, target: std.Target) bool { +fn needsPointerRebase(ty: Type, val: Value, mod: *Module) bool { if (ty.zigTypeTag() == .Fn) { return false; } @@ -3903,8 +3913,8 @@ fn needsPointerRebase(ty: Type, val: Value, target: std.Target) bool { if (ty.arrayLen() == 0) return false; const elem_ty = ty.childType(); var elem_value_buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(0, &elem_value_buf); - return needsPointerRebase(elem_ty, elem_val, target); + const elem_val = val.elemValueBuffer(mod, 0, &elem_value_buf); + return needsPointerRebase(elem_ty, elem_val, mod); }, .Struct => { const fields = ty.structFields().values(); @@ -3912,7 +3922,7 @@ fn needsPointerRebase(ty: Type, val: Value, target: std.Target) bool { if (val.castTag(.aggregate)) |payload| { const field_values = payload.data; for (field_values) |field_val, i| { - if (needsPointerRebase(fields[i].ty, field_val, target)) return true; + if (needsPointerRebase(fields[i].ty, field_val, mod)) return true; } else return false; } else return false; }, @@ -3921,18 +3931,18 @@ fn needsPointerRebase(ty: Type, val: Value, target: std.Target) bool { const sub_val = payload.data; var buffer: Type.Payload.ElemType = undefined; const sub_ty = ty.optionalChild(&buffer); - return needsPointerRebase(sub_ty, sub_val, target); + return needsPointerRebase(sub_ty, sub_val, mod); } else return false; }, .Union => { const union_obj = val.cast(Value.Payload.Union).?.data; - const active_field_ty = ty.unionFieldType(union_obj.tag, target); - return needsPointerRebase(active_field_ty, union_obj.val, target); + const active_field_ty = ty.unionFieldType(union_obj.tag, mod); + return needsPointerRebase(active_field_ty, union_obj.val, mod); }, .ErrorUnion => { if (val.castTag(.eu_payload)) |payload| { const payload_ty = ty.errorUnionPayload(); - return needsPointerRebase(payload_ty, 
payload.data, target); + return needsPointerRebase(payload_ty, payload.data, mod); } else return false; }, else => return false, @@ -3942,6 +3952,7 @@ fn needsPointerRebase(ty: Type, val: Value, target: std.Target) bool { fn getMatchingSectionAtom(self: *MachO, atom: *Atom, name: []const u8, ty: Type, val: Value) !MatchingSection { const code = atom.code.items; const target = self.base.options.target; + const mod = self.base.options.module.?; const alignment = ty.abiAlignment(target); const align_log_2 = math.log2(alignment); const zig_ty = ty.zigTypeTag(); @@ -3969,7 +3980,7 @@ fn getMatchingSectionAtom(self: *MachO, atom: *Atom, name: []const u8, ty: Type, }; } - if (needsPointerRebase(ty, val, target)) { + if (needsPointerRebase(ty, val, mod)) { break :blk (try self.getMatchingSection(.{ .segname = makeStaticString("__DATA_CONST"), .sectname = makeStaticString("__const"), @@ -4025,15 +4036,17 @@ fn getMatchingSectionAtom(self: *MachO, atom: *Atom, name: []const u8, ty: Type, return match; } -fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64 { +fn placeDecl(self: *MachO, decl_index: Module.Decl.Index, code_len: usize) !*macho.nlist_64 { + const module = self.base.options.module.?; + const decl = module.declPtr(decl_index); const required_alignment = decl.ty.abiAlignment(self.base.options.target); assert(decl.link.macho.local_sym_index != 0); // Caller forgot to call allocateDeclIndexes() const symbol = &self.locals.items[decl.link.macho.local_sym_index]; - const sym_name = try decl.getFullyQualifiedName(self.base.allocator); + const sym_name = try decl.getFullyQualifiedName(module); defer self.base.allocator.free(sym_name); - const decl_ptr = self.decls.getPtr(decl).?; + const decl_ptr = self.decls.getPtr(decl_index).?; if (decl_ptr.* == null) { decl_ptr.* = try self.getMatchingSectionAtom(&decl.link.macho, sym_name, decl.ty, decl.val); } @@ -4101,19 +4114,20 @@ pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.D pub fn updateDeclExports( self: *MachO, module: *Module, - decl: *Module.Decl, + decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl, exports); + if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl_index, exports); } const tracy = trace(@src()); defer tracy.end(); try self.globals.ensureUnusedCapacity(self.base.allocator, exports.len); + const decl = module.declPtr(decl_index); if (decl.link.macho.local_sym_index == 0) return; const decl_sym = &self.locals.items[decl.link.macho.local_sym_index]; @@ -4250,9 +4264,8 @@ pub fn deleteExport(self: *MachO, exp: Export) void { global.n_value = 0; } -fn freeUnnamedConsts(self: *MachO, decl: *Module.Decl) void { - log.debug("freeUnnamedConsts for decl {*}", .{decl}); - const unnamed_consts = self.unnamed_const_atoms.getPtr(decl) orelse return; +fn freeUnnamedConsts(self: *MachO, decl_index: Module.Decl.Index) void { + const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return; for (unnamed_consts.items) |atom| { self.freeAtom(atom, .{ .seg = self.text_segment_cmd_index.?, @@ -4267,15 +4280,17 @@ fn freeUnnamedConsts(self: *MachO, decl: *Module.Decl) void { unnamed_consts.clearAndFree(self.base.allocator); } -pub fn 
freeDecl(self: *MachO, decl: *Module.Decl) void { +pub fn freeDecl(self: *MachO, decl_index: Module.Decl.Index) void { if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl); + if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl_index); } + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); log.debug("freeDecl {*}", .{decl}); - const kv = self.decls.fetchSwapRemove(decl); + const kv = self.decls.fetchSwapRemove(decl_index); if (kv.?.value) |match| { self.freeAtom(&decl.link.macho, match, false); - self.freeUnnamedConsts(decl); + self.freeUnnamedConsts(decl_index); } // Appending to free lists is allowed to fail because the free lists are heuristics based anyway. if (decl.link.macho.local_sym_index != 0) { @@ -4307,7 +4322,10 @@ pub fn freeDecl(self: *MachO, decl: *Module.Decl) void { } } -pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl, reloc_info: File.RelocInfo) !u64 { +pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: File.RelocInfo) !u64 { + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); + assert(self.llvm_object == null); assert(decl.link.macho.local_sym_index != 0); diff --git a/src/link/NvPtx.zig b/src/link/NvPtx.zig index b518dc3f68..bd86d87201 100644 --- a/src/link/NvPtx.zig +++ b/src/link/NvPtx.zig @@ -74,27 +74,27 @@ pub fn updateFunc(self: *NvPtx, module: *Module, func: *Module.Fn, air: Air, liv try self.llvm_object.updateFunc(module, func, air, liveness); } -pub fn updateDecl(self: *NvPtx, module: *Module, decl: *Module.Decl) !void { +pub fn updateDecl(self: *NvPtx, module: *Module, decl_index: Module.Decl.Index) !void { if (!build_options.have_llvm) return; - return self.llvm_object.updateDecl(module, decl); + return self.llvm_object.updateDecl(module, decl_index); } pub fn updateDeclExports( self: *NvPtx, module: *Module, - decl: *const Module.Decl, + decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { if (!build_options.have_llvm) return; if (build_options.skip_non_native and builtin.object_format != .nvptx) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - return self.llvm_object.updateDeclExports(module, decl, exports); + return self.llvm_object.updateDeclExports(module, decl_index, exports); } -pub fn freeDecl(self: *NvPtx, decl: *Module.Decl) void { +pub fn freeDecl(self: *NvPtx, decl_index: Module.Decl.Index) void { if (!build_options.have_llvm) return; - return self.llvm_object.freeDecl(decl); + return self.llvm_object.freeDecl(decl_index); } pub fn flush(self: *NvPtx, comp: *Compilation, prog_node: *std.Progress.Node) !void { diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 5d740dd2b9..16f7841c2d 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -59,9 +59,9 @@ path_arena: std.heap.ArenaAllocator, /// If we group the decls by file, it makes it really easy to do this (put the symbol in the correct place) fn_decl_table: std.AutoArrayHashMapUnmanaged( *Module.File, - struct { sym_index: u32, functions: std.AutoArrayHashMapUnmanaged(*Module.Decl, FnDeclOutput) = .{} }, + struct { sym_index: u32, functions: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, FnDeclOutput) = .{} }, ) = .{}, -data_decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, []const u8) = .{}, +data_decl_table: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, []const u8) = .{}, hdr: aout.ExecHdr = undefined, @@ -162,11 +162,13 @@ pub fn 
createEmpty(gpa: Allocator, options: link.Options) !*Plan9 { return self; } -fn putFn(self: *Plan9, decl: *Module.Decl, out: FnDeclOutput) !void { +fn putFn(self: *Plan9, decl_index: Module.Decl.Index, out: FnDeclOutput) !void { const gpa = self.base.allocator; + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); const fn_map_res = try self.fn_decl_table.getOrPut(gpa, decl.getFileScope()); if (fn_map_res.found_existing) { - try fn_map_res.value_ptr.functions.put(gpa, decl, out); + try fn_map_res.value_ptr.functions.put(gpa, decl_index, out); } else { const file = decl.getFileScope(); const arena = self.path_arena.allocator(); @@ -178,7 +180,7 @@ fn putFn(self: *Plan9, decl: *Module.Decl, out: FnDeclOutput) !void { break :blk @intCast(u32, self.syms.items.len - 1); }, }; - try fn_map_res.value_ptr.functions.put(gpa, decl, out); + try fn_map_res.value_ptr.functions.put(gpa, decl_index, out); var a = std.ArrayList(u8).init(arena); errdefer a.deinit(); @@ -229,9 +231,10 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv @panic("Attempted to compile for object format that was disabled by build configuration"); } - const decl = func.owner_decl; + const decl_index = func.owner_decl; + const decl = module.declPtr(decl_index); - try self.seeDecl(decl); + try self.seeDecl(decl_index); log.debug("codegen decl {*} ({s})", .{ decl, decl.name }); var code_buffer = std.ArrayList(u8).init(self.base.allocator); @@ -262,7 +265,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv .appended => code_buffer.toOwnedSlice(), .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); + try module.failed_decls.put(module.gpa, decl_index, em); return; }, }; @@ -272,19 +275,21 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv .start_line = start_line.?, .end_line = end_line, }; - try self.putFn(decl, out); + try self.putFn(decl_index, out); return self.updateFinish(decl); } -pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl: *Module.Decl) !u32 { +pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.Index) !u32 { _ = self; _ = tv; - _ = decl; + _ = decl_index; log.debug("TODO lowerUnnamedConst for Plan9", .{}); return error.AnalysisFail; } -pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void { +pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index) !void { + const decl = module.declPtr(decl_index); + if (decl.val.tag() == .extern_fn) { return; // TODO Should we do more when front-end analyzed extern decl? 
} @@ -295,7 +300,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void { } } - try self.seeDecl(decl); + try self.seeDecl(decl_index); log.debug("codegen decl {*} ({s})", .{ decl, decl.name }); @@ -315,13 +320,13 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void { .appended => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); + try module.failed_decls.put(module.gpa, decl_index, em); return; }, }; var duped_code = try self.base.allocator.dupe(u8, code); errdefer self.base.allocator.free(duped_code); - try self.data_decl_table.put(self.base.allocator, decl, duped_code); + try self.data_decl_table.put(self.base.allocator, decl_index, duped_code); return self.updateFinish(decl); } /// called at the end of update{Decl,Func} @@ -435,7 +440,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No while (it_file.next()) |fentry| { var it = fentry.value_ptr.functions.iterator(); while (it.next()) |entry| { - const decl = entry.key_ptr.*; + const decl_index = entry.key_ptr.*; + const decl = mod.declPtr(decl_index); const out = entry.value_ptr.*; log.debug("write text decl {*} ({s}), lines {d} to {d}", .{ decl, decl.name, out.start_line + 1, out.end_line }); { @@ -462,7 +468,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } self.syms.items[decl.link.plan9.sym_index.?].value = off; - if (mod.decl_exports.get(decl)) |exports| { + if (mod.decl_exports.get(decl_index)) |exports| { try self.addDeclExports(mod, decl, exports); } } @@ -482,7 +488,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No { var it = self.data_decl_table.iterator(); while (it.next()) |entry| { - const decl = entry.key_ptr.*; + const decl_index = entry.key_ptr.*; + const decl = mod.declPtr(decl_index); const code = entry.value_ptr.*; log.debug("write data decl {*} ({s})", .{ decl, decl.name }); @@ -498,7 +505,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } self.syms.items[decl.link.plan9.sym_index.?].value = off; - if (mod.decl_exports.get(decl)) |exports| { + if (mod.decl_exports.get(decl_index)) |exports| { try self.addDeclExports(mod, decl, exports); } } @@ -564,24 +571,25 @@ fn addDeclExports( } } -pub fn freeDecl(self: *Plan9, decl: *Module.Decl) void { +pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void { // TODO audit the lifetimes of decls table entries. It's possible to get // allocateDeclIndexes and then freeDecl without any updateDecl in between. // However that is planned to change, see the TODO comment in Module.zig // in the deleteUnusedDecl function. 
+ const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); const is_fn = (decl.val.tag() == .function); if (is_fn) { - var symidx_and_submap = - self.fn_decl_table.get(decl.getFileScope()).?; + var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope()).?; var submap = symidx_and_submap.functions; - _ = submap.swapRemove(decl); + _ = submap.swapRemove(decl_index); if (submap.count() == 0) { self.syms.items[symidx_and_submap.sym_index] = aout.Sym.undefined_symbol; self.syms_index_free_list.append(self.base.allocator, symidx_and_submap.sym_index) catch {}; submap.deinit(self.base.allocator); } } else { - _ = self.data_decl_table.swapRemove(decl); + _ = self.data_decl_table.swapRemove(decl_index); } if (decl.link.plan9.got_index) |i| { // TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length @@ -593,7 +601,9 @@ pub fn freeDecl(self: *Plan9, decl: *Module.Decl) void { } } -pub fn seeDecl(self: *Plan9, decl: *Module.Decl) !void { +pub fn seeDecl(self: *Plan9, decl_index: Module.Decl.Index) !void { + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); if (decl.link.plan9.got_index == null) { if (self.got_index_free_list.popOrNull()) |i| { decl.link.plan9.got_index = i; @@ -607,14 +617,13 @@ pub fn seeDecl(self: *Plan9, decl: *Module.Decl) !void { pub fn updateDeclExports( self: *Plan9, module: *Module, - decl: *Module.Decl, + decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { - try self.seeDecl(decl); + try self.seeDecl(decl_index); // we do all the things in flush _ = self; _ = module; - _ = decl; _ = exports; } pub fn deinit(self: *Plan9) void { @@ -709,14 +718,18 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { }); } } + + const mod = self.base.options.module.?; + // write the data symbols { var it = self.data_decl_table.iterator(); while (it.next()) |entry| { - const decl = entry.key_ptr.*; + const decl_index = entry.key_ptr.*; + const decl = mod.declPtr(decl_index); const sym = self.syms.items[decl.link.plan9.sym_index.?]; try self.writeSym(writer, sym); - if (self.base.options.module.?.decl_exports.get(decl)) |exports| { + if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| { for (exports) |e| { try self.writeSym(writer, self.syms.items[e.link.plan9.?]); } @@ -737,10 +750,11 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { // write all the decls come from the file of the z symbol var submap_it = symidx_and_submap.functions.iterator(); while (submap_it.next()) |entry| { - const decl = entry.key_ptr.*; + const decl_index = entry.key_ptr.*; + const decl = mod.declPtr(decl_index); const sym = self.syms.items[decl.link.plan9.sym_index.?]; try self.writeSym(writer, sym); - if (self.base.options.module.?.decl_exports.get(decl)) |exports| { + if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| { for (exports) |e| { const s = self.syms.items[e.link.plan9.?]; if (mem.eql(u8, s.name, "_start")) @@ -754,12 +768,18 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { } /// this will be removed, moved to updateFinish -pub fn allocateDeclIndexes(self: *Plan9, decl: *Module.Decl) !void { +pub fn allocateDeclIndexes(self: *Plan9, decl_index: Module.Decl.Index) !void { _ = self; - _ = decl; + _ = decl_index; } -pub fn getDeclVAddr(self: *Plan9, decl: *const Module.Decl, reloc_info: link.File.RelocInfo) !u64 { +pub fn getDeclVAddr( + self: *Plan9, + 
decl_index: Module.Decl.Index, + reloc_info: link.File.RelocInfo, +) !u64 { _ = reloc_info; + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); if (decl.ty.zigTypeTag() == .Fn) { var start = self.bases.text; var it_file = self.fn_decl_table.iterator(); @@ -767,7 +787,7 @@ pub fn getDeclVAddr(self: *Plan9, decl: *const Module.Decl, reloc_info: link.Fil var symidx_and_submap = fentry.value_ptr; var submap_it = symidx_and_submap.functions.iterator(); while (submap_it.next()) |entry| { - if (entry.key_ptr.* == decl) return start; + if (entry.key_ptr.* == decl_index) return start; start += entry.value_ptr.code.len; } } @@ -776,7 +796,7 @@ pub fn getDeclVAddr(self: *Plan9, decl: *const Module.Decl, reloc_info: link.Fil var start = self.bases.data + self.got_len * if (!self.sixtyfour_bit) @as(u32, 4) else 8; var it = self.data_decl_table.iterator(); while (it.next()) |kv| { - if (decl == kv.key_ptr.*) return start; + if (decl_index == kv.key_ptr.*) return start; start += kv.value_ptr.len; } unreachable; diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index e4d032539f..e295dceb55 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -54,7 +54,7 @@ base: link.File, /// This linker backend does not try to incrementally link output SPIR-V code. /// Instead, it tracks all declarations in this table, and iterates over it /// in the flush function. -decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, DeclGenContext) = .{}, +decl_table: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, DeclGenContext) = .{}, const DeclGenContext = struct { air: Air, @@ -145,29 +145,31 @@ pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liv }; } -pub fn updateDecl(self: *SpirV, module: *Module, decl: *Module.Decl) !void { +pub fn updateDecl(self: *SpirV, module: *Module, decl_index: Module.Decl.Index) !void { if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } _ = module; // Keep track of all decls so we can iterate over them on flush(). - _ = try self.decl_table.getOrPut(self.base.allocator, decl); + _ = try self.decl_table.getOrPut(self.base.allocator, decl_index); } pub fn updateDeclExports( self: *SpirV, module: *Module, - decl: *const Module.Decl, + decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { _ = self; _ = module; - _ = decl; + _ = decl_index; _ = exports; } -pub fn freeDecl(self: *SpirV, decl: *Module.Decl) void { - const index = self.decl_table.getIndex(decl).?; +pub fn freeDecl(self: *SpirV, decl_index: Module.Decl.Index) void { + const index = self.decl_table.getIndex(decl_index).?; + const module = self.base.options.module.?; + const decl = module.declPtr(decl_index); if (decl.val.tag() == .function) { self.decl_table.values()[index].deinit(self.base.allocator); } @@ -208,7 +210,8 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No // TODO: We're allocating an ID unconditionally now, are there // declarations which don't generate a result? // TODO: fn_link is used here, but that's probably not the right field. It will work anyway though. 
- for (self.decl_table.keys()) |decl| { + for (self.decl_table.keys()) |decl_index| { + const decl = module.declPtr(decl_index); if (decl.has_tv) { decl.fn_link.spirv.id = spv.allocId(); } @@ -220,7 +223,8 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No var it = self.decl_table.iterator(); while (it.next()) |entry| { - const decl = entry.key_ptr.*; + const decl_index = entry.key_ptr.*; + const decl = module.declPtr(decl_index); if (!decl.has_tv) continue; const air = entry.value_ptr.air; @@ -228,7 +232,7 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No // Note, if `decl` is not a function, air/liveness may be undefined. if (try decl_gen.gen(decl, air, liveness)) |msg| { - try module.failed_decls.put(module.gpa, decl, msg); + try module.failed_decls.put(module.gpa, decl_index, msg); return; // TODO: Attempt to generate more decls? } } diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 4f72dfe388..fad7543b0e 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -48,7 +48,7 @@ host_name: []const u8 = "env", /// List of all `Decl` that are currently alive. /// This is meant for bookkeeping so we can safely clean up all codegen memory /// when calling `deinit` -decls: std.AutoHashMapUnmanaged(*Module.Decl, void) = .{}, +decls: std.AutoHashMapUnmanaged(Module.Decl.Index, void) = .{}, /// List of all symbols generated by Zig code. symbols: std.ArrayListUnmanaged(Symbol) = .{}, /// List of symbol indexes which are free to be used. @@ -429,9 +429,11 @@ pub fn deinit(self: *Wasm) void { if (self.llvm_object) |llvm_object| llvm_object.destroy(gpa); } + const mod = self.base.options.module.?; var decl_it = self.decls.keyIterator(); - while (decl_it.next()) |decl_ptr| { - decl_ptr.*.link.wasm.deinit(gpa); + while (decl_it.next()) |decl_index_ptr| { + const decl = mod.declPtr(decl_index_ptr.*); + decl.link.wasm.deinit(gpa); } for (self.func_types.items) |*func_type| { @@ -476,12 +478,13 @@ pub fn deinit(self: *Wasm) void { self.string_table.deinit(gpa); } -pub fn allocateDeclIndexes(self: *Wasm, decl: *Module.Decl) !void { +pub fn allocateDeclIndexes(self: *Wasm, decl_index: Module.Decl.Index) !void { if (self.llvm_object) |_| return; + const decl = self.base.options.module.?.declPtr(decl_index); if (decl.link.wasm.sym_index != 0) return; try self.symbols.ensureUnusedCapacity(self.base.allocator, 1); - try self.decls.putNoClobber(self.base.allocator, decl, {}); + try self.decls.putNoClobber(self.base.allocator, decl_index, {}); const atom = &decl.link.wasm; @@ -502,14 +505,15 @@ pub fn allocateDeclIndexes(self: *Wasm, decl: *Module.Decl) !void { try self.symbol_atom.putNoClobber(self.base.allocator, atom.symbolLoc(), atom); } -pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Wasm, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .wasm) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness); } - const decl = func.owner_decl; + const decl_index = func.owner_decl; + const decl = mod.declPtr(decl_index); assert(decl.link.wasm.sym_index != 0); // Must call allocateDeclIndexes() decl.link.wasm.clear(); @@ -530,7 
+534,7 @@ pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, live .appended => code_writer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -540,14 +544,15 @@ pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, live // Generate code for the Decl, storing it in memory to be later written to // the file on flush(). -pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { +pub fn updateDecl(self: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !void { if (build_options.skip_non_native and builtin.object_format != .wasm) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index); } + const decl = mod.declPtr(decl_index); assert(decl.link.wasm.sym_index != 0); // Must call allocateDeclIndexes() decl.link.wasm.clear(); @@ -580,7 +585,7 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { .appended => code_writer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -590,12 +595,13 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, code: []const u8) !void { if (code.len == 0) return; + const mod = self.base.options.module.?; const atom: *Atom = &decl.link.wasm; atom.size = @intCast(u32, code.len); atom.alignment = decl.ty.abiAlignment(self.base.options.target); const symbol = &self.symbols.items[atom.sym_index]; - const full_name = try decl.getFullyQualifiedName(self.base.allocator); + const full_name = try decl.getFullyQualifiedName(mod); defer self.base.allocator.free(full_name); symbol.name = try self.string_table.put(self.base.allocator, full_name); try atom.code.appendSlice(self.base.allocator, code); @@ -606,12 +612,15 @@ fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, code: []const u8) !void { /// Lowers a constant typed value to a local symbol and atom. /// Returns the symbol index of the local /// The given `decl` is the parent decl that owns the constant. 
-pub fn lowerUnnamedConst(self: *Wasm, decl: *Module.Decl, tv: TypedValue) !u32 { +pub fn lowerUnnamedConst(self: *Wasm, tv: TypedValue, decl_index: Module.Decl.Index) !u32 { assert(tv.ty.zigTypeTag() != .Fn); // cannot create local symbols for functions + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); + // Create and initialize a new local symbol and atom const local_index = decl.link.wasm.locals.items.len; - const fqdn = try decl.getFullyQualifiedName(self.base.allocator); + const fqdn = try decl.getFullyQualifiedName(mod); defer self.base.allocator.free(fqdn); const name = try std.fmt.allocPrintZ(self.base.allocator, "__unnamed_{s}_{d}", .{ fqdn, local_index }); defer self.base.allocator.free(name); @@ -641,7 +650,6 @@ pub fn lowerUnnamedConst(self: *Wasm, decl: *Module.Decl, tv: TypedValue) !u32 { var value_bytes = std.ArrayList(u8).init(self.base.allocator); defer value_bytes.deinit(); - const module = self.base.options.module.?; const result = try codegen.generateSymbol( &self.base, decl.srcLoc(), @@ -658,7 +666,7 @@ pub fn lowerUnnamedConst(self: *Wasm, decl: *Module.Decl, tv: TypedValue) !u32 { .appended => value_bytes.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return error.AnalysisFail; }, }; @@ -672,9 +680,11 @@ pub fn lowerUnnamedConst(self: *Wasm, decl: *Module.Decl, tv: TypedValue) !u32 { /// Returns the given pointer address pub fn getDeclVAddr( self: *Wasm, - decl: *const Module.Decl, + decl_index: Module.Decl.Index, reloc_info: link.File.RelocInfo, ) !u64 { + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); const target_symbol_index = decl.link.wasm.sym_index; assert(target_symbol_index != 0); assert(reloc_info.parent_atom_index != 0); @@ -722,21 +732,23 @@ pub fn deleteExport(self: *Wasm, exp: Export) void { pub fn updateDeclExports( self: *Wasm, - module: *Module, - decl: *const Module.Decl, + mod: *Module, + decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { if (build_options.skip_non_native and builtin.object_format != .wasm) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl, exports); + if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(mod, decl_index, exports); } + const decl = mod.declPtr(decl_index); + for (exports) |exp| { if (exp.options.section) |section| { - try module.failed_exports.putNoClobber(module.gpa, exp, try Module.ErrorMsg.create( - module.gpa, + try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create( + mod.gpa, decl.srcLoc(), "Unimplemented: ExportOptions.section '{s}'", .{section}, @@ -754,8 +766,8 @@ pub fn updateDeclExports( // are strong symbols, we have a linker error. // In the other case we replace one with the other. 
if (!exp_is_weak and !existing_sym.isWeak()) { - try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create( - module.gpa, + try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( + mod.gpa, decl.srcLoc(), \\LinkError: symbol '{s}' defined multiple times \\ first definition in '{s}' @@ -773,8 +785,9 @@ pub fn updateDeclExports( } } - const sym_index = exp.exported_decl.link.wasm.sym_index; - const sym_loc = exp.exported_decl.link.wasm.symbolLoc(); + const exported_decl = mod.declPtr(exp.exported_decl); + const sym_index = exported_decl.link.wasm.sym_index; + const sym_loc = exported_decl.link.wasm.symbolLoc(); const symbol = sym_loc.getSymbol(self); switch (exp.options.linkage) { .Internal => { @@ -786,8 +799,8 @@ pub fn updateDeclExports( }, .Strong => {}, // symbols are strong by default .LinkOnce => { - try module.failed_exports.putNoClobber(module.gpa, exp, try Module.ErrorMsg.create( - module.gpa, + try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create( + mod.gpa, decl.srcLoc(), "Unimplemented: LinkOnce", .{}, @@ -813,13 +826,15 @@ pub fn updateDeclExports( } } -pub fn freeDecl(self: *Wasm, decl: *Module.Decl) void { +pub fn freeDecl(self: *Wasm, decl_index: Module.Decl.Index) void { if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl); + if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl_index); } + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); const atom = &decl.link.wasm; self.symbols_free_list.append(self.base.allocator, atom.sym_index) catch {}; - _ = self.decls.remove(decl); + _ = self.decls.remove(decl_index); self.symbols.items[atom.sym_index].tag = .dead; for (atom.locals.items) |local_atom| { const local_symbol = &self.symbols.items[local_atom.sym_index]; @@ -1414,8 +1429,8 @@ fn populateErrorNameTable(self: *Wasm) !void { // Addend for each relocation to the table var addend: u32 = 0; - const module = self.base.options.module.?; - for (module.error_name_list.items) |error_name| { + const mod = self.base.options.module.?; + for (mod.error_name_list.items) |error_name| { const len = @intCast(u32, error_name.len + 1); // names are 0-terminated const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); @@ -1456,9 +1471,11 @@ fn resetState(self: *Wasm) void { for (self.segment_info.items) |*segment_info| { self.base.allocator.free(segment_info.name); } + const mod = self.base.options.module.?; var decl_it = self.decls.keyIterator(); - while (decl_it.next()) |decl| { - const atom = &decl.*.link.wasm; + while (decl_it.next()) |decl_index_ptr| { + const decl = mod.declPtr(decl_index_ptr.*); + const atom = &decl.link.wasm; atom.next = null; atom.prev = null; @@ -1546,12 +1563,14 @@ pub fn flushModule(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod defer self.resetState(); try self.setupStart(); try self.setupImports(); + const mod = self.base.options.module.?; var decl_it = self.decls.keyIterator(); - while (decl_it.next()) |decl| { - if (decl.*.isExtern()) continue; + while (decl_it.next()) |decl_index_ptr| { + const decl = mod.declPtr(decl_index_ptr.*); + if (decl.isExtern()) continue; const atom = &decl.*.link.wasm; - if (decl.*.ty.zigTypeTag() == .Fn) { - try self.parseAtom(atom, .{ .function = decl.*.fn_link.wasm }); + if (decl.ty.zigTypeTag() == .Fn) { + try self.parseAtom(atom, .{ .function = decl.fn_link.wasm }); } else { try self.parseAtom(atom, .data); } @@ -2045,7 +2064,7 @@ fn linkWithLLD(self: *Wasm,
comp: *Compilation, prog_node: *std.Progress.Node) ! // If there is no Zig code to compile, then we should skip flushing the output file because it // will not be part of the linker line anyway. - const module_obj_path: ?[]const u8 = if (self.base.options.module) |module| blk: { + const module_obj_path: ?[]const u8 = if (self.base.options.module) |mod| blk: { const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1; if (use_stage1) { const obj_basename = try std.zig.binNameAlloc(arena, .{ @@ -2054,7 +2073,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) ! .output_mode = .Obj, }); switch (self.base.options.cache_mode) { - .incremental => break :blk try module.zig_cache_artifact_directory.join( + .incremental => break :blk try mod.zig_cache_artifact_directory.join( arena, &[_][]const u8{obj_basename}, ), @@ -2253,7 +2272,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) ! } if (auto_export_symbols) { - if (self.base.options.module) |module| { + if (self.base.options.module) |mod| { // when we use stage1, we use the exports that stage1 provided us. // For stage2, we can directly retrieve them from the module. const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1; @@ -2264,14 +2283,15 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) ! } else { const skip_export_non_fn = target.os.tag == .wasi and self.base.options.wasi_exec_model == .command; - for (module.decl_exports.values()) |exports| { + for (mod.decl_exports.values()) |exports| { for (exports) |exprt| { - if (skip_export_non_fn and exprt.exported_decl.ty.zigTypeTag() != .Fn) { + const exported_decl = mod.declPtr(exprt.exported_decl); + if (skip_export_non_fn and exported_decl.ty.zigTypeTag() != .Fn) { // skip exporting symbols when we're building a WASI command // and the symbol is not a function continue; } - const symbol_name = exprt.exported_decl.name; + const symbol_name = exported_decl.name; const arg = try std.fmt.allocPrint(arena, "--export={s}", .{symbol_name}); try argv.append(arg); } diff --git a/src/main.zig b/src/main.zig index 84a69b98f1..e47ff0e272 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3892,7 +3892,7 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void .tree_loaded = true, .zir = undefined, .pkg = undefined, - .root_decl = null, + .root_decl = .none, }; file.pkg = try Package.create(gpa, null, file.sub_file_path); @@ -4098,7 +4098,7 @@ fn fmtPathFile( .tree_loaded = true, .zir = undefined, .pkg = undefined, - .root_decl = null, + .root_decl = .none, }; file.pkg = try Package.create(fmt.gpa, null, file.sub_file_path); @@ -4757,7 +4757,7 @@ pub fn cmdAstCheck( .tree = undefined, .zir = undefined, .pkg = undefined, - .root_decl = null, + .root_decl = .none, }; if (zig_source_file) |file_name| { var f = fs.cwd().openFile(file_name, .{}) catch |err| { @@ -4910,7 +4910,7 @@ pub fn cmdChangelist( .tree = undefined, .zir = undefined, .pkg = undefined, - .root_decl = null, + .root_decl = .none, }; file.pkg = try Package.create(gpa, null, file.sub_file_path); diff --git a/src/print_air.zig b/src/print_air.zig index 8a1a8fa950..27d222f262 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -7,7 +7,7 @@ const Value = @import("value.zig").Value; const Air = @import("Air.zig"); const Liveness = @import("Liveness.zig"); -pub fn dump(gpa: Allocator, air: Air, liveness: Liveness) void { +pub fn dump(module: *Module, air: Air, liveness: Liveness) void { 
const instruction_bytes = air.instructions.len * // Here we don't use @sizeOf(Air.Inst.Data) because it would include // the debug safety tag but we want to measure release size. @@ -41,11 +41,12 @@ pub fn dump(gpa: Allocator, air: Air, liveness: Liveness) void { liveness.special.count(), fmtIntSizeBin(liveness_special_bytes), }); // zig fmt: on - var arena = std.heap.ArenaAllocator.init(gpa); + var arena = std.heap.ArenaAllocator.init(module.gpa); defer arena.deinit(); var writer: Writer = .{ - .gpa = gpa, + .module = module, + .gpa = module.gpa, .arena = arena.allocator(), .air = air, .liveness = liveness, @@ -58,6 +59,7 @@ pub fn dump(gpa: Allocator, air: Air, liveness: Liveness) void { } const Writer = struct { + module: *Module, gpa: Allocator, arena: Allocator, air: Air, @@ -591,7 +593,8 @@ const Writer = struct { fn writeDbgInline(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; const function = w.air.values[ty_pl.payload].castTag(.function).?.data; - try s.print("{s}", .{function.owner_decl.name}); + const owner_decl = w.module.declPtr(function.owner_decl); + try s.print("{s}", .{owner_decl.name}); } fn writeDbgVar(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { diff --git a/src/type.zig b/src/type.zig index 72cf2cd534..c96c512d9a 100644 --- a/src/type.zig +++ b/src/type.zig @@ -521,7 +521,7 @@ pub const Type = extern union { } } - pub fn eql(a: Type, b: Type, target: Target) bool { + pub fn eql(a: Type, b: Type, mod: *Module) bool { // As a shortcut, if the small tags / addresses match, we're done. if (a.tag_if_small_enough == b.tag_if_small_enough) return true; @@ -637,7 +637,7 @@ pub const Type = extern union { const a_info = a.fnInfo(); const b_info = b.fnInfo(); - if (!eql(a_info.return_type, b_info.return_type, target)) + if (!eql(a_info.return_type, b_info.return_type, mod)) return false; if (a_info.cc != b_info.cc) @@ -663,7 +663,7 @@ pub const Type = extern union { if (a_param_ty.tag() == .generic_poison) continue; if (b_param_ty.tag() == .generic_poison) continue; - if (!eql(a_param_ty, b_param_ty, target)) + if (!eql(a_param_ty, b_param_ty, mod)) return false; } @@ -681,13 +681,13 @@ pub const Type = extern union { if (a.arrayLen() != b.arrayLen()) return false; const elem_ty = a.elemType(); - if (!elem_ty.eql(b.elemType(), target)) + if (!elem_ty.eql(b.elemType(), mod)) return false; const sentinel_a = a.sentinel(); const sentinel_b = b.sentinel(); if (sentinel_a) |sa| { if (sentinel_b) |sb| { - return sa.eql(sb, elem_ty, target); + return sa.eql(sb, elem_ty, mod); } else { return false; } @@ -718,7 +718,7 @@ pub const Type = extern union { const info_a = a.ptrInfo().data; const info_b = b.ptrInfo().data; - if (!info_a.pointee_type.eql(info_b.pointee_type, target)) + if (!info_a.pointee_type.eql(info_b.pointee_type, mod)) return false; if (info_a.@"align" != info_b.@"align") return false; @@ -741,7 +741,7 @@ pub const Type = extern union { const sentinel_b = info_b.sentinel; if (sentinel_a) |sa| { if (sentinel_b) |sb| { - if (!sa.eql(sb, info_a.pointee_type, target)) + if (!sa.eql(sb, info_a.pointee_type, mod)) return false; } else { return false; @@ -762,7 +762,7 @@ pub const Type = extern union { var buf_a: Payload.ElemType = undefined; var buf_b: Payload.ElemType = undefined; - return a.optionalChild(&buf_a).eql(b.optionalChild(&buf_b), target); + return a.optionalChild(&buf_a).eql(b.optionalChild(&buf_b), mod); }, .anyerror_void_error_union, .error_union => { @@ 
-770,18 +770,18 @@ pub const Type = extern union { const a_set = a.errorUnionSet(); const b_set = b.errorUnionSet(); - if (!a_set.eql(b_set, target)) return false; + if (!a_set.eql(b_set, mod)) return false; const a_payload = a.errorUnionPayload(); const b_payload = b.errorUnionPayload(); - if (!a_payload.eql(b_payload, target)) return false; + if (!a_payload.eql(b_payload, mod)) return false; return true; }, .anyframe_T => { if (b.zigTypeTag() != .AnyFrame) return false; - return a.childType().eql(b.childType(), target); + return a.childType().eql(b.childType(), mod); }, .empty_struct => { @@ -804,7 +804,7 @@ pub const Type = extern union { for (a_tuple.types) |a_ty, i| { const b_ty = b_tuple.types[i]; - if (!eql(a_ty, b_ty, target)) return false; + if (!eql(a_ty, b_ty, mod)) return false; } for (a_tuple.values) |a_val, i| { @@ -820,7 +820,7 @@ pub const Type = extern union { if (b_val.tag() == .unreachable_value) { return false; } else { - if (!Value.eql(a_val, b_val, ty, target)) return false; + if (!Value.eql(a_val, b_val, ty, mod)) return false; } } } @@ -840,7 +840,7 @@ pub const Type = extern union { for (a_struct_obj.types) |a_ty, i| { const b_ty = b_struct_obj.types[i]; - if (!eql(a_ty, b_ty, target)) return false; + if (!eql(a_ty, b_ty, mod)) return false; } for (a_struct_obj.values) |a_val, i| { @@ -856,7 +856,7 @@ pub const Type = extern union { if (b_val.tag() == .unreachable_value) { return false; } else { - if (!Value.eql(a_val, b_val, ty, target)) return false; + if (!Value.eql(a_val, b_val, ty, mod)) return false; } } } @@ -911,13 +911,13 @@ pub const Type = extern union { } } - pub fn hash(self: Type, target: Target) u64 { + pub fn hash(self: Type, mod: *Module) u64 { var hasher = std.hash.Wyhash.init(0); - self.hashWithHasher(&hasher, target); + self.hashWithHasher(&hasher, mod); return hasher.final(); } - pub fn hashWithHasher(ty: Type, hasher: *std.hash.Wyhash, target: Target) void { + pub fn hashWithHasher(ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { switch (ty.tag()) { .generic_poison => unreachable, @@ -1036,7 +1036,7 @@ pub const Type = extern union { std.hash.autoHash(hasher, std.builtin.TypeId.Fn); const fn_info = ty.fnInfo(); - hashWithHasher(fn_info.return_type, hasher, target); + hashWithHasher(fn_info.return_type, hasher, mod); std.hash.autoHash(hasher, fn_info.alignment); std.hash.autoHash(hasher, fn_info.cc); std.hash.autoHash(hasher, fn_info.is_var_args); @@ -1046,7 +1046,7 @@ pub const Type = extern union { for (fn_info.param_types) |param_ty, i| { std.hash.autoHash(hasher, fn_info.paramIsComptime(i)); if (param_ty.tag() == .generic_poison) continue; - hashWithHasher(param_ty, hasher, target); + hashWithHasher(param_ty, hasher, mod); } }, @@ -1059,8 +1059,8 @@ pub const Type = extern union { const elem_ty = ty.elemType(); std.hash.autoHash(hasher, ty.arrayLen()); - hashWithHasher(elem_ty, hasher, target); - hashSentinel(ty.sentinel(), elem_ty, hasher, target); + hashWithHasher(elem_ty, hasher, mod); + hashSentinel(ty.sentinel(), elem_ty, hasher, mod); }, .vector => { @@ -1068,7 +1068,7 @@ pub const Type = extern union { const elem_ty = ty.elemType(); std.hash.autoHash(hasher, ty.vectorLen()); - hashWithHasher(elem_ty, hasher, target); + hashWithHasher(elem_ty, hasher, mod); }, .single_const_pointer_to_comptime_int, @@ -1092,8 +1092,8 @@ pub const Type = extern union { std.hash.autoHash(hasher, std.builtin.TypeId.Pointer); const info = ty.ptrInfo().data; - hashWithHasher(info.pointee_type, hasher, target); - hashSentinel(info.sentinel, 
info.pointee_type, hasher, target); + hashWithHasher(info.pointee_type, hasher, mod); + hashSentinel(info.sentinel, info.pointee_type, hasher, mod); std.hash.autoHash(hasher, info.@"align"); std.hash.autoHash(hasher, info.@"addrspace"); std.hash.autoHash(hasher, info.bit_offset); @@ -1111,22 +1111,22 @@ pub const Type = extern union { std.hash.autoHash(hasher, std.builtin.TypeId.Optional); var buf: Payload.ElemType = undefined; - hashWithHasher(ty.optionalChild(&buf), hasher, target); + hashWithHasher(ty.optionalChild(&buf), hasher, mod); }, .anyerror_void_error_union, .error_union => { std.hash.autoHash(hasher, std.builtin.TypeId.ErrorUnion); const set_ty = ty.errorUnionSet(); - hashWithHasher(set_ty, hasher, target); + hashWithHasher(set_ty, hasher, mod); const payload_ty = ty.errorUnionPayload(); - hashWithHasher(payload_ty, hasher, target); + hashWithHasher(payload_ty, hasher, mod); }, .anyframe_T => { std.hash.autoHash(hasher, std.builtin.TypeId.AnyFrame); - hashWithHasher(ty.childType(), hasher, target); + hashWithHasher(ty.childType(), hasher, mod); }, .empty_struct => { @@ -1145,10 +1145,10 @@ pub const Type = extern union { std.hash.autoHash(hasher, tuple.types.len); for (tuple.types) |field_ty, i| { - hashWithHasher(field_ty, hasher, target); + hashWithHasher(field_ty, hasher, mod); const field_val = tuple.values[i]; if (field_val.tag() == .unreachable_value) continue; - field_val.hash(field_ty, hasher, target); + field_val.hash(field_ty, hasher, mod); } }, .anon_struct => { @@ -1160,9 +1160,9 @@ pub const Type = extern union { const field_name = struct_obj.names[i]; const field_val = struct_obj.values[i]; hasher.update(field_name); - hashWithHasher(field_ty, hasher, target); + hashWithHasher(field_ty, hasher, mod); if (field_val.tag() == .unreachable_value) continue; - field_val.hash(field_ty, hasher, target); + field_val.hash(field_ty, hasher, mod); } }, @@ -1210,35 +1210,35 @@ pub const Type = extern union { } } - fn hashSentinel(opt_val: ?Value, ty: Type, hasher: *std.hash.Wyhash, target: Target) void { + fn hashSentinel(opt_val: ?Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { if (opt_val) |s| { std.hash.autoHash(hasher, true); - s.hash(ty, hasher, target); + s.hash(ty, hasher, mod); } else { std.hash.autoHash(hasher, false); } } pub const HashContext64 = struct { - target: Target, + mod: *Module, pub fn hash(self: @This(), t: Type) u64 { - return t.hash(self.target); + return t.hash(self.mod); } pub fn eql(self: @This(), a: Type, b: Type) bool { - return a.eql(b, self.target); + return a.eql(b, self.mod); } }; pub const HashContext32 = struct { - target: Target, + mod: *Module, pub fn hash(self: @This(), t: Type) u32 { - return @truncate(u32, t.hash(self.target)); + return @truncate(u32, t.hash(self.mod)); } pub fn eql(self: @This(), a: Type, b: Type, b_index: usize) bool { _ = b_index; - return a.eql(b, self.target); + return a.eql(b, self.mod); } }; @@ -1483,16 +1483,16 @@ pub const Type = extern union { @compileError("do not format types directly; use either ty.fmtDebug() or ty.fmt()"); } - pub fn fmt(ty: Type, target: Target) std.fmt.Formatter(format2) { + pub fn fmt(ty: Type, module: *Module) std.fmt.Formatter(format2) { return .{ .data = .{ .ty = ty, - .target = target, + .module = module, } }; } const FormatContext = struct { ty: Type, - target: Target, + module: *Module, }; fn format2( @@ -1503,7 +1503,7 @@ pub const Type = extern union { ) !void { comptime assert(unused_format_string.len == 0); _ = options; - return print(ctx.ty, writer, 
ctx.target); + return print(ctx.ty, writer, ctx.module); } pub fn fmtDebug(ty: Type) std.fmt.Formatter(dump) { @@ -1579,27 +1579,39 @@ pub const Type = extern union { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; - return struct_obj.owner_decl.renderFullyQualifiedName(writer); + return writer.print("({s} decl={d})", .{ + @tagName(t), struct_obj.owner_decl, + }); }, .@"union", .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.owner_decl.renderFullyQualifiedName(writer); + return writer.print("({s} decl={d})", .{ + @tagName(t), union_obj.owner_decl, + }); }, .enum_full, .enum_nonexhaustive => { const enum_full = ty.cast(Payload.EnumFull).?.data; - return enum_full.owner_decl.renderFullyQualifiedName(writer); + return writer.print("({s} decl={d})", .{ + @tagName(t), enum_full.owner_decl, + }); }, .enum_simple => { const enum_simple = ty.castTag(.enum_simple).?.data; - return enum_simple.owner_decl.renderFullyQualifiedName(writer); + return writer.print("({s} decl={d})", .{ + @tagName(t), enum_simple.owner_decl, + }); }, .enum_numbered => { const enum_numbered = ty.castTag(.enum_numbered).?.data; - return enum_numbered.owner_decl.renderFullyQualifiedName(writer); + return writer.print("({s} decl={d})", .{ + @tagName(t), enum_numbered.owner_decl, + }); }, .@"opaque" => { - // TODO use declaration name - return writer.writeAll("opaque {}"); + const opaque_obj = ty.castTag(.@"opaque").?.data; + return writer.print("({s} decl={d})", .{ + @tagName(t), opaque_obj.owner_decl, + }); }, .anyerror_void_error_union => return writer.writeAll("anyerror!void"), @@ -1845,7 +1857,9 @@ pub const Type = extern union { }, .error_set_inferred => { const func = ty.castTag(.error_set_inferred).?.data.func; - return writer.print("@typeInfo(@typeInfo(@TypeOf({s})).Fn.return_type.?).ErrorUnion.error_set", .{func.owner_decl.name}); + return writer.print("({s} func={d})", .{ + @tagName(t), func.owner_decl, + }); }, .error_set_merged => { const names = ty.castTag(.error_set_merged).?.data.keys(); @@ -1871,15 +1885,15 @@ pub const Type = extern union { pub const nameAllocArena = nameAlloc; - pub fn nameAlloc(ty: Type, ally: Allocator, target: Target) Allocator.Error![:0]const u8 { + pub fn nameAlloc(ty: Type, ally: Allocator, module: *Module) Allocator.Error![:0]const u8 { var buffer = std.ArrayList(u8).init(ally); defer buffer.deinit(); - try ty.print(buffer.writer(), target); + try ty.print(buffer.writer(), module); return buffer.toOwnedSliceSentinel(0); } /// Prints a name suitable for `@typeName`. 
- pub fn print(ty: Type, writer: anytype, target: Target) @TypeOf(writer).Error!void { + pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { const t = ty.tag(); switch (t) { .inferred_alloc_const => unreachable, @@ -1946,32 +1960,38 @@ pub const Type = extern union { .empty_struct => { const namespace = ty.castTag(.empty_struct).?.data; - try namespace.renderFullyQualifiedName("", writer); + try namespace.renderFullyQualifiedName(mod, "", writer); }, .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; - try struct_obj.owner_decl.renderFullyQualifiedName(writer); + const decl = mod.declPtr(struct_obj.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); }, .@"union", .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - try union_obj.owner_decl.renderFullyQualifiedName(writer); + const decl = mod.declPtr(union_obj.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); }, .enum_full, .enum_nonexhaustive => { const enum_full = ty.cast(Payload.EnumFull).?.data; - try enum_full.owner_decl.renderFullyQualifiedName(writer); + const decl = mod.declPtr(enum_full.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); }, .enum_simple => { const enum_simple = ty.castTag(.enum_simple).?.data; - try enum_simple.owner_decl.renderFullyQualifiedName(writer); + const decl = mod.declPtr(enum_simple.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); }, .enum_numbered => { const enum_numbered = ty.castTag(.enum_numbered).?.data; - try enum_numbered.owner_decl.renderFullyQualifiedName(writer); + const decl = mod.declPtr(enum_numbered.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); }, .@"opaque" => { const opaque_obj = ty.cast(Payload.Opaque).?.data; - try opaque_obj.owner_decl.renderFullyQualifiedName(writer); + const decl = mod.declPtr(opaque_obj.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); }, .anyerror_void_error_union => try writer.writeAll("anyerror!void"), @@ -1990,7 +2010,8 @@ pub const Type = extern union { const func = ty.castTag(.error_set_inferred).?.data.func; try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); - try func.owner_decl.renderFullyQualifiedName(writer); + const owner_decl = mod.declPtr(func.owner_decl); + try owner_decl.renderFullyQualifiedName(mod, writer); try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); }, @@ -1999,7 +2020,7 @@ pub const Type = extern union { try writer.writeAll("fn("); for (fn_info.param_types) |param_ty, i| { if (i != 0) try writer.writeAll(", "); - try print(param_ty, writer, target); + try print(param_ty, writer, mod); } if (fn_info.is_var_args) { if (fn_info.param_types.len != 0) { @@ -2016,14 +2037,14 @@ pub const Type = extern union { if (fn_info.alignment != 0) { try writer.print("align({d}) ", .{fn_info.alignment}); } - try print(fn_info.return_type, writer, target); + try print(fn_info.return_type, writer, mod); }, .error_union => { const error_union = ty.castTag(.error_union).?.data; - try print(error_union.error_set, writer, target); + try print(error_union.error_set, writer, mod); try writer.writeAll("!"); - try print(error_union.payload, writer, target); + try print(error_union.payload, writer, mod); }, .array_u8 => { @@ -2037,21 +2058,21 @@ pub const Type = extern union { .vector => { const payload = ty.castTag(.vector).?.data; try writer.print("@Vector({d}, ", .{payload.len}); - try print(payload.elem_type, writer, target); + try print(payload.elem_type, writer, mod); try writer.writeAll(")"); }, .array => 
{ const payload = ty.castTag(.array).?.data; try writer.print("[{d}]", .{payload.len}); - try print(payload.elem_type, writer, target); + try print(payload.elem_type, writer, mod); }, .array_sentinel => { const payload = ty.castTag(.array_sentinel).?.data; try writer.print("[{d}:{}]", .{ payload.len, - payload.sentinel.fmtValue(payload.elem_type, target), + payload.sentinel.fmtValue(payload.elem_type, mod), }); - try print(payload.elem_type, writer, target); + try print(payload.elem_type, writer, mod); }, .tuple => { const tuple = ty.castTag(.tuple).?.data; @@ -2063,9 +2084,9 @@ pub const Type = extern union { if (val.tag() != .unreachable_value) { try writer.writeAll("comptime "); } - try print(field_ty, writer, target); + try print(field_ty, writer, mod); if (val.tag() != .unreachable_value) { - try writer.print(" = {}", .{val.fmtValue(field_ty, target)}); + try writer.print(" = {}", .{val.fmtValue(field_ty, mod)}); } } try writer.writeAll("}"); @@ -2083,10 +2104,10 @@ pub const Type = extern union { try writer.writeAll(anon_struct.names[i]); try writer.writeAll(": "); - try print(field_ty, writer, target); + try print(field_ty, writer, mod); if (val.tag() != .unreachable_value) { - try writer.print(" = {}", .{val.fmtValue(field_ty, target)}); + try writer.print(" = {}", .{val.fmtValue(field_ty, mod)}); } } try writer.writeAll("}"); @@ -2106,8 +2127,8 @@ pub const Type = extern union { if (info.sentinel) |s| switch (info.size) { .One, .C => unreachable, - .Many => try writer.print("[*:{}]", .{s.fmtValue(info.pointee_type, target)}), - .Slice => try writer.print("[:{}]", .{s.fmtValue(info.pointee_type, target)}), + .Many => try writer.print("[*:{}]", .{s.fmtValue(info.pointee_type, mod)}), + .Slice => try writer.print("[:{}]", .{s.fmtValue(info.pointee_type, mod)}), } else switch (info.size) { .One => try writer.writeAll("*"), .Many => try writer.writeAll("[*]"), @@ -2129,7 +2150,7 @@ pub const Type = extern union { if (info.@"volatile") try writer.writeAll("volatile "); if (info.@"allowzero" and info.size != .C) try writer.writeAll("allowzero "); - try print(info.pointee_type, writer, target); + try print(info.pointee_type, writer, mod); }, .int_signed => { @@ -2143,22 +2164,22 @@ pub const Type = extern union { .optional => { const child_type = ty.castTag(.optional).?.data; try writer.writeByte('?'); - try print(child_type, writer, target); + try print(child_type, writer, mod); }, .optional_single_mut_pointer => { const pointee_type = ty.castTag(.optional_single_mut_pointer).?.data; try writer.writeAll("?*"); - try print(pointee_type, writer, target); + try print(pointee_type, writer, mod); }, .optional_single_const_pointer => { const pointee_type = ty.castTag(.optional_single_const_pointer).?.data; try writer.writeAll("?*const "); - try print(pointee_type, writer, target); + try print(pointee_type, writer, mod); }, .anyframe_T => { const return_type = ty.castTag(.anyframe_T).?.data; try writer.print("anyframe->", .{}); - try print(return_type, writer, target); + try print(return_type, writer, mod); }, .error_set => { const names = ty.castTag(.error_set).?.data.names.keys(); @@ -3834,8 +3855,8 @@ pub const Type = extern union { /// For [*]T, returns *T /// For []T, returns *T /// Handles const-ness and address spaces in particular. 
- pub fn elemPtrType(ptr_ty: Type, arena: Allocator, target: Target) !Type { - return try Type.ptr(arena, target, .{ + pub fn elemPtrType(ptr_ty: Type, arena: Allocator, mod: *Module) !Type { + return try Type.ptr(arena, mod, .{ .pointee_type = ptr_ty.elemType2(), .mutable = ptr_ty.ptrIsMutable(), .@"addrspace" = ptr_ty.ptrAddressSpace(), @@ -3948,9 +3969,9 @@ pub const Type = extern union { return union_obj.fields; } - pub fn unionFieldType(ty: Type, enum_tag: Value, target: Target) Type { + pub fn unionFieldType(ty: Type, enum_tag: Value, mod: *Module) Type { const union_obj = ty.cast(Payload.Union).?.data; - const index = union_obj.tag_ty.enumTagFieldIndex(enum_tag, target).?; + const index = union_obj.tag_ty.enumTagFieldIndex(enum_tag, mod).?; assert(union_obj.haveFieldTypes()); return union_obj.fields.values()[index].ty; } @@ -4970,20 +4991,20 @@ pub const Type = extern union { /// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or /// an integer which represents the enum value. Returns the field index in /// declaration order, or `null` if `enum_tag` does not match any field. - pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, target: Target) ?usize { + pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?usize { if (enum_tag.castTag(.enum_field_index)) |payload| { return @as(usize, payload.data); } const S = struct { - fn fieldWithRange(int_ty: Type, int_val: Value, end: usize, tg: Target) ?usize { + fn fieldWithRange(int_ty: Type, int_val: Value, end: usize, m: *Module) ?usize { if (int_val.compareWithZero(.lt)) return null; var end_payload: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = end, }; const end_val = Value.initPayload(&end_payload.base); - if (int_val.compare(.gte, end_val, int_ty, tg)) return null; - return @intCast(usize, int_val.toUnsignedInt(tg)); + if (int_val.compare(.gte, end_val, int_ty, m)) return null; + return @intCast(usize, int_val.toUnsignedInt(m.getTarget())); } }; switch (ty.tag()) { @@ -4991,11 +5012,11 @@ pub const Type = extern union { const enum_full = ty.cast(Payload.EnumFull).?.data; const tag_ty = enum_full.tag_ty; if (enum_full.values.count() == 0) { - return S.fieldWithRange(tag_ty, enum_tag, enum_full.fields.count(), target); + return S.fieldWithRange(tag_ty, enum_tag, enum_full.fields.count(), mod); } else { return enum_full.values.getIndexContext(enum_tag, .{ .ty = tag_ty, - .target = target, + .mod = mod, }); } }, @@ -5003,11 +5024,11 @@ pub const Type = extern union { const enum_obj = ty.castTag(.enum_numbered).?.data; const tag_ty = enum_obj.tag_ty; if (enum_obj.values.count() == 0) { - return S.fieldWithRange(tag_ty, enum_tag, enum_obj.fields.count(), target); + return S.fieldWithRange(tag_ty, enum_tag, enum_obj.fields.count(), mod); } else { return enum_obj.values.getIndexContext(enum_tag, .{ .ty = tag_ty, - .target = target, + .mod = mod, }); } }, @@ -5020,7 +5041,7 @@ pub const Type = extern union { .data = bits, }; const tag_ty = Type.initPayload(&buffer.base); - return S.fieldWithRange(tag_ty, enum_tag, fields_len, target); + return S.fieldWithRange(tag_ty, enum_tag, fields_len, mod); }, .atomic_order, .atomic_rmw_op, @@ -5224,32 +5245,35 @@ pub const Type = extern union { } } - pub fn declSrcLoc(ty: Type) Module.SrcLoc { - return declSrcLocOrNull(ty).?; + pub fn declSrcLoc(ty: Type, mod: *Module) Module.SrcLoc { + return declSrcLocOrNull(ty, mod).?; } - pub fn declSrcLocOrNull(ty: Type) ?Module.SrcLoc { + pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc { switch 
(ty.tag()) { .enum_full, .enum_nonexhaustive => { const enum_full = ty.cast(Payload.EnumFull).?.data; - return enum_full.srcLoc(); + return enum_full.srcLoc(mod); + }, + .enum_numbered => { + const enum_numbered = ty.castTag(.enum_numbered).?.data; + return enum_numbered.srcLoc(mod); }, - .enum_numbered => return ty.castTag(.enum_numbered).?.data.srcLoc(), .enum_simple => { const enum_simple = ty.castTag(.enum_simple).?.data; - return enum_simple.srcLoc(); + return enum_simple.srcLoc(mod); }, .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; - return struct_obj.srcLoc(); + return struct_obj.srcLoc(mod); }, .error_set => { const error_set = ty.castTag(.error_set).?.data; - return error_set.srcLoc(); + return error_set.srcLoc(mod); }, .@"union", .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.srcLoc(); + return union_obj.srcLoc(mod); }, .atomic_order, .atomic_rmw_op, @@ -5268,7 +5292,7 @@ pub const Type = extern union { } } - pub fn getOwnerDecl(ty: Type) *Module.Decl { + pub fn getOwnerDecl(ty: Type) Module.Decl.Index { switch (ty.tag()) { .enum_full, .enum_nonexhaustive => { const enum_full = ty.cast(Payload.EnumFull).?.data; @@ -5357,30 +5381,30 @@ pub const Type = extern union { } /// Asserts the type is an enum. - pub fn enumHasInt(ty: Type, int: Value, target: Target) bool { + pub fn enumHasInt(ty: Type, int: Value, mod: *Module) bool { const S = struct { - fn intInRange(tag_ty: Type, int_val: Value, end: usize, tg: Target) bool { + fn intInRange(tag_ty: Type, int_val: Value, end: usize, m: *Module) bool { if (int_val.compareWithZero(.lt)) return false; var end_payload: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = end, }; const end_val = Value.initPayload(&end_payload.base); - if (int_val.compare(.gte, end_val, tag_ty, tg)) return false; + if (int_val.compare(.gte, end_val, tag_ty, m)) return false; return true; } }; switch (ty.tag()) { - .enum_nonexhaustive => return int.intFitsInType(ty, target), + .enum_nonexhaustive => return int.intFitsInType(ty, mod.getTarget()), .enum_full => { const enum_full = ty.castTag(.enum_full).?.data; const tag_ty = enum_full.tag_ty; if (enum_full.values.count() == 0) { - return S.intInRange(tag_ty, int, enum_full.fields.count(), target); + return S.intInRange(tag_ty, int, enum_full.fields.count(), mod); } else { return enum_full.values.containsContext(int, .{ .ty = tag_ty, - .target = target, + .mod = mod, }); } }, @@ -5388,11 +5412,11 @@ pub const Type = extern union { const enum_obj = ty.castTag(.enum_numbered).?.data; const tag_ty = enum_obj.tag_ty; if (enum_obj.values.count() == 0) { - return S.intInRange(tag_ty, int, enum_obj.fields.count(), target); + return S.intInRange(tag_ty, int, enum_obj.fields.count(), mod); } else { return enum_obj.values.containsContext(int, .{ .ty = tag_ty, - .target = target, + .mod = mod, }); } }, @@ -5405,7 +5429,7 @@ pub const Type = extern union { .data = bits, }; const tag_ty = Type.initPayload(&buffer.base); - return S.intInRange(tag_ty, int, fields_len, target); + return S.intInRange(tag_ty, int, fields_len, mod); }, .atomic_order, .atomic_rmw_op, @@ -5937,7 +5961,9 @@ pub const Type = extern union { pub const @"anyopaque" = initTag(.anyopaque); pub const @"null" = initTag(.@"null"); - pub fn ptr(arena: Allocator, target: Target, data: Payload.Pointer.Data) !Type { + pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type { + const target = mod.getTarget(); + var d = data; if (d.size == .C) { @@ -5967,7 +5993,7 @@ pub const 
Type = extern union { d.bit_offset == 0 and d.host_size == 0 and !d.@"allowzero" and !d.@"volatile") { if (d.sentinel) |sent| { - if (!d.mutable and d.pointee_type.eql(Type.u8, target)) { + if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) { switch (d.size) { .Slice => { if (sent.compareWithZero(.eq)) { @@ -5982,7 +6008,7 @@ pub const Type = extern union { else => {}, } } - } else if (!d.mutable and d.pointee_type.eql(Type.u8, target)) { + } else if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) { switch (d.size) { .Slice => return Type.initTag(.const_slice_u8), .Many => return Type.initTag(.manyptr_const_u8), @@ -6016,11 +6042,11 @@ pub const Type = extern union { len: u64, sent: ?Value, elem_type: Type, - target: Target, + mod: *Module, ) Allocator.Error!Type { - if (elem_type.eql(Type.u8, target)) { + if (elem_type.eql(Type.u8, mod)) { if (sent) |some| { - if (some.eql(Value.zero, elem_type, target)) { + if (some.eql(Value.zero, elem_type, mod)) { return Tag.array_u8_sentinel_0.create(arena, len); } } else { @@ -6067,11 +6093,11 @@ pub const Type = extern union { arena: Allocator, error_set: Type, payload: Type, - target: Target, + mod: *Module, ) Allocator.Error!Type { assert(error_set.zigTypeTag() == .ErrorSet); - if (error_set.eql(Type.@"anyerror", target) and - payload.eql(Type.void, target)) + if (error_set.eql(Type.@"anyerror", mod) and + payload.eql(Type.void, mod)) { return Type.initTag(.anyerror_void_error_union); } diff --git a/src/value.zig b/src/value.zig index ff2c0c271b..bb7b742290 100644 --- a/src/value.zig +++ b/src/value.zig @@ -731,16 +731,16 @@ pub const Value = extern union { .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream), .int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), .int_big_negative => return out_stream.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), - .function => return out_stream.print("(function '{s}')", .{val.castTag(.function).?.data.owner_decl.name}), + .function => return out_stream.print("(function decl={d})", .{val.castTag(.function).?.data.owner_decl}), .extern_fn => return out_stream.writeAll("(extern function)"), .variable => return out_stream.writeAll("(variable)"), .decl_ref_mut => { - const decl = val.castTag(.decl_ref_mut).?.data.decl; - return out_stream.print("(decl_ref_mut '{s}')", .{decl.name}); + const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; + return out_stream.print("(decl_ref_mut {d})", .{decl_index}); }, .decl_ref => { - const decl = val.castTag(.decl_ref).?.data; - return out_stream.print("(decl ref '{s}')", .{decl.name}); + const decl_index = val.castTag(.decl_ref).?.data; + return out_stream.print("(decl_ref {d})", .{decl_index}); }, .elem_ptr => { const elem_ptr = val.castTag(.elem_ptr).?.data; @@ -798,16 +798,17 @@ pub const Value = extern union { return .{ .data = val }; } - pub fn fmtValue(val: Value, ty: Type, target: Target) std.fmt.Formatter(TypedValue.format) { + pub fn fmtValue(val: Value, ty: Type, mod: *Module) std.fmt.Formatter(TypedValue.format) { return .{ .data = .{ .tv = .{ .ty = ty, .val = val }, - .target = target, + .mod = mod, } }; } /// Asserts that the value is representable as an array of bytes. /// Copies the value into a freshly allocated slice of memory, which is owned by the caller. 
- pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, target: Target) ![]u8 { + pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 { + const target = mod.getTarget(); switch (val.tag()) { .bytes => { const bytes = val.castTag(.bytes).?.data; @@ -823,25 +824,26 @@ pub const Value = extern union { return result; }, .decl_ref => { - const decl = val.castTag(.decl_ref).?.data; + const decl_index = val.castTag(.decl_ref).?.data; + const decl = mod.declPtr(decl_index); const decl_val = try decl.value(); - return decl_val.toAllocatedBytes(decl.ty, allocator, target); + return decl_val.toAllocatedBytes(decl.ty, allocator, mod); }, .the_only_possible_value => return &[_]u8{}, .slice => { const slice = val.castTag(.slice).?.data; - return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(target), allocator, target); + return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(target), allocator, mod); }, - else => return arrayToAllocatedBytes(val, ty.arrayLen(), allocator, target), + else => return arrayToAllocatedBytes(val, ty.arrayLen(), allocator, mod), } } - fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, target: Target) ![]u8 { + fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 { const result = try allocator.alloc(u8, @intCast(usize, len)); var elem_value_buf: ElemValueBuffer = undefined; for (result) |*elem, i| { - const elem_val = val.elemValueBuffer(i, &elem_value_buf); - elem.* = @intCast(u8, elem_val.toUnsignedInt(target)); + const elem_val = val.elemValueBuffer(mod, i, &elem_value_buf); + elem.* = @intCast(u8, elem_val.toUnsignedInt(mod.getTarget())); } return result; } @@ -1164,7 +1166,7 @@ pub const Value = extern union { var elem_value_buf: ElemValueBuffer = undefined; var buf_off: usize = 0; while (elem_i < len) : (elem_i += 1) { - const elem_val = val.elemValueBuffer(elem_i, &elem_value_buf); + const elem_val = val.elemValueBuffer(mod, elem_i, &elem_value_buf); writeToMemory(elem_val, elem_ty, mod, buffer[buf_off..]); buf_off += elem_size; } @@ -1975,34 +1977,47 @@ pub const Value = extern union { /// Asserts the values are comparable. Both operands have type `ty`. /// Vector results will be reduced with AND. - pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, target: Target) bool { + pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) bool { if (ty.zigTypeTag() == .Vector) { var i: usize = 0; while (i < ty.vectorLen()) : (i += 1) { - if (!compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType(), target)) { + if (!compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType(), mod)) { return false; } } return true; } - return compareScalar(lhs, op, rhs, ty, target); + return compareScalar(lhs, op, rhs, ty, mod); } /// Asserts the values are comparable. Both operands have type `ty`. - pub fn compareScalar(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, target: Target) bool { + pub fn compareScalar( + lhs: Value, + op: std.math.CompareOperator, + rhs: Value, + ty: Type, + mod: *Module, + ) bool { return switch (op) { - .eq => lhs.eql(rhs, ty, target), - .neq => !lhs.eql(rhs, ty, target), - else => compareHetero(lhs, op, rhs, target), + .eq => lhs.eql(rhs, ty, mod), + .neq => !lhs.eql(rhs, ty, mod), + else => compareHetero(lhs, op, rhs, mod.getTarget()), }; } /// Asserts the values are comparable vectors of type `ty`. 
- pub fn compareVector(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value { + pub fn compareVector( + lhs: Value, + op: std.math.CompareOperator, + rhs: Value, + ty: Type, + allocator: Allocator, + mod: *Module, + ) !Value { assert(ty.zigTypeTag() == .Vector); const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - const res_bool = compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType(), target); + const res_bool = compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType(), mod); scalar.* = if (res_bool) Value.@"true" else Value.@"false"; } return Value.Tag.aggregate.create(allocator, result_data); @@ -2032,7 +2047,8 @@ pub const Value = extern union { /// for `a`. This function must act *as if* `a` has been coerced to `ty`. This complication /// is required in order to make generic function instantiation efficient - specifically /// the insertion into the monomorphized function table. - pub fn eql(a: Value, b: Value, ty: Type, target: Target) bool { + pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool { + const target = mod.getTarget(); const a_tag = a.tag(); const b_tag = b.tag(); if (a_tag == b_tag) switch (a_tag) { @@ -2052,31 +2068,31 @@ pub const Value = extern union { const a_payload = a.castTag(.opt_payload).?.data; const b_payload = b.castTag(.opt_payload).?.data; var buffer: Type.Payload.ElemType = undefined; - return eql(a_payload, b_payload, ty.optionalChild(&buffer), target); + return eql(a_payload, b_payload, ty.optionalChild(&buffer), mod); }, .slice => { const a_payload = a.castTag(.slice).?.data; const b_payload = b.castTag(.slice).?.data; - if (!eql(a_payload.len, b_payload.len, Type.usize, target)) return false; + if (!eql(a_payload.len, b_payload.len, Type.usize, mod)) return false; var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = ty.slicePtrFieldType(&ptr_buf); - return eql(a_payload.ptr, b_payload.ptr, ptr_ty, target); + return eql(a_payload.ptr, b_payload.ptr, ptr_ty, mod); }, .elem_ptr => { const a_payload = a.castTag(.elem_ptr).?.data; const b_payload = b.castTag(.elem_ptr).?.data; if (a_payload.index != b_payload.index) return false; - return eql(a_payload.array_ptr, b_payload.array_ptr, ty, target); + return eql(a_payload.array_ptr, b_payload.array_ptr, ty, mod); }, .field_ptr => { const a_payload = a.castTag(.field_ptr).?.data; const b_payload = b.castTag(.field_ptr).?.data; if (a_payload.field_index != b_payload.field_index) return false; - return eql(a_payload.container_ptr, b_payload.container_ptr, ty, target); + return eql(a_payload.container_ptr, b_payload.container_ptr, ty, mod); }, .@"error" => { const a_name = a.castTag(.@"error").?.data.name; @@ -2086,7 +2102,7 @@ pub const Value = extern union { .eu_payload => { const a_payload = a.castTag(.eu_payload).?.data; const b_payload = b.castTag(.eu_payload).?.data; - return eql(a_payload, b_payload, ty.errorUnionPayload(), target); + return eql(a_payload, b_payload, ty.errorUnionPayload(), mod); }, .eu_payload_ptr => @panic("TODO: Implement more pointer eql cases"), .opt_payload_ptr => @panic("TODO: Implement more pointer eql cases"), @@ -2104,7 +2120,7 @@ pub const Value = extern union { const types = ty.tupleFields().types; assert(types.len == a_field_vals.len); for (types) |field_ty, i| { - if (!eql(a_field_vals[i], b_field_vals[i], field_ty, target)) return false; + if (!eql(a_field_vals[i], b_field_vals[i], field_ty, mod))
return false; } return true; } @@ -2113,7 +2129,7 @@ pub const Value = extern union { const fields = ty.structFields().values(); assert(fields.len == a_field_vals.len); for (fields) |field, i| { - if (!eql(a_field_vals[i], b_field_vals[i], field.ty, target)) return false; + if (!eql(a_field_vals[i], b_field_vals[i], field.ty, mod)) return false; } return true; } @@ -2122,7 +2138,7 @@ pub const Value = extern union { for (a_field_vals) |a_elem, i| { const b_elem = b_field_vals[i]; - if (!eql(a_elem, b_elem, elem_ty, target)) return false; + if (!eql(a_elem, b_elem, elem_ty, mod)) return false; } return true; }, @@ -2132,7 +2148,7 @@ pub const Value = extern union { switch (ty.containerLayout()) { .Packed, .Extern => { const tag_ty = ty.unionTagTypeHypothetical(); - if (!a_union.tag.eql(b_union.tag, tag_ty, target)) { + if (!a_union.tag.eql(b_union.tag, tag_ty, mod)) { // In this case, we must disregard mismatching tags and compare // based on the in-memory bytes of the payloads. @panic("TODO comptime comparison of extern union values with mismatching tags"); @@ -2140,13 +2156,13 @@ pub const Value = extern union { }, .Auto => { const tag_ty = ty.unionTagTypeHypothetical(); - if (!a_union.tag.eql(b_union.tag, tag_ty, target)) { + if (!a_union.tag.eql(b_union.tag, tag_ty, mod)) { return false; } }, } - const active_field_ty = ty.unionFieldType(a_union.tag, target); - return a_union.val.eql(b_union.val, active_field_ty, target); + const active_field_ty = ty.unionFieldType(a_union.tag, mod); + return a_union.val.eql(b_union.val, active_field_ty, mod); }, else => {}, } else if (a_tag == .null_value or b_tag == .null_value) { @@ -2171,7 +2187,7 @@ pub const Value = extern union { var buf_b: ToTypeBuffer = undefined; const a_type = a.toType(&buf_a); const b_type = b.toType(&buf_b); - return a_type.eql(b_type, target); + return a_type.eql(b_type, mod); }, .Enum => { var buf_a: Payload.U64 = undefined; @@ -2180,7 +2196,7 @@ pub const Value = extern union { const b_val = b.enumToInt(ty, &buf_b); var buf_ty: Type.Payload.Bits = undefined; const int_ty = ty.intTagType(&buf_ty); - return eql(a_val, b_val, int_ty, target); + return eql(a_val, b_val, int_ty, mod); }, .Array, .Vector => { const len = ty.arrayLen(); @@ -2189,9 +2205,9 @@ pub const Value = extern union { var a_buf: ElemValueBuffer = undefined; var b_buf: ElemValueBuffer = undefined; while (i < len) : (i += 1) { - const a_elem = elemValueBuffer(a, i, &a_buf); - const b_elem = elemValueBuffer(b, i, &b_buf); - if (!eql(a_elem, b_elem, elem_ty, target)) return false; + const a_elem = elemValueBuffer(a, mod, i, &a_buf); + const b_elem = elemValueBuffer(b, mod, i, &b_buf); + if (!eql(a_elem, b_elem, elem_ty, mod)) return false; } return true; }, @@ -2215,7 +2231,7 @@ pub const Value = extern union { .base = .{ .tag = .opt_payload }, .data = a, }; - return eql(Value.initPayload(&buffer.base), b, ty, target); + return eql(Value.initPayload(&buffer.base), b, ty, mod); } }, else => {}, @@ -2225,7 +2241,7 @@ pub const Value = extern union { /// This function is used by hash maps and so treats floating-point NaNs as equal /// to each other, and not equal to other floating-point values. 
-    pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash, target: Target) void {
+    pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void {
         const zig_ty_tag = ty.zigTypeTag();
         std.hash.autoHash(hasher, zig_ty_tag);
         if (val.isUndef()) return;
@@ -2242,7 +2258,7 @@ pub const Value = extern union {
 
             .Type => {
                 var buf: ToTypeBuffer = undefined;
-                return val.toType(&buf).hashWithHasher(hasher, target);
+                return val.toType(&buf).hashWithHasher(hasher, mod);
             },
             .Float, .ComptimeFloat => {
                 // Normalize the float here because this hash must match eql semantics.
@@ -2263,11 +2279,11 @@ pub const Value = extern union {
                     const slice = val.castTag(.slice).?.data;
                     var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
                     const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
-                    hash(slice.ptr, ptr_ty, hasher, target);
-                    hash(slice.len, Type.usize, hasher, target);
+                    hash(slice.ptr, ptr_ty, hasher, mod);
+                    hash(slice.len, Type.usize, hasher, mod);
                 },
 
-                else => return hashPtr(val, hasher, target),
+                else => return hashPtr(val, hasher, mod.getTarget()),
             },
             .Array, .Vector => {
                 const len = ty.arrayLen();
@@ -2275,15 +2291,15 @@ pub const Value = extern union {
                 var index: usize = 0;
                 var elem_value_buf: ElemValueBuffer = undefined;
                 while (index < len) : (index += 1) {
-                    const elem_val = val.elemValueBuffer(index, &elem_value_buf);
-                    elem_val.hash(elem_ty, hasher, target);
+                    const elem_val = val.elemValueBuffer(mod, index, &elem_value_buf);
+                    elem_val.hash(elem_ty, hasher, mod);
                 }
             },
             .Struct => {
                 if (ty.isTupleOrAnonStruct()) {
                     const fields = ty.tupleFields();
                     for (fields.values) |field_val, i| {
-                        field_val.hash(fields.types[i], hasher, target);
+                        field_val.hash(fields.types[i], hasher, mod);
                     }
                     return;
                 }
@@ -2292,13 +2308,13 @@ pub const Value = extern union {
                 switch (val.tag()) {
                     .empty_struct_value => {
                         for (fields) |field| {
-                            field.default_val.hash(field.ty, hasher, target);
+                            field.default_val.hash(field.ty, hasher, mod);
                        }
                    },
                    .aggregate => {
                        const field_values = val.castTag(.aggregate).?.data;
                        for (field_values) |field_val, i| {
-                            field_val.hash(fields[i].ty, hasher, target);
+                            field_val.hash(fields[i].ty, hasher, mod);
                        }
                    },
                    else => unreachable,
@@ -2310,7 +2326,7 @@ pub const Value = extern union {
                     const sub_val = payload.data;
                     var buffer: Type.Payload.ElemType = undefined;
                     const sub_ty = ty.optionalChild(&buffer);
-                    sub_val.hash(sub_ty, hasher, target);
+                    sub_val.hash(sub_ty, hasher, mod);
                 } else {
                     std.hash.autoHash(hasher, false); // non-null
                 }
@@ -2319,14 +2335,14 @@ pub const Value = extern union {
                 if (val.tag() == .@"error") {
                     std.hash.autoHash(hasher, false); // error
                     const sub_ty = ty.errorUnionSet();
-                    val.hash(sub_ty, hasher, target);
+                    val.hash(sub_ty, hasher, mod);
                     return;
                 }
 
                 if (val.castTag(.eu_payload)) |payload| {
                     std.hash.autoHash(hasher, true); // payload
                     const sub_ty = ty.errorUnionPayload();
-                    payload.data.hash(sub_ty, hasher, target);
+                    payload.data.hash(sub_ty, hasher, mod);
                     return;
                 } else unreachable;
             },
@@ -2339,15 +2355,15 @@ pub const Value = extern union {
             .Enum => {
                 var enum_space: Payload.U64 = undefined;
                 const int_val = val.enumToInt(ty, &enum_space);
-                hashInt(int_val, hasher, target);
+                hashInt(int_val, hasher, mod.getTarget());
             },
             .Union => {
                 const union_obj = val.cast(Payload.Union).?.data;
                 if (ty.unionTagType()) |tag_ty| {
-                    union_obj.tag.hash(tag_ty, hasher, target);
+                    union_obj.tag.hash(tag_ty, hasher, mod);
                 }
-                const active_field_ty = ty.unionFieldType(union_obj.tag, target);
-                union_obj.val.hash(active_field_ty, hasher, target);
+                const active_field_ty = ty.unionFieldType(union_obj.tag, mod);
+                union_obj.val.hash(active_field_ty, hasher, mod);
             },
             .Fn => {
                 const func: *Module.Fn = val.castTag(.function).?.data;
@@ -2372,30 +2388,30 @@ pub const Value = extern union {
 
     pub const ArrayHashContext = struct {
         ty: Type,
-        target: Target,
+        mod: *Module,
 
         pub fn hash(self: @This(), val: Value) u32 {
-            const other_context: HashContext = .{ .ty = self.ty, .target = self.target };
+            const other_context: HashContext = .{ .ty = self.ty, .mod = self.mod };
             return @truncate(u32, other_context.hash(val));
         }
 
        pub fn eql(self: @This(), a: Value, b: Value, b_index: usize) bool {
            _ = b_index;
-            return a.eql(b, self.ty, self.target);
+            return a.eql(b, self.ty, self.mod);
        }
    };
 
    pub const HashContext = struct {
        ty: Type,
-        target: Target,
+        mod: *Module,
 
        pub fn hash(self: @This(), val: Value) u64 {
            var hasher = std.hash.Wyhash.init(0);
-            val.hash(self.ty, &hasher, self.target);
+            val.hash(self.ty, &hasher, self.mod);
            return hasher.final();
        }
 
        pub fn eql(self: @This(), a: Value, b: Value) bool {
-            return a.eql(b, self.ty, self.target);
+            return a.eql(b, self.ty, self.mod);
        }
    };
 
@@ -2434,9 +2450,9 @@ pub const Value = extern union {
     /// Gets the decl referenced by this pointer. If the pointer does not point
     /// to a decl, or if it points to some part of a decl (like field_ptr or element_ptr),
     /// this function returns null.
-    pub fn pointerDecl(val: Value) ?*Module.Decl {
+    pub fn pointerDecl(val: Value) ?Module.Decl.Index {
         return switch (val.tag()) {
-            .decl_ref_mut => val.castTag(.decl_ref_mut).?.data.decl,
+            .decl_ref_mut => val.castTag(.decl_ref_mut).?.data.decl_index,
             .extern_fn => val.castTag(.extern_fn).?.data.owner_decl,
             .function => val.castTag(.function).?.data.owner_decl,
             .variable => val.castTag(.variable).?.data.owner_decl,
@@ -2462,7 +2478,7 @@ pub const Value = extern union {
             .function,
             .variable,
             => {
-                const decl: *Module.Decl = ptr_val.pointerDecl().?;
+                const decl: Module.Decl.Index = ptr_val.pointerDecl().?;
                 std.hash.autoHash(hasher, decl);
             },
 
@@ -2505,53 +2521,6 @@ pub const Value = extern union {
         }
     }
 
-    pub fn markReferencedDeclsAlive(val: Value) void {
-        switch (val.tag()) {
-            .decl_ref_mut => return val.castTag(.decl_ref_mut).?.data.decl.markAlive(),
-            .extern_fn => return val.castTag(.extern_fn).?.data.owner_decl.markAlive(),
-            .function => return val.castTag(.function).?.data.owner_decl.markAlive(),
-            .variable => return val.castTag(.variable).?.data.owner_decl.markAlive(),
-            .decl_ref => return val.cast(Payload.Decl).?.data.markAlive(),
-
-            .repeated,
-            .eu_payload,
-            .opt_payload,
-            .empty_array_sentinel,
-            => return markReferencedDeclsAlive(val.cast(Payload.SubValue).?.data),
-
-            .eu_payload_ptr,
-            .opt_payload_ptr,
-            => return markReferencedDeclsAlive(val.cast(Payload.PayloadPtr).?.data.container_ptr),
-
-            .slice => {
-                const slice = val.cast(Payload.Slice).?.data;
-                markReferencedDeclsAlive(slice.ptr);
-                markReferencedDeclsAlive(slice.len);
-            },
-
-            .elem_ptr => {
-                const elem_ptr = val.cast(Payload.ElemPtr).?.data;
-                return markReferencedDeclsAlive(elem_ptr.array_ptr);
-            },
-            .field_ptr => {
-                const field_ptr = val.cast(Payload.FieldPtr).?.data;
-                return markReferencedDeclsAlive(field_ptr.container_ptr);
-            },
-            .aggregate => {
-                for (val.castTag(.aggregate).?.data) |field_val| {
-                    markReferencedDeclsAlive(field_val);
-                }
-            },
-            .@"union" => {
-                const data = val.cast(Payload.Union).?.data;
-                markReferencedDeclsAlive(data.tag);
-                markReferencedDeclsAlive(data.val);
-            },
-
-            else => {},
-        }
-    }
-
     pub fn slicePtr(val: Value) Value {
         return switch (val.tag()) {
             .slice => val.castTag(.slice).?.data.ptr,
@@ -2561,11 +2530,12 @@ pub const Value = extern union {
         };
     }
 
-    pub fn sliceLen(val: Value, target: Target) u64 {
+    pub fn sliceLen(val: Value, mod: *Module) u64 {
         return switch (val.tag()) {
-            .slice => val.castTag(.slice).?.data.len.toUnsignedInt(target),
+            .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod.getTarget()),
             .decl_ref => {
-                const decl = val.castTag(.decl_ref).?.data;
+                const decl_index = val.castTag(.decl_ref).?.data;
+                const decl = mod.declPtr(decl_index);
                 if (decl.ty.zigTypeTag() == .Array) {
                     return decl.ty.arrayLen();
                 } else {
@@ -2599,18 +2569,19 @@ pub const Value = extern union {
 
     /// Asserts the value is a single-item pointer to an array, or an array,
     /// or an unknown-length pointer, and returns the element value at the index.
-    pub fn elemValue(val: Value, arena: Allocator, index: usize) !Value {
-        return elemValueAdvanced(val, index, arena, undefined);
+    pub fn elemValue(val: Value, mod: *Module, arena: Allocator, index: usize) !Value {
+        return elemValueAdvanced(val, mod, index, arena, undefined);
     }
 
     pub const ElemValueBuffer = Payload.U64;
 
-    pub fn elemValueBuffer(val: Value, index: usize, buffer: *ElemValueBuffer) Value {
-        return elemValueAdvanced(val, index, null, buffer) catch unreachable;
+    pub fn elemValueBuffer(val: Value, mod: *Module, index: usize, buffer: *ElemValueBuffer) Value {
+        return elemValueAdvanced(val, mod, index, null, buffer) catch unreachable;
     }
 
     pub fn elemValueAdvanced(
         val: Value,
+        mod: *Module,
         index: usize,
         arena: ?Allocator,
         buffer: *ElemValueBuffer,
@@ -2643,13 +2614,13 @@ pub const Value = extern union {
             .repeated => return val.castTag(.repeated).?.data,
             .aggregate => return val.castTag(.aggregate).?.data[index],
 
-            .slice => return val.castTag(.slice).?.data.ptr.elemValueAdvanced(index, arena, buffer),
+            .slice => return val.castTag(.slice).?.data.ptr.elemValueAdvanced(mod, index, arena, buffer),
 
-            .decl_ref => return val.castTag(.decl_ref).?.data.val.elemValueAdvanced(index, arena, buffer),
-            .decl_ref_mut => return val.castTag(.decl_ref_mut).?.data.decl.val.elemValueAdvanced(index, arena, buffer),
+            .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValueAdvanced(mod, index, arena, buffer),
+            .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValueAdvanced(mod, index, arena, buffer),
             .elem_ptr => {
                 const data = val.castTag(.elem_ptr).?.data;
-                return data.array_ptr.elemValueAdvanced(index + data.index, arena, buffer);
+                return data.array_ptr.elemValueAdvanced(mod, index + data.index, arena, buffer);
             },
 
             // The child type of arrays which have only one possible value need
@@ -2661,18 +2632,24 @@ pub const Value = extern union {
         }
     }
 
     /// Asserts that the provided start/end are in-bounds.
-    pub fn sliceArray(val: Value, arena: Allocator, start: usize, end: usize) error{OutOfMemory}!Value {
+    pub fn sliceArray(
+        val: Value,
+        mod: *Module,
+        arena: Allocator,
+        start: usize,
+        end: usize,
+    ) error{OutOfMemory}!Value {
         return switch (val.tag()) {
             .empty_array_sentinel => if (start == 0 and end == 1) val else Value.initTag(.empty_array),
             .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]),
             .aggregate => Tag.aggregate.create(arena, val.castTag(.aggregate).?.data[start..end]),
 
-            .slice => sliceArray(val.castTag(.slice).?.data.ptr, arena, start, end),
-            .decl_ref => sliceArray(val.castTag(.decl_ref).?.data.val, arena, start, end),
-            .decl_ref_mut => sliceArray(val.castTag(.decl_ref_mut).?.data.decl.val, arena, start, end),
+            .slice => sliceArray(val.castTag(.slice).?.data.ptr, mod, arena, start, end),
+            .decl_ref => sliceArray(mod.declPtr(val.castTag(.decl_ref).?.data).val, mod, arena, start, end),
+            .decl_ref_mut => sliceArray(mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val, mod, arena, start, end),
             .elem_ptr => blk: {
                 const elem_ptr = val.castTag(.elem_ptr).?.data;
-                break :blk sliceArray(elem_ptr.array_ptr, arena, start + elem_ptr.index, end + elem_ptr.index);
+                break :blk sliceArray(elem_ptr.array_ptr, mod, arena, start + elem_ptr.index, end + elem_ptr.index);
             },
 
             .repeated,
@@ -2718,7 +2695,13 @@ pub const Value = extern union {
     }
 
     /// Returns a pointer to the element value at the index.
-    pub fn elemPtr(val: Value, ty: Type, arena: Allocator, index: usize, target: Target) Allocator.Error!Value {
+    pub fn elemPtr(
+        val: Value,
+        ty: Type,
+        arena: Allocator,
+        index: usize,
+        mod: *Module,
+    ) Allocator.Error!Value {
         const elem_ty = ty.elemType2();
         const ptr_val = switch (val.tag()) {
             .slice => val.castTag(.slice).?.data.ptr,
@@ -2727,7 +2710,7 @@ pub const Value = extern union {
 
         if (ptr_val.tag() == .elem_ptr) {
             const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
-            if (elem_ptr.elem_ty.eql(elem_ty, target)) {
+            if (elem_ptr.elem_ty.eql(elem_ty, mod)) {
                 return Tag.elem_ptr.create(arena, .{
                     .array_ptr = elem_ptr.array_ptr,
                     .elem_ty = elem_ptr.elem_ty,
@@ -5059,7 +5042,7 @@ pub const Value = extern union {
 
         pub const Decl = struct {
             base: Payload,
-            data: *Module.Decl,
+            data: Module.Decl.Index,
         };
 
         pub const Variable = struct {
@@ -5079,7 +5062,7 @@ pub const Value = extern union {
            data: Data,
 
            pub const Data = struct {
-                decl: *Module.Decl,
+                decl_index: Module.Decl.Index,
                runtime_index: u32,
            };
        };
@@ -5215,7 +5198,7 @@ pub const Value = extern union {
 
            base: Payload = .{ .tag = base_tag },
            data: struct {
-                decl: *Module.Decl,
+                decl_index: Module.Decl.Index,
                /// 0 means ABI-aligned.
                alignment: u16,
            },

From 31758f79db2c9e1122fd40bdda2243311830a5d4 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 20 Apr 2022 18:14:38 -0700
Subject: [PATCH 5/5] link: Wasm: don't assume we have a zig module

---
 src/link/Wasm.zig | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index fad7543b0e..3c53e91587 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -429,11 +429,14 @@ pub fn deinit(self: *Wasm) void {
         if (self.llvm_object) |llvm_object| llvm_object.destroy(gpa);
     }
 
-    const mod = self.base.options.module.?;
-    var decl_it = self.decls.keyIterator();
-    while (decl_it.next()) |decl_index_ptr| {
-        const decl = mod.declPtr(decl_index_ptr.*);
-        decl.link.wasm.deinit(gpa);
+    if (self.base.options.module) |mod| {
+        var decl_it = self.decls.keyIterator();
+        while (decl_it.next()) |decl_index_ptr| {
+            const decl = mod.declPtr(decl_index_ptr.*);
+            decl.link.wasm.deinit(gpa);
+        }
+    } else {
+        assert(self.decls.count() == 0);
     }
 
     for (self.func_types.items) |*func_type| {