Merge remote-tracking branch 'origin/master' into llvm12

This commit is contained in:
Andrew Kelley 2021-04-02 12:09:38 -07:00
commit a0e89c9b46
52 changed files with 17153 additions and 11734 deletions

View File

@ -539,7 +539,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/ThreadPool.zig"
"${CMAKE_SOURCE_DIR}/src/TypedValue.zig"
"${CMAKE_SOURCE_DIR}/src/WaitGroup.zig"
"${CMAKE_SOURCE_DIR}/src/astgen.zig"
"${CMAKE_SOURCE_DIR}/src/AstGen.zig"
"${CMAKE_SOURCE_DIR}/src/clang.zig"
"${CMAKE_SOURCE_DIR}/src/clang_options.zig"
"${CMAKE_SOURCE_DIR}/src/clang_options_data.zig"
@ -591,7 +591,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/value.zig"
"${CMAKE_SOURCE_DIR}/src/windows_sdk.zig"
"${CMAKE_SOURCE_DIR}/src/zir.zig"
"${CMAKE_SOURCE_DIR}/src/zir_sema.zig"
"${CMAKE_SOURCE_DIR}/src/Sema.zig"
)
if(MSVC)

View File

@ -1349,7 +1349,7 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
}
const escaped_stderr = try escapeHtml(allocator, result.stderr);
const colored_stderr = try termColor(allocator, escaped_stderr);
try out.print("<pre><code class=\"shell\">$ zig test {s}.zig{s}\n{s}</code></pre>\n", .{
try out.print("<pre><code class=\"shell\">$ zig test {s}.zig {s}\n{s}</code></pre>\n", .{
code.name,
mode_arg,
colored_stderr,

View File

@ -6594,13 +6594,11 @@ const std = @import("std");
const expect = std.testing.expect;
test "async and await" {
// Here we have an exception where we do not match an async
// with an await. The test block is not async and so cannot
// have a suspend point in it.
// This is well-defined behavior, and everything is OK here.
// Note however that there would be no way to collect the
// return value of amain, if it were something other than void.
_ = async amain();
// The test block is not async and so cannot have a suspend
// point in it. By using the nosuspend keyword, we promise that
// the code in amain will finish executing without suspending
// back to the test block.
nosuspend amain();
}
fn amain() void {
@ -10799,9 +10797,16 @@ fn readU32Be() u32 {}
<pre>{#syntax#}nosuspend{#endsyntax#}</pre>
</td>
<td>
The {#syntax#}nosuspend{#endsyntax#} keyword.
The {#syntax#}nosuspend{#endsyntax#} keyword can be used in front of a block, statement, or expression to mark a scope where no suspension points are reached.
In particular, inside a {#syntax#}nosuspend{#endsyntax#} scope:
<ul>
<li>TODO add documentation for nosuspend</li>
<li>Using the {#syntax#}suspend{#endsyntax#} keyword results in a compile error.</li>
<li>Using {#syntax#}await{#endsyntax#} on a function frame which hasn't completed yet results in safety-checked {#link|Undefined Behavior#}.</li>
<li>Calling an async function may result in safety-checked {#link|Undefined Behavior#}, because it's equivalent to <code>await async some_async_fn()</code>, which contains an {#syntax#}await{#endsyntax#}.</li>
</ul>
Code inside a {#syntax#}nosuspend{#endsyntax#} scope does not cause the enclosing function to become an {#link|async function|Async Functions#}.
<ul>
<li>See also {#link|Async Functions#}</li>
</ul>
</td>
</tr>
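For orientation, a minimal sketch of nosuspend at a call site. This is not taken from the diff; double is a hypothetical non-suspending function:

const std = @import("std");

fn double(x: i32) i32 {
    return x * 2;
}

test "nosuspend sketch" {
    // double is an ordinary (non-async) function, so this nosuspend is
    // trivially safe; if double could suspend, reaching a suspend point
    // here would be safety-checked Undefined Behavior.
    const v = nosuspend double(21);
    std.testing.expect(v == 42);
}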

View File

@ -32,7 +32,7 @@ pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_def
.fields = fields,
.decls = &[_]std.builtin.TypeInfo.Declaration{},
.is_tuple = false,
}});
} });
}
/// Looks up the supplied fields in the given enum type.
@ -70,7 +70,7 @@ pub fn values(comptime E: type) []const E {
test "std.enum.values" {
const E = extern enum { a, b, c, d = 0 };
testing.expectEqualSlices(E, &.{.a, .b, .c, .d}, values(E));
testing.expectEqualSlices(E, &.{ .a, .b, .c, .d }, values(E));
}
/// Returns the set of all unique named values in the given enum, in
@ -82,10 +82,10 @@ pub fn uniqueValues(comptime E: type) []const E {
test "std.enum.uniqueValues" {
const E = extern enum { a, b, c, d = 0, e, f = 3 };
testing.expectEqualSlices(E, &.{.a, .b, .c, .f}, uniqueValues(E));
testing.expectEqualSlices(E, &.{ .a, .b, .c, .f }, uniqueValues(E));
const F = enum { a, b, c };
testing.expectEqualSlices(F, &.{.a, .b, .c}, uniqueValues(F));
testing.expectEqualSlices(F, &.{ .a, .b, .c }, uniqueValues(F));
}
/// Returns the set of all unique field values in the given enum, in
@ -102,8 +102,7 @@ pub fn uniqueFields(comptime E: type) []const EnumField {
}
var unique_fields: []const EnumField = &[_]EnumField{};
outer:
for (raw_fields) |candidate| {
outer: for (raw_fields) |candidate| {
for (unique_fields) |u| {
if (u.value == candidate.value)
continue :outer;
@ -116,28 +115,25 @@ pub fn uniqueFields(comptime E: type) []const EnumField {
}
/// Determines the length of a direct-mapped enum array, indexed by
/// @intCast(usize, @enumToInt(enum_value)). The enum must be exhaustive.
/// @intCast(usize, @enumToInt(enum_value)).
/// If the enum is non-exhaustive, the resulting length will only be enough
/// to hold all explicit fields.
/// If the enum contains any fields with values that cannot be represented
/// by usize, a compile error is issued. The max_unused_slots parameter limits
/// the total number of items which have no matching enum key (holes in the enum
/// numbering). So for example, if an enum has values 1, 2, 5, and 6, max_unused_slots
/// must be at least 3, to allow unused slots 0, 3, and 4.
fn directEnumArrayLen(comptime E: type, comptime max_unused_slots: comptime_int) comptime_int {
const info = @typeInfo(E).Enum;
if (!info.is_exhaustive) {
@compileError("Cannot create direct array of non-exhaustive enum "++@typeName(E));
}
var max_value: comptime_int = -1;
const max_usize: comptime_int = ~@as(usize, 0);
const fields = uniqueFields(E);
for (fields) |f| {
if (f.value < 0) {
@compileError("Cannot create a direct enum array for "++@typeName(E)++", field ."++f.name++" has a negative value.");
@compileError("Cannot create a direct enum array for " ++ @typeName(E) ++ ", field ." ++ f.name ++ " has a negative value.");
}
if (f.value > max_value) {
if (f.value > max_usize) {
@compileError("Cannot create a direct enum array for "++@typeName(E)++", field ."++f.name++" is larger than the max value of usize.");
@compileError("Cannot create a direct enum array for " ++ @typeName(E) ++ ", field ." ++ f.name ++ " is larger than the max value of usize.");
}
max_value = f.value;
}
@ -147,14 +143,16 @@ fn directEnumArrayLen(comptime E: type, comptime max_unused_slots: comptime_int)
if (unused_slots > max_unused_slots) {
const unused_str = std.fmt.comptimePrint("{d}", .{unused_slots});
const allowed_str = std.fmt.comptimePrint("{d}", .{max_unused_slots});
@compileError("Cannot create a direct enum array for "++@typeName(E)++". It would have "++unused_str++" unused slots, but only "++allowed_str++" are allowed.");
@compileError("Cannot create a direct enum array for " ++ @typeName(E) ++ ". It would have " ++ unused_str ++ " unused slots, but only " ++ allowed_str ++ " are allowed.");
}
return max_value + 1;
}
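A minimal usage sketch of the public directEnumArray wrapper that this length helper sizes (illustrative, not part of the diff):

test "directEnumArray sketch" {
    const E = enum { a, b, c };
    // The values 0..2 have no numbering holes, so max_unused_slots == 0 suffices.
    const array = directEnumArray(E, u32, 0, .{ .a = 10, .b = 20, .c = 30 });
    testing.expectEqual(@as(u32, 20), array[@enumToInt(E.b)]);
}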
/// Initializes an array of Data which can be indexed by
/// @intCast(usize, @enumToInt(enum_value)). The enum must be exhaustive.
/// @intCast(usize, @enumToInt(enum_value)).
/// If the enum is non-exhaustive, the resulting array will only be large enough
/// to hold all explicit fields.
/// If the enum contains any fields with values that cannot be represented
/// by usize, a compile error is issued. The max_unused_slots parameter limits
/// the total number of items which have no matching enum key (holes in the enum
@ -243,9 +241,9 @@ pub fn nameCast(comptime E: type, comptime value: anytype) E {
if (@hasField(E, n)) {
return @field(E, n);
}
@compileError("Enum "++@typeName(E)++" has no field named "++n);
@compileError("Enum " ++ @typeName(E) ++ " has no field named " ++ n);
}
@compileError("Cannot cast from "++@typeName(@TypeOf(value))++" to "++@typeName(E));
@compileError("Cannot cast from " ++ @typeName(@TypeOf(value)) ++ " to " ++ @typeName(E));
}
}
@ -256,7 +254,7 @@ test "std.enums.nameCast" {
testing.expectEqual(A.a, nameCast(A, A.a));
testing.expectEqual(A.a, nameCast(A, B.a));
testing.expectEqual(A.a, nameCast(A, "a"));
testing.expectEqual(A.a, nameCast(A, @as(*const[1]u8, "a")));
testing.expectEqual(A.a, nameCast(A, @as(*const [1]u8, "a")));
testing.expectEqual(A.a, nameCast(A, @as([:0]const u8, "a")));
testing.expectEqual(A.a, nameCast(A, @as([]const u8, "a")));
@ -398,12 +396,12 @@ pub fn EnumArray(comptime E: type, comptime V: type) type {
pub fn NoExtension(comptime Self: type) type {
return NoExt;
}
const NoExt = struct{};
const NoExt = struct {};
/// A set type with an Indexer mapping from keys to indices.
/// Presence or absence is stored as a dense bitfield. This
/// type does no allocation and can be copied by value.
pub fn IndexedSet(comptime I: type, comptime Ext: fn(type)type) type {
pub fn IndexedSet(comptime I: type, comptime Ext: fn (type) type) type {
comptime ensureIndexer(I);
return struct {
const Self = @This();
@ -422,7 +420,7 @@ pub fn IndexedSet(comptime I: type, comptime Ext: fn(type)type) type {
bits: BitSet = BitSet.initEmpty(),
/// Returns a set containing all possible keys.
pub fn initFull() Self {
return .{ .bits = BitSet.initFull() };
}
@ -492,7 +490,8 @@ pub fn IndexedSet(comptime I: type, comptime Ext: fn(type)type) type {
pub fn next(self: *Iterator) ?Key {
return if (self.inner.next()) |index|
Indexer.keyForIndex(index)
else null;
else
null;
}
};
};
@ -501,7 +500,7 @@ pub fn IndexedSet(comptime I: type, comptime Ext: fn(type)type) type {
/// A map from keys to values, using an index lookup. Uses a
/// bitfield to track presence and a dense array of values.
/// This type does no allocation and can be copied by value.
pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: fn(type)type) type {
pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: fn (type) type) type {
comptime ensureIndexer(I);
return struct {
const Self = @This();
@ -652,7 +651,8 @@ pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: fn(type)type
.key = Indexer.keyForIndex(index),
.value = &self.values[index],
}
else null;
else
null;
}
};
};
@ -660,7 +660,7 @@ pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: fn(type)type
/// A dense array of values, using an indexed lookup.
/// This type does no allocation and can be copied by value.
pub fn IndexedArray(comptime I: type, comptime V: type, comptime Ext: fn(type)type) type {
pub fn IndexedArray(comptime I: type, comptime V: type, comptime Ext: fn (type) type) type {
comptime ensureIndexer(I);
return struct {
const Self = @This();
@ -769,9 +769,9 @@ pub fn ensureIndexer(comptime T: type) void {
if (!@hasDecl(T, "count")) @compileError("Indexer must have decl count: usize.");
if (@TypeOf(T.count) != usize) @compileError("Indexer.count must be a usize.");
if (!@hasDecl(T, "indexOf")) @compileError("Indexer.indexOf must be a fn(Key)usize.");
if (@TypeOf(T.indexOf) != fn(T.Key)usize) @compileError("Indexer must have decl indexOf: fn(Key)usize.");
if (@TypeOf(T.indexOf) != fn (T.Key) usize) @compileError("Indexer must have decl indexOf: fn(Key)usize.");
if (!@hasDecl(T, "keyForIndex")) @compileError("Indexer must have decl keyForIndex: fn(usize)Key.");
if (@TypeOf(T.keyForIndex) != fn(usize)T.Key) @compileError("Indexer.keyForIndex must be a fn(usize)Key.");
if (@TypeOf(T.keyForIndex) != fn (usize) T.Key) @compileError("Indexer.keyForIndex must be a fn(usize)Key.");
}
}
@ -802,14 +802,18 @@ pub fn EnumIndexer(comptime E: type) type {
return struct {
pub const Key = E;
pub const count: usize = 0;
pub fn indexOf(e: E) usize { unreachable; }
pub fn keyForIndex(i: usize) E { unreachable; }
pub fn indexOf(e: E) usize {
unreachable;
}
pub fn keyForIndex(i: usize) E {
unreachable;
}
};
}
std.sort.sort(EnumField, &fields, {}, ascByValue);
const min = fields[0].value;
const max = fields[fields.len-1].value;
if (max - min == fields.len-1) {
const max = fields[fields.len - 1].value;
if (max - min == fields.len - 1) {
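// The values form a contiguous run, so the indexer can map between keys
// and indices by a constant offset from `min`, with no search.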
return struct {
pub const Key = E;
pub const count = fields.len;
@ -844,7 +848,7 @@ pub fn EnumIndexer(comptime E: type) type {
}
test "std.enums.EnumIndexer dense zeroed" {
const E = enum{ b = 1, a = 0, c = 2 };
const E = enum { b = 1, a = 0, c = 2 };
const Indexer = EnumIndexer(E);
ensureIndexer(Indexer);
testing.expectEqual(E, Indexer.Key);
@ -908,7 +912,7 @@ test "std.enums.EnumIndexer sparse" {
}
test "std.enums.EnumIndexer repeats" {
const E = extern enum{ a = -2, c = 6, b = 4, b2 = 4 };
const E = extern enum { a = -2, c = 6, b = 4, b2 = 4 };
const Indexer = EnumIndexer(E);
ensureIndexer(Indexer);
testing.expectEqual(E, Indexer.Key);
@ -957,7 +961,8 @@ test "std.enums.EnumSet" {
}
var mut = Set.init(.{
.a=true, .c=true,
.a = true,
.c = true,
});
testing.expectEqual(@as(usize, 2), mut.count());
testing.expectEqual(true, mut.contains(.a));
@ -986,7 +991,7 @@ test "std.enums.EnumSet" {
testing.expectEqual(@as(?E, null), it.next());
}
mut.toggleSet(Set.init(.{ .a=true, .b=true }));
mut.toggleSet(Set.init(.{ .a = true, .b = true }));
testing.expectEqual(@as(usize, 2), mut.count());
testing.expectEqual(true, mut.contains(.a));
testing.expectEqual(false, mut.contains(.b));
@ -994,7 +999,7 @@ test "std.enums.EnumSet" {
testing.expectEqual(true, mut.contains(.d));
testing.expectEqual(true, mut.contains(.e)); // aliases a
mut.setUnion(Set.init(.{ .a=true, .b=true }));
mut.setUnion(Set.init(.{ .a = true, .b = true }));
testing.expectEqual(@as(usize, 3), mut.count());
testing.expectEqual(true, mut.contains(.a));
testing.expectEqual(true, mut.contains(.b));
@ -1009,7 +1014,7 @@ test "std.enums.EnumSet" {
testing.expectEqual(false, mut.contains(.c));
testing.expectEqual(true, mut.contains(.d));
mut.setIntersection(Set.init(.{ .a=true, .b=true }));
mut.setIntersection(Set.init(.{ .a = true, .b = true }));
testing.expectEqual(@as(usize, 1), mut.count());
testing.expectEqual(true, mut.contains(.a));
testing.expectEqual(false, mut.contains(.b));
@ -1072,7 +1077,7 @@ test "std.enums.EnumArray sized" {
const undef = Array.initUndefined();
var inst = Array.initFill(5);
const inst2 = Array.init(.{ .a = 1, .b = 2, .c = 3, .d = 4 });
const inst3 = Array.initDefault(6, .{.b = 4, .c = 2});
const inst3 = Array.initDefault(6, .{ .b = 4, .c = 2 });
testing.expectEqual(@as(usize, 5), inst.get(.a));
testing.expectEqual(@as(usize, 5), inst.get(.b));
@ -1272,10 +1277,12 @@ test "std.enums.EnumMap sized" {
var iter = a.iterator();
const Entry = Map.Entry;
testing.expectEqual(@as(?Entry, Entry{
.key = .b, .value = &a.values[1],
.key = .b,
.value = &a.values[1],
}), iter.next());
testing.expectEqual(@as(?Entry, Entry{
.key = .d, .value = &a.values[3],
.key = .d,
.value = &a.values[3],
}), iter.next());
testing.expectEqual(@as(?Entry, null), iter.next());
}

View File

@ -3267,6 +3267,7 @@ pub fn connect(sock: socket_t, sock_addr: *const sockaddr, len: socklen_t) Conne
.WSAEADDRINUSE => return error.AddressInUse,
.WSAEADDRNOTAVAIL => return error.AddressNotAvailable,
.WSAECONNREFUSED => return error.ConnectionRefused,
.WSAECONNRESET => return error.ConnectionResetByPeer,
.WSAETIMEDOUT => return error.ConnectionTimedOut,
.WSAEHOSTUNREACH, // TODO: should we return NetworkUnreachable in this case as well?
.WSAENETUNREACH,
@ -3296,6 +3297,7 @@ pub fn connect(sock: socket_t, sock_addr: *const sockaddr, len: socklen_t) Conne
EALREADY => unreachable, // The socket is nonblocking and a previous connection attempt has not yet been completed.
EBADF => unreachable, // sockfd is not a valid open file descriptor.
ECONNREFUSED => return error.ConnectionRefused,
ECONNRESET => return error.ConnectionResetByPeer,
EFAULT => unreachable, // The socket structure address is outside the user's address space.
EINTR => continue,
EISCONN => unreachable, // The socket is already connected.

View File

@ -1353,7 +1353,9 @@ test "timeout (after a relative time)" {
.res = -linux.ETIME,
.flags = 0,
}, cqe);
testing.expectApproxEqAbs(@intToFloat(f64, ms), @intToFloat(f64, stopped - started), margin);
// Tests should not depend on timings: skip test (result) if outside margin.
if (!std.math.approxEqAbs(f64, ms, @intToFloat(f64, stopped - started), margin)) return error.SkipZigTest;
}
test "timeout (after a number of completions)" {

View File

@ -0,0 +1,972 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std.zig");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const warn = std.debug.warn;
const Order = std.math.Order;
const testing = std.testing;
const expect = testing.expect;
const expectEqual = testing.expectEqual;
const expectError = testing.expectError;
/// Priority Dequeue for storing generic data. Initialize with `init`.
pub fn PriorityDequeue(comptime T: type) type {
return struct {
const Self = @This();
items: []T,
len: usize,
allocator: *Allocator,
compareFn: fn (a: T, b: T) Order,
/// Initialize and return a new priority dequeue. Provide `compareFn`
/// that returns `Order.lt` when its first argument should
/// get min-popped before its second argument, `Order.eq` if the
/// arguments are of equal priority, or `Order.gt` if the second
/// argument should be min-popped first. Popping the max element works
/// in reverse. For example, to make `popMin` return the smallest
/// number, provide
///
/// `fn lessThan(a: T, b: T) Order { return std.math.order(a, b); }`
pub fn init(allocator: *Allocator, compareFn: fn (T, T) Order) Self {
return Self{
.items = &[_]T{},
.len = 0,
.allocator = allocator,
.compareFn = compareFn,
};
}
/// Free memory used by the dequeue.
pub fn deinit(self: Self) void {
self.allocator.free(self.items);
}
/// Insert a new element, maintaining priority.
pub fn add(self: *Self, elem: T) !void {
try ensureCapacity(self, self.len + 1);
addUnchecked(self, elem);
}
/// Add each element in `items` to the dequeue.
pub fn addSlice(self: *Self, items: []const T) !void {
try self.ensureCapacity(self.len + items.len);
for (items) |e| {
self.addUnchecked(e);
}
}
fn addUnchecked(self: *Self, elem: T) void {
self.items[self.len] = elem;
if (self.len > 0) {
const start = self.getStartForSiftUp(elem, self.len);
self.siftUp(start);
}
self.len += 1;
}
fn isMinLayer(index: usize) bool {
// In the min-max heap structure:
// The first element is on a min layer;
// next two are on a max layer;
// next four are on a min layer, and so on.
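// For example, index 5: index + 1 == 6 == 0b110, whose highest set bit
// is bit 2 (even), so index 5 is on a min layer. Index 1: 0b10 has
// highest set bit 1 (odd), so index 1 is on a max layer.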
const leading_zeros = @clz(usize, index + 1);
const highest_set_bit = @bitSizeOf(usize) - 1 - leading_zeros;
return (highest_set_bit & 1) == 0;
}
fn nextIsMinLayer(self: Self) bool {
return isMinLayer(self.len);
}
const StartIndexAndLayer = struct {
index: usize,
min_layer: bool,
};
fn getStartForSiftUp(self: Self, child: T, index: usize) StartIndexAndLayer {
var child_index = index;
var parent_index = parentIndex(child_index);
const parent = self.items[parent_index];
const min_layer = self.nextIsMinLayer();
const order = self.compareFn(child, parent);
if ((min_layer and order == .gt) or (!min_layer and order == .lt)) {
// We must swap the item with its parent if it is on the "wrong" layer
self.items[parent_index] = child;
self.items[child_index] = parent;
return .{
.index = parent_index,
.min_layer = !min_layer,
};
} else {
return .{
.index = child_index,
.min_layer = min_layer,
};
}
}
fn siftUp(self: *Self, start: StartIndexAndLayer) void {
if (start.min_layer) {
doSiftUp(self, start.index, .lt);
} else {
doSiftUp(self, start.index, .gt);
}
}
fn doSiftUp(self: *Self, start_index: usize, target_order: Order) void {
var child_index = start_index;
while (child_index > 2) {
var grandparent_index = grandparentIndex(child_index);
const child = self.items[child_index];
const grandparent = self.items[grandparent_index];
// If the grandparent is already better or equal, we have gone as far as we need to
if (self.compareFn(child, grandparent) != target_order) break;
// Otherwise swap the item with its grandparent
self.items[grandparent_index] = child;
self.items[child_index] = grandparent;
child_index = grandparent_index;
}
}
/// Look at the smallest element in the dequeue. Returns
/// `null` if empty.
pub fn peekMin(self: *Self) ?T {
return if (self.len > 0) self.items[0] else null;
}
/// Look at the largest element in the dequeue. Returns
/// `null` if empty.
pub fn peekMax(self: *Self) ?T {
if (self.len == 0) return null;
if (self.len == 1) return self.items[0];
if (self.len == 2) return self.items[1];
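// With three or more items, the maximum is on the first max layer,
// at index 1 or 2.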
return self.bestItemAtIndices(1, 2, .gt).item;
}
fn maxIndex(self: Self) ?usize {
if (self.len == 0) return null;
if (self.len == 1) return 0;
if (self.len == 2) return 1;
return self.bestItemAtIndices(1, 2, .gt).index;
}
/// Pop the smallest element from the dequeue. Returns
/// `null` if empty.
pub fn removeMinOrNull(self: *Self) ?T {
return if (self.len > 0) self.removeMin() else null;
}
/// Remove and return the smallest element from the
/// dequeue.
pub fn removeMin(self: *Self) T {
return self.removeIndex(0);
}
/// Pop the largest element from the dequeue. Returns
/// `null` if empty.
pub fn removeMaxOrNull(self: *Self) ?T {
return if (self.len > 0) self.removeMax() else null;
}
/// Remove and return the largest element from the
/// dequeue.
pub fn removeMax(self: *Self) T {
return self.removeIndex(self.maxIndex().?);
}
/// Remove and return element at index. Indices are in the
/// same order as iterator, which is not necessarily priority
/// order.
pub fn removeIndex(self: *Self, index: usize) T {
assert(self.len > index);
const item = self.items[index];
const last = self.items[self.len - 1];
self.items[index] = last;
self.len -= 1;
siftDown(self, index);
return item;
}
fn siftDown(self: *Self, index: usize) void {
if (isMinLayer(index)) {
self.doSiftDown(index, .lt);
} else {
self.doSiftDown(index, .gt);
}
}
fn doSiftDown(self: *Self, start_index: usize, target_order: Order) void {
var index = start_index;
const half = self.len >> 1;
while (true) {
const first_grandchild_index = firstGrandchildIndex(index);
const last_grandchild_index = first_grandchild_index + 3;
const elem = self.items[index];
if (last_grandchild_index < self.len) {
// All four grandchildren exist
const index2 = first_grandchild_index + 1;
const index3 = index2 + 1;
// Find the best grandchild
const best_left = self.bestItemAtIndices(first_grandchild_index, index2, target_order);
const best_right = self.bestItemAtIndices(index3, last_grandchild_index, target_order);
const best_grandchild = self.bestItem(best_left, best_right, target_order);
// If the item is better than or equal to its best grandchild, we are done
if (self.compareFn(best_grandchild.item, elem) != target_order) return;
// Otherwise, swap them
self.items[best_grandchild.index] = elem;
self.items[index] = best_grandchild.item;
index = best_grandchild.index;
// We might need to swap the element with its parent
self.swapIfParentIsBetter(elem, index, target_order);
} else {
// The children or grandchildren are the last layer
const first_child_index = firstChildIndex(index);
if (first_child_index > self.len) return;
const best_descendent = self.bestDescendent(first_child_index, first_grandchild_index, target_order);
// If the item is better than or equal to its best descendant, we are done
if (self.compareFn(best_descendent.item, elem) != target_order) return;
// Otherwise swap them
self.items[best_descendent.index] = elem;
self.items[index] = best_descendent.item;
index = best_descendent.index;
// If we didn't swap a grandchild, we are done
if (index < first_grandchild_index) return;
// We might need to swap the element with its parent
self.swapIfParentIsBetter(elem, index, target_order);
return;
}
// If we are now in the last layer, we are done
if (index >= half) return;
}
}
fn swapIfParentIsBetter(self: *Self, child: T, child_index: usize, target_order: Order) void {
const parent_index = parentIndex(child_index);
const parent = self.items[parent_index];
if (self.compareFn(parent, child) == target_order) {
self.items[parent_index] = child;
self.items[child_index] = parent;
}
}
const ItemAndIndex = struct {
item: T,
index: usize,
};
fn getItem(self: Self, index: usize) ItemAndIndex {
return .{
.item = self.items[index],
.index = index,
};
}
fn bestItem(self: Self, item1: ItemAndIndex, item2: ItemAndIndex, target_order: Order) ItemAndIndex {
if (self.compareFn(item1.item, item2.item) == target_order) {
return item1;
} else {
return item2;
}
}
fn bestItemAtIndices(self: Self, index1: usize, index2: usize, target_order: Order) ItemAndIndex {
var item1 = self.getItem(index1);
var item2 = self.getItem(index2);
return self.bestItem(item1, item2, target_order);
}
fn bestDescendent(self: Self, first_child_index: usize, first_grandchild_index: usize, target_order: Order) ItemAndIndex {
const second_child_index = first_child_index + 1;
if (first_grandchild_index >= self.len) {
// No grandchildren, find the best child (second may not exist)
if (second_child_index >= self.len) {
return .{
.item = self.items[first_child_index],
.index = first_child_index,
};
} else {
return self.bestItemAtIndices(first_child_index, second_child_index, target_order);
}
}
const second_grandchild_index = first_grandchild_index + 1;
if (second_grandchild_index >= self.len) {
// One grandchild, so we know there is a second child. Compare first grandchild and second child
return self.bestItemAtIndices(first_grandchild_index, second_child_index, target_order);
}
const best_left_grandchild_index = self.bestItemAtIndices(first_grandchild_index, second_grandchild_index, target_order).index;
const third_grandchild_index = second_grandchild_index + 1;
if (third_grandchild_index >= self.len) {
// Two grandchildren, and we know the best. Compare this to second child.
return self.bestItemAtIndices(best_left_grandchild_index, second_child_index, target_order);
} else {
// Three grandchildren, compare the min of the first two with the third
return self.bestItemAtIndices(best_left_grandchild_index, third_grandchild_index, target_order);
}
}
/// Return the number of elements remaining in the dequeue.
pub fn count(self: Self) usize {
return self.len;
}
/// Return the number of elements that can be added to the
/// dequeue before more memory is allocated.
pub fn capacity(self: Self) usize {
return self.items.len;
}
/// Dequeue takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// De-initialize with `deinit`.
pub fn fromOwnedSlice(allocator: *Allocator, compareFn: fn (T, T) Order, items: []T) Self {
var queue = Self{
.items = items,
.len = items.len,
.allocator = allocator,
.compareFn = compareFn,
};
if (queue.len <= 1) return queue;
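// Establish the min-max heap property bottom-up: sift down each non-leaf
// element, from the last parent back to the root.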
const half = (queue.len >> 1) - 1;
var i: usize = 0;
while (i <= half) : (i += 1) {
const index = half - i;
queue.siftDown(index);
}
return queue;
}
pub fn ensureCapacity(self: *Self, new_capacity: usize) !void {
var better_capacity = self.capacity();
if (better_capacity >= new_capacity) return;
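// Grow by roughly 1.5x plus a constant until the request fits,
// amortizing reallocation cost.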
while (true) {
better_capacity += better_capacity / 2 + 8;
if (better_capacity >= new_capacity) break;
}
self.items = try self.allocator.realloc(self.items, better_capacity);
}
/// Reduce allocated capacity to `new_len`.
pub fn shrinkAndFree(self: *Self, new_len: usize) void {
assert(new_len <= self.items.len);
// Cannot shrink to smaller than the current queue size without invalidating the heap property
assert(new_len >= self.len);
self.items = self.allocator.realloc(self.items[0..], new_len) catch |e| switch (e) {
error.OutOfMemory => { // no problem, capacity is still correct then.
self.items.len = new_len;
return;
},
};
self.len = new_len;
}
/// Reduce length to `new_len`.
pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
assert(new_len <= self.items.len);
// Cannot shrink to smaller than the current queue size without invalidating the heap property
assert(new_len >= self.len);
self.len = new_len;
}
pub fn update(self: *Self, elem: T, new_elem: T) !void {
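// Find the old element with a linear scan, remove it, then re-insert
// the replacement so the heap property is restored.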
var old_index: usize = std.mem.indexOfScalar(T, self.items[0..self.len], elem) orelse return error.ElementNotFound;
_ = self.removeIndex(old_index);
self.addUnchecked(new_elem);
}
pub const Iterator = struct {
queue: *PriorityDequeue(T),
count: usize,
pub fn next(it: *Iterator) ?T {
if (it.count >= it.queue.len) return null;
const out = it.count;
it.count += 1;
return it.queue.items[out];
}
pub fn reset(it: *Iterator) void {
it.count = 0;
}
};
/// Return an iterator that walks the queue without consuming
/// it. Invalidated if the queue is modified.
pub fn iterator(self: *Self) Iterator {
return Iterator{
.queue = self,
.count = 0,
};
}
fn dump(self: *Self) void {
warn("{{ ", .{});
warn("items: ", .{});
for (self.items) |e, i| {
if (i >= self.len) break;
warn("{}, ", .{e});
}
warn("array: ", .{});
for (self.items) |e, i| {
warn("{}, ", .{e});
}
warn("len: {} ", .{self.len});
warn("capacity: {}", .{self.capacity()});
warn(" }}\n", .{});
}
fn parentIndex(index: usize) usize {
return (index - 1) >> 1;
}
fn grandparentIndex(index: usize) usize {
return parentIndex(parentIndex(index));
}
fn firstChildIndex(index: usize) usize {
return (index << 1) + 1;
}
fn firstGrandchildIndex(index: usize) usize {
return firstChildIndex(firstChildIndex(index));
}
};
}
fn lessThanComparison(a: u32, b: u32) Order {
return std.math.order(a, b);
}
const PDQ = PriorityDequeue(u32);
test "std.PriorityDequeue: add and remove min" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
try queue.add(54);
try queue.add(12);
try queue.add(7);
try queue.add(23);
try queue.add(25);
try queue.add(13);
expectEqual(@as(u32, 7), queue.removeMin());
expectEqual(@as(u32, 12), queue.removeMin());
expectEqual(@as(u32, 13), queue.removeMin());
expectEqual(@as(u32, 23), queue.removeMin());
expectEqual(@as(u32, 25), queue.removeMin());
expectEqual(@as(u32, 54), queue.removeMin());
}
test "std.PriorityDequeue: add and remove min structs" {
const S = struct {
size: u32,
};
var queue = PriorityDequeue(S).init(testing.allocator, struct {
fn order(a: S, b: S) Order {
return std.math.order(a.size, b.size);
}
}.order);
defer queue.deinit();
try queue.add(.{ .size = 54 });
try queue.add(.{ .size = 12 });
try queue.add(.{ .size = 7 });
try queue.add(.{ .size = 23 });
try queue.add(.{ .size = 25 });
try queue.add(.{ .size = 13 });
expectEqual(@as(u32, 7), queue.removeMin().size);
expectEqual(@as(u32, 12), queue.removeMin().size);
expectEqual(@as(u32, 13), queue.removeMin().size);
expectEqual(@as(u32, 23), queue.removeMin().size);
expectEqual(@as(u32, 25), queue.removeMin().size);
expectEqual(@as(u32, 54), queue.removeMin().size);
}
test "std.PriorityDequeue: add and remove max" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
try queue.add(54);
try queue.add(12);
try queue.add(7);
try queue.add(23);
try queue.add(25);
try queue.add(13);
expectEqual(@as(u32, 54), queue.removeMax());
expectEqual(@as(u32, 25), queue.removeMax());
expectEqual(@as(u32, 23), queue.removeMax());
expectEqual(@as(u32, 13), queue.removeMax());
expectEqual(@as(u32, 12), queue.removeMax());
expectEqual(@as(u32, 7), queue.removeMax());
}
test "std.PriorityDequeue: add and remove same min" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
try queue.add(1);
try queue.add(1);
try queue.add(2);
try queue.add(2);
try queue.add(1);
try queue.add(1);
expectEqual(@as(u32, 1), queue.removeMin());
expectEqual(@as(u32, 1), queue.removeMin());
expectEqual(@as(u32, 1), queue.removeMin());
expectEqual(@as(u32, 1), queue.removeMin());
expectEqual(@as(u32, 2), queue.removeMin());
expectEqual(@as(u32, 2), queue.removeMin());
}
test "std.PriorityDequeue: add and remove same max" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
try queue.add(1);
try queue.add(1);
try queue.add(2);
try queue.add(2);
try queue.add(1);
try queue.add(1);
expectEqual(@as(u32, 2), queue.removeMax());
expectEqual(@as(u32, 2), queue.removeMax());
expectEqual(@as(u32, 1), queue.removeMax());
expectEqual(@as(u32, 1), queue.removeMax());
expectEqual(@as(u32, 1), queue.removeMax());
expectEqual(@as(u32, 1), queue.removeMax());
}
test "std.PriorityDequeue: removeOrNull empty" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
expect(queue.removeMinOrNull() == null);
expect(queue.removeMaxOrNull() == null);
}
test "std.PriorityDequeue: edge case 3 elements" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
try queue.add(9);
try queue.add(3);
try queue.add(2);
expectEqual(@as(u32, 2), queue.removeMin());
expectEqual(@as(u32, 3), queue.removeMin());
expectEqual(@as(u32, 9), queue.removeMin());
}
test "std.PriorityDequeue: edge case 3 elements max" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
try queue.add(9);
try queue.add(3);
try queue.add(2);
expectEqual(@as(u32, 9), queue.removeMax());
expectEqual(@as(u32, 3), queue.removeMax());
expectEqual(@as(u32, 2), queue.removeMax());
}
test "std.PriorityDequeue: peekMin" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
expect(queue.peekMin() == null);
try queue.add(9);
try queue.add(3);
try queue.add(2);
expect(queue.peekMin().? == 2);
expect(queue.peekMin().? == 2);
}
test "std.PriorityDequeue: peekMax" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
expect(queue.peekMin() == null);
try queue.add(9);
try queue.add(3);
try queue.add(2);
expect(queue.peekMax().? == 9);
expect(queue.peekMax().? == 9);
}
test "std.PriorityDequeue: sift up with odd indices" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
for (items) |e| {
try queue.add(e);
}
const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 };
for (sorted_items) |e| {
expectEqual(e, queue.removeMin());
}
}
test "std.PriorityDequeue: sift up with odd indices" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
for (items) |e| {
try queue.add(e);
}
const sorted_items = [_]u32{ 25, 24, 24, 22, 21, 16, 15, 15, 14, 13, 12, 11, 7, 7, 6, 5, 2, 1 };
for (sorted_items) |e| {
expectEqual(e, queue.removeMax());
}
}
test "std.PriorityDequeue: addSlice min" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
try queue.addSlice(items[0..]);
const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 };
for (sorted_items) |e| {
expectEqual(e, queue.removeMin());
}
}
test "std.PriorityDequeue: addSlice max" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
try queue.addSlice(items[0..]);
const sorted_items = [_]u32{ 25, 24, 24, 22, 21, 16, 15, 15, 14, 13, 12, 11, 7, 7, 6, 5, 2, 1 };
for (sorted_items) |e| {
expectEqual(e, queue.removeMax());
}
}
test "std.PriorityDequeue: fromOwnedSlice trivial case 0" {
const items = [0]u32{};
const queue_items = try testing.allocator.dupe(u32, &items);
var queue = PDQ.fromOwnedSlice(testing.allocator, lessThanComparison, queue_items[0..]);
defer queue.deinit();
expectEqual(@as(usize, 0), queue.len);
expect(queue.removeMinOrNull() == null);
}
test "std.PriorityDequeue: fromOwnedSlice trivial case 1" {
const items = [1]u32{1};
const queue_items = try testing.allocator.dupe(u32, &items);
var queue = PDQ.fromOwnedSlice(testing.allocator, lessThanComparison, queue_items[0..]);
defer queue.deinit();
expectEqual(@as(usize, 1), queue.len);
expectEqual(items[0], queue.removeMin());
expect(queue.removeMinOrNull() == null);
}
test "std.PriorityDequeue: fromOwnedSlice" {
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
const queue_items = try testing.allocator.dupe(u32, items[0..]);
var queue = PDQ.fromOwnedSlice(testing.allocator, lessThanComparison, queue_items[0..]);
defer queue.deinit();
const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 };
for (sorted_items) |e| {
expectEqual(e, queue.removeMin());
}
}
test "std.PriorityDequeue: update min queue" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
try queue.add(55);
try queue.add(44);
try queue.add(11);
try queue.update(55, 5);
try queue.update(44, 4);
try queue.update(11, 1);
expectEqual(@as(u32, 1), queue.removeMin());
expectEqual(@as(u32, 4), queue.removeMin());
expectEqual(@as(u32, 5), queue.removeMin());
}
test "std.PriorityDequeue: update same min queue" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
try queue.add(1);
try queue.add(1);
try queue.add(2);
try queue.add(2);
try queue.update(1, 5);
try queue.update(2, 4);
expectEqual(@as(u32, 1), queue.removeMin());
expectEqual(@as(u32, 2), queue.removeMin());
expectEqual(@as(u32, 4), queue.removeMin());
expectEqual(@as(u32, 5), queue.removeMin());
}
test "std.PriorityDequeue: update max queue" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
try queue.add(55);
try queue.add(44);
try queue.add(11);
try queue.update(55, 5);
try queue.update(44, 1);
try queue.update(11, 4);
expectEqual(@as(u32, 5), queue.removeMax());
expectEqual(@as(u32, 4), queue.removeMax());
expectEqual(@as(u32, 1), queue.removeMax());
}
test "std.PriorityDequeue: update same max queue" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
try queue.add(1);
try queue.add(1);
try queue.add(2);
try queue.add(2);
try queue.update(1, 5);
try queue.update(2, 4);
expectEqual(@as(u32, 5), queue.removeMax());
expectEqual(@as(u32, 4), queue.removeMax());
expectEqual(@as(u32, 2), queue.removeMax());
expectEqual(@as(u32, 1), queue.removeMax());
}
test "std.PriorityDequeue: iterator" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
var map = std.AutoHashMap(u32, void).init(testing.allocator);
defer {
queue.deinit();
map.deinit();
}
const items = [_]u32{ 54, 12, 7, 23, 25, 13 };
for (items) |e| {
_ = try queue.add(e);
_ = try map.put(e, {});
}
var it = queue.iterator();
while (it.next()) |e| {
_ = map.remove(e);
}
expectEqual(@as(usize, 0), map.count());
}
test "std.PriorityDequeue: remove at index" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
try queue.add(3);
try queue.add(2);
try queue.add(1);
var it = queue.iterator();
var elem = it.next();
var idx: usize = 0;
const two_idx = while (elem != null) : (elem = it.next()) {
if (elem.? == 2)
break idx;
idx += 1;
} else unreachable;
expectEqual(queue.removeIndex(two_idx), 2);
expectEqual(queue.removeMin(), 1);
expectEqual(queue.removeMin(), 3);
expectEqual(queue.removeMinOrNull(), null);
}
test "std.PriorityDequeue: iterator while empty" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
var it = queue.iterator();
expectEqual(it.next(), null);
}
test "std.PriorityDequeue: shrinkRetainingCapacity and shrinkAndFree" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
try queue.ensureCapacity(4);
expect(queue.capacity() >= 4);
try queue.add(1);
try queue.add(2);
try queue.add(3);
expect(queue.capacity() >= 4);
expectEqual(@as(usize, 3), queue.len);
queue.shrinkRetainingCapacity(3);
expect(queue.capacity() >= 4);
expectEqual(@as(usize, 3), queue.len);
queue.shrinkAndFree(3);
expectEqual(@as(usize, 3), queue.capacity());
expectEqual(@as(usize, 3), queue.len);
expectEqual(@as(u32, 3), queue.removeMax());
expectEqual(@as(u32, 2), queue.removeMax());
expectEqual(@as(u32, 1), queue.removeMax());
expect(queue.removeMaxOrNull() == null);
}
test "std.PriorityDequeue: fuzz testing min" {
var prng = std.rand.DefaultPrng.init(0x12345678);
const test_case_count = 100;
const queue_size = 1_000;
var i: usize = 0;
while (i < test_case_count) : (i += 1) {
try fuzzTestMin(&prng.random, queue_size);
}
}
fn fuzzTestMin(rng: *std.rand.Random, comptime queue_size: usize) !void {
const allocator = testing.allocator;
const items = try generateRandomSlice(allocator, rng, queue_size);
var queue = PDQ.fromOwnedSlice(allocator, lessThanComparison, items);
defer queue.deinit();
var last_removed: ?u32 = null;
while (queue.removeMinOrNull()) |next| {
if (last_removed) |last| {
expect(last <= next);
}
last_removed = next;
}
}
test "std.PriorityDequeue: fuzz testing max" {
var prng = std.rand.DefaultPrng.init(0x87654321);
const test_case_count = 100;
const queue_size = 1_000;
var i: usize = 0;
while (i < test_case_count) : (i += 1) {
try fuzzTestMax(&prng.random, queue_size);
}
}
fn fuzzTestMax(rng: *std.rand.Random, queue_size: usize) !void {
const allocator = testing.allocator;
const items = try generateRandomSlice(allocator, rng, queue_size);
var queue = PDQ.fromOwnedSlice(testing.allocator, lessThanComparison, items);
defer queue.deinit();
var last_removed: ?u32 = null;
while (queue.removeMaxOrNull()) |next| {
if (last_removed) |last| {
expect(last >= next);
}
last_removed = next;
}
}
test "std.PriorityDequeue: fuzz testing min and max" {
var prng = std.rand.DefaultPrng.init(0x87654321);
const test_case_count = 100;
const queue_size = 1_000;
var i: usize = 0;
while (i < test_case_count) : (i += 1) {
try fuzzTestMinMax(&prng.random, queue_size);
}
}
fn fuzzTestMinMax(rng: *std.rand.Random, queue_size: usize) !void {
const allocator = testing.allocator;
const items = try generateRandomSlice(allocator, rng, queue_size);
var queue = PDQ.fromOwnedSlice(allocator, lessThanComparison, items);
defer queue.deinit();
var last_min: ?u32 = null;
var last_max: ?u32 = null;
var i: usize = 0;
while (i < queue_size) : (i += 1) {
if (i % 2 == 0) {
const next = queue.removeMin();
if (last_min) |last| {
expect(last <= next);
}
last_min = next;
} else {
const next = queue.removeMax();
if (last_max) |last| {
expect(last >= next);
}
last_max = next;
}
}
}
fn generateRandomSlice(allocator: *std.mem.Allocator, rng: *std.rand.Random, size: usize) ![]u32 {
var array = std.ArrayList(u32).init(allocator);
try array.ensureCapacity(size);
var i: usize = 0;
while (i < size) : (i += 1) {
const elem = rng.int(u32);
try array.append(elem);
}
return array.toOwnedSlice();
}

View File

@ -6,6 +6,8 @@
const std = @import("std.zig");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const warn = std.debug.warn;
const Order = std.math.Order;
const testing = std.testing;
const expect = testing.expect;
const expectEqual = testing.expectEqual;
@ -19,15 +21,17 @@ pub fn PriorityQueue(comptime T: type) type {
items: []T,
len: usize,
allocator: *Allocator,
compareFn: fn (a: T, b: T) bool,
compareFn: fn (a: T, b: T) Order,
/// Initialize and return a priority queue. Provide
/// `compareFn` that returns `true` when its first argument
/// should get popped before its second argument. For example,
/// to make `pop` return the minimum value, provide
/// Initialize and return a priority queue. Provide `compareFn`
/// that returns `Order.lt` when its first argument should
/// get popped before its second argument, `Order.eq` if the
/// arguments are of equal priority, or `Order.gt` if the second
/// argument should be popped first. For example, to make `pop`
/// return the smallest number, provide
///
/// `fn lessThan(a: T, b: T) bool { return a < b; }`
pub fn init(allocator: *Allocator, compareFn: fn (a: T, b: T) bool) Self {
/// `fn lessThan(a: T, b: T) Order { return std.math.order(a, b); }`
pub fn init(allocator: *Allocator, compareFn: fn (a: T, b: T) Order) Self {
return Self{
.items = &[_]T{},
.len = 0,
@ -60,7 +64,7 @@ pub fn PriorityQueue(comptime T: type) type {
const child = self.items[child_index];
const parent = self.items[parent_index];
if (!self.compareFn(child, parent)) break;
if (self.compareFn(child, parent) != .lt) break;
self.items[parent_index] = child;
self.items[child_index] = parent;
@ -132,14 +136,14 @@ pub fn PriorityQueue(comptime T: type) type {
var smallest = self.items[index];
if (left) |e| {
if (self.compareFn(e, smallest)) {
if (self.compareFn(e, smallest) == .lt) {
smallest_index = left_index;
smallest = e;
}
}
if (right) |e| {
if (self.compareFn(e, smallest)) {
if (self.compareFn(e, smallest) == .lt) {
smallest_index = right_index;
smallest = e;
}
@ -158,13 +162,16 @@ pub fn PriorityQueue(comptime T: type) type {
/// PriorityQueue takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit`.
pub fn fromOwnedSlice(allocator: *Allocator, compareFn: fn (a: T, b: T) bool, items: []T) Self {
pub fn fromOwnedSlice(allocator: *Allocator, compareFn: fn (a: T, b: T) Order, items: []T) Self {
var queue = Self{
.items = items,
.len = items.len,
.allocator = allocator,
.compareFn = compareFn,
};
if (queue.len <= 1) return queue;
const half = (queue.len >> 1) - 1;
var i: usize = 0;
while (i <= half) : (i += 1) {
@ -183,25 +190,40 @@ pub fn PriorityQueue(comptime T: type) type {
self.items = try self.allocator.realloc(self.items, better_capacity);
}
pub fn resize(self: *Self, new_len: usize) !void {
try self.ensureCapacity(new_len);
/// Reduce allocated capacity to `new_len`.
pub fn shrinkAndFree(self: *Self, new_len: usize) void {
assert(new_len <= self.items.len);
// Cannot shrink to smaller than the current queue size without invalidating the heap property
assert(new_len >= self.len);
self.items = self.allocator.realloc(self.items[0..], new_len) catch |e| switch (e) {
error.OutOfMemory => { // no problem, capacity is still correct then.
self.items.len = new_len;
return;
},
};
self.len = new_len;
}
pub fn shrink(self: *Self, new_len: usize) void {
// TODO take advantage of the new realloc semantics
assert(new_len <= self.len);
/// Reduce length to `new_len`.
pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
assert(new_len <= self.items.len);
// Cannot shrink to smaller than the current queue size without invalidating the heap property
assert(new_len >= self.len);
self.len = new_len;
}
pub fn update(self: *Self, elem: T, new_elem: T) !void {
var update_index: usize = std.mem.indexOfScalar(T, self.items, elem) orelse return error.ElementNotFound;
var update_index: usize = std.mem.indexOfScalar(T, self.items[0..self.len], elem) orelse return error.ElementNotFound;
const old_elem: T = self.items[update_index];
self.items[update_index] = new_elem;
if (self.compareFn(new_elem, old_elem)) {
siftUp(self, update_index);
} else {
siftDown(self, update_index);
switch (self.compareFn(new_elem, old_elem)) {
.lt => siftUp(self, update_index),
.gt => siftDown(self, update_index),
.eq => {}, // Nothing to do as the items have equal priority
}
}
@ -248,12 +270,12 @@ pub fn PriorityQueue(comptime T: type) type {
};
}
fn lessThan(a: u32, b: u32) bool {
return a < b;
fn lessThan(a: u32, b: u32) Order {
return std.math.order(a, b);
}
fn greaterThan(a: u32, b: u32) bool {
return a > b;
fn greaterThan(a: u32, b: u32) Order {
return lessThan(a, b).invert();
}
const PQ = PriorityQueue(u32);
@ -351,6 +373,26 @@ test "std.PriorityQueue: addSlice" {
}
}
test "std.PriorityQueue: fromOwnedSlice trivial case 0" {
const items = [0]u32{};
const queue_items = try testing.allocator.dupe(u32, &items);
var queue = PQ.fromOwnedSlice(testing.allocator, lessThan, queue_items[0..]);
defer queue.deinit();
expectEqual(@as(usize, 0), queue.len);
expect(queue.removeOrNull() == null);
}
test "std.PriorityQueue: fromOwnedSlice trivial case 1" {
const items = [1]u32{1};
const queue_items = try testing.allocator.dupe(u32, &items);
var queue = PQ.fromOwnedSlice(testing.allocator, lessThan, queue_items[0..]);
defer queue.deinit();
expectEqual(@as(usize, 1), queue.len);
expectEqual(items[0], queue.remove());
expect(queue.removeOrNull() == null);
}
test "std.PriorityQueue: fromOwnedSlice" {
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
const heap_items = try testing.allocator.dupe(u32, items[0..]);
@ -453,6 +495,33 @@ test "std.PriorityQueue: iterator while empty" {
expectEqual(it.next(), null);
}
test "std.PriorityQueue: shrinkRetainingCapacity and shrinkAndFree" {
var queue = PQ.init(testing.allocator, lessThan);
defer queue.deinit();
try queue.ensureCapacity(4);
expect(queue.capacity() >= 4);
try queue.add(1);
try queue.add(2);
try queue.add(3);
expect(queue.capacity() >= 4);
expectEqual(@as(usize, 3), queue.len);
queue.shrinkRetainingCapacity(3);
expect(queue.capacity() >= 4);
expectEqual(@as(usize, 3), queue.len);
queue.shrinkAndFree(3);
expectEqual(@as(usize, 3), queue.capacity());
expectEqual(@as(usize, 3), queue.len);
expectEqual(@as(u32, 1), queue.remove());
expectEqual(@as(u32, 2), queue.remove());
expectEqual(@as(u32, 3), queue.remove());
expect(queue.removeOrNull() == null);
}
test "std.PriorityQueue: update min heap" {
var queue = PQ.init(testing.allocator, lessThan);
defer queue.deinit();

View File

@ -208,3 +208,35 @@ test "isaac64 sequence" {
std.testing.expect(s == r.next());
}
}
test "isaac64 fill" {
var r = Isaac64.init(0);
// from reference implementation
const seq = [_]u64{
0xf67dfba498e4937c,
0x84a5066a9204f380,
0xfee34bd5f5514dbb,
0x4d1664739b8f80d6,
0x8607459ab52a14aa,
0x0e78bc5a98529e49,
0xfe5332822ad13777,
0x556c27525e33d01a,
0x08643ca615f3149f,
0xd0771faf3cb04714,
0x30e86f68a37b008d,
0x3074ebc0488a3adf,
0x270645ea7a2790bc,
0x5601a0a8d3763c6a,
0x2f83071f53f325dd,
0xb9090f3d42d2d2ea,
};
for (seq) |s| {
var buf0: [8]u8 = undefined;
var buf1: [7]u8 = undefined;
std.mem.writeIntLittle(u64, &buf0, s);
Isaac64.fill(&r.random, &buf1);
std.testing.expect(std.mem.eql(u8, buf0[0..7], buf1[0..]));
}
}

View File

@ -75,7 +75,7 @@ fn fill(r: *Random, buf: []u8) void {
var n = self.next();
while (i < buf.len) : (i += 1) {
buf[i] = @truncate(u8, n);
n >>= 4;
n >>= 8;
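// Shift a full byte per iteration; the old shift of 4 reused the same
// nibbles across consecutive output bytes.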
}
}
}
@ -99,3 +99,27 @@ test "pcg sequence" {
std.testing.expect(s == r.next());
}
}
test "pcg fill" {
var r = Pcg.init(0);
const s0: u64 = 0x9394bf54ce5d79de;
const s1: u64 = 0x84e9c579ef59bbf7;
r.seedTwo(s0, s1);
const seq = [_]u32{
2881561918,
3063928540,
1199791034,
2487695858,
1479648952,
3247963454,
};
for (seq) |s| {
var buf0: [4]u8 = undefined;
var buf1: [3]u8 = undefined;
std.mem.writeIntLittle(u32, &buf0, s);
Pcg.fill(&r.random, &buf1);
std.testing.expect(std.mem.eql(u8, buf0[0..3], buf1[0..]));
}
}

View File

@ -106,3 +106,35 @@ test "Sfc64 sequence" {
std.testing.expectEqual(s, r.next());
}
}
test "Sfc64 fill" {
// Unfortunately there does not seem to be an official test sequence.
var r = Sfc64.init(0);
const seq = [_]u64{
0x3acfa029e3cc6041,
0xf5b6515bf2ee419c,
0x1259635894a29b61,
0xb6ae75395f8ebd6,
0x225622285ce302e2,
0x520d28611395cb21,
0xdb909c818901599d,
0x8ffd195365216f57,
0xe8c4ad5e258ac04a,
0x8f8ef2c89fdb63ca,
0xf9865b01d98d8e2f,
0x46555871a65d08ba,
0x66868677c6298fcd,
0x2ce15a7e6329f57d,
0xb2f1833ca91ca79,
0x4b0890ac9bf453ca,
};
for (seq) |s| {
var buf0: [8]u8 = undefined;
var buf1: [7]u8 = undefined;
std.mem.writeIntLittle(u64, &buf0, s);
Sfc64.fill(&r.random, &buf1);
std.testing.expect(std.mem.eql(u8, buf0[0..7], buf1[0..]));
}
}

View File

@ -131,3 +131,26 @@ test "xoroshiro sequence" {
std.testing.expect(s == r.next());
}
}
test "xoroshiro fill" {
var r = Xoroshiro128.init(0);
r.s[0] = 0xaeecf86f7878dd75;
r.s[1] = 0x01cd153642e72622;
const seq = [_]u64{
0xb0ba0da5bb600397,
0x18a08afde614dccc,
0xa2635b956a31b929,
0xabe633c971efa045,
0x9ac19f9706ca3cac,
0xf62b426578c1e3fb,
};
for (seq) |s| {
var buf0: [8]u8 = undefined;
var buf1: [7]u8 = undefined;
std.mem.writeIntLittle(u64, &buf0, s);
Xoroshiro128.fill(&r.random, &buf1);
std.testing.expect(std.mem.eql(u8, buf0[0..7], buf1[0..]));
}
}

View File

@ -515,6 +515,7 @@
</style>
</head>
<body class="canvas">
<div style="background-color: darkred; width: 100vw; text-align: center; color: white; padding: 15px 5px;">These docs are experimental. <a style="color: bisque;text-decoration: underline;" href="https://kristoff.it/blog/zig-new-relationship-llvm/">Progress depends on the self-hosted compiler</a>, <a style="color: bisque;text-decoration: underline;" href="https://github.com/ziglang/zig/wiki/How-to-read-the-standard-library-source-code">consider reading the stlib source in the meantime</a>.</div>
<div class="flex-main">
<div class="flex-filler"></div>
<div class="flex-left sidebar">

View File

@ -31,6 +31,7 @@ pub const PackedIntArrayEndian = @import("packed_int_array.zig").PackedIntArrayE
pub const PackedIntSlice = @import("packed_int_array.zig").PackedIntSlice;
pub const PackedIntSliceEndian = @import("packed_int_array.zig").PackedIntSliceEndian;
pub const PriorityQueue = @import("priority_queue.zig").PriorityQueue;
pub const PriorityDequeue = @import("priority_dequeue.zig").PriorityDequeue;
pub const Progress = @import("Progress.zig");
pub const SemanticVersion = @import("SemanticVersion.zig");
pub const SinglyLinkedList = @import("linked_list.zig").SinglyLinkedList;

View File

@ -11,7 +11,7 @@ pub const Tokenizer = tokenizer.Tokenizer;
pub const fmtId = @import("zig/fmt.zig").fmtId;
pub const fmtEscapes = @import("zig/fmt.zig").fmtEscapes;
pub const parse = @import("zig/parse.zig").parse;
pub const parseStringLiteral = @import("zig/string_literal.zig").parse;
pub const string_literal = @import("zig/string_literal.zig");
pub const ast = @import("zig/ast.zig");
pub const system = @import("zig/system.zig");
pub const CrossTarget = @import("zig/cross_target.zig").CrossTarget;

View File

@ -1252,6 +1252,7 @@ pub const Tree = struct {
buffer[0] = data.lhs;
const params = if (data.lhs == 0) buffer[0..0] else buffer[0..1];
return tree.fullFnProto(.{
.proto_node = node,
.fn_token = tree.nodes.items(.main_token)[node],
.return_type = data.rhs,
.params = params,
@ -1267,6 +1268,7 @@ pub const Tree = struct {
const params_range = tree.extraData(data.lhs, Node.SubRange);
const params = tree.extra_data[params_range.start..params_range.end];
return tree.fullFnProto(.{
.proto_node = node,
.fn_token = tree.nodes.items(.main_token)[node],
.return_type = data.rhs,
.params = params,
@ -1283,6 +1285,7 @@ pub const Tree = struct {
buffer[0] = extra.param;
const params = if (extra.param == 0) buffer[0..0] else buffer[0..1];
return tree.fullFnProto(.{
.proto_node = node,
.fn_token = tree.nodes.items(.main_token)[node],
.return_type = data.rhs,
.params = params,
@ -1298,6 +1301,7 @@ pub const Tree = struct {
const extra = tree.extraData(data.lhs, Node.FnProto);
const params = tree.extra_data[extra.params_start..extra.params_end];
return tree.fullFnProto(.{
.proto_node = node,
.fn_token = tree.nodes.items(.main_token)[node],
.return_type = data.rhs,
.params = params,
@ -1430,7 +1434,7 @@ pub const Tree = struct {
.ast = .{
.lbracket = tree.nodes.items(.main_token)[node],
.elem_count = data.lhs,
.sentinel = null,
.sentinel = 0,
.elem_type = data.rhs,
},
};
@ -1440,6 +1444,7 @@ pub const Tree = struct {
assert(tree.nodes.items(.tag)[node] == .array_type_sentinel);
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.rhs, Node.ArrayTypeSentinel);
assert(extra.sentinel != 0);
return .{
.ast = .{
.lbracket = tree.nodes.items(.main_token)[node],
@ -2119,6 +2124,7 @@ pub const full = struct {
ast: Ast,
pub const Ast = struct {
proto_node: Node.Index,
fn_token: TokenIndex,
return_type: Node.Index,
params: []const Node.Index,
@ -2262,7 +2268,7 @@ pub const full = struct {
pub const Ast = struct {
lbracket: TokenIndex,
elem_count: Node.Index,
sentinel: ?Node.Index,
sentinel: Node.Index,
elem_type: Node.Index,
};
};
@ -2549,9 +2555,9 @@ pub const Node = struct {
@"await",
/// `?lhs`. rhs unused. main_token is the `?`.
optional_type,
/// `[lhs]rhs`. lhs can be omitted to make it a slice.
/// `[lhs]rhs`.
array_type,
/// `[lhs:a]b`. `array_type_sentinel[rhs]`.
/// `[lhs:a]b`. `ArrayTypeSentinel[rhs]`.
array_type_sentinel,
/// `[*]align(lhs) rhs`. lhs can be omitted.
/// `*align(lhs) rhs`. lhs can be omitted.

View File

@ -59,10 +59,7 @@ pub fn parse(gpa: *Allocator, source: []const u8) Allocator.Error!Tree {
parser.nodes.appendAssumeCapacity(.{
.tag = .root,
.main_token = 0,
.data = .{
.lhs = undefined,
.rhs = undefined,
},
.data = undefined,
});
const root_members = try parser.parseContainerMembers();
const root_decls = try root_members.toSpan(&parser);
@ -139,6 +136,16 @@ const Parser = struct {
return result;
}
fn setNode(p: *Parser, i: usize, elem: ast.NodeList.Elem) Node.Index {
p.nodes.set(i, elem);
return @intCast(Node.Index, i);
}
fn reserveNode(p: *Parser) !usize {
try p.nodes.resize(p.gpa, p.nodes.len + 1);
return p.nodes.len - 1;
}
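// Together, reserveNode and setNode let a parent node (e.g. a fn proto)
// claim its index before its children are parsed, keeping parents before
// their children in the node array.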
fn addExtra(p: *Parser, extra: anytype) Allocator.Error!Node.Index {
const fields = std.meta.fields(@TypeOf(extra));
try p.extra_data.ensureCapacity(p.gpa, p.extra_data.items.len + fields.len);
@ -554,9 +561,10 @@ const Parser = struct {
return fn_proto;
},
.l_brace => {
const fn_decl_index = try p.reserveNode();
const body_block = try p.parseBlock();
assert(body_block != 0);
return p.addNode(.{
return p.setNode(fn_decl_index, .{
.tag = .fn_decl,
.main_token = p.nodes.items(.main_token)[fn_proto],
.data = .{
@ -634,6 +642,10 @@ const Parser = struct {
/// FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? LinkSection? CallConv? EXCLAMATIONMARK? (Keyword_anytype / TypeExpr)
fn parseFnProto(p: *Parser) !Node.Index {
const fn_token = p.eatToken(.keyword_fn) orelse return null_node;
// We want the fn proto node to be before its children in the array.
const fn_proto_index = try p.reserveNode();
_ = p.eatToken(.identifier);
const params = try p.parseParamDeclList();
defer params.deinit(p.gpa);
@ -651,7 +663,7 @@ const Parser = struct {
if (align_expr == 0 and section_expr == 0 and callconv_expr == 0) {
switch (params) {
.zero_or_one => |param| return p.addNode(.{
.zero_or_one => |param| return p.setNode(fn_proto_index, .{
.tag = .fn_proto_simple,
.main_token = fn_token,
.data = .{
@ -661,7 +673,7 @@ const Parser = struct {
}),
.multi => |list| {
const span = try p.listToSpan(list);
return p.addNode(.{
return p.setNode(fn_proto_index, .{
.tag = .fn_proto_multi,
.main_token = fn_token,
.data = .{
@ -676,7 +688,7 @@ const Parser = struct {
}
}
switch (params) {
.zero_or_one => |param| return p.addNode(.{
.zero_or_one => |param| return p.setNode(fn_proto_index, .{
.tag = .fn_proto_one,
.main_token = fn_token,
.data = .{
@ -691,7 +703,7 @@ const Parser = struct {
}),
.multi => |list| {
const span = try p.listToSpan(list);
return p.addNode(.{
return p.setNode(fn_proto_index, .{
.tag = .fn_proto,
.main_token = fn_token,
.data = .{

View File

@ -717,9 +717,9 @@ fn renderArrayType(
ais.pushIndentNextLine();
try renderToken(ais, tree, array_type.ast.lbracket, inner_space); // lbracket
try renderExpression(gpa, ais, tree, array_type.ast.elem_count, inner_space);
if (array_type.ast.sentinel) |sentinel| {
try renderToken(ais, tree, tree.firstToken(sentinel) - 1, inner_space); // colon
try renderExpression(gpa, ais, tree, sentinel, inner_space);
if (array_type.ast.sentinel != 0) {
try renderToken(ais, tree, tree.firstToken(array_type.ast.sentinel) - 1, inner_space); // colon
try renderExpression(gpa, ais, tree, array_type.ast.sentinel, inner_space);
}
ais.popIndent();
try renderToken(ais, tree, rbracket, .none); // rbracket
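
Both of the hunks above follow from one representation change: full.ArrayType.ast.sentinel goes from ?Node.Index to a bare Node.Index, with index 0 reserved to mean "no sentinel". This works because the root node always occupies index 0, so no real sub-expression can ever be referred to by 0; the new assert in arrayTypeSentinel guards that invariant. A minimal sketch of the convention, with illustrative names:

const std = @import("std");

const Node = struct {
    /// 0 means "no sentinel"; index 0 is always the root node,
    /// so it can never name a real sub-expression.
    sentinel: u32 = 0,
};

fn hasSentinel(node: Node) bool {
    return node.sentinel != 0;
}

test "0 doubles as null" {
    std.testing.expect(!hasSentinel(.{}));
    std.testing.expect(hasSentinel(.{ .sentinel = 7 }));
}

Compared to an optional, this keeps the field a plain u32 at the cost of one reserved value.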

View File

@ -6,112 +6,143 @@
const std = @import("../std.zig");
const assert = std.debug.assert;
const State = enum {
Start,
Backslash,
};
pub const ParseError = error{
OutOfMemory,
/// When this is returned, index will be the position of the character.
InvalidCharacter,
InvalidStringLiteral,
};
/// caller owns returned memory
pub fn parse(
allocator: *std.mem.Allocator,
bytes: []const u8,
bad_index: *usize, // populated if error.InvalidCharacter is returned
) ParseError![]u8 {
pub const Result = union(enum) {
success,
/// Found an invalid character at this index.
invalid_character: usize,
/// Expected hex digits at this index.
expected_hex_digits: usize,
/// Invalid hex digits at this index.
invalid_hex_escape: usize,
/// Invalid unicode escape at this index.
invalid_unicode_escape: usize,
/// The left brace at this index is missing a matching right brace.
missing_matching_rbrace: usize,
/// Expected unicode digits at this index.
expected_unicode_digits: usize,
};
/// Parses `bytes` as a Zig string literal and appends the result to `buf`.
/// Asserts `bytes` has '"' at beginning and end.
pub fn parseAppend(buf: *std.ArrayList(u8), bytes: []const u8) error{OutOfMemory}!Result {
assert(bytes.len >= 2 and bytes[0] == '"' and bytes[bytes.len - 1] == '"');
var list = std.ArrayList(u8).init(allocator);
errdefer list.deinit();
const slice = bytes[1..];
try list.ensureCapacity(slice.len - 1);
const prev_len = buf.items.len;
try buf.ensureCapacity(prev_len + slice.len - 1);
errdefer buf.shrinkRetainingCapacity(prev_len);
const State = enum {
Start,
Backslash,
};
var state = State.Start;
var index: usize = 0;
while (index < slice.len) : (index += 1) {
while (true) : (index += 1) {
const b = slice[index];
switch (state) {
State.Start => switch (b) {
'\\' => state = State.Backslash,
'\n' => {
bad_index.* = index;
return error.InvalidCharacter;
return Result{ .invalid_character = index };
},
'"' => return list.toOwnedSlice(),
else => try list.append(b),
'"' => return Result.success,
else => try buf.append(b),
},
State.Backslash => switch (b) {
'n' => {
try list.append('\n');
try buf.append('\n');
state = State.Start;
},
'r' => {
try list.append('\r');
try buf.append('\r');
state = State.Start;
},
'\\' => {
try list.append('\\');
try buf.append('\\');
state = State.Start;
},
't' => {
try list.append('\t');
try buf.append('\t');
state = State.Start;
},
'\'' => {
try list.append('\'');
try buf.append('\'');
state = State.Start;
},
'"' => {
try list.append('"');
try buf.append('"');
state = State.Start;
},
'x' => {
// TODO: add more/better/broader tests for this.
const index_continue = index + 3;
if (slice.len >= index_continue)
if (std.fmt.parseUnsigned(u8, slice[index + 1 .. index_continue], 16)) |char| {
try list.append(char);
state = State.Start;
index = index_continue - 1; // loop-header increments again
continue;
} else |_| {};
bad_index.* = index;
return error.InvalidCharacter;
if (slice.len < index_continue) {
return Result{ .expected_hex_digits = index };
}
if (std.fmt.parseUnsigned(u8, slice[index + 1 .. index_continue], 16)) |byte| {
try buf.append(byte);
state = State.Start;
index = index_continue - 1; // loop-header increments again
} else |err| switch (err) {
error.Overflow => unreachable, // 2 digits base 16 fits in a u8.
error.InvalidCharacter => {
return Result{ .invalid_hex_escape = index + 1 };
},
}
},
'u' => {
// TODO: add more/better/broader tests for this.
if (slice.len > index + 2 and slice[index + 1] == '{')
// TODO: we are already inside a nice, clean state machine... use it
// instead of this hacky code.
if (slice.len > index + 2 and slice[index + 1] == '{') {
if (std.mem.indexOfScalarPos(u8, slice[0..std.math.min(index + 9, slice.len)], index + 3, '}')) |index_end| {
const hex_str = slice[index + 2 .. index_end];
if (std.fmt.parseUnsigned(u32, hex_str, 16)) |uint| {
if (uint <= 0x10ffff) {
try list.appendSlice(std.mem.toBytes(uint)[0..]);
try buf.appendSlice(std.mem.toBytes(uint)[0..]);
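// Note: this appends the code point's raw native-endian u32 bytes rather than
// its UTF-8 encoding; std.unicode.utf8Encode is presumably what correct output
// would need. The test below never reaches this path, because its \u escape is
// evaluated by the compiler before the bytes get here.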
state = State.Start;
index = index_end; // loop-header increments
continue;
}
} else |_| {}
};
bad_index.* = index;
return error.InvalidCharacter;
} else |err| switch (err) {
error.Overflow => unreachable,
error.InvalidCharacter => {
return Result{ .invalid_unicode_escape = index + 1 };
},
}
} else {
return Result{ .missing_matching_rbrace = index + 1 };
}
} else {
return Result{ .expected_unicode_digits = index };
}
},
else => {
bad_index.* = index;
return error.InvalidCharacter;
return Result{ .invalid_character = index };
},
},
}
} else unreachable; // TODO should not need else unreachable on while(true)
}
/// Higher level API. Does not return extra info about parse errors.
/// Caller owns returned memory.
pub fn parseAlloc(allocator: *std.mem.Allocator, bytes: []const u8) ParseError![]u8 {
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();
switch (try parseAppend(&buf, bytes)) {
.success => return buf.toOwnedSlice(),
else => return error.InvalidStringLiteral,
}
}
test "parse" {
@ -121,9 +152,8 @@ test "parse" {
var fixed_buf_mem: [32]u8 = undefined;
var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buf_mem[0..]);
var alloc = &fixed_buf_alloc.allocator;
var bad_index: usize = undefined;
expect(eql(u8, "foo", try parse(alloc, "\"foo\"", &bad_index)));
expect(eql(u8, "foo", try parse(alloc, "\"f\x6f\x6f\"", &bad_index)));
expect(eql(u8, "f💯", try parse(alloc, "\"f\u{1f4af}\"", &bad_index)));
expect(eql(u8, "foo", try parseAlloc(alloc, "\"foo\"")));
expect(eql(u8, "foo", try parseAlloc(alloc, "\"f\x6f\x6f\"")));
expect(eql(u8, "f💯", try parseAlloc(alloc, "\"f\u{1f4af}\"")));
}
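
The split into parseAppend (detailed Result) and parseAlloc (plain error) lets callers that want diagnostics switch on the Result variants, each of which carries the offending byte index. A sketch of such a caller, assuming the variants shown above are the complete set:

fn parseWithDiagnostics(gpa: *std.mem.Allocator, bytes: []const u8) ![]u8 {
    var buf = std.ArrayList(u8).init(gpa);
    defer buf.deinit();
    switch (try parseAppend(&buf, bytes)) {
        .success => return buf.toOwnedSlice(),
        .invalid_character => |i| std.debug.print("invalid character at byte {d}\n", .{i}),
        .expected_hex_digits => |i| std.debug.print("expected hex digits at byte {d}\n", .{i}),
        .invalid_hex_escape => |i| std.debug.print("invalid hex escape at byte {d}\n", .{i}),
        .invalid_unicode_escape => |i| std.debug.print("invalid unicode escape at byte {d}\n", .{i}),
        .missing_matching_rbrace => |i| std.debug.print("missing matching '}}' at byte {d}\n", .{i}),
        .expected_unicode_digits => |i| std.debug.print("expected unicode digits at byte {d}\n", .{i}),
    }
    return error.InvalidStringLiteral;
}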

src/AstGen.zig (new file, 4450 lines added)

File diff suppressed because it is too large

View File

@ -259,7 +259,7 @@ pub const CObject = struct {
/// To support incremental compilation, errors are stored in various places
/// so that they can be created and destroyed appropriately. This structure
/// is used to collect all the errors from the various places into one
/// convenient place for API users to consume. It is allocated into 1 heap
/// convenient place for API users to consume. It is allocated into 1 arena
/// and freed all at once.
pub const AllErrors = struct {
arena: std.heap.ArenaAllocator.State,
@ -267,11 +267,11 @@ pub const AllErrors = struct {
pub const Message = union(enum) {
src: struct {
src_path: []const u8,
line: usize,
column: usize,
byte_offset: usize,
msg: []const u8,
src_path: []const u8,
line: u32,
column: u32,
byte_offset: u32,
notes: []Message = &.{},
},
plain: struct {
@ -316,29 +316,31 @@ pub const AllErrors = struct {
const notes = try arena.allocator.alloc(Message, module_err_msg.notes.len);
for (notes) |*note, i| {
const module_note = module_err_msg.notes[i];
const source = try module_note.src_loc.file_scope.getSource(module);
const loc = std.zig.findLineColumn(source, module_note.src_loc.byte_offset);
const sub_file_path = module_note.src_loc.file_scope.sub_file_path;
const source = try module_note.src_loc.fileScope().getSource(module);
const byte_offset = try module_note.src_loc.byteOffset();
const loc = std.zig.findLineColumn(source, byte_offset);
const sub_file_path = module_note.src_loc.fileScope().sub_file_path;
note.* = .{
.src = .{
.src_path = try arena.allocator.dupe(u8, sub_file_path),
.msg = try arena.allocator.dupe(u8, module_note.msg),
.byte_offset = module_note.src_loc.byte_offset,
.line = loc.line,
.column = loc.column,
.byte_offset = byte_offset,
.line = @intCast(u32, loc.line),
.column = @intCast(u32, loc.column),
},
};
}
const source = try module_err_msg.src_loc.file_scope.getSource(module);
const loc = std.zig.findLineColumn(source, module_err_msg.src_loc.byte_offset);
const sub_file_path = module_err_msg.src_loc.file_scope.sub_file_path;
const source = try module_err_msg.src_loc.fileScope().getSource(module);
const byte_offset = try module_err_msg.src_loc.byteOffset();
const loc = std.zig.findLineColumn(source, byte_offset);
const sub_file_path = module_err_msg.src_loc.fileScope().sub_file_path;
try errors.append(.{
.src = .{
.src_path = try arena.allocator.dupe(u8, sub_file_path),
.msg = try arena.allocator.dupe(u8, module_err_msg.msg),
.byte_offset = module_err_msg.src_loc.byte_offset,
.line = loc.line,
.column = loc.column,
.byte_offset = byte_offset,
.line = @intCast(u32, loc.line),
.column = @intCast(u32, loc.column),
.notes = notes,
},
});
@ -939,6 +941,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
};
const module = try arena.create(Module);
errdefer module.deinit();
module.* = .{
.gpa = gpa,
.comp = comp,
@ -946,7 +949,9 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
.root_scope = root_scope,
.zig_cache_artifact_directory = zig_cache_artifact_directory,
.emit_h = options.emit_h,
.error_name_list = try std.ArrayListUnmanaged([]const u8).initCapacity(gpa, 1),
};
module.error_name_list.appendAssumeCapacity("(no error)");
break :blk module;
} else blk: {
if (options.emit_h != null) return error.NoZigModuleForCHeader;
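
Because AllErrors owns every message string in one arena, a consumer can iterate and print freely, then free everything with a single arena deinit. A minimal renderer sketch; the `list` field and the `msg` field of the plain variant are assumptions (only `arena` is visible in the hunk above), and the +1 reflects the usual 0-based storage, 1-based display convention:

fn renderAllErrors(all_errors: AllErrors) void {
    for (all_errors.list) |msg| switch (msg) {
        .src => |src| std.debug.print("{s}:{d}:{d}: error: {s}\n", .{
            src.src_path, src.line + 1, src.column + 1, src.msg,
        }),
        .plain => |plain| std.debug.print("error: {s}\n", .{plain.msg}),
    };
}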

File diff suppressed because it is too large

View File

@ -2,13 +2,14 @@ const std = @import("std");
const Order = std.math.Order;
const Value = @import("value.zig").Value;
const RangeSet = @This();
const SwitchProngSrc = @import("AstGen.zig").SwitchProngSrc;
ranges: std.ArrayList(Range),
pub const Range = struct {
start: Value,
end: Value,
src: usize,
first: Value,
last: Value,
src: SwitchProngSrc,
};
pub fn init(allocator: *std.mem.Allocator) RangeSet {
@ -21,18 +22,15 @@ pub fn deinit(self: *RangeSet) void {
self.ranges.deinit();
}
pub fn add(self: *RangeSet, start: Value, end: Value, src: usize) !?usize {
pub fn add(self: *RangeSet, first: Value, last: Value, src: SwitchProngSrc) !?SwitchProngSrc {
for (self.ranges.items) |range| {
if ((start.compare(.gte, range.start) and start.compare(.lte, range.end)) or
(end.compare(.gte, range.start) and end.compare(.lte, range.end)))
{
// ranges overlap
return range.src;
if (last.compare(.gte, range.first) and first.compare(.lte, range.last)) {
return range.src; // They overlap.
}
}
try self.ranges.append(.{
.start = start,
.end = end,
.first = first,
.last = last,
.src = src,
});
return null;
@ -40,14 +38,17 @@ pub fn add(self: *RangeSet, start: Value, end: Value, src: usize) !?usize {
/// Assumes a and b do not overlap
fn lessThan(_: void, a: Range, b: Range) bool {
return a.start.compare(.lt, b.start);
return a.first.compare(.lt, b.first);
}
pub fn spans(self: *RangeSet, start: Value, end: Value) !bool {
pub fn spans(self: *RangeSet, first: Value, last: Value) !bool {
if (self.ranges.items.len == 0)
return false;
std.sort.sort(Range, self.ranges.items, {}, lessThan);
if (!self.ranges.items[0].start.eql(start) or
!self.ranges.items[self.ranges.items.len - 1].end.eql(end))
if (!self.ranges.items[0].first.eql(first) or
!self.ranges.items[self.ranges.items.len - 1].last.eql(last))
{
return false;
}
@ -62,11 +63,11 @@ pub fn spans(self: *RangeSet, start: Value, end: Value) !bool {
// i starts counting from the second item.
const prev = self.ranges.items[i];
// prev.end + 1 == cur.start
try counter.copy(prev.end.toBigInt(&space));
// prev.last + 1 == cur.first
try counter.copy(prev.last.toBigInt(&space));
try counter.addScalar(counter.toConst(), 1);
const cur_start_int = cur.start.toBigInt(&space);
const cur_start_int = cur.first.toBigInt(&space);
if (!cur_start_int.eq(counter.toConst())) {
return false;
}
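
The rewritten check in `add` is the standard closed-interval intersection test: two inclusive ranges overlap exactly when each starts at or before the other ends. The same predicate on plain integers, for illustration:

fn overlaps(a_first: i64, a_last: i64, b_first: i64, b_last: i64) bool {
    return a_last >= b_first and a_first <= b_last;
}

test "closed-interval overlap" {
    std.testing.expect(overlaps(0, 5, 5, 9)); // a shared endpoint counts as overlap
    std.testing.expect(!overlaps(0, 4, 5, 9)); // adjacent ranges are disjoint here,
    // which is why `spans` separately checks prev.last + 1 == cur.first
}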

src/Sema.zig (new file, 5091 lines added)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -17,6 +17,8 @@ const DW = std.dwarf;
const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
const LazySrcLoc = Module.LazySrcLoc;
const RegisterManager = @import("register_manager.zig").RegisterManager;
/// The codegen-related data that is stored in `ir.Inst.Block` instructions.
pub const BlockData = struct {
@ -285,11 +287,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
/// across each runtime branch upon joining.
branch_stack: *std.ArrayList(Branch),
/// The key must be canonical register.
registers: std.AutoHashMapUnmanaged(Register, *ir.Inst) = .{},
free_registers: FreeRegInt = math.maxInt(FreeRegInt),
/// Tracks all registers allocated in the course of this function
allocated_registers: FreeRegInt = 0,
register_manager: RegisterManager(Self, Register, &callee_preserved_regs) = .{},
/// Maps offset to what is stored there.
stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{},
@ -381,49 +379,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
};
fn markRegUsed(self: *Self, reg: Register) void {
if (FreeRegInt == u0) return;
const index = reg.allocIndex() orelse return;
const ShiftInt = math.Log2Int(FreeRegInt);
const shift = @intCast(ShiftInt, index);
const mask = @as(FreeRegInt, 1) << shift;
self.free_registers &= ~mask;
self.allocated_registers |= mask;
}
fn markRegFree(self: *Self, reg: Register) void {
if (FreeRegInt == u0) return;
const index = reg.allocIndex() orelse return;
const ShiftInt = math.Log2Int(FreeRegInt);
const shift = @intCast(ShiftInt, index);
self.free_registers |= @as(FreeRegInt, 1) << shift;
}
/// Before calling, must ensureCapacity + 1 on self.registers.
/// Returns `null` if all registers are allocated.
fn allocReg(self: *Self, inst: *ir.Inst) ?Register {
const free_index = @ctz(FreeRegInt, self.free_registers);
if (free_index >= callee_preserved_regs.len) {
return null;
}
const mask = @as(FreeRegInt, 1) << free_index;
self.free_registers &= ~mask;
self.allocated_registers |= mask;
const reg = callee_preserved_regs[free_index];
self.registers.putAssumeCapacityNoClobber(reg, inst);
log.debug("alloc {} => {*}", .{ reg, inst });
return reg;
}
/// Does not track the register.
fn findUnusedReg(self: *Self) ?Register {
const free_index = @ctz(FreeRegInt, self.free_registers);
if (free_index >= callee_preserved_regs.len) {
return null;
}
return callee_preserved_regs[free_index];
}
const StackAllocation = struct {
inst: *ir.Inst,
/// TODO do we need size? should be determined by inst.ty.abiSize()
@ -494,11 +449,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.rbrace_src = src_data.rbrace_src,
.source = src_data.source,
};
defer function.registers.deinit(bin_file.allocator);
defer function.register_manager.deinit(bin_file.allocator);
defer function.stack.deinit(bin_file.allocator);
defer function.exitlude_jump_relocs.deinit(bin_file.allocator);
var call_info = function.resolveCallingConventionValues(src_loc.byte_offset, fn_type) catch |err| switch (err) {
var call_info = function.resolveCallingConventionValues(src_loc.lazy, fn_type) catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
else => |e| return e,
};
@ -606,10 +561,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.r14 = true, // lr
};
inline for (callee_preserved_regs) |reg, i| {
const ShiftInt = math.Log2Int(FreeRegInt);
const shift = @intCast(ShiftInt, i);
const mask = @as(FreeRegInt, 1) << shift;
if (self.allocated_registers & mask != 0) {
if (self.register_manager.isRegAllocated(reg)) {
@field(saved_regs, @tagName(reg)) = true;
}
}
@ -791,8 +743,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
fn dbgAdvancePCAndLine(self: *Self, src: usize) InnerError!void {
self.prev_di_src = src;
fn dbgAdvancePCAndLine(self: *Self, abs_byte_off: usize) InnerError!void {
self.prev_di_src = abs_byte_off;
self.prev_di_pc = self.code.items.len;
switch (self.debug_output) {
.dwarf => |dbg_out| {
@ -800,7 +752,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// lookup table, and changing ir.Inst from storing byte offset to token. Currently
// this involves scanning over the source code for newlines
// (but only from the previous byte offset to the new one).
const delta_line = std.zig.lineDelta(self.source, self.prev_di_src, src);
const delta_line = std.zig.lineDelta(self.source, self.prev_di_src, abs_byte_off);
const delta_pc = self.code.items.len - self.prev_di_pc;
// TODO Look into using the DWARF special opcodes to compress this data. It lets you emit
// single-byte opcodes that add different numbers to both the PC and the line number
@ -828,8 +780,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
switch (prev_value) {
.register => |reg| {
const canon_reg = toCanonicalReg(reg);
_ = self.registers.remove(canon_reg);
self.markRegFree(canon_reg);
self.register_manager.freeReg(canon_reg);
},
else => {}, // TODO process stack allocation death
}
@ -897,6 +848,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?),
.is_err => return self.genIsErr(inst.castTag(.is_err).?),
.is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?),
.error_to_int => return self.genErrorToInt(inst.castTag(.error_to_int).?),
.int_to_error => return self.genIntToError(inst.castTag(.int_to_error).?),
.load => return self.genLoad(inst.castTag(.load).?),
.loop => return self.genLoop(inst.castTag(.loop).?),
.not => return self.genNot(inst.castTag(.not).?),
@ -907,6 +860,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.ret => return self.genRet(inst.castTag(.ret).?),
.retvoid => return self.genRetVoid(inst.castTag(.retvoid).?),
.store => return self.genStore(inst.castTag(.store).?),
.struct_field_ptr => return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?),
.sub => return self.genSub(inst.castTag(.sub).?),
.subwrap => return self.genSubWrap(inst.castTag(.subwrap).?),
.switchbr => return self.genSwitch(inst.castTag(.switchbr).?),
@ -965,8 +919,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const ptr_bits = arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
if (abi_size <= ptr_bytes) {
try self.registers.ensureCapacity(self.gpa, self.registers.count() + 1);
if (self.allocReg(inst)) |reg| {
try self.register_manager.registers.ensureCapacity(self.gpa, self.register_manager.registers.count() + 1);
if (self.register_manager.tryAllocReg(inst)) |reg| {
return MCValue{ .register = registerAlias(reg, abi_size) };
}
}
@ -975,26 +929,20 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return MCValue{ .stack_offset = stack_offset };
}
pub fn spillInstruction(self: *Self, src: LazySrcLoc, reg: Register, inst: *ir.Inst) !void {
const stack_mcv = try self.allocRegOrMem(inst, false);
const reg_mcv = self.getResolvedInstValue(inst);
assert(reg == toCanonicalReg(reg_mcv.register));
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
try branch.inst_table.put(self.gpa, inst, stack_mcv);
try self.genSetStack(src, inst.ty, stack_mcv.stack_offset, reg_mcv);
}
/// Copies a value to a register without tracking the register. The register is not considered
/// allocated. A second call to `copyToTmpRegister` may return the same register.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToTmpRegister(self: *Self, src: usize, ty: Type, mcv: MCValue) !Register {
const reg = self.findUnusedReg() orelse b: {
// We'll take over the first register. Move the instruction that was previously
// there to a stack allocation.
const reg = callee_preserved_regs[0];
const regs_entry = self.registers.remove(reg).?;
const spilled_inst = regs_entry.value;
const stack_mcv = try self.allocRegOrMem(spilled_inst, false);
const reg_mcv = self.getResolvedInstValue(spilled_inst);
assert(reg == toCanonicalReg(reg_mcv.register));
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
try branch.inst_table.put(self.gpa, spilled_inst, stack_mcv);
try self.genSetStack(src, spilled_inst.ty, stack_mcv.stack_offset, reg_mcv);
break :b reg;
};
fn copyToTmpRegister(self: *Self, src: LazySrcLoc, ty: Type, mcv: MCValue) !Register {
const reg = try self.register_manager.allocRegWithoutTracking();
try self.genSetReg(src, ty, reg, mcv);
return reg;
}
@ -1003,25 +951,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
/// `reg_owner` is the instruction that gets associated with the register in the register table.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToNewRegister(self: *Self, reg_owner: *ir.Inst, mcv: MCValue) !MCValue {
try self.registers.ensureCapacity(self.gpa, @intCast(u32, self.registers.count() + 1));
try self.register_manager.registers.ensureCapacity(self.gpa, @intCast(u32, self.register_manager.registers.count() + 1));
const reg = self.allocReg(reg_owner) orelse b: {
// We'll take over the first register. Move the instruction that was previously
// there to a stack allocation.
const reg = callee_preserved_regs[0];
const regs_entry = self.registers.getEntry(reg).?;
const spilled_inst = regs_entry.value;
regs_entry.value = reg_owner;
const stack_mcv = try self.allocRegOrMem(spilled_inst, false);
const reg_mcv = self.getResolvedInstValue(spilled_inst);
assert(reg == toCanonicalReg(reg_mcv.register));
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
try branch.inst_table.put(self.gpa, spilled_inst, stack_mcv);
try self.genSetStack(reg_owner.src, spilled_inst.ty, stack_mcv.stack_offset, reg_mcv);
break :b reg;
};
const reg = try self.register_manager.allocReg(reg_owner);
try self.genSetReg(reg_owner.src, reg_owner.ty, reg, mcv);
return MCValue{ .register = reg };
}
@ -1298,7 +1230,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.register => |reg| {
// If it's in the registers table, need to associate the register with the
// new instruction.
if (self.registers.getEntry(toCanonicalReg(reg))) |entry| {
if (self.register_manager.registers.getEntry(toCanonicalReg(reg))) |entry| {
entry.value = inst;
}
log.debug("reusing {} => {*}", .{ reg, inst });
@ -1400,6 +1332,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return .none;
}
fn genStructFieldPtr(self: *Self, inst: *ir.Inst.StructFieldPtr) !MCValue {
return self.fail(inst.base.src, "TODO implement codegen struct_field_ptr", .{});
}
fn genSub(self: *Self, inst: *ir.Inst.BinOp) !MCValue {
// No side effects, so if it's unreferenced, do nothing.
if (inst.base.isUnused())
@ -1457,7 +1393,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
fn genArmBinOpCode(
self: *Self,
src: usize,
src: LazySrcLoc,
dst_reg: Register,
lhs_mcv: MCValue,
rhs_mcv: MCValue,
@ -1620,7 +1556,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
fn genX8664BinMathCode(
self: *Self,
src: usize,
src: LazySrcLoc,
dst_ty: Type,
dst_mcv: MCValue,
src_mcv: MCValue,
@ -1706,7 +1642,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
fn genX8664ModRMRegToStack(self: *Self, src: usize, ty: Type, off: u32, reg: Register, opcode: u8) !void {
fn genX8664ModRMRegToStack(self: *Self, src: LazySrcLoc, ty: Type, off: u32, reg: Register, opcode: u8) !void {
const abi_size = ty.abiSize(self.target.*);
const adj_off = off + abi_size;
try self.code.ensureCapacity(self.code.items.len + 7);
@ -1787,7 +1723,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const arg_index = self.arg_index;
self.arg_index += 1;
if (FreeRegInt == u0) {
if (callee_preserved_regs.len == 0) {
return self.fail(inst.base.src, "TODO implement Register enum for {}", .{self.target.cpu.arch});
}
@ -1799,15 +1735,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
switch (result) {
.register => |reg| {
try self.registers.putNoClobber(self.gpa, toCanonicalReg(reg), &inst.base);
self.markRegUsed(reg);
try self.register_manager.getRegAssumeFree(toCanonicalReg(reg), &inst.base);
},
else => {},
}
return result;
}
fn genBreakpoint(self: *Self, src: usize) !MCValue {
fn genBreakpoint(self: *Self, src: LazySrcLoc) !MCValue {
switch (arch) {
.i386, .x86_64 => {
try self.code.append(0xcc); // int3
@ -2194,6 +2129,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
unreachable;
}
switch (info.return_value) {
.register => |reg| {
if (Register.allocIndex(reg) == null) {
// Save function return value in a callee saved register
return try self.copyToNewRegister(&inst.base, info.return_value);
}
},
else => {},
}
return info.return_value;
}
@ -2224,7 +2169,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
fn ret(self: *Self, src: usize, mcv: MCValue) !MCValue {
fn ret(self: *Self, src: LazySrcLoc, mcv: MCValue) !MCValue {
const ret_ty = self.fn_type.fnReturnType();
try self.setRegOrMem(src, ret_ty, self.ret_mcv, mcv);
switch (arch) {
@ -2314,8 +2259,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
fn genDbgStmt(self: *Self, inst: *ir.Inst.NoOp) !MCValue {
try self.dbgAdvancePCAndLine(inst.base.src);
fn genDbgStmt(self: *Self, inst: *ir.Inst.DbgStmt) !MCValue {
// TODO when reworking tzir memory layout, rework source locations here as
// well, both to be more efficient and to support inlined function calls correctly.
// For now we convert LazySrcLoc to absolute byte offset, to match what the
// existing codegen code expects.
try self.dbgAdvancePCAndLine(inst.byte_offset);
assert(inst.base.isUnused());
return MCValue.dead;
}
@ -2409,10 +2358,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// Capture the state of register and stack allocation state so that we can revert to it.
const parent_next_stack_offset = self.next_stack_offset;
const parent_free_registers = self.free_registers;
const parent_free_registers = self.register_manager.free_registers;
var parent_stack = try self.stack.clone(self.gpa);
defer parent_stack.deinit(self.gpa);
var parent_registers = try self.registers.clone(self.gpa);
var parent_registers = try self.register_manager.registers.clone(self.gpa);
defer parent_registers.deinit(self.gpa);
try self.branch_stack.append(.{});
@ -2429,8 +2378,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
var saved_then_branch = self.branch_stack.pop();
defer saved_then_branch.deinit(self.gpa);
self.registers.deinit(self.gpa);
self.registers = parent_registers;
self.register_manager.registers.deinit(self.gpa);
self.register_manager.registers = parent_registers;
parent_registers = .{};
self.stack.deinit(self.gpa);
@ -2438,7 +2387,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
parent_stack = .{};
self.next_stack_offset = parent_next_stack_offset;
self.free_registers = parent_free_registers;
self.register_manager.free_registers = parent_free_registers;
try self.performReloc(inst.base.src, reloc);
const else_branch = self.branch_stack.addOneAssumeCapacity();
@ -2552,6 +2501,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.fail(inst.base.src, "TODO load the operand and call genIsErr", .{});
}
fn genErrorToInt(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
return self.resolveInst(inst.operand);
}
fn genIntToError(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
return self.resolveInst(inst.operand);
}
fn genLoop(self: *Self, inst: *ir.Inst.Loop) !MCValue {
// A loop is a setup to be able to jump back to the beginning.
const start_index = self.code.items.len;
@ -2561,7 +2518,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
/// Send control flow to the `index` of `self.code`.
fn jump(self: *Self, src: usize, index: usize) !void {
fn jump(self: *Self, src: LazySrcLoc, index: usize) !void {
switch (arch) {
.i386, .x86_64 => {
try self.code.ensureCapacity(self.code.items.len + 5);
@ -2618,7 +2575,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
fn performReloc(self: *Self, src: usize, reloc: Reloc) !void {
fn performReloc(self: *Self, src: LazySrcLoc, reloc: Reloc) !void {
switch (reloc) {
.rel32 => |pos| {
const amt = self.code.items.len - (pos + 4);
@ -2682,7 +2639,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
fn br(self: *Self, src: usize, block: *ir.Inst.Block, operand: *ir.Inst) !MCValue {
fn br(self: *Self, src: LazySrcLoc, block: *ir.Inst.Block, operand: *ir.Inst) !MCValue {
if (operand.ty.hasCodeGenBits()) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = @bitCast(MCValue, block.codegen.mcv);
@ -2695,7 +2652,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.brVoid(src, block);
}
fn brVoid(self: *Self, src: usize, block: *ir.Inst.Block) !MCValue {
fn brVoid(self: *Self, src: LazySrcLoc, block: *ir.Inst.Block) !MCValue {
// Emit a jump with a relocation. It will be patched up after the block ends.
try block.codegen.relocs.ensureCapacity(self.gpa, block.codegen.relocs.items.len + 1);
@ -2757,7 +2714,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.fail(inst.base.src, "TODO implement support for more arm assembly instructions", .{});
}
if (inst.output) |output| {
if (inst.output_name) |output| {
if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') {
return self.fail(inst.base.src, "unrecognized asm output constraint: '{s}'", .{output});
}
@ -2789,7 +2746,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.fail(inst.base.src, "TODO implement support for more aarch64 assembly instructions", .{});
}
if (inst.output) |output| {
if (inst.output_name) |output| {
if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') {
return self.fail(inst.base.src, "unrecognized asm output constraint: '{s}'", .{output});
}
@ -2819,7 +2776,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.fail(inst.base.src, "TODO implement support for more riscv64 assembly instructions", .{});
}
if (inst.output) |output| {
if (inst.output_name) |output| {
if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') {
return self.fail(inst.base.src, "unrecognized asm output constraint: '{s}'", .{output});
}
@ -2849,7 +2806,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.fail(inst.base.src, "TODO implement support for more x86 assembly instructions", .{});
}
if (inst.output) |output| {
if (inst.output_name) |output| {
if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') {
return self.fail(inst.base.src, "unrecognized asm output constraint: '{s}'", .{output});
}
@ -2899,7 +2856,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
/// Sets the value without any modifications to register allocation metadata or stack allocation metadata.
fn setRegOrMem(self: *Self, src: usize, ty: Type, loc: MCValue, val: MCValue) !void {
fn setRegOrMem(self: *Self, src: LazySrcLoc, ty: Type, loc: MCValue, val: MCValue) !void {
switch (loc) {
.none => return,
.register => |reg| return self.genSetReg(src, ty, reg, val),
@ -2911,7 +2868,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
fn genSetStack(self: *Self, src: usize, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
fn genSetStack(self: *Self, src: LazySrcLoc, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
switch (arch) {
.arm, .armeb => switch (mcv) {
.dead => unreachable,
@ -3111,7 +3068,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const adj_off = stack_offset + abi_size;
switch (abi_size) {
4, 8 => {
1, 2, 4, 8 => {
const offset = if (math.cast(i9, adj_off)) |imm|
Instruction.LoadStoreOffset.imm_post_index(-imm)
else |_|
@ -3121,8 +3078,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.aarch64_32 => .w29,
else => unreachable,
};
const str = switch (abi_size) {
1 => Instruction.strb,
2 => Instruction.strh,
4, 8 => Instruction.str,
else => unreachable, // unexpected abi size
};
writeInt(u32, try self.code.addManyAsArray(4), Instruction.str(reg, rn, .{
writeInt(u32, try self.code.addManyAsArray(4), str(reg, rn, .{
.offset = offset,
}).toU32());
},
@ -3144,7 +3107,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
fn genSetReg(self: *Self, src: usize, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
fn genSetReg(self: *Self, src: LazySrcLoc, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
switch (arch) {
.arm, .armeb => switch (mcv) {
.dead => unreachable,
@ -3687,7 +3650,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return mcv;
}
fn genTypedValue(self: *Self, src: usize, typed_value: TypedValue) InnerError!MCValue {
fn genTypedValue(self: *Self, src: LazySrcLoc, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
@ -3762,7 +3725,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
};
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, src: usize, fn_ty: Type) !CallMCValues {
fn resolveCallingConventionValues(self: *Self, src: LazySrcLoc, fn_ty: Type) !CallMCValues {
const cc = fn_ty.fnCallingConvention();
const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
defer self.gpa.free(param_types);
@ -3976,13 +3939,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
};
}
fn fail(self: *Self, src: usize, comptime format: []const u8, args: anytype) InnerError {
fn fail(self: *Self, src: LazySrcLoc, comptime format: []const u8, args: anytype) InnerError {
@setCold(true);
assert(self.err_msg == null);
self.err_msg = try ErrorMsg.create(self.bin_file.allocator, .{
.file_scope = self.src_loc.file_scope,
.byte_offset = src,
}, format, args);
const src_loc = if (src != .unneeded)
src.toSrcLocWithDecl(self.mod_fn.owner_decl)
else
self.src_loc;
self.err_msg = try ErrorMsg.create(self.bin_file.allocator, src_loc, format, args);
return error.CodegenFail;
}
@ -4012,9 +3976,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
},
};
/// An integer whose bits represent all the registers and whether they are free.
const FreeRegInt = std.meta.Int(.unsigned, callee_preserved_regs.len);
fn parseRegName(name: []const u8) ?Register {
if (@hasDecl(Register, "parseRegName")) {
return Register.parseRegName(name);
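
All of the deleted helpers above were manipulating a single free_registers bitmask, bookkeeping that RegisterManager now encapsulates. A stripped-down sketch of the underlying technique, assuming at least one allocatable register (the names are illustrative, not the register_manager.zig API):

fn FreeRegTracker(comptime count: usize) type {
    return struct {
        const Mask = std.meta.Int(.unsigned, count);
        const Shift = std.math.Log2Int(Mask);

        /// Bit i set means register i is free.
        free: Mask = std.math.maxInt(Mask),

        fn tryAlloc(self: *@This()) ?usize {
            const index = @ctz(Mask, self.free);
            if (index >= count) return null; // every register is taken
            self.free &= ~(@as(Mask, 1) << @intCast(Shift, index));
            return index;
        }

        fn release(self: *@This(), index: usize) void {
            self.free |= @as(Mask, 1) << @intCast(Shift, index);
        }
    };
}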

View File

@ -484,7 +484,23 @@ pub const Instruction = union(enum) {
}
};
fn loadStoreRegister(rt: Register, rn: Register, offset: LoadStoreOffset, load: bool) Instruction {
/// Which kind of load/store to perform
const LoadStoreVariant = enum {
/// 32-bit or 64-bit
normal,
/// 16-bit
half,
/// 8-bit
byte,
};
fn loadStoreRegister(
rt: Register,
rn: Register,
offset: LoadStoreOffset,
variant: LoadStoreVariant,
load: bool,
) Instruction {
const off = offset.toU12();
const op1: u2 = blk: {
switch (offset) {
@ -497,35 +513,27 @@ pub const Instruction = union(enum) {
break :blk 0b00;
};
const opc: u2 = if (load) 0b01 else 0b00;
switch (rt.size()) {
32 => {
return Instruction{
.LoadStoreRegister = .{
.rt = rt.id(),
.rn = rn.id(),
.offset = offset.toU12(),
.opc = opc,
.op1 = op1,
.v = 0,
.size = 0b10,
},
};
return Instruction{
.LoadStoreRegister = .{
.rt = rt.id(),
.rn = rn.id(),
.offset = off,
.opc = opc,
.op1 = op1,
.v = 0,
.size = blk: {
switch (variant) {
.normal => switch (rt.size()) {
32 => break :blk 0b10,
64 => break :blk 0b11,
else => unreachable, // unexpected register size
},
.half => break :blk 0b01,
.byte => break :blk 0b00,
}
},
},
64 => {
return Instruction{
.LoadStoreRegister = .{
.rt = rt.id(),
.rn = rn.id(),
.offset = offset.toU12(),
.opc = opc,
.op1 = op1,
.v = 0,
.size = 0b11,
},
};
},
else => unreachable, // unexpected register size
}
};
}
fn loadStorePairOfRegisters(
@ -748,7 +756,7 @@ pub const Instruction = union(enum) {
pub fn ldr(rt: Register, args: LdrArgs) Instruction {
switch (args) {
.register => |info| return loadStoreRegister(rt, info.rn, info.offset, true),
.register => |info| return loadStoreRegister(rt, info.rn, info.offset, .normal, true),
.literal => |literal| return loadLiteral(rt, literal),
}
}
@ -758,7 +766,15 @@ pub const Instruction = union(enum) {
};
pub fn str(rt: Register, rn: Register, args: StrArgs) Instruction {
return loadStoreRegister(rt, rn, args.offset, false);
return loadStoreRegister(rt, rn, args.offset, .normal, false);
}
pub fn strh(rt: Register, rn: Register, args: StrArgs) Instruction {
return loadStoreRegister(rt, rn, args.offset, .half, false);
}
pub fn strb(rt: Register, rn: Register, args: StrArgs) Instruction {
return loadStoreRegister(rt, rn, args.offset, .byte, false);
}
// Load or store pair of registers
@ -996,6 +1012,14 @@ test "serialize instructions" {
.inst = Instruction.str(.x2, .x1, .{ .offset = Instruction.LoadStoreOffset.reg(.x3) }),
.expected = 0b11_111_0_00_00_1_00011_011_0_10_00001_00010,
},
.{ // strh w0, [x1]
.inst = Instruction.strh(.w0, .x1, .{}),
.expected = 0b01_111_0_01_00_000000000000_00001_00000,
},
.{ // strb w8, [x9]
.inst = Instruction.strb(.w8, .x9, .{}),
.expected = 0b00_111_0_01_00_000000000000_01001_01000,
},
.{ // adr x2, #0x8
.inst = Instruction.adr(.x2, 0x8),
.expected = 0b0_00_10000_0000000000000000010_00010,

View File

@ -14,6 +14,7 @@ const TypedValue = @import("../TypedValue.zig");
const C = link.File.C;
const Decl = Module.Decl;
const trace = @import("../tracy.zig").trace;
const LazySrcLoc = Module.LazySrcLoc;
const Mutability = enum { Const, Mut };
@ -145,11 +146,10 @@ pub const DeclGen = struct {
error_msg: ?*Module.ErrorMsg,
typedefs: TypedefMap,
fn fail(dg: *DeclGen, src: usize, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
dg.error_msg = try Module.ErrorMsg.create(dg.module.gpa, .{
.file_scope = dg.decl.getFileScope(),
.byte_offset = src,
}, format, args);
fn fail(dg: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
@setCold(true);
const src_loc = src.toSrcLocWithDecl(dg.decl);
dg.error_msg = try Module.ErrorMsg.create(dg.module.gpa, src_loc, format, args);
return error.AnalysisFail;
}
@ -160,7 +160,7 @@ pub const DeclGen = struct {
val: Value,
) error{ OutOfMemory, AnalysisFail }!void {
if (val.isUndef()) {
return dg.fail(dg.decl.src(), "TODO: C backend: properly handle undefined in all cases (with debug safety?)", .{});
return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: properly handle undefined in all cases (with debug safety?)", .{});
}
switch (t.zigTypeTag()) {
.Int => {
@ -193,7 +193,7 @@ pub const DeclGen = struct {
try writer.print("{s}", .{decl.name});
},
else => |e| return dg.fail(
dg.decl.src(),
.{ .node_offset = 0 },
"TODO: C backend: implement Pointer value {s}",
.{@tagName(e)},
),
@ -276,7 +276,7 @@ pub const DeclGen = struct {
try writer.writeAll(", .error = 0 }");
}
},
else => |e| return dg.fail(dg.decl.src(), "TODO: C backend: implement value {s}", .{
else => |e| return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement value {s}", .{
@tagName(e),
}),
}
@ -350,7 +350,7 @@ pub const DeclGen = struct {
break;
}
} else {
return dg.fail(dg.decl.src(), "TODO: C backend: implement integer types larger than 128 bits", .{});
return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement integer types larger than 128 bits", .{});
}
},
else => unreachable,
@ -358,7 +358,7 @@ pub const DeclGen = struct {
},
.Pointer => {
if (t.isSlice()) {
return dg.fail(dg.decl.src(), "TODO: C backend: implement slices", .{});
return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement slices", .{});
} else {
try dg.renderType(w, t.elemType());
try w.writeAll(" *");
@ -431,7 +431,7 @@ pub const DeclGen = struct {
dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered });
},
.Null, .Undefined => unreachable, // must be const or comptime
else => |e| return dg.fail(dg.decl.src(), "TODO: C backend: implement type {s}", .{
else => |e| return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type {s}", .{
@tagName(e),
}),
}
@ -569,13 +569,15 @@ pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!voi
.optional_payload_ptr => try genOptionalPayload(o, inst.castTag(.optional_payload_ptr).?),
.is_err => try genIsErr(o, inst.castTag(.is_err).?),
.is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?),
.error_to_int => try genErrorToInt(o, inst.castTag(.error_to_int).?),
.int_to_error => try genIntToError(o, inst.castTag(.int_to_error).?),
.unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?),
.unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?),
.unwrap_errunion_payload_ptr => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload_ptr).?),
.unwrap_errunion_err_ptr => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err_ptr).?),
.wrap_errunion_payload => try genWrapErrUnionPay(o, inst.castTag(.wrap_errunion_payload).?),
.wrap_errunion_err => try genWrapErrUnionErr(o, inst.castTag(.wrap_errunion_err).?),
else => |e| return o.dg.fail(o.dg.decl.src(), "TODO: C backend: implement codegen for {}", .{e}),
else => |e| return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for {}", .{e}),
};
switch (result_value) {
.none => {},
@ -756,11 +758,11 @@ fn genCall(o: *Object, inst: *Inst.Call) !CValue {
try writer.writeAll(");\n");
return result_local;
} else {
return o.dg.fail(o.dg.decl.src(), "TODO: C backend: implement function pointers", .{});
return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement function pointers", .{});
}
}
fn genDbgStmt(o: *Object, inst: *Inst.NoOp) !CValue {
fn genDbgStmt(o: *Object, inst: *Inst.DbgStmt) !CValue {
// TODO emit #line directive here with line number and filename
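// A sketch of what that might look like, assuming the line number and
// file path are recoverable at this point:
//   try writer.print("#line {d} \"{s}\"\n", .{ line + 1, file_path });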
return CValue.none;
}
@ -913,13 +915,13 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue {
try o.writeCValue(writer, arg_c_value);
try writer.writeAll(";\n");
} else {
return o.dg.fail(o.dg.decl.src(), "TODO non-explicit inline asm regs", .{});
return o.dg.fail(.{ .node_offset = 0 }, "TODO non-explicit inline asm regs", .{});
}
}
const volatile_string: []const u8 = if (as.is_volatile) "volatile " else "";
try writer.print("__asm {s}(\"{s}\"", .{ volatile_string, as.asm_source });
if (as.output) |_| {
return o.dg.fail(o.dg.decl.src(), "TODO inline asm output", .{});
return o.dg.fail(.{ .node_offset = 0 }, "TODO inline asm output", .{});
}
if (as.inputs.len > 0) {
if (as.output == null) {
@ -945,7 +947,7 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue {
if (as.base.isUnused())
return CValue.none;
return o.dg.fail(o.dg.decl.src(), "TODO: C backend: inline asm expression result used", .{});
return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: inline asm expression result used", .{});
}
fn genIsNull(o: *Object, inst: *Inst.UnOp) !CValue {
@ -1072,6 +1074,14 @@ fn genIsErr(o: *Object, inst: *Inst.UnOp) !CValue {
return local;
}
fn genIntToError(o: *Object, inst: *Inst.UnOp) !CValue {
return o.resolveInst(inst.operand);
}
fn genErrorToInt(o: *Object, inst: *Inst.UnOp) !CValue {
return o.resolveInst(inst.operand);
}
fn IndentWriter(comptime UnderlyingWriter: type) type {
return struct {
const Self = @This();

File diff suppressed because it is too large

View File

@ -14,6 +14,7 @@ const Type = @import("../type.zig").Type;
const Value = @import("../value.zig").Value;
const Compilation = @import("../Compilation.zig");
const AnyMCValue = @import("../codegen.zig").AnyMCValue;
const LazySrcLoc = Module.LazySrcLoc;
/// Wasm Value, created when generating an instruction
const WValue = union(enum) {
@ -70,11 +71,9 @@ pub const Context = struct {
}
/// Sets `err_msg` on `Context` and returns `error.CodegenFail` which is caught in link/Wasm.zig
fn fail(self: *Context, src: usize, comptime fmt: []const u8, args: anytype) InnerError {
self.err_msg = try Module.ErrorMsg.create(self.gpa, .{
.file_scope = self.decl.getFileScope(),
.byte_offset = src,
}, fmt, args);
fn fail(self: *Context, src: LazySrcLoc, comptime fmt: []const u8, args: anytype) InnerError {
const src_loc = src.toSrcLocWithDecl(self.decl);
self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, fmt, args);
return error.CodegenFail;
}
@ -91,7 +90,7 @@ pub const Context = struct {
}
/// Using a given `Type`, returns the corresponding wasm value type
fn genValtype(self: *Context, src: usize, ty: Type) InnerError!u8 {
fn genValtype(self: *Context, src: LazySrcLoc, ty: Type) InnerError!u8 {
return switch (ty.tag()) {
.f32 => wasm.valtype(.f32),
.f64 => wasm.valtype(.f64),
@ -104,7 +103,7 @@ pub const Context = struct {
/// Using a given `Type`, returns the corresponding wasm value type
/// Differently from `genValtype` this also allows `void` to create a block
/// with no return type
fn genBlockType(self: *Context, src: usize, ty: Type) InnerError!u8 {
fn genBlockType(self: *Context, src: LazySrcLoc, ty: Type) InnerError!u8 {
return switch (ty.tag()) {
.void, .noreturn => wasm.block_empty,
else => self.genValtype(src, ty),
@ -139,7 +138,7 @@ pub const Context = struct {
ty.fnParamTypes(params);
for (params) |param_type| {
// Can we maybe get the source index of each param?
const val_type = try self.genValtype(self.decl.src(), param_type);
const val_type = try self.genValtype(.{ .node_offset = 0 }, param_type);
try writer.writeByte(val_type);
}
}
@ -151,7 +150,7 @@ pub const Context = struct {
else => |ret_type| {
try leb.writeULEB128(writer, @as(u32, 1));
// Can we maybe get the source index of the return type?
const val_type = try self.genValtype(self.decl.src(), return_type);
const val_type = try self.genValtype(.{ .node_offset = 0 }, return_type);
try writer.writeByte(val_type);
},
}
@ -168,7 +167,7 @@ pub const Context = struct {
const mod_fn = blk: {
if (tv.val.castTag(.function)) |func| break :blk func.data;
if (tv.val.castTag(.extern_fn)) |ext_fn| return; // don't need codegen for extern functions
return self.fail(self.decl.src(), "TODO: Wasm codegen for decl type '{s}'", .{tv.ty.tag()});
return self.fail(.{ .node_offset = 0 }, "TODO: Wasm codegen for decl type '{s}'", .{tv.ty.tag()});
};
// Reserve space to write the size after generating the code as well as space for locals count

View File

@ -25,8 +25,7 @@ pub const Inst = struct {
/// lifetimes of operands are encoded elsewhere.
deaths: DeathsInt = undefined,
ty: Type,
/// Byte offset into the source.
src: usize,
src: Module.LazySrcLoc,
pub const DeathsInt = u16;
pub const DeathsBitIndex = std.math.Log2Int(DeathsInt);
@ -81,22 +80,28 @@ pub const Inst = struct {
condbr,
constant,
dbg_stmt,
// ?T => bool
/// ?T => bool
is_null,
// ?T => bool (inverted logic)
/// ?T => bool (inverted logic)
is_non_null,
// *?T => bool
/// *?T => bool
is_null_ptr,
// *?T => bool (inverted logic)
/// *?T => bool (inverted logic)
is_non_null_ptr,
// E!T => bool
/// E!T => bool
is_err,
// *E!T => bool
/// *E!T => bool
is_err_ptr,
/// E => u16
error_to_int,
/// u16 => E
int_to_error,
bool_and,
bool_or,
/// Read a value from a pointer.
load,
/// A labeled block of code that loops forever. At the end of the body it is implied
/// to repeat; no explicit "repeat" instruction terminates loop bodies.
loop,
ptrtoint,
ref,
@ -113,9 +118,9 @@ pub const Inst = struct {
not,
floatcast,
intcast,
// ?T => T
/// ?T => T
optional_payload,
// *?T => *T
/// *?T => *T
optional_payload_ptr,
wrap_optional,
/// E!T -> T
@ -132,6 +137,8 @@ pub const Inst = struct {
wrap_errunion_err,
xor,
switchbr,
/// Given a pointer to a struct and a field index, returns a pointer to the field.
struct_field_ptr,
pub fn Type(tag: Tag) type {
return switch (tag) {
@ -139,7 +146,6 @@ pub const Inst = struct {
.retvoid,
.unreach,
.breakpoint,
.dbg_stmt,
=> NoOp,
.ref,
@ -152,6 +158,8 @@ pub const Inst = struct {
.is_null_ptr,
.is_err,
.is_err_ptr,
.int_to_error,
.error_to_int,
.ptrtoint,
.floatcast,
.intcast,
@ -198,7 +206,9 @@ pub const Inst = struct {
.constant => Constant,
.loop => Loop,
.varptr => VarPtr,
.struct_field_ptr => StructFieldPtr,
.switchbr => SwitchBr,
.dbg_stmt => DbgStmt,
};
}
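// Type(tag) is presumably what makes the downcasts used throughout this file
// safe: castTag(.dbg_stmt) can return ?*Inst.DbgStmt precisely because
// .dbg_stmt maps to DbgStmt in the switch above.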
@ -360,7 +370,8 @@ pub const Inst = struct {
base: Inst,
asm_source: []const u8,
is_volatile: bool,
output: ?[]const u8,
output: ?*Inst,
output_name: ?[]const u8,
inputs: []const []const u8,
clobbers: []const []const u8,
args: []const *Inst,
@ -544,6 +555,27 @@ pub const Inst = struct {
}
};
pub const StructFieldPtr = struct {
pub const base_tag = Tag.struct_field_ptr;
base: Inst,
struct_ptr: *Inst,
field_index: usize,
pub fn operandCount(self: *const StructFieldPtr) usize {
return 1;
}
pub fn getOperand(self: *const StructFieldPtr, index: usize) ?*Inst {
var i = index;
if (i < 1)
return self.struct_ptr;
i -= 1;
return null;
}
};
pub const SwitchBr = struct {
pub const base_tag = Tag.switchbr;
@ -584,8 +616,528 @@ pub const Inst = struct {
return (self.deaths + self.else_index)[0..self.else_deaths];
}
};
pub const DbgStmt = struct {
pub const base_tag = Tag.dbg_stmt;
base: Inst,
byte_offset: u32,
pub fn operandCount(self: *const DbgStmt) usize {
return 0;
}
pub fn getOperand(self: *const DbgStmt, index: usize) ?*Inst {
return null;
}
};
};
pub const Body = struct {
instructions: []*Inst,
};
/// For debugging purposes, prints a function representation to stderr.
pub fn dumpFn(old_module: Module, module_fn: *Module.Fn) void {
const allocator = old_module.gpa;
var ctx: DumpTzir = .{
.allocator = allocator,
.arena = std.heap.ArenaAllocator.init(allocator),
.old_module = &old_module,
.module_fn = module_fn,
.indent = 2,
.inst_table = DumpTzir.InstTable.init(allocator),
.partial_inst_table = DumpTzir.InstTable.init(allocator),
.const_table = DumpTzir.InstTable.init(allocator),
};
defer ctx.inst_table.deinit();
defer ctx.partial_inst_table.deinit();
defer ctx.const_table.deinit();
defer ctx.arena.deinit();
switch (module_fn.state) {
.queued => std.debug.print("(queued)", .{}),
.inline_only => std.debug.print("(inline_only)", .{}),
.in_progress => std.debug.print("(in_progress)", .{}),
.sema_failure => std.debug.print("(sema_failure)", .{}),
.dependency_failure => std.debug.print("(dependency_failure)", .{}),
.success => {
const writer = std.io.getStdErr().writer();
ctx.dump(module_fn.body, writer) catch @panic("failed to dump TZIR");
},
}
}
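// For orientation, a successful dump has roughly this shape (the function,
// types, and values here are invented, not from a real run):
//
//   Module.Function(name=add):
//     @0: comptime_int = 1;
//     %0: u32 = arg(a)
//     %1: u32 = add(%0, @0)
//     %2: noreturn = retvoid()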
const DumpTzir = struct {
allocator: *std.mem.Allocator,
arena: std.heap.ArenaAllocator,
old_module: *const Module,
module_fn: *Module.Fn,
indent: usize,
inst_table: InstTable,
partial_inst_table: InstTable,
const_table: InstTable,
next_index: usize = 0,
next_partial_index: usize = 0,
next_const_index: usize = 0,
const InstTable = std.AutoArrayHashMap(*Inst, usize);
/// TODO: Improve this code to include a stack of Body and store the instructions
/// in there. For now we are putting all the instructions in a function-local table;
/// however, instructions that are in a Body can be thrown away when the Body ends.
fn dump(dtz: *DumpTzir, body: Body, writer: std.fs.File.Writer) !void {
// First pass to pre-populate the table so that we can show even invalid references.
// Must iterate the same order we iterate the second time.
// We also look for constants and put them in the const_table.
try dtz.fetchInstsAndResolveConsts(body);
std.debug.print("Module.Function(name={s}):\n", .{dtz.module_fn.owner_decl.name});
for (dtz.const_table.items()) |entry| {
const constant = entry.key.castTag(.constant).?;
try writer.print(" @{d}: {} = {};\n", .{
entry.value, constant.base.ty, constant.val,
});
}
return dtz.dumpBody(body, writer);
}
fn fetchInstsAndResolveConsts(dtz: *DumpTzir, body: Body) error{OutOfMemory}!void {
for (body.instructions) |inst| {
try dtz.inst_table.put(inst, dtz.next_index);
dtz.next_index += 1;
switch (inst.tag) {
.alloc,
.retvoid,
.unreach,
.breakpoint,
.dbg_stmt,
.arg,
=> {},
.ref,
.ret,
.bitcast,
.not,
.is_non_null,
.is_non_null_ptr,
.is_null,
.is_null_ptr,
.is_err,
.is_err_ptr,
.error_to_int,
.int_to_error,
.ptrtoint,
.floatcast,
.intcast,
.load,
.optional_payload,
.optional_payload_ptr,
.wrap_optional,
.wrap_errunion_payload,
.wrap_errunion_err,
.unwrap_errunion_payload,
.unwrap_errunion_err,
.unwrap_errunion_payload_ptr,
.unwrap_errunion_err_ptr,
=> {
const un_op = inst.cast(Inst.UnOp).?;
try dtz.findConst(un_op.operand);
},
.add,
.addwrap,
.sub,
.subwrap,
.mul,
.mulwrap,
.cmp_lt,
.cmp_lte,
.cmp_eq,
.cmp_gte,
.cmp_gt,
.cmp_neq,
.store,
.bool_and,
.bool_or,
.bit_and,
.bit_or,
.xor,
=> {
const bin_op = inst.cast(Inst.BinOp).?;
try dtz.findConst(bin_op.lhs);
try dtz.findConst(bin_op.rhs);
},
.br => {
const br = inst.castTag(.br).?;
try dtz.findConst(&br.block.base);
try dtz.findConst(br.operand);
},
.br_block_flat => {
const br_block_flat = inst.castTag(.br_block_flat).?;
try dtz.findConst(&br_block_flat.block.base);
try dtz.fetchInstsAndResolveConsts(br_block_flat.body);
},
.br_void => {
const br_void = inst.castTag(.br_void).?;
try dtz.findConst(&br_void.block.base);
},
.block => {
const block = inst.castTag(.block).?;
try dtz.fetchInstsAndResolveConsts(block.body);
},
.condbr => {
const condbr = inst.castTag(.condbr).?;
try dtz.findConst(condbr.condition);
try dtz.fetchInstsAndResolveConsts(condbr.then_body);
try dtz.fetchInstsAndResolveConsts(condbr.else_body);
},
.switchbr => {
const switchbr = inst.castTag(.switchbr).?;
try dtz.findConst(switchbr.target);
try dtz.fetchInstsAndResolveConsts(switchbr.else_body);
for (switchbr.cases) |case| {
try dtz.fetchInstsAndResolveConsts(case.body);
}
},
.loop => {
const loop = inst.castTag(.loop).?;
try dtz.fetchInstsAndResolveConsts(loop.body);
},
.call => {
const call = inst.castTag(.call).?;
try dtz.findConst(call.func);
for (call.args) |arg| {
try dtz.findConst(arg);
}
},
.struct_field_ptr => {
const struct_field_ptr = inst.castTag(.struct_field_ptr).?;
try dtz.findConst(struct_field_ptr.struct_ptr);
},
// TODO fill out this debug printing
.assembly,
.constant,
.varptr,
=> {},
}
}
}
fn dumpBody(dtz: *DumpTzir, body: Body, writer: std.fs.File.Writer) (std.fs.File.WriteError || error{OutOfMemory})!void {
for (body.instructions) |inst| {
const my_index = dtz.next_partial_index;
try dtz.partial_inst_table.put(inst, my_index);
dtz.next_partial_index += 1;
try writer.writeByteNTimes(' ', dtz.indent);
try writer.print("%{d}: {} = {s}(", .{
my_index, inst.ty, @tagName(inst.tag),
});
switch (inst.tag) {
.alloc,
.retvoid,
.unreach,
.breakpoint,
.dbg_stmt,
=> try writer.writeAll(")\n"),
.ref,
.ret,
.bitcast,
.not,
.is_non_null,
.is_null,
.is_non_null_ptr,
.is_null_ptr,
.is_err,
.is_err_ptr,
.error_to_int,
.int_to_error,
.ptrtoint,
.floatcast,
.intcast,
.load,
.optional_payload,
.optional_payload_ptr,
.wrap_optional,
.wrap_errunion_err,
.wrap_errunion_payload,
.unwrap_errunion_err,
.unwrap_errunion_payload,
.unwrap_errunion_payload_ptr,
.unwrap_errunion_err_ptr,
=> {
const un_op = inst.cast(Inst.UnOp).?;
const kinky = try dtz.writeInst(writer, un_op.operand);
if (kinky != null) {
try writer.writeAll(") // Instruction does not dominate all uses!\n");
} else {
try writer.writeAll(")\n");
}
},
.add,
.addwrap,
.sub,
.subwrap,
.mul,
.mulwrap,
.cmp_lt,
.cmp_lte,
.cmp_eq,
.cmp_gte,
.cmp_gt,
.cmp_neq,
.store,
.bool_and,
.bool_or,
.bit_and,
.bit_or,
.xor,
=> {
const bin_op = inst.cast(Inst.BinOp).?;
const lhs_kinky = try dtz.writeInst(writer, bin_op.lhs);
try writer.writeAll(", ");
const rhs_kinky = try dtz.writeInst(writer, bin_op.rhs);
if (lhs_kinky != null or rhs_kinky != null) {
try writer.writeAll(") // Instruction does not dominate all uses!");
if (lhs_kinky) |lhs| {
try writer.print(" %{d}", .{lhs});
}
if (rhs_kinky) |rhs| {
try writer.print(" %{d}", .{rhs});
}
try writer.writeAll("\n");
} else {
try writer.writeAll(")\n");
}
},
.arg => {
const arg = inst.castTag(.arg).?;
try writer.print("{s})\n", .{arg.name});
},
.br => {
const br = inst.castTag(.br).?;
const lhs_kinky = try dtz.writeInst(writer, &br.block.base);
try writer.writeAll(", ");
const rhs_kinky = try dtz.writeInst(writer, br.operand);
if (lhs_kinky != null or rhs_kinky != null) {
try writer.writeAll(") // Instruction does not dominate all uses!");
if (lhs_kinky) |lhs| {
try writer.print(" %{d}", .{lhs});
}
if (rhs_kinky) |rhs| {
try writer.print(" %{d}", .{rhs});
}
try writer.writeAll("\n");
} else {
try writer.writeAll(")\n");
}
},
.br_block_flat => {
const br_block_flat = inst.castTag(.br_block_flat).?;
const block_kinky = try dtz.writeInst(writer, &br_block_flat.block.base);
if (block_kinky != null) {
try writer.writeAll(", { // Instruction does not dominate all uses!\n");
} else {
try writer.writeAll(", {\n");
}
const old_indent = dtz.indent;
dtz.indent += 2;
try dtz.dumpBody(br_block_flat.body, writer);
dtz.indent = old_indent;
try writer.writeByteNTimes(' ', dtz.indent);
try writer.writeAll("})\n");
},
.br_void => {
const br_void = inst.castTag(.br_void).?;
const kinky = try dtz.writeInst(writer, &br_void.block.base);
if (kinky) |_| {
try writer.writeAll(") // Instruction does not dominate all uses!\n");
} else {
try writer.writeAll(")\n");
}
},
.block => {
const block = inst.castTag(.block).?;
try writer.writeAll("{\n");
const old_indent = dtz.indent;
dtz.indent += 2;
try dtz.dumpBody(block.body, writer);
dtz.indent = old_indent;
try writer.writeByteNTimes(' ', dtz.indent);
try writer.writeAll("})\n");
},
.condbr => {
const condbr = inst.castTag(.condbr).?;
const condition_kinky = try dtz.writeInst(writer, condbr.condition);
if (condition_kinky != null) {
try writer.writeAll(", { // Instruction does not dominate all uses!\n");
} else {
try writer.writeAll(", {\n");
}
const old_indent = dtz.indent;
dtz.indent += 2;
try dtz.dumpBody(condbr.then_body, writer);
try writer.writeByteNTimes(' ', old_indent);
try writer.writeAll("}, {\n");
try dtz.dumpBody(condbr.else_body, writer);
dtz.indent = old_indent;
try writer.writeByteNTimes(' ', old_indent);
try writer.writeAll("})\n");
},
.switchbr => {
const switchbr = inst.castTag(.switchbr).?;
const condition_kinky = try dtz.writeInst(writer, switchbr.target);
if (condition_kinky != null) {
try writer.writeAll(", { // Instruction does not dominate all uses!\n");
} else {
try writer.writeAll(", {\n");
}
const old_indent = dtz.indent;
if (switchbr.else_body.instructions.len != 0) {
dtz.indent += 2;
try dtz.dumpBody(switchbr.else_body, writer);
try writer.writeByteNTimes(' ', old_indent);
try writer.writeAll("}, {\n");
dtz.indent = old_indent;
}
for (switchbr.cases) |case| {
dtz.indent += 2;
try dtz.dumpBody(case.body, writer);
try writer.writeByteNTimes(' ', old_indent);
try writer.writeAll("}, {\n");
dtz.indent = old_indent;
}
try writer.writeByteNTimes(' ', old_indent);
try writer.writeAll("})\n");
},
.loop => {
const loop = inst.castTag(.loop).?;
try writer.writeAll("{\n");
const old_indent = dtz.indent;
dtz.indent += 2;
try dtz.dumpBody(loop.body, writer);
dtz.indent = old_indent;
try writer.writeByteNTimes(' ', dtz.indent);
try writer.writeAll("})\n");
},
.call => {
const call = inst.castTag(.call).?;
const args_kinky = try dtz.allocator.alloc(?usize, call.args.len);
defer dtz.allocator.free(args_kinky);
std.mem.set(?usize, args_kinky, null);
var any_kinky_args = false;
const func_kinky = try dtz.writeInst(writer, call.func);
for (call.args) |arg, i| {
try writer.writeAll(", ");
args_kinky[i] = try dtz.writeInst(writer, arg);
any_kinky_args = any_kinky_args or args_kinky[i] != null;
}
if (func_kinky != null or any_kinky_args) {
try writer.writeAll(") // Instruction does not dominate all uses!");
if (func_kinky) |func_index| {
try writer.print(" %{d}", .{func_index});
}
for (args_kinky) |arg_kinky| {
if (arg_kinky) |arg_index| {
try writer.print(" %{d}", .{arg_index});
}
}
try writer.writeAll("\n");
} else {
try writer.writeAll(")\n");
}
},
.struct_field_ptr => {
const struct_field_ptr = inst.castTag(.struct_field_ptr).?;
const kinky = try dtz.writeInst(writer, struct_field_ptr.struct_ptr);
if (kinky != null) {
try writer.print("{d}) // Instruction does not dominate all uses!\n", .{
struct_field_ptr.field_index,
});
} else {
try writer.print("{d})\n", .{struct_field_ptr.field_index});
}
},
// TODO fill out this debug printing
.assembly,
.constant,
.varptr,
=> {
try writer.writeAll("!TODO!)\n");
},
}
}
}
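/// Prints a reference to `inst`: %N for instructions, @N for constants.
/// Returns the operand's index when the instruction exists in the function
/// but has not yet been printed in the current body (a forward reference
/// that callers flag as "does not dominate all uses"), and null otherwise.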
fn writeInst(dtz: *DumpTzir, writer: std.fs.File.Writer, inst: *Inst) !?usize {
if (dtz.partial_inst_table.get(inst)) |operand_index| {
try writer.print("%{d}", .{operand_index});
return null;
} else if (dtz.const_table.get(inst)) |operand_index| {
try writer.print("@{d}", .{operand_index});
return null;
} else if (dtz.inst_table.get(inst)) |operand_index| {
try writer.print("%{d}", .{operand_index});
return operand_index;
} else {
try writer.writeAll("!BADREF!");
return null;
}
}
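/// If `operand` is a constant instruction, assigns it the next @N index in
/// the constant table.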
fn findConst(dtz: *DumpTzir, operand: *Inst) !void {
if (operand.tag == .constant) {
try dtz.const_table.put(operand, dtz.next_const_index);
dtz.next_const_index += 1;
}
}
};
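For reference, a minimal sketch of the text this dumper emits (hypothetical indexes, types, and instruction mix; the real output depends on the function being dumped):
  @0: comptime_int = 42;
  %0: u32 = arg(x)
  %1: u32 = add(%0, @0)
  %2: void = retvoid()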

View File

@ -185,8 +185,7 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {
if (module.global_error_set.size == 0) break :render_errors;
var it = module.global_error_set.iterator();
while (it.next()) |entry| {
// + 1 because 0 represents no error
try err_typedef_writer.print("#define zig_error_{s} {d}\n", .{ entry.key, entry.value + 1 });
try err_typedef_writer.print("#define zig_error_{s} {d}\n", .{ entry.key, entry.value });
}
try err_typedef_writer.writeByte('\n');
}

View File

@ -34,7 +34,7 @@ pub const base_tag: link.File.Tag = .coff;
const msdos_stub = @embedFile("msdos-stub.bin");
/// If this is not null, an object file is created by LLVM and linked with LLD afterwards.
llvm_ir_module: ?*llvm_backend.LLVMIRModule = null,
llvm_object: ?*llvm_backend.Object = null,
base: link.File,
ptr_width: PtrWidth,
@ -129,7 +129,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
const self = try createEmpty(allocator, options);
errdefer self.base.destroy();
self.llvm_ir_module = try llvm_backend.LLVMIRModule.create(allocator, sub_path, options);
self.llvm_object = try llvm_backend.Object.create(allocator, sub_path, options);
return self;
}
@ -413,7 +413,7 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Coff {
}
pub fn allocateDeclIndexes(self: *Coff, decl: *Module.Decl) !void {
if (self.llvm_ir_module) |_| return;
if (self.llvm_object) |_| return;
try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
@ -660,7 +660,7 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
defer tracy.end();
if (build_options.have_llvm)
if (self.llvm_ir_module) |llvm_ir_module| return try llvm_ir_module.updateDecl(module, decl);
if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl);
const typed_value = decl.typed_value.most_recent.typed_value;
if (typed_value.val.tag() == .extern_fn) {
@ -720,15 +720,15 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
}
pub fn freeDecl(self: *Coff, decl: *Module.Decl) void {
if (self.llvm_ir_module) |_| return;
if (self.llvm_object) |_| return;
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
self.freeTextBlock(&decl.link.coff);
self.offset_table_free_list.append(self.base.allocator, decl.link.coff.offset_table_index) catch {};
}
pub fn updateDeclExports(self: *Coff, module: *Module, decl: *const Module.Decl, exports: []const *Module.Export) !void {
if (self.llvm_ir_module) |_| return;
pub fn updateDeclExports(self: *Coff, module: *Module, decl: *Module.Decl, exports: []const *Module.Export) !void {
if (self.llvm_object) |_| return;
for (exports) |exp| {
if (exp.options.section) |section_name| {
@ -771,7 +771,7 @@ pub fn flushModule(self: *Coff, comp: *Compilation) !void {
defer tracy.end();
if (build_options.have_llvm)
if (self.llvm_ir_module) |llvm_ir_module| return try llvm_ir_module.flushModule(comp);
if (self.llvm_object) |llvm_object| return try llvm_object.flushModule(comp);
if (self.text_section_size_dirty) {
// Write the new raw size in the .text header
@ -1308,7 +1308,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
}
pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl) u64 {
assert(self.llvm_ir_module == null);
assert(self.llvm_object == null);
return self.text_section_virtual_address + decl.link.coff.text_offset;
}
@ -1318,7 +1318,7 @@ pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !v
pub fn deinit(self: *Coff) void {
if (build_options.have_llvm)
if (self.llvm_ir_module) |ir_module| ir_module.deinit(self.base.allocator);
if (self.llvm_object) |ir_module| ir_module.deinit(self.base.allocator);
self.text_block_free_list.deinit(self.base.allocator);
self.offset_table.deinit(self.base.allocator);

View File

@ -35,7 +35,7 @@ base: File,
ptr_width: PtrWidth,
/// If this is not null, an object file is created by LLVM and linked with LLD afterwards.
llvm_ir_module: ?*llvm_backend.LLVMIRModule = null,
llvm_object: ?*llvm_backend.Object = null,
/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
/// Same order as in the file.
@ -232,7 +232,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
const self = try createEmpty(allocator, options);
errdefer self.base.destroy();
self.llvm_ir_module = try llvm_backend.LLVMIRModule.create(allocator, sub_path, options);
self.llvm_object = try llvm_backend.Object.create(allocator, sub_path, options);
return self;
}
@ -299,7 +299,7 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Elf {
pub fn deinit(self: *Elf) void {
if (build_options.have_llvm)
if (self.llvm_ir_module) |ir_module|
if (self.llvm_object) |ir_module|
ir_module.deinit(self.base.allocator);
self.sections.deinit(self.base.allocator);
@ -318,7 +318,7 @@ pub fn deinit(self: *Elf) void {
}
pub fn getDeclVAddr(self: *Elf, decl: *const Module.Decl) u64 {
assert(self.llvm_ir_module == null);
assert(self.llvm_object == null);
assert(decl.link.elf.local_sym_index != 0);
return self.local_symbols.items[decl.link.elf.local_sym_index].st_value;
}
@ -438,7 +438,7 @@ fn updateString(self: *Elf, old_str_off: u32, new_name: []const u8) !u32 {
}
pub fn populateMissingMetadata(self: *Elf) !void {
assert(self.llvm_ir_module == null);
assert(self.llvm_object == null);
const small_ptr = switch (self.ptr_width) {
.p32 => true,
@ -745,7 +745,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
defer tracy.end();
if (build_options.have_llvm)
if (self.llvm_ir_module) |llvm_ir_module| return try llvm_ir_module.flushModule(comp);
if (self.llvm_object) |llvm_object| return try llvm_object.flushModule(comp);
// TODO This linker code currently assumes there is only 1 compilation unit and it corresponds to the
// Zig source code.
@ -2111,7 +2111,7 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
}
pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void {
if (self.llvm_ir_module) |_| return;
if (self.llvm_object) |_| return;
if (decl.link.elf.local_sym_index != 0) return;
@ -2149,7 +2149,7 @@ pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void {
}
pub fn freeDecl(self: *Elf, decl: *Module.Decl) void {
if (self.llvm_ir_module) |_| return;
if (self.llvm_object) |_| return;
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
self.freeTextBlock(&decl.link.elf);
@ -2189,7 +2189,7 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
defer tracy.end();
if (build_options.have_llvm)
if (self.llvm_ir_module) |llvm_ir_module| return try llvm_ir_module.updateDecl(module, decl);
if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl);
const typed_value = decl.typed_value.most_recent.typed_value;
if (typed_value.val.tag() == .extern_fn) {
@ -2670,10 +2670,10 @@ fn writeDeclDebugInfo(self: *Elf, text_block: *TextBlock, dbg_info_buf: []const
pub fn updateDeclExports(
self: *Elf,
module: *Module,
decl: *const Module.Decl,
decl: *Module.Decl,
exports: []const *Module.Export,
) !void {
if (self.llvm_ir_module) |_| return;
if (self.llvm_object) |_| return;
const tracy = trace(@src());
defer tracy.end();
@ -2748,7 +2748,7 @@ pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Dec
const tracy = trace(@src());
defer tracy.end();
if (self.llvm_ir_module) |_| return;
if (self.llvm_object) |_| return;
const tree = decl.container.file_scope.tree;
const node_tags = tree.nodes.items(.tag);
@ -2773,7 +2773,7 @@ pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Dec
}
pub fn deleteExport(self: *Elf, exp: Export) void {
if (self.llvm_ir_module) |_| return;
if (self.llvm_object) |_| return;
const sym_index = exp.sym_index orelse return;
self.global_symbol_free_list.append(self.base.allocator, sym_index) catch {};

View File

@ -1340,7 +1340,7 @@ pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.D
pub fn updateDeclExports(
self: *MachO,
module: *Module,
decl: *const Module.Decl,
decl: *Module.Decl,
exports: []const *Module.Export,
) !void {
const tracy = trace(@src());

View File

@ -1487,7 +1487,7 @@ fn buildOutputType(
for (diags.arch.?.allCpuModels()) |cpu| {
help_text.writer().print(" {s}\n", .{cpu.name}) catch break :help;
}
std.log.info("Available CPUs for architecture '{s}': {s}", .{
std.log.info("Available CPUs for architecture '{s}':\n{s}", .{
@tagName(diags.arch.?), help_text.items,
});
}
@ -1499,7 +1499,7 @@ fn buildOutputType(
for (diags.arch.?.allFeaturesList()) |feature| {
help_text.writer().print(" {s}: {s}\n", .{ feature.name, feature.description }) catch break :help;
}
std.log.info("Available CPU features for architecture '{s}': {s}", .{
std.log.info("Available CPU features for architecture '{s}':\n{s}", .{
@tagName(diags.arch.?), help_text.items,
});
}
@ -1750,15 +1750,12 @@ fn buildOutputType(
}
const self_exe_path = try fs.selfExePathAlloc(arena);
var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |lib_dir|
.{
.path = lib_dir,
.handle = try fs.cwd().openDir(lib_dir, .{}),
}
else
introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| {
fatal("unable to find zig installation directory: {s}", .{@errorName(err)});
};
var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |lib_dir| .{
.path = lib_dir,
.handle = try fs.cwd().openDir(lib_dir, .{}),
} else introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| {
fatal("unable to find zig installation directory: {s}", .{@errorName(err)});
};
defer zig_lib_directory.handle.close();
var thread_pool: ThreadPool = undefined;
@ -2115,12 +2112,37 @@ fn updateModule(gpa: *Allocator, comp: *Compilation, hook: AfterUpdateHook) !voi
} else switch (hook) {
.none => {},
.print => |bin_path| try io.getStdOut().writer().print("{s}\n", .{bin_path}),
.update => |full_path| _ = try comp.bin_file.options.emit.?.directory.handle.updateFile(
comp.bin_file.options.emit.?.sub_path,
fs.cwd(),
full_path,
.{},
),
.update => |full_path| {
const bin_sub_path = comp.bin_file.options.emit.?.sub_path;
const cwd = fs.cwd();
const cache_dir = comp.bin_file.options.emit.?.directory.handle;
_ = try cache_dir.updateFile(bin_sub_path, cwd, full_path, .{});
// If a .pdb file is part of the expected output, we must also copy
// it into place here.
const coff_or_pe = switch (comp.bin_file.options.object_format) {
.coff, .pe => true,
else => false,
};
const have_pdb = coff_or_pe and !comp.bin_file.options.strip;
if (have_pdb) {
// Replace `.out` or `.exe` with `.pdb` on both the source and destination
const src_bin_ext = fs.path.extension(bin_sub_path);
const dst_bin_ext = fs.path.extension(full_path);
const src_pdb_path = try std.fmt.allocPrint(gpa, "{s}.pdb", .{
bin_sub_path[0 .. bin_sub_path.len - src_bin_ext.len],
});
defer gpa.free(src_pdb_path);
const dst_pdb_path = try std.fmt.allocPrint(gpa, "{s}.pdb", .{
full_path[0 .. full_path.len - dst_bin_ext.len],
});
defer gpa.free(dst_pdb_path);
_ = try cache_dir.updateFile(src_pdb_path, cwd, dst_pdb_path, .{});
}
},
}
}
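A minimal sketch of the extension swap above (hypothetical file name, written as a standalone test; not part of the commit):
test "pdb path derivation (sketch)" {
    const bin_path = "main.exe";
    const ext = std.fs.path.extension(bin_path); // ".exe"
    const pdb_path = try std.fmt.allocPrint(std.testing.allocator, "{s}.pdb", .{
        bin_path[0 .. bin_path.len - ext.len],
    });
    defer std.testing.allocator.free(pdb_path);
    // "main.exe" -> "main" ++ ".pdb"
    std.testing.expectEqualStrings("main.pdb", pdb_path);
}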
@ -2461,15 +2483,12 @@ pub fn cmdBuild(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !v
}
}
var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |lib_dir|
.{
.path = lib_dir,
.handle = try fs.cwd().openDir(lib_dir, .{}),
}
else
introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| {
fatal("unable to find zig installation directory: {s}", .{@errorName(err)});
};
var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |lib_dir| .{
.path = lib_dir,
.handle = try fs.cwd().openDir(lib_dir, .{}),
} else introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| {
fatal("unable to find zig installation directory: {s}", .{@errorName(err)});
};
defer zig_lib_directory.handle.close();
const std_special = "std" ++ fs.path.sep_str ++ "special";
@ -3281,8 +3300,7 @@ pub const ClangArgIterator = struct {
self.zig_equivalent = clang_arg.zig_equivalent;
break :find_clang_arg;
},
}
else {
} else {
fatal("Unknown Clang option: '{s}'", .{arg});
}
}

src/register_manager.zig Normal file
View File

@ -0,0 +1,228 @@
const std = @import("std");
const math = std.math;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const ir = @import("ir.zig");
const Type = @import("type.zig").Type;
const Module = @import("Module.zig");
const LazySrcLoc = Module.LazySrcLoc;
const log = std.log.scoped(.register_manager);
pub fn RegisterManager(
comptime Function: type,
comptime Register: type,
comptime callee_preserved_regs: []const Register,
) type {
return struct {
/// The key must be a canonical register.
registers: std.AutoHashMapUnmanaged(Register, *ir.Inst) = .{},
free_registers: FreeRegInt = math.maxInt(FreeRegInt),
/// Tracks all registers allocated in the course of this function
allocated_registers: FreeRegInt = 0,
const Self = @This();
/// An integer whose bits represent all the registers and whether they are free.
const FreeRegInt = std.meta.Int(.unsigned, callee_preserved_regs.len);
const ShiftInt = math.Log2Int(FreeRegInt);
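// For example, with two callee-preserved registers FreeRegInt is u2:
// 0b11 means both registers are free, allocating the register at alloc
// index 0 clears bit 0 (leaving 0b10), and freeing it sets the bit again.
// @ctz on free_registers then yields the lowest free index in one step.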
fn getFunction(self: *Self) *Function {
return @fieldParentPtr(Function, "register_manager", self);
}
pub fn deinit(self: *Self, allocator: *Allocator) void {
self.registers.deinit(allocator);
}
fn markRegUsed(self: *Self, reg: Register) void {
if (FreeRegInt == u0) return;
const index = reg.allocIndex() orelse return;
const shift = @intCast(ShiftInt, index);
const mask = @as(FreeRegInt, 1) << shift;
self.free_registers &= ~mask;
self.allocated_registers |= mask;
}
fn markRegFree(self: *Self, reg: Register) void {
if (FreeRegInt == u0) return;
const index = reg.allocIndex() orelse return;
const shift = @intCast(ShiftInt, index);
self.free_registers |= @as(FreeRegInt, 1) << shift;
}
/// Returns whether `reg` was allocated at any point during this function.
pub fn isRegAllocated(self: Self, reg: Register) bool {
if (FreeRegInt == u0) return false;
const index = reg.allocIndex() orelse return false;
const shift = @intCast(ShiftInt, index);
return self.allocated_registers & @as(FreeRegInt, 1) << shift != 0;
}
/// Before calling, the caller must ensureCapacity for one more entry on self.registers.
/// Returns `null` if all registers are allocated.
pub fn tryAllocReg(self: *Self, inst: *ir.Inst) ?Register {
const free_index = @ctz(FreeRegInt, self.free_registers);
if (free_index >= callee_preserved_regs.len) {
return null;
}
// This is necessary because the return type of @ctz is 1
// bit longer than ShiftInt if callee_preserved_regs.len
// is a power of two. This int cast is always safe because
// free_index < callee_preserved_regs.len
const shift = @intCast(ShiftInt, free_index);
const mask = @as(FreeRegInt, 1) << shift;
self.free_registers &= ~mask;
self.allocated_registers |= mask;
const reg = callee_preserved_regs[free_index];
self.registers.putAssumeCapacityNoClobber(reg, inst);
log.debug("alloc {} => {*}", .{ reg, inst });
return reg;
}
/// Before calling, the caller must ensureCapacity for one more entry on self.registers.
pub fn allocReg(self: *Self, inst: *ir.Inst) !Register {
return self.tryAllocReg(inst) orelse b: {
// We'll take over the first register. Move the instruction that was previously
// there to a stack allocation.
const reg = callee_preserved_regs[0];
const regs_entry = self.registers.getEntry(reg).?;
const spilled_inst = regs_entry.value;
regs_entry.value = inst;
try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
break :b reg;
};
}
/// Does not track the register.
/// Returns `null` if all registers are allocated.
pub fn findUnusedReg(self: *Self) ?Register {
const free_index = @ctz(FreeRegInt, self.free_registers);
if (free_index >= callee_preserved_regs.len) {
return null;
}
return callee_preserved_regs[free_index];
}
/// Does not track the register.
pub fn allocRegWithoutTracking(self: *Self) !Register {
return self.findUnusedReg() orelse b: {
// We'll take over the first register. Move the instruction that was previously
// there to a stack allocation.
const reg = callee_preserved_regs[0];
const regs_entry = self.registers.remove(reg).?;
const spilled_inst = regs_entry.value;
try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
break :b reg;
};
}
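/// Marks `reg` as holding `inst`, assuming the register is currently free;
/// putNoClobber asserts that it is not already tracked.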
pub fn getRegAssumeFree(self: *Self, reg: Register, inst: *ir.Inst) !void {
try self.registers.putNoClobber(self.getFunction().gpa, reg, inst);
self.markRegUsed(reg);
}
pub fn freeReg(self: *Self, reg: Register) void {
_ = self.registers.remove(reg);
self.markRegFree(reg);
}
};
}
const MockRegister = enum(u2) {
r0, r1, r2, r3,
pub fn allocIndex(self: MockRegister) ?u2 {
inline for (mock_callee_preserved_regs) |cpreg, i| {
if (self == cpreg) return i;
}
return null;
}
};
const mock_callee_preserved_regs = [_]MockRegister{ .r2, .r3 };
const MockFunction = struct {
allocator: *Allocator,
register_manager: RegisterManager(Self, MockRegister, &mock_callee_preserved_regs) = .{},
spilled: std.ArrayListUnmanaged(MockRegister) = .{},
const Self = @This();
pub fn deinit(self: *Self) void {
self.register_manager.deinit(self.allocator);
self.spilled.deinit(self.allocator);
}
pub fn spillInstruction(self: *Self, src: LazySrcLoc, reg: MockRegister, inst: *ir.Inst) !void {
try self.spilled.append(self.allocator, reg);
}
};
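// Note: a real backend's spillInstruction is expected to emit code that
// stores `reg` into a stack slot and repoints the spilled instruction at
// that slot (see allocReg above); this mock merely records the spilled
// register so the tests below can assert on spill behavior.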
test "tryAllocReg: no spilling" {
const allocator = std.testing.allocator;
var function = MockFunction{
.allocator = allocator,
};
defer function.deinit();
var mock_instruction = ir.Inst{
.tag = .breakpoint,
.ty = Type.initTag(.void),
.src = .unneeded,
};
std.testing.expect(!function.register_manager.isRegAllocated(.r2));
std.testing.expect(!function.register_manager.isRegAllocated(.r3));
try function.register_manager.registers.ensureCapacity(allocator, function.register_manager.registers.count() + 2);
std.testing.expectEqual(@as(?MockRegister, .r2), function.register_manager.tryAllocReg(&mock_instruction));
std.testing.expectEqual(@as(?MockRegister, .r3), function.register_manager.tryAllocReg(&mock_instruction));
std.testing.expectEqual(@as(?MockRegister, null), function.register_manager.tryAllocReg(&mock_instruction));
std.testing.expect(function.register_manager.isRegAllocated(.r2));
std.testing.expect(function.register_manager.isRegAllocated(.r3));
function.register_manager.freeReg(.r2);
function.register_manager.freeReg(.r3);
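// freeReg releases the registers, but allocated_registers keeps recording
// that they were used at some point during the function: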
std.testing.expect(function.register_manager.isRegAllocated(.r2));
std.testing.expect(function.register_manager.isRegAllocated(.r3));
}
test "allocReg: spilling" {
const allocator = std.testing.allocator;
var function = MockFunction{
.allocator = allocator,
};
defer function.deinit();
var mock_instruction = ir.Inst{
.tag = .breakpoint,
.ty = Type.initTag(.void),
.src = .unneeded,
};
std.testing.expect(!function.register_manager.isRegAllocated(.r2));
std.testing.expect(!function.register_manager.isRegAllocated(.r3));
try function.register_manager.registers.ensureCapacity(allocator, function.register_manager.registers.count() + 2);
std.testing.expectEqual(@as(?MockRegister, .r2), try function.register_manager.allocReg(&mock_instruction));
std.testing.expectEqual(@as(?MockRegister, .r3), try function.register_manager.allocReg(&mock_instruction));
// Spill a register
std.testing.expectEqual(@as(?MockRegister, .r2), try function.register_manager.allocReg(&mock_instruction));
std.testing.expectEqualSlices(MockRegister, &[_]MockRegister{.r2}, function.spilled.items);
// No spilling necessary
function.register_manager.freeReg(.r3);
std.testing.expectEqual(@as(?MockRegister, .r3), try function.register_manager.allocReg(&mock_instruction));
std.testing.expectEqualSlices(MockRegister, &[_]MockRegister{.r2}, function.spilled.items);
}

View File

@ -8723,7 +8723,9 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
assert(async_frame_type->id == ZigTypeIdFnFrame);
assert(field_type->id == ZigTypeIdFn);
resolve_llvm_types_fn(g, async_frame_type->data.frame.fn);
llvm_type = LLVMPointerType(async_frame_type->data.frame.fn->raw_type_ref, 0);
const unsigned addrspace = ZigLLVMDataLayoutGetProgramAddressSpace(g->target_data_ref);
llvm_type = LLVMPointerType(async_frame_type->data.frame.fn->raw_type_ref, addrspace);
} else {
llvm_type = get_llvm_type(g, field_type);
}

View File

@ -7641,12 +7641,7 @@ static IrInstSrc *ir_gen_fn_call(IrBuilderSrc *irb, Scope *scope, AstNode *node,
bool is_nosuspend = get_scope_nosuspend(scope) != nullptr;
CallModifier modifier = node->data.fn_call_expr.modifier;
if (is_nosuspend) {
if (modifier == CallModifierAsync) {
add_node_error(irb->codegen, node,
buf_sprintf("async call in nosuspend scope"));
return irb->codegen->invalid_inst_src;
}
if (is_nosuspend && modifier != CallModifierAsync) {
modifier = CallModifierNoSuspend;
}
@ -10129,10 +10124,6 @@ static IrInstSrc *ir_gen_fn_proto(IrBuilderSrc *irb, Scope *parent_scope, AstNod
static IrInstSrc *ir_gen_resume(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeResume);
if (get_scope_nosuspend(scope) != nullptr) {
add_node_error(irb->codegen, node, buf_sprintf("resume in nosuspend scope"));
return irb->codegen->invalid_inst_src;
}
IrInstSrc *target_inst = ir_gen_node_extra(irb, node->data.resume_expr.expr, scope, LValPtr, nullptr);
if (target_inst == irb->codegen->invalid_inst_src)

View File

@ -4343,7 +4343,7 @@ fn isZigPrimitiveType(name: []const u8) bool {
}
return true;
}
return @import("astgen.zig").simple_types.has(name);
return @import("AstGen.zig").simple_types.has(name);
}
const MacroCtx = struct {

View File

@ -4,6 +4,7 @@ const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Target = std.Target;
const Module = @import("Module.zig");
const log = std.log.scoped(.Type);
/// This is the raw data, with no bookkeeping, no memory awareness, no de-duplication.
/// It's important for this type to be small.
@ -92,11 +93,9 @@ pub const Type = extern union {
.anyerror_void_error_union, .error_union => return .ErrorUnion,
.anyframe_T, .@"anyframe" => return .AnyFrame,
.@"struct", .empty_struct => return .Struct,
.@"enum" => return .Enum,
.@"union" => return .Union,
.empty_struct => return .Struct,
.empty_struct_literal => return .Struct,
.@"struct" => return .Struct,
.var_args_param => unreachable, // can be any type
}
@ -173,6 +172,125 @@ pub const Type = extern union {
};
}
pub fn ptrInfo(self: Type) Payload.Pointer {
switch (self.tag()) {
.single_const_pointer_to_comptime_int => return .{ .data = .{
.pointee_type = Type.initTag(.comptime_int),
.sentinel = null,
.@"align" = 0,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
.mutable = false,
.@"volatile" = false,
.size = .One,
} },
.const_slice_u8 => return .{ .data = .{
.pointee_type = Type.initTag(.u8),
.sentinel = null,
.@"align" = 0,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
.mutable = false,
.@"volatile" = false,
.size = .Slice,
} },
.single_const_pointer => return .{ .data = .{
.pointee_type = self.castPointer().?.data,
.sentinel = null,
.@"align" = 0,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
.mutable = false,
.@"volatile" = false,
.size = .One,
} },
.single_mut_pointer => return .{ .data = .{
.pointee_type = self.castPointer().?.data,
.sentinel = null,
.@"align" = 0,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
.mutable = true,
.@"volatile" = false,
.size = .One,
} },
.many_const_pointer => return .{ .data = .{
.pointee_type = self.castPointer().?.data,
.sentinel = null,
.@"align" = 0,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
.mutable = false,
.@"volatile" = false,
.size = .Many,
} },
.many_mut_pointer => return .{ .data = .{
.pointee_type = self.castPointer().?.data,
.sentinel = null,
.@"align" = 0,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
.mutable = true,
.@"volatile" = false,
.size = .Many,
} },
.c_const_pointer => return .{ .data = .{
.pointee_type = self.castPointer().?.data,
.sentinel = null,
.@"align" = 0,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
.mutable = false,
.@"volatile" = false,
.size = .C,
} },
.c_mut_pointer => return .{ .data = .{
.pointee_type = self.castPointer().?.data,
.sentinel = null,
.@"align" = 0,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
.mutable = true,
.@"volatile" = false,
.size = .C,
} },
.const_slice => return .{ .data = .{
.pointee_type = self.castPointer().?.data,
.sentinel = null,
.@"align" = 0,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
.mutable = false,
.@"volatile" = false,
.size = .Slice,
} },
.mut_slice => return .{ .data = .{
.pointee_type = self.castPointer().?.data,
.sentinel = null,
.@"align" = 0,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
.mutable = true,
.@"volatile" = false,
.size = .Slice,
} },
.pointer => return self.castTag(.pointer).?.*,
else => unreachable,
}
}
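A quick illustration of the normalization above (a sketch, assuming it sits in type.zig next to the function; not part of the commit):
test "ptrInfo expands tag-encoded pointers (sketch)" {
    const ty = Type.initTag(.const_slice_u8);
    const info = ty.ptrInfo().data;
    // The compact const_slice_u8 tag expands into a full pointer description.
    std.testing.expect(info.size == .Slice);
    std.testing.expect(!info.mutable);
    std.testing.expect(info.pointee_type.eql(Type.initTag(.u8)));
}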
pub fn eql(a: Type, b: Type) bool {
// As a shortcut, if the small tags / addresses match, we're done.
if (a.tag_if_small_enough == b.tag_if_small_enough)
@ -195,25 +313,38 @@ pub const Type = extern union {
return a.elemType().eql(b.elemType());
},
.Pointer => {
// Hot path for common case:
if (a.castPointer()) |a_payload| {
if (b.castPointer()) |b_payload| {
return a.tag() == b.tag() and eql(a_payload.data, b_payload.data);
const info_a = a.ptrInfo().data;
const info_b = b.ptrInfo().data;
if (!info_a.pointee_type.eql(info_b.pointee_type))
return false;
if (info_a.size != info_b.size)
return false;
if (info_a.mutable != info_b.mutable)
return false;
if (info_a.@"volatile" != info_b.@"volatile")
return false;
if (info_a.@"allowzero" != info_b.@"allowzero")
return false;
if (info_a.bit_offset != info_b.bit_offset)
return false;
if (info_a.host_size != info_b.host_size)
return false;
const sentinel_a = info_a.sentinel;
const sentinel_b = info_b.sentinel;
if (sentinel_a) |sa| {
if (sentinel_b) |sb| {
if (!sa.eql(sb))
return false;
} else {
return false;
}
} else {
if (sentinel_b != null)
return false;
}
const is_slice_a = isSlice(a);
const is_slice_b = isSlice(b);
if (is_slice_a != is_slice_b)
return false;
const ptr_size_a = ptrSize(a);
const ptr_size_b = ptrSize(b);
if (ptr_size_a != ptr_size_b)
return false;
std.debug.panic("TODO implement more pointer Type equality comparison: {} and {}", .{
a, b,
});
return true;
},
.Int => {
// Detect that e.g. u64 != usize, even if the bits match on a particular target.
@ -399,10 +530,10 @@ pub const Type = extern union {
.const_slice_u8,
.enum_literal,
.anyerror_void_error_union,
.@"anyframe",
.inferred_alloc_const,
.inferred_alloc_mut,
.var_args_param,
.empty_struct_literal,
=> unreachable,
.array_u8,
@ -420,7 +551,6 @@ pub const Type = extern union {
.optional,
.optional_single_mut_pointer,
.optional_single_const_pointer,
.anyframe_T,
=> return self.copyPayloadShallow(allocator, Payload.ElemType),
.int_signed,
@ -480,13 +610,10 @@ pub const Type = extern union {
.payload = try payload.payload.copy(allocator),
});
},
.error_set => return self.copyPayloadShallow(allocator, Payload.Decl),
.error_set => return self.copyPayloadShallow(allocator, Payload.ErrorSet),
.error_set_single => return self.copyPayloadShallow(allocator, Payload.Name),
.empty_struct => return self.copyPayloadShallow(allocator, Payload.ContainerScope),
.@"enum" => return self.copyPayloadShallow(allocator, Payload.Enum),
.@"struct" => return self.copyPayloadShallow(allocator, Payload.Struct),
.@"union" => return self.copyPayloadShallow(allocator, Payload.Union),
.@"opaque" => return self.copyPayloadShallow(allocator, Payload.Opaque),
}
}
@ -549,9 +676,8 @@ pub const Type = extern union {
.@"null" => return out_stream.writeAll("@Type(.Null)"),
.@"undefined" => return out_stream.writeAll("@Type(.Undefined)"),
// TODO this should print the structs name
.empty_struct => return out_stream.writeAll("struct {}"),
.@"anyframe" => return out_stream.writeAll("anyframe"),
.empty_struct, .empty_struct_literal => return out_stream.writeAll("struct {}"),
.@"struct" => return out_stream.writeAll("(struct)"),
.anyerror_void_error_union => return out_stream.writeAll("anyerror!void"),
.const_slice_u8 => return out_stream.writeAll("[]const u8"),
.fn_noreturn_no_args => return out_stream.writeAll("fn() noreturn"),
@ -579,12 +705,6 @@ pub const Type = extern union {
continue;
},
.anyframe_T => {
const return_type = ty.castTag(.anyframe_T).?.data;
try out_stream.print("anyframe->", .{});
ty = return_type;
continue;
},
.array_u8 => {
const len = ty.castTag(.array_u8).?.data;
return out_stream.print("[{d}]u8", .{len});
@ -715,8 +835,8 @@ pub const Type = extern union {
continue;
},
.error_set => {
const decl = ty.castTag(.error_set).?.data;
return out_stream.writeAll(std.mem.spanZ(decl.name));
const error_set = ty.castTag(.error_set).?.data;
return out_stream.writeAll(std.mem.spanZ(error_set.owner_decl.name));
},
.error_set_single => {
const name = ty.castTag(.error_set_single).?.data;
@ -725,9 +845,6 @@ pub const Type = extern union {
.inferred_alloc_const => return out_stream.writeAll("(inferred_alloc_const)"),
.inferred_alloc_mut => return out_stream.writeAll("(inferred_alloc_mut)"),
// TODO use declaration name
.@"enum" => return out_stream.writeAll("enum {}"),
.@"struct" => return out_stream.writeAll("struct {}"),
.@"union" => return out_stream.writeAll("union {}"),
.@"opaque" => return out_stream.writeAll("opaque {}"),
}
unreachable;
@ -822,12 +939,22 @@ pub const Type = extern union {
.optional,
.optional_single_mut_pointer,
.optional_single_const_pointer,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
=> true,
.@"struct" => {
// TODO introduce lazy value mechanism
const struct_obj = self.castTag(.@"struct").?.data;
for (struct_obj.fields.entries.items) |entry| {
if (entry.value.ty.hasCodeGenBits())
return true;
} else {
return false;
}
},
// TODO lazy types
.array => self.elemType().hasCodeGenBits() and self.arrayLen() != 0,
.array_u8 => self.arrayLen() != 0,
@ -839,10 +966,6 @@ pub const Type = extern union {
return payload.error_set.hasCodeGenBits() or payload.payload.hasCodeGenBits();
},
.@"enum" => @panic("TODO"),
.@"struct" => @panic("TODO"),
.@"union" => @panic("TODO"),
.c_void,
.void,
.type,
@ -853,6 +976,7 @@ pub const Type = extern union {
.@"undefined",
.enum_literal,
.empty_struct,
.empty_struct_literal,
.@"opaque",
=> false,
@ -863,7 +987,39 @@ pub const Type = extern union {
}
pub fn isNoReturn(self: Type) bool {
return self.zigTypeTag() == .NoReturn;
const definitely_correct_result = self.zigTypeTag() == .NoReturn;
const fast_result = self.tag_if_small_enough == @enumToInt(Tag.noreturn);
assert(fast_result == definitely_correct_result);
return fast_result;
}
pub fn ptrAlignment(self: Type, target: Target) u32 {
switch (self.tag()) {
.single_const_pointer,
.single_mut_pointer,
.many_const_pointer,
.many_mut_pointer,
.c_const_pointer,
.c_mut_pointer,
.const_slice,
.mut_slice,
.optional_single_const_pointer,
.optional_single_mut_pointer,
=> return self.cast(Payload.ElemType).?.data.abiAlignment(target),
.const_slice_u8 => return 1,
.pointer => {
const ptr_info = self.castTag(.pointer).?.data;
if (ptr_info.@"align" != 0) {
return ptr_info.@"align";
} else {
return ptr_info.pointee_type.abiAlignment(target);
}
},
else => unreachable,
}
}
/// Asserts that hasCodeGenBits() is true.
@ -907,17 +1063,9 @@ pub const Type = extern union {
.mut_slice,
.optional_single_const_pointer,
.optional_single_mut_pointer,
.@"anyframe",
.anyframe_T,
.pointer,
=> return @divExact(target.cpu.arch.ptrBitWidth(), 8),
.pointer => {
const payload = self.castTag(.pointer).?.data;
if (payload.@"align" != 0) return payload.@"align";
return @divExact(target.cpu.arch.ptrBitWidth(), 8);
},
.c_short => return @divExact(CType.short.sizeInBits(target), 8),
.c_ushort => return @divExact(CType.ushort.sizeInBits(target), 8),
.c_int => return @divExact(CType.int.sizeInBits(target), 8),
@ -967,9 +1115,9 @@ pub const Type = extern union {
@panic("TODO abiAlignment error union");
},
.@"enum" => self.cast(Payload.Enum).?.abiAlignment(target),
.@"struct" => @panic("TODO"),
.@"union" => @panic("TODO"),
.@"struct" => {
@panic("TODO abiAlignment struct");
},
.c_void,
.void,
@ -981,6 +1129,7 @@ pub const Type = extern union {
.@"undefined",
.enum_literal,
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"opaque",
@ -1008,11 +1157,16 @@ pub const Type = extern union {
.enum_literal => unreachable,
.single_const_pointer_to_comptime_int => unreachable,
.empty_struct => unreachable,
.empty_struct_literal => unreachable,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
.@"opaque" => unreachable,
.var_args_param => unreachable,
.@"struct" => {
@panic("TODO abiSize struct");
},
.u8,
.i8,
.bool,
@ -1038,7 +1192,7 @@ pub const Type = extern union {
.i64, .u64 => return 8,
.u128, .i128 => return 16,
.@"anyframe", .anyframe_T, .isize, .usize => return @divExact(target.cpu.arch.ptrBitWidth(), 8),
.isize, .usize => return @divExact(target.cpu.arch.ptrBitWidth(), 8),
.const_slice,
.mut_slice,
@ -1119,10 +1273,6 @@ pub const Type = extern union {
}
@panic("TODO abiSize error union");
},
.@"enum" => @panic("TODO"),
.@"struct" => @panic("TODO"),
.@"union" => @panic("TODO"),
};
}
@ -1186,15 +1336,12 @@ pub const Type = extern union {
.const_slice,
.mut_slice,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.empty_struct,
.@"enum",
.@"struct",
.@"union",
.empty_struct,
.empty_struct_literal,
.@"opaque",
.var_args_param,
=> false,
@ -1264,16 +1411,13 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.empty_struct,
.@"enum",
.@"struct",
.@"union",
.empty_struct_literal,
.@"opaque",
.@"struct",
.var_args_param,
=> unreachable,
@ -1361,17 +1505,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> false,
@ -1442,17 +1583,14 @@ pub const Type = extern union {
.enum_literal,
.mut_slice,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> false,
@ -1532,17 +1670,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> false,
@ -1617,17 +1752,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> false,
@ -1689,7 +1821,11 @@ pub const Type = extern union {
.ErrorUnion => ty = ty.errorUnionChild(),
.Fn => @panic("TODO fn isValidVarType"),
.Struct => @panic("TODO struct isValidVarType"),
.Struct => {
// TODO this is not always correct; introduce lazy value mechanism
// and here we need to force a resolve of "type requires comptime".
return true;
},
.Union => @panic("TODO union isValidVarType"),
};
}
@ -1744,17 +1880,14 @@ pub const Type = extern union {
.optional_single_mut_pointer => unreachable,
.enum_literal => unreachable,
.error_union => unreachable,
.@"anyframe" => unreachable,
.anyframe_T => unreachable,
.anyerror_void_error_union => unreachable,
.error_set => unreachable,
.error_set_single => unreachable,
.@"struct" => unreachable,
.empty_struct => unreachable,
.empty_struct_literal => unreachable,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
.@"enum" => unreachable,
.@"struct" => unreachable,
.@"union" => unreachable,
.@"opaque" => unreachable,
.var_args_param => unreachable,
@ -1897,17 +2030,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.@"struct",
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> unreachable,
@ -1972,17 +2102,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.@"struct",
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> unreachable,
@ -2062,17 +2189,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.@"struct",
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> false,
@ -2148,17 +2272,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.@"struct",
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> false,
@ -2220,17 +2341,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.@"struct",
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> unreachable,
@ -2320,17 +2438,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.@"struct",
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> false,
@ -2441,17 +2556,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.@"struct",
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> unreachable,
@ -2528,17 +2640,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.@"struct",
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> unreachable,
@ -2614,17 +2723,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.@"struct",
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> unreachable,
@ -2700,17 +2806,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.@"struct",
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> unreachable,
@ -2783,17 +2886,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.@"struct",
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> unreachable,
@ -2866,17 +2966,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.@"struct",
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> unreachable,
@ -2949,17 +3046,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.@"struct",
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> false,
@ -3016,8 +3110,6 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.anyerror_void_error_union,
.anyframe_T,
.@"anyframe",
.error_union,
.error_set,
.error_set_single,
@ -3025,11 +3117,12 @@ pub const Type = extern union {
.var_args_param,
=> return null,
.@"enum" => @panic("TODO onePossibleValue enum"),
.@"struct" => @panic("TODO onePossibleValue struct"),
.@"union" => @panic("TODO onePossibleValue union"),
.@"struct" => {
log.warn("TODO implement Type.onePossibleValue for structs", .{});
return null;
},
.empty_struct => return Value.initTag(.empty_struct_value),
.empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value),
.void => return Value.initTag(.void_value),
.noreturn => return Value.initTag(.unreachable_value),
.@"null" => return Value.initTag(.null_value),
@ -3128,17 +3221,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
.@"struct",
.empty_struct,
.empty_struct_literal,
.inferred_alloc_const,
.inferred_alloc_mut,
.@"enum",
.@"struct",
.@"union",
.@"opaque",
.var_args_param,
=> return false,
@ -3220,8 +3310,6 @@ pub const Type = extern union {
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
@ -3231,13 +3319,12 @@ pub const Type = extern union {
.inferred_alloc_const,
.inferred_alloc_mut,
.var_args_param,
.empty_struct_literal,
=> unreachable,
.@"struct" => &self.castTag(.@"struct").?.data.container,
.empty_struct => self.castTag(.empty_struct).?.data,
.@"enum" => &self.castTag(.@"enum").?.scope,
.@"struct" => &self.castTag(.@"struct").?.scope,
.@"union" => &self.castTag(.@"union").?.scope,
.@"opaque" => &self.castTag(.@"opaque").?.scope,
.@"opaque" => &self.castTag(.@"opaque").?.data,
};
}
@ -3296,6 +3383,10 @@ pub const Type = extern union {
}
}
pub fn isExhaustiveEnum(ty: Type) bool {
return false; // TODO
}
/// This enum does not directly correspond to `std.builtin.TypeId` because
/// it has extra enum tags in it, as a way of using less memory. For example,
/// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types
@ -3346,11 +3437,12 @@ pub const Type = extern union {
fn_ccc_void_no_args,
single_const_pointer_to_comptime_int,
anyerror_void_error_union,
@"anyframe",
const_slice_u8,
/// This is a special type for variadic parameters of a function call.
/// Casts to it will validate that the type can be passed to a c calling convention function.
var_args_param,
/// Same as `empty_struct` except it has an empty namespace.
empty_struct_literal,
/// This is a special value that tracks a set of types that have been stored
/// to an inferred allocation. It does not support most of the normal type queries.
/// However it does respond to `isConstPtr`, `ptrSize`, `zigTypeTag`, etc.
@ -3379,14 +3471,11 @@ pub const Type = extern union {
optional_single_mut_pointer,
optional_single_const_pointer,
error_union,
anyframe_T,
error_set,
error_set_single,
empty_struct,
@"enum",
@"struct",
@"union",
@"opaque",
@"struct",
pub const last_no_payload_tag = Tag.inferred_alloc_const;
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
@ -3435,11 +3524,11 @@ pub const Type = extern union {
.fn_ccc_void_no_args,
.single_const_pointer_to_comptime_int,
.anyerror_void_error_union,
.@"anyframe",
.const_slice_u8,
.inferred_alloc_const,
.inferred_alloc_mut,
.var_args_param,
.empty_struct_literal,
=> @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"),
.array_u8,
@ -3457,25 +3546,23 @@ pub const Type = extern union {
.optional,
.optional_single_mut_pointer,
.optional_single_const_pointer,
.anyframe_T,
=> Payload.ElemType,
.int_signed,
.int_unsigned,
=> Payload.Bits,
.error_set => Payload.ErrorSet,
.array => Payload.Array,
.array_sentinel => Payload.ArraySentinel,
.pointer => Payload.Pointer,
.function => Payload.Function,
.error_union => Payload.ErrorUnion,
.error_set => Payload.Decl,
.error_set_single => Payload.Name,
.empty_struct => Payload.ContainerScope,
.@"enum" => Payload.Enum,
.@"struct" => Payload.Struct,
.@"union" => Payload.Union,
.@"opaque" => Payload.Opaque,
.@"struct" => Payload.Struct,
.empty_struct => Payload.ContainerScope,
};
}
@ -3550,6 +3637,13 @@ pub const Type = extern union {
},
};
pub const ErrorSet = struct {
pub const base_tag = Tag.error_set;
base: Payload = Payload{ .tag = base_tag },
data: *Module.ErrorSet,
};
pub const Pointer = struct {
pub const base_tag = Tag.pointer;
@ -3598,13 +3692,13 @@ pub const Type = extern union {
pub const Opaque = struct {
base: Payload = .{ .tag = .@"opaque" },
scope: Module.Scope.Container,
data: Module.Scope.Container,
};
pub const Enum = @import("type/Enum.zig");
pub const Struct = @import("type/Struct.zig");
pub const Union = @import("type/Union.zig");
pub const Struct = struct {
base: Payload = .{ .tag = .@"struct" },
data: *Module.Struct,
};
};
};

View File

@ -1,55 +0,0 @@
const std = @import("std");
const zir = @import("../zir.zig");
const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type;
const Module = @import("../Module.zig");
const Scope = Module.Scope;
const Enum = @This();
base: Type.Payload = .{ .tag = .@"enum" },
analysis: union(enum) {
queued: Zir,
in_progress,
resolved: Size,
failed,
},
scope: Scope.Container,
pub const Field = struct {
value: Value,
};
pub const Zir = struct {
body: zir.Body,
inst: *zir.Inst,
};
pub const Size = struct {
tag_type: Type,
fields: std.StringArrayHashMapUnmanaged(Field),
};
pub fn resolve(self: *Enum, mod: *Module, scope: *Scope) !void {
const zir = switch (self.analysis) {
.failed => return error.AnalysisFail,
.resolved => return,
.in_progress => {
return mod.fail(scope, src, "enum '{}' depends on itself", .{enum_name});
},
.queued => |zir| zir,
};
self.analysis = .in_progress;
// TODO
}
// TODO should this resolve the type or assert that it has already been resolved?
pub fn abiAlignment(self: *Enum, target: std.Target) u32 {
switch (self.analysis) {
.queued => unreachable, // alignment has not been resolved
.in_progress => unreachable, // alignment has not been resolved
.failed => unreachable, // type resolution failed
.resolved => |r| return r.tag_type.abiAlignment(target),
}
}

View File

@ -1,56 +0,0 @@
const std = @import("std");
const zir = @import("../zir.zig");
const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type;
const Module = @import("../Module.zig");
const Scope = Module.Scope;
const Struct = @This();
base: Type.Payload = .{ .tag = .@"struct" },
analysis: union(enum) {
queued: Zir,
zero_bits_in_progress,
zero_bits: Zero,
in_progress,
// alignment: Align,
resolved: Size,
failed,
},
scope: Scope.Container,
pub const Field = struct {
value: Value,
};
pub const Zir = struct {
body: zir.Body,
inst: *zir.Inst,
};
pub const Zero = struct {
is_zero_bits: bool,
fields: std.StringArrayHashMapUnmanaged(Field),
};
pub const Size = struct {
is_zero_bits: bool,
alignment: u32,
size: u32,
fields: std.StringArrayHashMapUnmanaged(Field),
};
pub fn resolveZeroBits(self: *Struct, mod: *Module, scope: *Scope) !void {
const zir = switch (self.analysis) {
.failed => return error.AnalysisFail,
.zero_bits_in_progress => {
return mod.fail(scope, src, "struct '{}' depends on itself", .{});
},
.queued => |zir| zir,
else => return,
};
self.analysis = .zero_bits_in_progress;
// TODO
}

View File

@ -1,56 +0,0 @@
const std = @import("std");
const zir = @import("../zir.zig");
const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type;
const Module = @import("../Module.zig");
const Scope = Module.Scope;
const Union = @This();
base: Type.Payload = .{ .tag = .@"struct" },
analysis: union(enum) {
queued: Zir,
zero_bits_in_progress,
zero_bits: Zero,
in_progress,
// alignment: Align,
resolved: Size,
failed,
},
scope: Scope.Container,
pub const Field = struct {
value: Value,
};
pub const Zir = struct {
body: zir.Body,
inst: *zir.Inst,
};
pub const Zero = struct {
is_zero_bits: bool,
fields: std.StringArrayHashMapUnmanaged(Field),
};
pub const Size = struct {
is_zero_bits: bool,
alignment: u32,
size: u32,
fields: std.StringArrayHashMapUnmanaged(Field),
};
pub fn resolveZeroBits(self: *Union, mod: *Module, scope: *Scope) !void {
const zir = switch (self.analysis) {
.failed => return error.AnalysisFail,
.zero_bits_in_progress => {
return mod.fail(scope, src, "union '{}' depends on itself", .{});
},
.queued => |zir| zir,
else => return,
};
self.analysis = .zero_bits_in_progress;
// TODO
}

View File

@ -30,6 +30,8 @@ pub const Value = extern union {
i32_type,
u64_type,
i64_type,
u128_type,
i128_type,
usize_type,
isize_type,
c_short_type,
@ -62,18 +64,19 @@ pub const Value = extern union {
single_const_pointer_to_comptime_int_type,
const_slice_u8_type,
enum_literal_type,
anyframe_type,
undef,
zero,
one,
void_value,
unreachable_value,
empty_struct_value,
empty_array,
null_value,
bool_true,
bool_false, // See last_no_payload_tag below.
bool_false,
abi_align_default,
empty_struct_value,
empty_array, // See last_no_payload_tag below.
// After this, the tag requires a payload.
ty,
@ -100,14 +103,13 @@ pub const Value = extern union {
float_64,
float_128,
enum_literal,
error_set,
@"error",
error_union,
/// This is a special value that tracks a set of types that have been stored
/// to an inferred allocation. It does not support any of the normal value queries.
inferred_alloc,
pub const last_no_payload_tag = Tag.bool_false;
pub const last_no_payload_tag = Tag.empty_array;
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
pub fn Type(comptime t: Tag) type {
@ -120,6 +122,8 @@ pub const Value = extern union {
.i32_type,
.u64_type,
.i64_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_short_type,
@ -152,7 +156,6 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.enum_literal_type,
.anyframe_type,
.undef,
.zero,
.one,
@ -163,6 +166,7 @@ pub const Value = extern union {
.null_value,
.bool_true,
.bool_false,
.abi_align_default,
=> @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"),
.int_big_positive,
@ -193,7 +197,6 @@ pub const Value = extern union {
.float_32 => Payload.Float_32,
.float_64 => Payload.Float_64,
.float_128 => Payload.Float_128,
.error_set => Payload.ErrorSet,
.@"error" => Payload.Error,
.inferred_alloc => Payload.InferredAlloc,
};
@ -275,6 +278,8 @@ pub const Value = extern union {
.i32_type,
.u64_type,
.i64_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_short_type,
@ -307,7 +312,6 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.enum_literal_type,
.anyframe_type,
.undef,
.zero,
.one,
@ -318,6 +322,7 @@ pub const Value = extern union {
.bool_true,
.bool_false,
.empty_struct_value,
.abi_align_default,
=> unreachable,
.ty => {
@ -400,7 +405,6 @@ pub const Value = extern union {
return Value{ .ptr_otherwise = &new_payload.base };
},
.error_set => return self.copyPayloadShallow(allocator, Payload.ErrorSet),
.inferred_alloc => unreachable,
}
}
@ -429,6 +433,8 @@ pub const Value = extern union {
.i32_type => return out_stream.writeAll("i32"),
.u64_type => return out_stream.writeAll("u64"),
.i64_type => return out_stream.writeAll("i64"),
.u128_type => return out_stream.writeAll("u128"),
.i128_type => return out_stream.writeAll("i128"),
.isize_type => return out_stream.writeAll("isize"),
.usize_type => return out_stream.writeAll("usize"),
.c_short_type => return out_stream.writeAll("c_short"),
@ -461,9 +467,8 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"),
.const_slice_u8_type => return out_stream.writeAll("[]const u8"),
.enum_literal_type => return out_stream.writeAll("@Type(.EnumLiteral)"),
.anyframe_type => return out_stream.writeAll("anyframe"),
.abi_align_default => return out_stream.writeAll("(default ABI alignment)"),
// TODO this should print `NAME{}`
.empty_struct_value => return out_stream.writeAll("struct {}{}"),
.null_value => return out_stream.writeAll("null"),
.undef => return out_stream.writeAll("undefined"),
@ -510,15 +515,6 @@ pub const Value = extern union {
.float_32 => return out_stream.print("{}", .{val.castTag(.float_32).?.data}),
.float_64 => return out_stream.print("{}", .{val.castTag(.float_64).?.data}),
.float_128 => return out_stream.print("{}", .{val.castTag(.float_128).?.data}),
.error_set => {
const error_set = val.castTag(.error_set).?.data;
try out_stream.writeAll("error{");
var it = error_set.fields.iterator();
while (it.next()) |entry| {
try out_stream.print("{},", .{entry.value});
}
return out_stream.writeAll("}");
},
.@"error" => return out_stream.print("error.{s}", .{val.castTag(.@"error").?.data.name}),
// TODO to print this it should be error{ Set, Items }!T(val), but we need the type for that
.error_union => return out_stream.print("error_union_val({})", .{val.castTag(.error_union).?.data}),
@ -557,6 +553,8 @@ pub const Value = extern union {
.i32_type => Type.initTag(.i32),
.u64_type => Type.initTag(.u64),
.i64_type => Type.initTag(.i64),
.u128_type => Type.initTag(.u128),
.i128_type => Type.initTag(.i128),
.usize_type => Type.initTag(.usize),
.isize_type => Type.initTag(.isize),
.c_short_type => Type.initTag(.c_short),
@ -589,7 +587,6 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int),
.const_slice_u8_type => Type.initTag(.const_slice_u8),
.enum_literal_type => Type.initTag(.enum_literal),
.anyframe_type => Type.initTag(.@"anyframe"),
.int_type => {
const payload = self.castTag(.int_type).?.data;
@ -602,10 +599,6 @@ pub const Value = extern union {
};
return Type.initPayload(&new.base);
},
.error_set => {
const payload = self.castTag(.error_set).?.data;
return Type.Tag.error_set.create(allocator, payload.decl);
},
.undef,
.zero,
@ -637,6 +630,7 @@ pub const Value = extern union {
.error_union,
.empty_struct_value,
.inferred_alloc,
.abi_align_default,
=> unreachable,
};
}
@ -654,6 +648,8 @@ pub const Value = extern union {
.i32_type,
.u64_type,
.i64_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_short_type,
@ -686,7 +682,6 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.enum_literal_type,
.anyframe_type,
.null_value,
.function,
.extern_fn,
@ -704,11 +699,11 @@ pub const Value = extern union {
.unreachable_value,
.empty_array,
.enum_literal,
.error_set,
.error_union,
.@"error",
.empty_struct_value,
.inferred_alloc,
.abi_align_default,
=> unreachable,
.undef => unreachable,
@ -741,6 +736,8 @@ pub const Value = extern union {
.i32_type,
.u64_type,
.i64_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_short_type,
@ -773,7 +770,6 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.enum_literal_type,
.anyframe_type,
.null_value,
.function,
.extern_fn,
@ -791,11 +787,11 @@ pub const Value = extern union {
.unreachable_value,
.empty_array,
.enum_literal,
.error_set,
.@"error",
.error_union,
.empty_struct_value,
.inferred_alloc,
.abi_align_default,
=> unreachable,
.undef => unreachable,
@ -828,6 +824,8 @@ pub const Value = extern union {
.i32_type,
.u64_type,
.i64_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_short_type,
@ -860,7 +858,6 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.enum_literal_type,
.anyframe_type,
.null_value,
.function,
.extern_fn,
@ -878,11 +875,11 @@ pub const Value = extern union {
.unreachable_value,
.empty_array,
.enum_literal,
.error_set,
.@"error",
.error_union,
.empty_struct_value,
.inferred_alloc,
.abi_align_default,
=> unreachable,
.undef => unreachable,
@ -942,6 +939,8 @@ pub const Value = extern union {
.i32_type,
.u64_type,
.i64_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_short_type,
@ -974,7 +973,6 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.enum_literal_type,
.anyframe_type,
.null_value,
.function,
.extern_fn,
@ -993,11 +991,11 @@ pub const Value = extern union {
.unreachable_value,
.empty_array,
.enum_literal,
.error_set,
.@"error",
.error_union,
.empty_struct_value,
.inferred_alloc,
.abi_align_default,
=> unreachable,
.zero,
@ -1034,6 +1032,8 @@ pub const Value = extern union {
.i32_type,
.u64_type,
.i64_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_short_type,
@ -1066,7 +1066,6 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.enum_literal_type,
.anyframe_type,
.null_value,
.function,
.extern_fn,
@ -1084,11 +1083,11 @@ pub const Value = extern union {
.unreachable_value,
.empty_array,
.enum_literal,
.error_set,
.@"error",
.error_union,
.empty_struct_value,
.inferred_alloc,
.abi_align_default,
=> unreachable,
.zero,
@ -1191,6 +1190,8 @@ pub const Value = extern union {
.i32_type,
.u64_type,
.i64_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_short_type,
@ -1223,7 +1224,6 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.enum_literal_type,
.anyframe_type,
.bool_true,
.bool_false,
.null_value,
@ -1244,11 +1244,11 @@ pub const Value = extern union {
.void_value,
.unreachable_value,
.enum_literal,
.error_set,
.@"error",
.error_union,
.empty_struct_value,
.inferred_alloc,
.abi_align_default,
=> unreachable,
.zero,
@ -1275,6 +1275,8 @@ pub const Value = extern union {
.i32_type,
.u64_type,
.i64_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_short_type,
@ -1307,7 +1309,6 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.enum_literal_type,
.anyframe_type,
.null_value,
.function,
.extern_fn,
@ -1322,11 +1323,11 @@ pub const Value = extern union {
.unreachable_value,
.empty_array,
.enum_literal,
.error_set,
.@"error",
.error_union,
.empty_struct_value,
.inferred_alloc,
.abi_align_default,
=> unreachable,
.zero,
@ -1427,6 +1428,8 @@ pub const Value = extern union {
.i32_type,
.u64_type,
.i64_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_short_type,
@ -1459,18 +1462,13 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.enum_literal_type,
.anyframe_type,
.ty,
.abi_align_default,
=> {
// Directly return Type.hash, toType can only fail for .int_type and .error_set.
// Directly return Type.hash, toType can only fail for .int_type.
var allocator = std.heap.FixedBufferAllocator.init(&[_]u8{});
return (self.toType(&allocator.allocator) catch unreachable).hash();
},
.error_set => {
// Payload.decl should be same for all instances of the type.
const payload = self.castTag(.error_set).?.data;
std.hash.autoHash(&hasher, payload.decl);
},
.int_type => {
const payload = self.castTag(.int_type).?.data;
var int_payload = Type.Payload.Bits{
@ -1585,6 +1583,8 @@ pub const Value = extern union {
.i32_type,
.u64_type,
.i64_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_short_type,
@ -1617,7 +1617,6 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.enum_literal_type,
.anyframe_type,
.zero,
.one,
.bool_true,
@ -1641,11 +1640,11 @@ pub const Value = extern union {
.unreachable_value,
.empty_array,
.enum_literal,
.error_set,
.@"error",
.error_union,
.empty_struct_value,
.inferred_alloc,
.abi_align_default,
=> unreachable,
.ref_val => self.castTag(.ref_val).?.data,
@ -1672,6 +1671,8 @@ pub const Value = extern union {
.i32_type,
.u64_type,
.i64_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_short_type,
@ -1704,7 +1705,6 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.enum_literal_type,
.anyframe_type,
.zero,
.one,
.bool_true,
@ -1728,11 +1728,11 @@ pub const Value = extern union {
.void_value,
.unreachable_value,
.enum_literal,
.error_set,
.@"error",
.error_union,
.empty_struct_value,
.inferred_alloc,
.abi_align_default,
=> unreachable,
.empty_array => unreachable, // out of bounds array index
@ -1776,6 +1776,8 @@ pub const Value = extern union {
.i32_type,
.u64_type,
.i64_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_short_type,
@ -1808,7 +1810,6 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.enum_literal_type,
.anyframe_type,
.zero,
.one,
.empty_array,
@ -1832,10 +1833,10 @@ pub const Value = extern union {
.float_128,
.void_value,
.enum_literal,
.error_set,
.@"error",
.error_union,
.empty_struct_value,
.abi_align_default,
=> false,
.undef => unreachable,
@ -1858,6 +1859,8 @@ pub const Value = extern union {
.i32_type,
.u64_type,
.i64_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_short_type,
@ -1890,7 +1893,6 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.enum_literal_type,
.anyframe_type,
.zero,
.one,
.null_value,
@ -1915,8 +1917,8 @@ pub const Value = extern union {
.float_128,
.void_value,
.enum_literal,
.error_set,
.empty_struct_value,
.abi_align_default,
=> null,
.error_union => {
@ -1960,6 +1962,8 @@ pub const Value = extern union {
.i32_type,
.u64_type,
.i64_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_short_type,
@ -1992,8 +1996,6 @@ pub const Value = extern union {
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.enum_literal_type,
.anyframe_type,
.error_set,
=> true,
.zero,
@ -2023,6 +2025,7 @@ pub const Value = extern union {
.error_union,
.empty_struct_value,
.null_value,
.abi_align_default,
=> false,
.undef => unreachable,
@ -2137,18 +2140,6 @@ pub const Value = extern union {
data: f128,
};
/// TODO move to type.zig
pub const ErrorSet = struct {
pub const base_tag = Tag.error_set;
base: Payload = .{ .tag = base_tag },
data: struct {
/// TODO revisit this when we have the concept of the error tag type
fields: std.StringHashMapUnmanaged(void),
decl: *Module.Decl,
},
};
pub const Error = struct {
base: Payload = .{ .tag = .@"error" },
data: struct {
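
Editor's note: the inferred_alloc doc comment earlier in this file's diff is compiler-internal, but the language-level behavior it supports can be shown with ordinary user code. A hedged illustration (assuming the untyped-var case is what the tracked set of stored types serves): each branch below stores a differently-typed value into the same declaration, and peer resolution picks u16.

const std = @import("std");

test "an untyped var's type is resolved from the values stored into it" {
    var cond = true;
    // `x` has no declared type; the types stored by the two branches
    // (u8 and u16) are collected and peer-resolved to u16.
    var x = if (cond) @as(u8, 1) else @as(u16, 512);
    std.testing.expect(@TypeOf(x) == u16);
}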

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1027,9 +1027,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\}
\\fn foo() void {}
, &[_][]const u8{
"tmp.zig:3:21: error: async call in nosuspend scope",
"tmp.zig:4:9: error: suspend in nosuspend scope",
"tmp.zig:5:9: error: resume in nosuspend scope",
});
cases.add("atomicrmw with bool op not .Xchg",

View File

@ -1,5 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
const builtin = std.builtin;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectEqualStrings = std.testing.expectEqualStrings;
@ -1545,6 +1545,68 @@ test "nosuspend on function calls" {
expectEqual(@as(i32, 42), (try nosuspend S1.d()).b);
}
test "nosuspend on async function calls" {
const S0 = struct {
b: i32 = 42,
};
const S1 = struct {
fn c() S0 {
return S0{};
}
fn d() !S0 {
return S0{};
}
};
var frame_c = nosuspend async S1.c();
expectEqual(@as(i32, 42), (await frame_c).b);
var frame_d = nosuspend async S1.d();
expectEqual(@as(i32, 42), (try await frame_d).b);
}
// test "resume nosuspend async function calls" {
// const S0 = struct {
// b: i32 = 42,
// };
// const S1 = struct {
// fn c() S0 {
// suspend;
// return S0{};
// }
// fn d() !S0 {
// suspend;
// return S0{};
// }
// };
// var frame_c = nosuspend async S1.c();
// resume frame_c;
// expectEqual(@as(i32, 42), (await frame_c).b);
// var frame_d = nosuspend async S1.d();
// resume frame_d;
// expectEqual(@as(i32, 42), (try await frame_d).b);
// }
test "nosuspend resume async function calls" {
const S0 = struct {
b: i32 = 42,
};
const S1 = struct {
fn c() S0 {
suspend;
return S0{};
}
fn d() !S0 {
suspend;
return S0{};
}
};
var frame_c = async S1.c();
nosuspend resume frame_c;
expectEqual(@as(i32, 42), (await frame_c).b);
var frame_d = async S1.d();
nosuspend resume frame_d;
expectEqual(@as(i32, 42), (try await frame_d).b);
}
test "avoid forcing frame alignment resolution implicit cast to *c_void" {
const S = struct {
var x: ?*c_void = null;

View File

@ -378,4 +378,45 @@ pub fn addCases(ctx: *TestContext) !void {
"",
);
}
{
var case = ctx.exe("save function return values in callee preserved register", linux_arm);
// Here the result of bar() must be saved in a callee-preserved
// register; otherwise it would be clobbered by the first argument
// to baz().
case.addCompareOutput(
\\export fn _start() noreturn {
\\ assert(foo() == 43);
\\ exit();
\\}
\\
\\fn foo() u32 {
\\ return bar() + baz(42);
\\}
\\
\\fn bar() u32 {
\\ return 1;
\\}
\\
\\fn baz(x: u32) u32 {
\\ return x;
\\}
\\
\\fn assert(ok: bool) void {
\\ if (!ok) unreachable;
\\}
\\
\\fn exit() noreturn {
\\ asm volatile ("svc #0"
\\ :
\\ : [number] "{r7}" (1),
\\ [arg1] "{r0}" (0)
\\ : "memory"
\\ );
\\ unreachable;
\\}
,
"",
);
}
}

View File

@ -39,6 +39,21 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
\\fn unused() void {}
, "yo!" ++ std.cstr.line_sep);
// The return type and the calling convention must be comptime-known.
case.addError(
\\var x: i32 = 1234;
\\export fn main() x {
\\ return 0;
\\}
\\export fn foo() callconv(y) c_int {
\\ return 0;
\\}
\\var y: i32 = 1234;
, &.{
":2:18: error: unable to resolve comptime value",
":5:26: error: unable to resolve comptime value",
});
}
{
@ -54,6 +69,42 @@ pub fn addCases(ctx: *TestContext) !void {
, "Hello, world!" ++ std.cstr.line_sep);
}
{
var case = ctx.exeFromCompiledC("@intToError", .{});
case.addCompareOutput(
\\pub export fn main() c_int {
\\ // comptime checks
\\ const a = error.A;
\\ const b = error.B;
\\ const c = @intToError(2);
\\ const d = @intToError(1);
\\ if (!(c == b)) unreachable;
\\ if (!(a == d)) unreachable;
\\ // runtime checks
\\ var x = error.A;
\\ var y = error.B;
\\ var z = @intToError(2);
\\ var f = @intToError(1);
\\ if (!(y == z)) unreachable;
\\ if (!(x == f)) unreachable;
\\ return 0;
\\}
, "");
case.addError(
\\pub export fn main() c_int {
\\ const c = @intToError(0);
\\ return 0;
\\}
, &.{":2:27: error: integer value 0 represents no error"});
case.addError(
\\pub export fn main() c_int {
\\ const c = @intToError(3);
\\ return 0;
\\}
, &.{":2:27: error: integer value 3 represents no error"});
}
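
A round trip between the two builtins these cases exercise can also be checked in isolation (the error name here is arbitrary):

const std = @import("std");

test "@errorToInt and @intToError round-trip" {
    const err = error.Something;
    // @errorToInt yields the integer value backing the error; feeding it
    // back through @intToError recovers the same error value.
    const n = @errorToInt(err);
    std.testing.expect(@intToError(n) == err);
}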
{
var case = ctx.exeFromCompiledC("x86_64-linux inline assembly", linux_x64);
@ -243,6 +294,134 @@ pub fn addCases(ctx: *TestContext) !void {
\\ return a - 4;
\\}
, "");
// Switch expression missing else case.
case.addError(
\\export fn main() c_int {
\\ var cond: c_int = 0;
\\ const a: c_int = switch (cond) {
\\ 1 => 1,
\\ 2 => 2,
\\ 3 => 3,
\\ 4 => 4,
\\ };
\\ return a - 4;
\\}
, &.{":3:22: error: switch must handle all possibilities"});
// Switch expression that has an unreachable prong.
case.addCompareOutput(
\\export fn main() c_int {
\\ var cond: c_int = 0;
\\ const a: c_int = switch (cond) {
\\ 1 => 1,
\\ 2 => 2,
\\ 99...300, 12 => 3,
\\ 0 => 4,
\\ 13 => unreachable,
\\ else => 5,
\\ };
\\ return a - 4;
\\}
, "");
// Switch expression with an unreachable prong, where the prongs
// write to their result locations.
case.addCompareOutput(
\\export fn main() c_int {
\\ var cond: c_int = 0;
\\ var a: c_int = switch (cond) {
\\ 1 => 1,
\\ 2 => 2,
\\ 99...300, 12 => 3,
\\ 0 => 4,
\\ 13 => unreachable,
\\ else => 5,
\\ };
\\ return a - 4;
\\}
, "");
// Integer switch expression has duplicate case value.
case.addError(
\\export fn main() c_int {
\\ var cond: c_int = 0;
\\ const a: c_int = switch (cond) {
\\ 1 => 1,
\\ 2 => 2,
\\ 96, 11...13, 97 => 3,
\\ 0 => 4,
\\ 90, 12 => 100,
\\ else => 5,
\\ };
\\ return a - 4;
\\}
, &.{
":8:13: error: duplicate switch value",
":6:15: note: previous value here",
});
// Boolean switch expression has duplicate case value.
case.addError(
\\export fn main() c_int {
\\ var a: bool = false;
\\ const b: c_int = switch (a) {
\\ false => 1,
\\ true => 2,
\\ false => 3,
\\ };
\\}
, &.{
":6:9: error: duplicate switch value",
});
// Sparse (not range-capable) switch expression has duplicate case value.
case.addError(
\\export fn main() c_int {
\\ const A: type = i32;
\\ const b: c_int = switch (A) {
\\ i32 => 1,
\\ bool => 2,
\\ f64, i32 => 3,
\\ else => 4,
\\ };
\\}
, &.{
":6:14: error: duplicate switch value",
":4:9: note: previous value here",
});
// Ranges not allowed for some kinds of switches.
case.addError(
\\export fn main() c_int {
\\ const A: type = i32;
\\ const b: c_int = switch (A) {
\\ i32 => 1,
\\ bool => 2,
\\ f16...f64 => 3,
\\ else => 4,
\\ };
\\}
, &.{
":3:30: error: ranges not allowed when switching on type 'type'",
":6:12: note: range here",
});
// Switch expression has unreachable else prong.
case.addError(
\\export fn main() c_int {
\\ var a: u2 = 0;
\\ const b: i32 = switch (a) {
\\ 0 => 10,
\\ 1 => 20,
\\ 2 => 30,
\\ 3 => 40,
\\ else => 50,
\\ };
\\}
, &.{
":8:14: error: unreachable else prong; all cases already handled",
});
}
//{
// var case = ctx.exeFromCompiledC("optionals", .{});
@ -271,6 +450,7 @@ pub fn addCases(ctx: *TestContext) !void {
// \\}
// , "");
//}
{
var case = ctx.exeFromCompiledC("errors", .{});
case.addCompareOutput(

View File

@ -355,7 +355,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ const z = @TypeOf(true, 1);
\\ unreachable;
\\}
, &[_][]const u8{":2:29: error: incompatible types: 'bool' and 'comptime_int'"});
, &[_][]const u8{":2:15: error: incompatible types: 'bool' and 'comptime_int'"});
}
{
@ -621,6 +621,43 @@ pub fn addCases(ctx: *TestContext) !void {
"hello\nhello\nhello\nhello\n",
);
// inline while requires the condition to be comptime-known.
case.addError(
\\export fn _start() noreturn {
\\ var i: u32 = 0;
\\ inline while (i < 4) : (i += 1) print();
\\ assert(i == 4);
\\
\\ exit();
\\}
\\
\\fn print() void {
\\ asm volatile ("syscall"
\\ :
\\ : [number] "{rax}" (1),
\\ [arg1] "{rdi}" (1),
\\ [arg2] "{rsi}" (@ptrToInt("hello\n")),
\\ [arg3] "{rdx}" (6)
\\ : "rcx", "r11", "memory"
\\ );
\\ return;
\\}
\\
\\pub fn assert(ok: bool) void {
\\ if (!ok) unreachable; // assertion failure
\\}
\\
\\fn exit() noreturn {
\\ asm volatile ("syscall"
\\ :
\\ : [number] "{rax}" (231),
\\ [arg1] "{rdi}" (0)
\\ : "rcx", "r11", "memory"
\\ );
\\ unreachable;
\\}
, &[_][]const u8{":3:21: error: unable to resolve comptime value"});
// Labeled blocks (no conditional branch)
case.addCompareOutput(
\\export fn _start() noreturn {
@ -1070,7 +1107,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
\\fn x() void {}
, &[_][]const u8{
":11:8: error: found compile log statement",
":9:5: error: found compile log statement",
":4:5: note: also here",
});
}
@ -1294,10 +1331,9 @@ pub fn addCases(ctx: *TestContext) !void {
,
"",
);
// TODO this should be :8:21 not :8:19. we need to improve source locations
// to be relative to the containing Decl so that they can survive when the byte
// offset of a previous Decl changes. Here the change from 7 to 999 introduces
// +2 to the byte offset and makes the error location wrong by 2 bytes.
// This additionally tests that the compile error reports the correct source location.
// Without storing source locations relative to the owner decl, the change from
// "7" to "999" would shift later byte offsets and put the error location off by 2 bytes.
case.addError(
\\export fn _start() noreturn {
\\ const y = fibonacci(999);
@ -1318,7 +1354,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ );
\\ unreachable;
\\}
, &[_][]const u8{":8:19: error: evaluation exceeded 1000 backwards branches"});
, &[_][]const u8{":8:21: error: evaluation exceeded 1000 backwards branches"});
}
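
The decl-relative source-location scheme mentioned above can be illustrated with plain arithmetic (all offsets here are made up):

const std = @import("std");

test "decl-relative offsets survive edits before the decl" {
    // An error recorded at absolute byte offset 140, inside a decl starting
    // at offset 120, is stored as the relative offset +20. An edit before
    // the decl ("7" -> "999", +2 bytes) moves the decl itself.
    const decl_start_before: u32 = 120;
    const stored_rel: u32 = 20;
    const edit_delta: u32 = 2;
    const decl_start_after = decl_start_before + edit_delta;
    // Reconstructing the absolute offset after the edit lands 2 bytes later,
    // exactly tracking the moved decl, so the location stays correct.
    std.testing.expect(decl_start_after + stored_rel == 142);
}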
{
var case = ctx.exe("orelse at comptime", linux_x64);
@ -1442,6 +1478,7 @@ pub fn addCases(ctx: *TestContext) !void {
,
"",
);
case.addCompareOutput(
\\export fn _start() noreturn {
\\ const i: anyerror!u64 = error.B;
@ -1464,6 +1501,7 @@ pub fn addCases(ctx: *TestContext) !void {
,
"",
);
case.addCompareOutput(
\\export fn _start() noreturn {
\\ const a: anyerror!comptime_int = 42;
@ -1485,11 +1523,12 @@ pub fn addCases(ctx: *TestContext) !void {
\\ unreachable;
\\}
, "");
case.addCompareOutput(
\\export fn _start() noreturn {
\\const a: anyerror!u32 = error.B;
\\_ = &(a catch |err| assert(err == error.B));
\\exit();
\\ const a: anyerror!u32 = error.B;
\\ _ = &(a catch |err| assert(err == error.B));
\\ exit();
\\}
\\fn assert(b: bool) void {
\\ if (!b) unreachable;
@ -1504,6 +1543,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ unreachable;
\\}
, "");
case.addCompareOutput(
\\export fn _start() noreturn {
\\ const a: anyerror!u32 = error.Bar;