diff --git a/CMakeLists.txt b/CMakeLists.txt index d83dfa3efb..6d3d564648 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -539,7 +539,7 @@ set(ZIG_STAGE2_SOURCES "${CMAKE_SOURCE_DIR}/src/ThreadPool.zig" "${CMAKE_SOURCE_DIR}/src/TypedValue.zig" "${CMAKE_SOURCE_DIR}/src/WaitGroup.zig" - "${CMAKE_SOURCE_DIR}/src/astgen.zig" + "${CMAKE_SOURCE_DIR}/src/AstGen.zig" "${CMAKE_SOURCE_DIR}/src/clang.zig" "${CMAKE_SOURCE_DIR}/src/clang_options.zig" "${CMAKE_SOURCE_DIR}/src/clang_options_data.zig" @@ -591,7 +591,7 @@ set(ZIG_STAGE2_SOURCES "${CMAKE_SOURCE_DIR}/src/value.zig" "${CMAKE_SOURCE_DIR}/src/windows_sdk.zig" "${CMAKE_SOURCE_DIR}/src/zir.zig" - "${CMAKE_SOURCE_DIR}/src/zir_sema.zig" + "${CMAKE_SOURCE_DIR}/src/Sema.zig" ) if(MSVC) diff --git a/doc/docgen.zig b/doc/docgen.zig index f7d8c2c1b8..c77f439f06 100644 --- a/doc/docgen.zig +++ b/doc/docgen.zig @@ -1349,7 +1349,7 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any } const escaped_stderr = try escapeHtml(allocator, result.stderr); const colored_stderr = try termColor(allocator, escaped_stderr); - try out.print("
$ zig test {s}.zig{s}\n{s}
\n", .{ + try out.print("
$ zig test {s}.zig {s}\n{s}
\n", .{ code.name, mode_arg, colored_stderr, diff --git a/doc/langref.html.in b/doc/langref.html.in index dc18c0a069..9aa106fb54 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -6594,13 +6594,11 @@ const std = @import("std"); const expect = std.testing.expect; test "async and await" { - // Here we have an exception where we do not match an async - // with an await. The test block is not async and so cannot - // have a suspend point in it. - // This is well-defined behavior, and everything is OK here. - // Note however that there would be no way to collect the - // return value of amain, if it were something other than void. - _ = async amain(); + // The test block is not async and so cannot have a suspend + // point in it. By using the nosuspend keyword, we promise that + // the code in amain will finish executing without suspending + // back to the test block. + nosuspend amain(); } fn amain() void { @@ -10799,9 +10797,16 @@ fn readU32Be() u32 {}
{#syntax#}nosuspend{#endsyntax#}
- The {#syntax#}nosuspend{#endsyntax#} keyword. + The {#syntax#}nosuspend{#endsyntax#} keyword can be used in front of a block, statement or expression, to mark a scope where no suspension points are reached. + In particular, inside a {#syntax#}nosuspend{#endsyntax#} scope: + Code inside a {#syntax#}nosuspend{#endsyntax#} scope does not cause the enclosing function to become an {#link|async function|Async Functions#}. + diff --git a/lib/std/enums.zig b/lib/std/enums.zig index bddda38c9f..a868bdeb26 100644 --- a/lib/std/enums.zig +++ b/lib/std/enums.zig @@ -32,7 +32,7 @@ pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_def .fields = fields, .decls = &[_]std.builtin.TypeInfo.Declaration{}, .is_tuple = false, - }}); + } }); } /// Looks up the supplied fields in the given enum type. @@ -70,7 +70,7 @@ pub fn values(comptime E: type) []const E { test "std.enum.values" { const E = extern enum { a, b, c, d = 0 }; - testing.expectEqualSlices(E, &.{.a, .b, .c, .d}, values(E)); + testing.expectEqualSlices(E, &.{ .a, .b, .c, .d }, values(E)); } /// Returns the set of all unique named values in the given enum, in @@ -82,10 +82,10 @@ pub fn uniqueValues(comptime E: type) []const E { test "std.enum.uniqueValues" { const E = extern enum { a, b, c, d = 0, e, f = 3 }; - testing.expectEqualSlices(E, &.{.a, .b, .c, .f}, uniqueValues(E)); + testing.expectEqualSlices(E, &.{ .a, .b, .c, .f }, uniqueValues(E)); const F = enum { a, b, c }; - testing.expectEqualSlices(F, &.{.a, .b, .c}, uniqueValues(F)); + testing.expectEqualSlices(F, &.{ .a, .b, .c }, uniqueValues(F)); } /// Returns the set of all unique field values in the given enum, in @@ -102,8 +102,7 @@ pub fn uniqueFields(comptime E: type) []const EnumField { } var unique_fields: []const EnumField = &[_]EnumField{}; - outer: - for (raw_fields) |candidate| { + outer: for (raw_fields) |candidate| { for (unique_fields) |u| { if (u.value == candidate.value) continue :outer; @@ -116,28 +115,25 @@ pub fn 
uniqueFields(comptime E: type) []const EnumField { } /// Determines the length of a direct-mapped enum array, indexed by -/// @intCast(usize, @enumToInt(enum_value)). The enum must be exhaustive. +/// @intCast(usize, @enumToInt(enum_value)). +/// If the enum is non-exhaustive, the resulting length will only be enough +/// to hold all explicit fields. /// If the enum contains any fields with values that cannot be represented /// by usize, a compile error is issued. The max_unused_slots parameter limits /// the total number of items which have no matching enum key (holes in the enum /// numbering). So for example, if an enum has values 1, 2, 5, and 6, max_unused_slots /// must be at least 3, to allow unused slots 0, 3, and 4. fn directEnumArrayLen(comptime E: type, comptime max_unused_slots: comptime_int) comptime_int { - const info = @typeInfo(E).Enum; - if (!info.is_exhaustive) { - @compileError("Cannot create direct array of non-exhaustive enum "++@typeName(E)); - } - var max_value: comptime_int = -1; const max_usize: comptime_int = ~@as(usize, 0); const fields = uniqueFields(E); for (fields) |f| { if (f.value < 0) { - @compileError("Cannot create a direct enum array for "++@typeName(E)++", field ."++f.name++" has a negative value."); + @compileError("Cannot create a direct enum array for " ++ @typeName(E) ++ ", field ." ++ f.name ++ " has a negative value."); } if (f.value > max_value) { if (f.value > max_usize) { - @compileError("Cannot create a direct enum array for "++@typeName(E)++", field ."++f.name++" is larger than the max value of usize."); + @compileError("Cannot create a direct enum array for " ++ @typeName(E) ++ ", field ." 
++ f.name ++ " is larger than the max value of usize."); } max_value = f.value; } @@ -147,14 +143,16 @@ fn directEnumArrayLen(comptime E: type, comptime max_unused_slots: comptime_int) if (unused_slots > max_unused_slots) { const unused_str = std.fmt.comptimePrint("{d}", .{unused_slots}); const allowed_str = std.fmt.comptimePrint("{d}", .{max_unused_slots}); - @compileError("Cannot create a direct enum array for "++@typeName(E)++". It would have "++unused_str++" unused slots, but only "++allowed_str++" are allowed."); + @compileError("Cannot create a direct enum array for " ++ @typeName(E) ++ ". It would have " ++ unused_str ++ " unused slots, but only " ++ allowed_str ++ " are allowed."); } return max_value + 1; } /// Initializes an array of Data which can be indexed by -/// @intCast(usize, @enumToInt(enum_value)). The enum must be exhaustive. +/// @intCast(usize, @enumToInt(enum_value)). +/// If the enum is non-exhaustive, the resulting array will only be large enough +/// to hold all explicit fields. /// If the enum contains any fields with values that cannot be represented /// by usize, a compile error is issued. 
The max_unused_slots parameter limits /// the total number of items which have no matching enum key (holes in the enum @@ -243,9 +241,9 @@ pub fn nameCast(comptime E: type, comptime value: anytype) E { if (@hasField(E, n)) { return @field(E, n); } - @compileError("Enum "++@typeName(E)++" has no field named "++n); + @compileError("Enum " ++ @typeName(E) ++ " has no field named " ++ n); } - @compileError("Cannot cast from "++@typeName(@TypeOf(value))++" to "++@typeName(E)); + @compileError("Cannot cast from " ++ @typeName(@TypeOf(value)) ++ " to " ++ @typeName(E)); } } @@ -256,7 +254,7 @@ test "std.enums.nameCast" { testing.expectEqual(A.a, nameCast(A, A.a)); testing.expectEqual(A.a, nameCast(A, B.a)); testing.expectEqual(A.a, nameCast(A, "a")); - testing.expectEqual(A.a, nameCast(A, @as(*const[1]u8, "a"))); + testing.expectEqual(A.a, nameCast(A, @as(*const [1]u8, "a"))); testing.expectEqual(A.a, nameCast(A, @as([:0]const u8, "a"))); testing.expectEqual(A.a, nameCast(A, @as([]const u8, "a"))); @@ -398,12 +396,12 @@ pub fn EnumArray(comptime E: type, comptime V: type) type { pub fn NoExtension(comptime Self: type) type { return NoExt; } -const NoExt = struct{}; +const NoExt = struct {}; /// A set type with an Indexer mapping from keys to indices. /// Presence or absence is stored as a dense bitfield. This /// type does no allocation and can be copied by value. -pub fn IndexedSet(comptime I: type, comptime Ext: fn(type)type) type { +pub fn IndexedSet(comptime I: type, comptime Ext: fn (type) type) type { comptime ensureIndexer(I); return struct { const Self = @This(); @@ -422,7 +420,7 @@ pub fn IndexedSet(comptime I: type, comptime Ext: fn(type)type) type { bits: BitSet = BitSet.initEmpty(), - /// Returns a set containing all possible keys. + /// Returns a set containing all possible keys. 
pub fn initFull() Self { return .{ .bits = BitSet.initFull() }; } @@ -492,7 +490,8 @@ pub fn IndexedSet(comptime I: type, comptime Ext: fn(type)type) type { pub fn next(self: *Iterator) ?Key { return if (self.inner.next()) |index| Indexer.keyForIndex(index) - else null; + else + null; } }; }; @@ -501,7 +500,7 @@ pub fn IndexedSet(comptime I: type, comptime Ext: fn(type)type) type { /// A map from keys to values, using an index lookup. Uses a /// bitfield to track presence and a dense array of values. /// This type does no allocation and can be copied by value. -pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: fn(type)type) type { +pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: fn (type) type) type { comptime ensureIndexer(I); return struct { const Self = @This(); @@ -652,7 +651,8 @@ pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: fn(type)type .key = Indexer.keyForIndex(index), .value = &self.values[index], } - else null; + else + null; } }; }; @@ -660,7 +660,7 @@ pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: fn(type)type /// A dense array of values, using an indexed lookup. /// This type does no allocation and can be copied by value. 
-pub fn IndexedArray(comptime I: type, comptime V: type, comptime Ext: fn(type)type) type { +pub fn IndexedArray(comptime I: type, comptime V: type, comptime Ext: fn (type) type) type { comptime ensureIndexer(I); return struct { const Self = @This(); @@ -769,9 +769,9 @@ pub fn ensureIndexer(comptime T: type) void { if (!@hasDecl(T, "count")) @compileError("Indexer must have decl count: usize."); if (@TypeOf(T.count) != usize) @compileError("Indexer.count must be a usize."); if (!@hasDecl(T, "indexOf")) @compileError("Indexer.indexOf must be a fn(Key)usize."); - if (@TypeOf(T.indexOf) != fn(T.Key)usize) @compileError("Indexer must have decl indexOf: fn(Key)usize."); + if (@TypeOf(T.indexOf) != fn (T.Key) usize) @compileError("Indexer must have decl indexOf: fn(Key)usize."); if (!@hasDecl(T, "keyForIndex")) @compileError("Indexer must have decl keyForIndex: fn(usize)Key."); - if (@TypeOf(T.keyForIndex) != fn(usize)T.Key) @compileError("Indexer.keyForIndex must be a fn(usize)Key."); + if (@TypeOf(T.keyForIndex) != fn (usize) T.Key) @compileError("Indexer.keyForIndex must be a fn(usize)Key."); } } @@ -802,14 +802,18 @@ pub fn EnumIndexer(comptime E: type) type { return struct { pub const Key = E; pub const count: usize = 0; - pub fn indexOf(e: E) usize { unreachable; } - pub fn keyForIndex(i: usize) E { unreachable; } + pub fn indexOf(e: E) usize { + unreachable; + } + pub fn keyForIndex(i: usize) E { + unreachable; + } }; } std.sort.sort(EnumField, &fields, {}, ascByValue); const min = fields[0].value; - const max = fields[fields.len-1].value; - if (max - min == fields.len-1) { + const max = fields[fields.len - 1].value; + if (max - min == fields.len - 1) { return struct { pub const Key = E; pub const count = fields.len; @@ -844,7 +848,7 @@ pub fn EnumIndexer(comptime E: type) type { } test "std.enums.EnumIndexer dense zeroed" { - const E = enum{ b = 1, a = 0, c = 2 }; + const E = enum { b = 1, a = 0, c = 2 }; const Indexer = EnumIndexer(E); ensureIndexer(Indexer); 
testing.expectEqual(E, Indexer.Key); @@ -908,7 +912,7 @@ test "std.enums.EnumIndexer sparse" { } test "std.enums.EnumIndexer repeats" { - const E = extern enum{ a = -2, c = 6, b = 4, b2 = 4 }; + const E = extern enum { a = -2, c = 6, b = 4, b2 = 4 }; const Indexer = EnumIndexer(E); ensureIndexer(Indexer); testing.expectEqual(E, Indexer.Key); @@ -957,7 +961,8 @@ test "std.enums.EnumSet" { } var mut = Set.init(.{ - .a=true, .c=true, + .a = true, + .c = true, }); testing.expectEqual(@as(usize, 2), mut.count()); testing.expectEqual(true, mut.contains(.a)); @@ -986,7 +991,7 @@ test "std.enums.EnumSet" { testing.expectEqual(@as(?E, null), it.next()); } - mut.toggleSet(Set.init(.{ .a=true, .b=true })); + mut.toggleSet(Set.init(.{ .a = true, .b = true })); testing.expectEqual(@as(usize, 2), mut.count()); testing.expectEqual(true, mut.contains(.a)); testing.expectEqual(false, mut.contains(.b)); @@ -994,7 +999,7 @@ test "std.enums.EnumSet" { testing.expectEqual(true, mut.contains(.d)); testing.expectEqual(true, mut.contains(.e)); // aliases a - mut.setUnion(Set.init(.{ .a=true, .b=true })); + mut.setUnion(Set.init(.{ .a = true, .b = true })); testing.expectEqual(@as(usize, 3), mut.count()); testing.expectEqual(true, mut.contains(.a)); testing.expectEqual(true, mut.contains(.b)); @@ -1009,7 +1014,7 @@ test "std.enums.EnumSet" { testing.expectEqual(false, mut.contains(.c)); testing.expectEqual(true, mut.contains(.d)); - mut.setIntersection(Set.init(.{ .a=true, .b=true })); + mut.setIntersection(Set.init(.{ .a = true, .b = true })); testing.expectEqual(@as(usize, 1), mut.count()); testing.expectEqual(true, mut.contains(.a)); testing.expectEqual(false, mut.contains(.b)); @@ -1072,7 +1077,7 @@ test "std.enums.EnumArray sized" { const undef = Array.initUndefined(); var inst = Array.initFill(5); const inst2 = Array.init(.{ .a = 1, .b = 2, .c = 3, .d = 4 }); - const inst3 = Array.initDefault(6, .{.b = 4, .c = 2}); + const inst3 = Array.initDefault(6, .{ .b = 4, .c = 2 }); 
testing.expectEqual(@as(usize, 5), inst.get(.a)); testing.expectEqual(@as(usize, 5), inst.get(.b)); @@ -1272,10 +1277,12 @@ test "std.enums.EnumMap sized" { var iter = a.iterator(); const Entry = Map.Entry; testing.expectEqual(@as(?Entry, Entry{ - .key = .b, .value = &a.values[1], + .key = .b, + .value = &a.values[1], }), iter.next()); testing.expectEqual(@as(?Entry, Entry{ - .key = .d, .value = &a.values[3], + .key = .d, + .value = &a.values[3], }), iter.next()); testing.expectEqual(@as(?Entry, null), iter.next()); } diff --git a/lib/std/os.zig b/lib/std/os.zig index 9d9fd872a8..e4bd96de05 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -3267,6 +3267,7 @@ pub fn connect(sock: socket_t, sock_addr: *const sockaddr, len: socklen_t) Conne .WSAEADDRINUSE => return error.AddressInUse, .WSAEADDRNOTAVAIL => return error.AddressNotAvailable, .WSAECONNREFUSED => return error.ConnectionRefused, + .WSAECONNRESET => return error.ConnectionResetByPeer, .WSAETIMEDOUT => return error.ConnectionTimedOut, .WSAEHOSTUNREACH, // TODO: should we return NetworkUnreachable in this case as well? .WSAENETUNREACH, @@ -3296,6 +3297,7 @@ pub fn connect(sock: socket_t, sock_addr: *const sockaddr, len: socklen_t) Conne EALREADY => unreachable, // The socket is nonblocking and a previous connection attempt has not yet been completed. EBADF => unreachable, // sockfd is not a valid open file descriptor. ECONNREFUSED => return error.ConnectionRefused, + ECONNRESET => return error.ConnectionResetByPeer, EFAULT => unreachable, // The socket structure address is outside the user's address space. EINTR => continue, EISCONN => unreachable, // The socket is already connected. 
diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig index 4342beca00..6ffee12bfd 100644 --- a/lib/std/os/linux/io_uring.zig +++ b/lib/std/os/linux/io_uring.zig @@ -1353,7 +1353,9 @@ test "timeout (after a relative time)" { .res = -linux.ETIME, .flags = 0, }, cqe); - testing.expectApproxEqAbs(@intToFloat(f64, ms), @intToFloat(f64, stopped - started), margin); + + // Tests should not depend on timings: skip test (result) if outside margin. + if (!std.math.approxEqAbs(f64, ms, @intToFloat(f64, stopped - started), margin)) return error.SkipZigTest; } test "timeout (after a number of completions)" { diff --git a/lib/std/priority_dequeue.zig b/lib/std/priority_dequeue.zig new file mode 100644 index 0000000000..6894dcc997 --- /dev/null +++ b/lib/std/priority_dequeue.zig @@ -0,0 +1,972 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2015-2021 Zig Contributors +// This file is part of [zig](https://ziglang.org/), which is MIT licensed. +// The MIT license requires this copyright notice to be included in all copies +// and substantial portions of the software. +const std = @import("std.zig"); +const Allocator = std.mem.Allocator; +const assert = std.debug.assert; +const warn = std.debug.warn; +const Order = std.math.Order; +const testing = std.testing; +const expect = testing.expect; +const expectEqual = testing.expectEqual; +const expectError = testing.expectError; + +/// Priority Dequeue for storing generic data. Initialize with `init`. +pub fn PriorityDequeue(comptime T: type) type { + return struct { + const Self = @This(); + + items: []T, + len: usize, + allocator: *Allocator, + compareFn: fn (a: T, b: T) Order, + + /// Initialize and return a new priority dequeue. Provide `compareFn` + /// that returns `Order.lt` when its first argument should + /// get min-popped before its second argument, `Order.eq` if the + /// arguments are of equal priority, or `Order.gt` if the second + /// argument should be min-popped first. 
Popping the max element works + /// in reverse. For example, to make `popMin` return the smallest + /// number, provide + /// + /// `fn lessThan(a: T, b: T) Order { return std.math.order(a, b); }` + pub fn init(allocator: *Allocator, compareFn: fn (T, T) Order) Self { + return Self{ + .items = &[_]T{}, + .len = 0, + .allocator = allocator, + .compareFn = compareFn, + }; + } + + /// Free memory used by the dequeue. + pub fn deinit(self: Self) void { + self.allocator.free(self.items); + } + + /// Insert a new element, maintaining priority. + pub fn add(self: *Self, elem: T) !void { + try ensureCapacity(self, self.len + 1); + addUnchecked(self, elem); + } + + /// Add each element in `items` to the dequeue. + pub fn addSlice(self: *Self, items: []const T) !void { + try self.ensureCapacity(self.len + items.len); + for (items) |e| { + self.addUnchecked(e); + } + } + + fn addUnchecked(self: *Self, elem: T) void { + self.items[self.len] = elem; + + if (self.len > 0) { + const start = self.getStartForSiftUp(elem, self.len); + self.siftUp(start); + } + + self.len += 1; + } + + fn isMinLayer(index: usize) bool { + // In the min-max heap structure: + // The first element is on a min layer; + // next two are on a max layer; + // next four are on a min layer, and so on. 
+ const leading_zeros = @clz(usize, index + 1); + const highest_set_bit = @bitSizeOf(usize) - 1 - leading_zeros; + return (highest_set_bit & 1) == 0; + } + + fn nextIsMinLayer(self: Self) bool { + return isMinLayer(self.len); + } + + const StartIndexAndLayer = struct { + index: usize, + min_layer: bool, + }; + + fn getStartForSiftUp(self: Self, child: T, index: usize) StartIndexAndLayer { + var child_index = index; + var parent_index = parentIndex(child_index); + const parent = self.items[parent_index]; + + const min_layer = self.nextIsMinLayer(); + const order = self.compareFn(child, parent); + if ((min_layer and order == .gt) or (!min_layer and order == .lt)) { + // We must swap the item with its parent if it is on the "wrong" layer + self.items[parent_index] = child; + self.items[child_index] = parent; + return .{ + .index = parent_index, + .min_layer = !min_layer, + }; + } else { + return .{ + .index = child_index, + .min_layer = min_layer, + }; + } + } + + fn siftUp(self: *Self, start: StartIndexAndLayer) void { + if (start.min_layer) { + doSiftUp(self, start.index, .lt); + } else { + doSiftUp(self, start.index, .gt); + } + } + + fn doSiftUp(self: *Self, start_index: usize, target_order: Order) void { + var child_index = start_index; + while (child_index > 2) { + var grandparent_index = grandparentIndex(child_index); + const child = self.items[child_index]; + const grandparent = self.items[grandparent_index]; + + // If the grandparent is already better or equal, we have gone as far as we need to + if (self.compareFn(child, grandparent) != target_order) break; + + // Otherwise swap the item with its grandparent + self.items[grandparent_index] = child; + self.items[child_index] = grandparent; + child_index = grandparent_index; + } + } + + /// Look at the smallest element in the dequeue. Returns + /// `null` if empty. + pub fn peekMin(self: *Self) ?T { + return if (self.len > 0) self.items[0] else null; + } + + /// Look at the largest element in the dequeue. 
Returns + /// `null` if empty. + pub fn peekMax(self: *Self) ?T { + if (self.len == 0) return null; + if (self.len == 1) return self.items[0]; + if (self.len == 2) return self.items[1]; + return self.bestItemAtIndices(1, 2, .gt).item; + } + + fn maxIndex(self: Self) ?usize { + if (self.len == 0) return null; + if (self.len == 1) return 0; + if (self.len == 2) return 1; + return self.bestItemAtIndices(1, 2, .gt).index; + } + + /// Pop the smallest element from the dequeue. Returns + /// `null` if empty. + pub fn removeMinOrNull(self: *Self) ?T { + return if (self.len > 0) self.removeMin() else null; + } + + /// Remove and return the smallest element from the + /// dequeue. + pub fn removeMin(self: *Self) T { + return self.removeIndex(0); + } + + /// Pop the largest element from the dequeue. Returns + /// `null` if empty. + pub fn removeMaxOrNull(self: *Self) ?T { + return if (self.len > 0) self.removeMax() else null; + } + + /// Remove and return the largest element from the + /// dequeue. + pub fn removeMax(self: *Self) T { + return self.removeIndex(self.maxIndex().?); + } + + /// Remove and return element at index. Indices are in the + /// same order as iterator, which is not necessarily priority + /// order. 
+ pub fn removeIndex(self: *Self, index: usize) T { + assert(self.len > index); + const item = self.items[index]; + const last = self.items[self.len - 1]; + + self.items[index] = last; + self.len -= 1; + siftDown(self, index); + + return item; + } + + fn siftDown(self: *Self, index: usize) void { + if (isMinLayer(index)) { + self.doSiftDown(index, .lt); + } else { + self.doSiftDown(index, .gt); + } + } + + fn doSiftDown(self: *Self, start_index: usize, target_order: Order) void { + var index = start_index; + const half = self.len >> 1; + while (true) { + const first_grandchild_index = firstGrandchildIndex(index); + const last_grandchild_index = first_grandchild_index + 3; + + const elem = self.items[index]; + + if (last_grandchild_index < self.len) { + // All four grandchildren exist + const index2 = first_grandchild_index + 1; + const index3 = index2 + 1; + + // Find the best grandchild + const best_left = self.bestItemAtIndices(first_grandchild_index, index2, target_order); + const best_right = self.bestItemAtIndices(index3, last_grandchild_index, target_order); + const best_grandchild = self.bestItem(best_left, best_right, target_order); + + // If the item is better than or equal to its best grandchild, we are done + if (self.compareFn(best_grandchild.item, elem) != target_order) return; + + // Otherwise, swap them + self.items[best_grandchild.index] = elem; + self.items[index] = best_grandchild.item; + index = best_grandchild.index; + + // We might need to swap the element with its parent + self.swapIfParentIsBetter(elem, index, target_order); + } else { + // The children or grandchildren are the last layer + const first_child_index = firstChildIndex(index); + if (first_child_index > self.len) return; + + const best_descendent = self.bestDescendent(first_child_index, first_grandchild_index, target_order); + + // If the item is better than or equal to its best descendant, we are done + if (self.compareFn(best_descendent.item, elem) != target_order) return; + + 
// Otherwise swap them + self.items[best_descendent.index] = elem; + self.items[index] = best_descendent.item; + index = best_descendent.index; + + // If we didn't swap a grandchild, we are done + if (index < first_grandchild_index) return; + + // We might need to swap the element with its parent + self.swapIfParentIsBetter(elem, index, target_order); + return; + } + + // If we are now in the last layer, we are done + if (index >= half) return; + } + } + + fn swapIfParentIsBetter(self: *Self, child: T, child_index: usize, target_order: Order) void { + const parent_index = parentIndex(child_index); + const parent = self.items[parent_index]; + + if (self.compareFn(parent, child) == target_order) { + self.items[parent_index] = child; + self.items[child_index] = parent; + } + } + + const ItemAndIndex = struct { + item: T, + index: usize, + }; + + fn getItem(self: Self, index: usize) ItemAndIndex { + return .{ + .item = self.items[index], + .index = index, + }; + } + + fn bestItem(self: Self, item1: ItemAndIndex, item2: ItemAndIndex, target_order: Order) ItemAndIndex { + if (self.compareFn(item1.item, item2.item) == target_order) { + return item1; + } else { + return item2; + } + } + + fn bestItemAtIndices(self: Self, index1: usize, index2: usize, target_order: Order) ItemAndIndex { + var item1 = self.getItem(index1); + var item2 = self.getItem(index2); + return self.bestItem(item1, item2, target_order); + } + + fn bestDescendent(self: Self, first_child_index: usize, first_grandchild_index: usize, target_order: Order) ItemAndIndex { + const second_child_index = first_child_index + 1; + if (first_grandchild_index >= self.len) { + // No grandchildren, find the best child (second may not exist) + if (second_child_index >= self.len) { + return .{ + .item = self.items[first_child_index], + .index = first_child_index, + }; + } else { + return self.bestItemAtIndices(first_child_index, second_child_index, target_order); + } + } + + const second_grandchild_index = 
first_grandchild_index + 1; + if (second_grandchild_index >= self.len) { + // One grandchild, so we know there is a second child. Compare first grandchild and second child + return self.bestItemAtIndices(first_grandchild_index, second_child_index, target_order); + } + + const best_left_grandchild_index = self.bestItemAtIndices(first_grandchild_index, second_grandchild_index, target_order).index; + const third_grandchild_index = second_grandchild_index + 1; + if (third_grandchild_index >= self.len) { + // Two grandchildren, and we know the best. Compare this to second child. + return self.bestItemAtIndices(best_left_grandchild_index, second_child_index, target_order); + } else { + // Three grandchildren, compare the min of the first two with the third + return self.bestItemAtIndices(best_left_grandchild_index, third_grandchild_index, target_order); + } + } + + /// Return the number of elements remaining in the dequeue + pub fn count(self: Self) usize { + return self.len; + } + + /// Return the number of elements that can be added to the + /// dequeue before more memory is allocated. + pub fn capacity(self: Self) usize { + return self.items.len; + } + + /// Dequeue takes ownership of the passed in slice. The slice must have been + /// allocated with `allocator`. + /// De-initialize with `deinit`. 
+ pub fn fromOwnedSlice(allocator: *Allocator, compareFn: fn (T, T) Order, items: []T) Self { + var queue = Self{ + .items = items, + .len = items.len, + .allocator = allocator, + .compareFn = compareFn, + }; + + if (queue.len <= 1) return queue; + + const half = (queue.len >> 1) - 1; + var i: usize = 0; + while (i <= half) : (i += 1) { + const index = half - i; + queue.siftDown(index); + } + return queue; + } + + pub fn ensureCapacity(self: *Self, new_capacity: usize) !void { + var better_capacity = self.capacity(); + if (better_capacity >= new_capacity) return; + while (true) { + better_capacity += better_capacity / 2 + 8; + if (better_capacity >= new_capacity) break; + } + self.items = try self.allocator.realloc(self.items, better_capacity); + } + + /// Reduce allocated capacity to `new_len`. + pub fn shrinkAndFree(self: *Self, new_len: usize) void { + assert(new_len <= self.items.len); + + // Cannot shrink to smaller than the current queue size without invalidating the heap property + assert(new_len >= self.len); + + self.items = self.allocator.realloc(self.items[0..], new_len) catch |e| switch (e) { + error.OutOfMemory => { // no problem, capacity is still correct then. + self.items.len = new_len; + return; + }, + }; + self.len = new_len; + } + + /// Reduce length to `new_len`. 
+ pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void { + assert(new_len <= self.items.len); + + // Cannot shrink to smaller than the current queue size without invalidating the heap property + assert(new_len >= self.len); + + self.len = new_len; + } + + pub fn update(self: *Self, elem: T, new_elem: T) !void { + var old_index: usize = std.mem.indexOfScalar(T, self.items[0..self.len], elem) orelse return error.ElementNotFound; + _ = self.removeIndex(old_index); + self.addUnchecked(new_elem); + } + + pub const Iterator = struct { + queue: *PriorityDequeue(T), + count: usize, + + pub fn next(it: *Iterator) ?T { + if (it.count >= it.queue.len) return null; + const out = it.count; + it.count += 1; + return it.queue.items[out]; + } + + pub fn reset(it: *Iterator) void { + it.count = 0; + } + }; + + /// Return an iterator that walks the queue without consuming + /// it. Invalidated if the queue is modified. + pub fn iterator(self: *Self) Iterator { + return Iterator{ + .queue = self, + .count = 0, + }; + } + + fn dump(self: *Self) void { + warn("{{ ", .{}); + warn("items: ", .{}); + for (self.items) |e, i| { + if (i >= self.len) break; + warn("{}, ", .{e}); + } + warn("array: ", .{}); + for (self.items) |e, i| { + warn("{}, ", .{e}); + } + warn("len: {} ", .{self.len}); + warn("capacity: {}", .{self.capacity()}); + warn(" }}\n", .{}); + } + + fn parentIndex(index: usize) usize { + return (index - 1) >> 1; + } + + fn grandparentIndex(index: usize) usize { + return parentIndex(parentIndex(index)); + } + + fn firstChildIndex(index: usize) usize { + return (index << 1) + 1; + } + + fn firstGrandchildIndex(index: usize) usize { + return firstChildIndex(firstChildIndex(index)); + } + }; +} + +fn lessThanComparison(a: u32, b: u32) Order { + return std.math.order(a, b); +} + +const PDQ = PriorityDequeue(u32); + +test "std.PriorityDequeue: add and remove min" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + + try 
queue.add(54); + try queue.add(12); + try queue.add(7); + try queue.add(23); + try queue.add(25); + try queue.add(13); + + expectEqual(@as(u32, 7), queue.removeMin()); + expectEqual(@as(u32, 12), queue.removeMin()); + expectEqual(@as(u32, 13), queue.removeMin()); + expectEqual(@as(u32, 23), queue.removeMin()); + expectEqual(@as(u32, 25), queue.removeMin()); + expectEqual(@as(u32, 54), queue.removeMin()); +} + +test "std.PriorityDequeue: add and remove min structs" { + const S = struct { + size: u32, + }; + var queue = PriorityDequeue(S).init(testing.allocator, struct { + fn order(a: S, b: S) Order { + return std.math.order(a.size, b.size); + } + }.order); + defer queue.deinit(); + + try queue.add(.{ .size = 54 }); + try queue.add(.{ .size = 12 }); + try queue.add(.{ .size = 7 }); + try queue.add(.{ .size = 23 }); + try queue.add(.{ .size = 25 }); + try queue.add(.{ .size = 13 }); + + expectEqual(@as(u32, 7), queue.removeMin().size); + expectEqual(@as(u32, 12), queue.removeMin().size); + expectEqual(@as(u32, 13), queue.removeMin().size); + expectEqual(@as(u32, 23), queue.removeMin().size); + expectEqual(@as(u32, 25), queue.removeMin().size); + expectEqual(@as(u32, 54), queue.removeMin().size); +} + +test "std.PriorityDequeue: add and remove max" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + + try queue.add(54); + try queue.add(12); + try queue.add(7); + try queue.add(23); + try queue.add(25); + try queue.add(13); + + expectEqual(@as(u32, 54), queue.removeMax()); + expectEqual(@as(u32, 25), queue.removeMax()); + expectEqual(@as(u32, 23), queue.removeMax()); + expectEqual(@as(u32, 13), queue.removeMax()); + expectEqual(@as(u32, 12), queue.removeMax()); + expectEqual(@as(u32, 7), queue.removeMax()); +} + +test "std.PriorityDequeue: add and remove same min" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + + try queue.add(1); + try queue.add(1); + try queue.add(2); + try queue.add(2); 
+ try queue.add(1); + try queue.add(1); + + expectEqual(@as(u32, 1), queue.removeMin()); + expectEqual(@as(u32, 1), queue.removeMin()); + expectEqual(@as(u32, 1), queue.removeMin()); + expectEqual(@as(u32, 1), queue.removeMin()); + expectEqual(@as(u32, 2), queue.removeMin()); + expectEqual(@as(u32, 2), queue.removeMin()); +} + +test "std.PriorityDequeue: add and remove same max" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + + try queue.add(1); + try queue.add(1); + try queue.add(2); + try queue.add(2); + try queue.add(1); + try queue.add(1); + + expectEqual(@as(u32, 2), queue.removeMax()); + expectEqual(@as(u32, 2), queue.removeMax()); + expectEqual(@as(u32, 1), queue.removeMax()); + expectEqual(@as(u32, 1), queue.removeMax()); + expectEqual(@as(u32, 1), queue.removeMax()); + expectEqual(@as(u32, 1), queue.removeMax()); +} + +test "std.PriorityDequeue: removeOrNull empty" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + + expect(queue.removeMinOrNull() == null); + expect(queue.removeMaxOrNull() == null); +} + +test "std.PriorityDequeue: edge case 3 elements" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + + try queue.add(9); + try queue.add(3); + try queue.add(2); + + expectEqual(@as(u32, 2), queue.removeMin()); + expectEqual(@as(u32, 3), queue.removeMin()); + expectEqual(@as(u32, 9), queue.removeMin()); +} + +test "std.PriorityDequeue: edge case 3 elements max" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + + try queue.add(9); + try queue.add(3); + try queue.add(2); + + expectEqual(@as(u32, 9), queue.removeMax()); + expectEqual(@as(u32, 3), queue.removeMax()); + expectEqual(@as(u32, 2), queue.removeMax()); +} + +test "std.PriorityDequeue: peekMin" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + + expect(queue.peekMin() == null); + + try queue.add(9); 
+ try queue.add(3); + try queue.add(2); + + expect(queue.peekMin().? == 2); + expect(queue.peekMin().? == 2); +} + +test "std.PriorityDequeue: peekMax" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + + expect(queue.peekMin() == null); + + try queue.add(9); + try queue.add(3); + try queue.add(2); + + expect(queue.peekMax().? == 9); + expect(queue.peekMax().? == 9); +} + +test "std.PriorityDequeue: sift up with odd indices" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 }; + for (items) |e| { + try queue.add(e); + } + + const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 }; + for (sorted_items) |e| { + expectEqual(e, queue.removeMin()); + } +} + +test "std.PriorityDequeue: sift up with odd indices max" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 }; + for (items) |e| { + try queue.add(e); + } + + const sorted_items = [_]u32{ 25, 24, 24, 22, 21, 16, 15, 15, 14, 13, 12, 11, 7, 7, 6, 5, 2, 1 }; + for (sorted_items) |e| { + expectEqual(e, queue.removeMax()); + } +} + +test "std.PriorityDequeue: addSlice min" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 }; + try queue.addSlice(items[0..]); + + const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 }; + for (sorted_items) |e| { + expectEqual(e, queue.removeMin()); + } +} + +test "std.PriorityDequeue: addSlice max" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 }; + try 
queue.addSlice(items[0..]); + + const sorted_items = [_]u32{ 25, 24, 24, 22, 21, 16, 15, 15, 14, 13, 12, 11, 7, 7, 6, 5, 2, 1 }; + for (sorted_items) |e| { + expectEqual(e, queue.removeMax()); + } +} + +test "std.PriorityDequeue: fromOwnedSlice trivial case 0" { + const items = [0]u32{}; + const queue_items = try testing.allocator.dupe(u32, &items); + var queue = PDQ.fromOwnedSlice(testing.allocator, lessThanComparison, queue_items[0..]); + defer queue.deinit(); + expectEqual(@as(usize, 0), queue.len); + expect(queue.removeMinOrNull() == null); +} + +test "std.PriorityDequeue: fromOwnedSlice trivial case 1" { + const items = [1]u32{1}; + const queue_items = try testing.allocator.dupe(u32, &items); + var queue = PDQ.fromOwnedSlice(testing.allocator, lessThanComparison, queue_items[0..]); + defer queue.deinit(); + + expectEqual(@as(usize, 1), queue.len); + expectEqual(items[0], queue.removeMin()); + expect(queue.removeMinOrNull() == null); +} + +test "std.PriorityDequeue: fromOwnedSlice" { + const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 }; + const queue_items = try testing.allocator.dupe(u32, items[0..]); + var queue = PDQ.fromOwnedSlice(testing.allocator, lessThanComparison, queue_items[0..]); + defer queue.deinit(); + + const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 }; + for (sorted_items) |e| { + expectEqual(e, queue.removeMin()); + } +} + +test "std.PriorityDequeue: update min queue" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + + try queue.add(55); + try queue.add(44); + try queue.add(11); + try queue.update(55, 5); + try queue.update(44, 4); + try queue.update(11, 1); + expectEqual(@as(u32, 1), queue.removeMin()); + expectEqual(@as(u32, 4), queue.removeMin()); + expectEqual(@as(u32, 5), queue.removeMin()); +} + +test "std.PriorityDequeue: update same min queue" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + 
defer queue.deinit(); + + try queue.add(1); + try queue.add(1); + try queue.add(2); + try queue.add(2); + try queue.update(1, 5); + try queue.update(2, 4); + expectEqual(@as(u32, 1), queue.removeMin()); + expectEqual(@as(u32, 2), queue.removeMin()); + expectEqual(@as(u32, 4), queue.removeMin()); + expectEqual(@as(u32, 5), queue.removeMin()); +} + +test "std.PriorityDequeue: update max queue" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + + try queue.add(55); + try queue.add(44); + try queue.add(11); + try queue.update(55, 5); + try queue.update(44, 1); + try queue.update(11, 4); + + expectEqual(@as(u32, 5), queue.removeMax()); + expectEqual(@as(u32, 4), queue.removeMax()); + expectEqual(@as(u32, 1), queue.removeMax()); +} + +test "std.PriorityDequeue: update same max queue" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + + try queue.add(1); + try queue.add(1); + try queue.add(2); + try queue.add(2); + try queue.update(1, 5); + try queue.update(2, 4); + expectEqual(@as(u32, 5), queue.removeMax()); + expectEqual(@as(u32, 4), queue.removeMax()); + expectEqual(@as(u32, 2), queue.removeMax()); + expectEqual(@as(u32, 1), queue.removeMax()); +} + +test "std.PriorityDequeue: iterator" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + var map = std.AutoHashMap(u32, void).init(testing.allocator); + defer { + queue.deinit(); + map.deinit(); + } + + const items = [_]u32{ 54, 12, 7, 23, 25, 13 }; + for (items) |e| { + _ = try queue.add(e); + _ = try map.put(e, {}); + } + + var it = queue.iterator(); + while (it.next()) |e| { + _ = map.remove(e); + } + + expectEqual(@as(usize, 0), map.count()); +} + +test "std.PriorityDequeue: remove at index" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + + try queue.add(3); + try queue.add(2); + try queue.add(1); + + var it = queue.iterator(); + var elem = it.next(); + var idx: usize = 0; + const 
two_idx = while (elem != null) : (elem = it.next()) { + if (elem.? == 2) + break idx; + idx += 1; + } else unreachable; + + expectEqual(queue.removeIndex(two_idx), 2); + expectEqual(queue.removeMin(), 1); + expectEqual(queue.removeMin(), 3); + expectEqual(queue.removeMinOrNull(), null); +} + +test "std.PriorityDequeue: iterator while empty" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + + var it = queue.iterator(); + + expectEqual(it.next(), null); +} + +test "std.PriorityDequeue: shrinkRetainingCapacity and shrinkAndFree" { + var queue = PDQ.init(testing.allocator, lessThanComparison); + defer queue.deinit(); + + try queue.ensureCapacity(4); + expect(queue.capacity() >= 4); + + try queue.add(1); + try queue.add(2); + try queue.add(3); + expect(queue.capacity() >= 4); + expectEqual(@as(usize, 3), queue.len); + + queue.shrinkRetainingCapacity(3); + expect(queue.capacity() >= 4); + expectEqual(@as(usize, 3), queue.len); + + queue.shrinkAndFree(3); + expectEqual(@as(usize, 3), queue.capacity()); + expectEqual(@as(usize, 3), queue.len); + + expectEqual(@as(u32, 3), queue.removeMax()); + expectEqual(@as(u32, 2), queue.removeMax()); + expectEqual(@as(u32, 1), queue.removeMax()); + expect(queue.removeMaxOrNull() == null); +} + +test "std.PriorityDequeue: fuzz testing min" { + var prng = std.rand.DefaultPrng.init(0x12345678); + + const test_case_count = 100; + const queue_size = 1_000; + + var i: usize = 0; + while (i < test_case_count) : (i += 1) { + try fuzzTestMin(&prng.random, queue_size); + } +} + +fn fuzzTestMin(rng: *std.rand.Random, comptime queue_size: usize) !void { + const allocator = testing.allocator; + const items = try generateRandomSlice(allocator, rng, queue_size); + + var queue = PDQ.fromOwnedSlice(allocator, lessThanComparison, items); + defer queue.deinit(); + + var last_removed: ?u32 = null; + while (queue.removeMinOrNull()) |next| { + if (last_removed) |last| { + expect(last <= next); + } + last_removed = 
next; + } +} + +test "std.PriorityDequeue: fuzz testing max" { + var prng = std.rand.DefaultPrng.init(0x87654321); + + const test_case_count = 100; + const queue_size = 1_000; + + var i: usize = 0; + while (i < test_case_count) : (i += 1) { + try fuzzTestMax(&prng.random, queue_size); + } +} + +fn fuzzTestMax(rng: *std.rand.Random, queue_size: usize) !void { + const allocator = testing.allocator; + const items = try generateRandomSlice(allocator, rng, queue_size); + + var queue = PDQ.fromOwnedSlice(testing.allocator, lessThanComparison, items); + defer queue.deinit(); + + var last_removed: ?u32 = null; + while (queue.removeMaxOrNull()) |next| { + if (last_removed) |last| { + expect(last >= next); + } + last_removed = next; + } +} + +test "std.PriorityDequeue: fuzz testing min and max" { + var prng = std.rand.DefaultPrng.init(0x87654321); + + const test_case_count = 100; + const queue_size = 1_000; + + var i: usize = 0; + while (i < test_case_count) : (i += 1) { + try fuzzTestMinMax(&prng.random, queue_size); + } +} + +fn fuzzTestMinMax(rng: *std.rand.Random, queue_size: usize) !void { + const allocator = testing.allocator; + const items = try generateRandomSlice(allocator, rng, queue_size); + + var queue = PDQ.fromOwnedSlice(allocator, lessThanComparison, items); + defer queue.deinit(); + + var last_min: ?u32 = null; + var last_max: ?u32 = null; + var i: usize = 0; + while (i < queue_size) : (i += 1) { + if (i % 2 == 0) { + const next = queue.removeMin(); + if (last_min) |last| { + expect(last <= next); + } + last_min = next; + } else { + const next = queue.removeMax(); + if (last_max) |last| { + expect(last >= next); + } + last_max = next; + } + } +} + +fn generateRandomSlice(allocator: *std.mem.Allocator, rng: *std.rand.Random, size: usize) ![]u32 { + var array = std.ArrayList(u32).init(allocator); + try array.ensureCapacity(size); + + var i: usize = 0; + while (i < size) : (i += 1) { + const elem = rng.int(u32); + try array.append(elem); + } + + return 
array.toOwnedSlice(); +} diff --git a/lib/std/priority_queue.zig b/lib/std/priority_queue.zig index ff671c9ff7..2685a7105e 100644 --- a/lib/std/priority_queue.zig +++ b/lib/std/priority_queue.zig @@ -6,6 +6,8 @@ const std = @import("std.zig"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; +const warn = std.debug.warn; +const Order = std.math.Order; const testing = std.testing; const expect = testing.expect; const expectEqual = testing.expectEqual; @@ -19,15 +21,17 @@ pub fn PriorityQueue(comptime T: type) type { items: []T, len: usize, allocator: *Allocator, - compareFn: fn (a: T, b: T) bool, + compareFn: fn (a: T, b: T) Order, - /// Initialize and return a priority queue. Provide - /// `compareFn` that returns `true` when its first argument - /// should get popped before its second argument. For example, - /// to make `pop` return the minimum value, provide + /// Initialize and return a priority queue. Provide `compareFn` + /// that returns `Order.lt` when its first argument should + /// get popped before its second argument, `Order.eq` if the + /// arguments are of equal priority, or `Order.gt` if the second + /// argument should be popped first. 
For example, to make `pop` + /// return the smallest number, provide /// - /// `fn lessThan(a: T, b: T) bool { return a < b; }` - pub fn init(allocator: *Allocator, compareFn: fn (a: T, b: T) bool) Self { + /// `fn lessThan(a: T, b: T) Order { return std.math.order(a, b); }` + pub fn init(allocator: *Allocator, compareFn: fn (a: T, b: T) Order) Self { return Self{ .items = &[_]T{}, .len = 0, @@ -60,7 +64,7 @@ pub fn PriorityQueue(comptime T: type) type { const child = self.items[child_index]; const parent = self.items[parent_index]; - if (!self.compareFn(child, parent)) break; + if (self.compareFn(child, parent) != .lt) break; self.items[parent_index] = child; self.items[child_index] = parent; @@ -132,14 +136,14 @@ pub fn PriorityQueue(comptime T: type) type { var smallest = self.items[index]; if (left) |e| { - if (self.compareFn(e, smallest)) { + if (self.compareFn(e, smallest) == .lt) { smallest_index = left_index; smallest = e; } } if (right) |e| { - if (self.compareFn(e, smallest)) { + if (self.compareFn(e, smallest) == .lt) { smallest_index = right_index; smallest = e; } @@ -158,13 +162,16 @@ pub fn PriorityQueue(comptime T: type) type { /// PriorityQueue takes ownership of the passed in slice. The slice must have been /// allocated with `allocator`. /// Deinitialize with `deinit`. 
- pub fn fromOwnedSlice(allocator: *Allocator, compareFn: fn (a: T, b: T) bool, items: []T) Self { + pub fn fromOwnedSlice(allocator: *Allocator, compareFn: fn (a: T, b: T) Order, items: []T) Self { var queue = Self{ .items = items, .len = items.len, .allocator = allocator, .compareFn = compareFn, }; + + if (queue.len <= 1) return queue; + const half = (queue.len >> 1) - 1; var i: usize = 0; while (i <= half) : (i += 1) { @@ -183,25 +190,40 @@ pub fn PriorityQueue(comptime T: type) type { self.items = try self.allocator.realloc(self.items, better_capacity); } - pub fn resize(self: *Self, new_len: usize) !void { - try self.ensureCapacity(new_len); + /// Reduce allocated capacity to `new_len`. + pub fn shrinkAndFree(self: *Self, new_len: usize) void { + assert(new_len <= self.items.len); + + // Cannot shrink to smaller than the current queue size without invalidating the heap property + assert(new_len >= self.len); + + self.items = self.allocator.realloc(self.items[0..], new_len) catch |e| switch (e) { + error.OutOfMemory => { // no problem, capacity is still correct then. + self.items.len = new_len; + return; + }, + }; self.len = new_len; } - pub fn shrink(self: *Self, new_len: usize) void { - // TODO take advantage of the new realloc semantics - assert(new_len <= self.len); + /// Reduce length to `new_len`. 
+ pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void { + assert(new_len <= self.items.len); + + // Cannot shrink to smaller than the current queue size without invalidating the heap property + assert(new_len >= self.len); + self.len = new_len; } pub fn update(self: *Self, elem: T, new_elem: T) !void { - var update_index: usize = std.mem.indexOfScalar(T, self.items, elem) orelse return error.ElementNotFound; + var update_index: usize = std.mem.indexOfScalar(T, self.items[0..self.len], elem) orelse return error.ElementNotFound; const old_elem: T = self.items[update_index]; self.items[update_index] = new_elem; - if (self.compareFn(new_elem, old_elem)) { - siftUp(self, update_index); - } else { - siftDown(self, update_index); + switch (self.compareFn(new_elem, old_elem)) { + .lt => siftUp(self, update_index), + .gt => siftDown(self, update_index), + .eq => {}, // Nothing to do as the items have equal priority } } @@ -248,12 +270,12 @@ pub fn PriorityQueue(comptime T: type) type { }; } -fn lessThan(a: u32, b: u32) bool { - return a < b; +fn lessThan(a: u32, b: u32) Order { + return std.math.order(a, b); } -fn greaterThan(a: u32, b: u32) bool { - return a > b; +fn greaterThan(a: u32, b: u32) Order { + return lessThan(a, b).invert(); } const PQ = PriorityQueue(u32); @@ -351,6 +373,26 @@ test "std.PriorityQueue: addSlice" { } } +test "std.PriorityQueue: fromOwnedSlice trivial case 0" { + const items = [0]u32{}; + const queue_items = try testing.allocator.dupe(u32, &items); + var queue = PQ.fromOwnedSlice(testing.allocator, lessThan, queue_items[0..]); + defer queue.deinit(); + expectEqual(@as(usize, 0), queue.len); + expect(queue.removeOrNull() == null); +} + +test "std.PriorityQueue: fromOwnedSlice trivial case 1" { + const items = [1]u32{1}; + const queue_items = try testing.allocator.dupe(u32, &items); + var queue = PQ.fromOwnedSlice(testing.allocator, lessThan, queue_items[0..]); + defer queue.deinit(); + + expectEqual(@as(usize, 1), queue.len); + 
expectEqual(items[0], queue.remove()); + expect(queue.removeOrNull() == null); +} + test "std.PriorityQueue: fromOwnedSlice" { const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 }; const heap_items = try testing.allocator.dupe(u32, items[0..]); @@ -453,6 +495,33 @@ test "std.PriorityQueue: iterator while empty" { expectEqual(it.next(), null); } +test "std.PriorityQueue: shrinkRetainingCapacity and shrinkAndFree" { + var queue = PQ.init(testing.allocator, lessThan); + defer queue.deinit(); + + try queue.ensureCapacity(4); + expect(queue.capacity() >= 4); + + try queue.add(1); + try queue.add(2); + try queue.add(3); + expect(queue.capacity() >= 4); + expectEqual(@as(usize, 3), queue.len); + + queue.shrinkRetainingCapacity(3); + expect(queue.capacity() >= 4); + expectEqual(@as(usize, 3), queue.len); + + queue.shrinkAndFree(3); + expectEqual(@as(usize, 3), queue.capacity()); + expectEqual(@as(usize, 3), queue.len); + + expectEqual(@as(u32, 1), queue.remove()); + expectEqual(@as(u32, 2), queue.remove()); + expectEqual(@as(u32, 3), queue.remove()); + expect(queue.removeOrNull() == null); +} + test "std.PriorityQueue: update min heap" { var queue = PQ.init(testing.allocator, lessThan); defer queue.deinit(); diff --git a/lib/std/rand/Isaac64.zig b/lib/std/rand/Isaac64.zig index e1d4dedf5a..7efba9b5bc 100644 --- a/lib/std/rand/Isaac64.zig +++ b/lib/std/rand/Isaac64.zig @@ -208,3 +208,35 @@ test "isaac64 sequence" { std.testing.expect(s == r.next()); } } + +test "isaac64 fill" { + var r = Isaac64.init(0); + + // from reference implementation + const seq = [_]u64{ + 0xf67dfba498e4937c, + 0x84a5066a9204f380, + 0xfee34bd5f5514dbb, + 0x4d1664739b8f80d6, + 0x8607459ab52a14aa, + 0x0e78bc5a98529e49, + 0xfe5332822ad13777, + 0x556c27525e33d01a, + 0x08643ca615f3149f, + 0xd0771faf3cb04714, + 0x30e86f68a37b008d, + 0x3074ebc0488a3adf, + 0x270645ea7a2790bc, + 0x5601a0a8d3763c6a, + 0x2f83071f53f325dd, + 0xb9090f3d42d2d2ea, + }; + + for (seq) |s| { + var 
buf0: [8]u8 = undefined; + var buf1: [7]u8 = undefined; + std.mem.writeIntLittle(u64, &buf0, s); + Isaac64.fill(&r.random, &buf1); + std.testing.expect(std.mem.eql(u8, buf0[0..7], buf1[0..])); + } +} diff --git a/lib/std/rand/Pcg.zig b/lib/std/rand/Pcg.zig index 6be17b3bb8..87df0521f2 100644 --- a/lib/std/rand/Pcg.zig +++ b/lib/std/rand/Pcg.zig @@ -75,7 +75,7 @@ fn fill(r: *Random, buf: []u8) void { var n = self.next(); while (i < buf.len) : (i += 1) { buf[i] = @truncate(u8, n); - n >>= 4; + n >>= 8; } } } @@ -99,3 +99,27 @@ test "pcg sequence" { std.testing.expect(s == r.next()); } } + +test "pcg fill" { + var r = Pcg.init(0); + const s0: u64 = 0x9394bf54ce5d79de; + const s1: u64 = 0x84e9c579ef59bbf7; + r.seedTwo(s0, s1); + + const seq = [_]u32{ + 2881561918, + 3063928540, + 1199791034, + 2487695858, + 1479648952, + 3247963454, + }; + + for (seq) |s| { + var buf0: [4]u8 = undefined; + var buf1: [3]u8 = undefined; + std.mem.writeIntLittle(u32, &buf0, s); + Pcg.fill(&r.random, &buf1); + std.testing.expect(std.mem.eql(u8, buf0[0..3], buf1[0..])); + } +} diff --git a/lib/std/rand/Sfc64.zig b/lib/std/rand/Sfc64.zig index 3b5f1eda82..67eb684f60 100644 --- a/lib/std/rand/Sfc64.zig +++ b/lib/std/rand/Sfc64.zig @@ -106,3 +106,35 @@ test "Sfc64 sequence" { std.testing.expectEqual(s, r.next()); } } + +test "Sfc64 fill" { + // Unfortunately there does not seem to be an official test sequence. 
+ var r = Sfc64.init(0); + + const seq = [_]u64{ + 0x3acfa029e3cc6041, + 0xf5b6515bf2ee419c, + 0x1259635894a29b61, + 0xb6ae75395f8ebd6, + 0x225622285ce302e2, + 0x520d28611395cb21, + 0xdb909c818901599d, + 0x8ffd195365216f57, + 0xe8c4ad5e258ac04a, + 0x8f8ef2c89fdb63ca, + 0xf9865b01d98d8e2f, + 0x46555871a65d08ba, + 0x66868677c6298fcd, + 0x2ce15a7e6329f57d, + 0xb2f1833ca91ca79, + 0x4b0890ac9bf453ca, + }; + + for (seq) |s| { + var buf0: [8]u8 = undefined; + var buf1: [7]u8 = undefined; + std.mem.writeIntLittle(u64, &buf0, s); + Sfc64.fill(&r.random, &buf1); + std.testing.expect(std.mem.eql(u8, buf0[0..7], buf1[0..])); + } +} diff --git a/lib/std/rand/Xoroshiro128.zig b/lib/std/rand/Xoroshiro128.zig index 816bb9f58c..04980cea41 100644 --- a/lib/std/rand/Xoroshiro128.zig +++ b/lib/std/rand/Xoroshiro128.zig @@ -131,3 +131,26 @@ test "xoroshiro sequence" { std.testing.expect(s == r.next()); } } + +test "xoroshiro fill" { + var r = Xoroshiro128.init(0); + r.s[0] = 0xaeecf86f7878dd75; + r.s[1] = 0x01cd153642e72622; + + const seq = [_]u64{ + 0xb0ba0da5bb600397, + 0x18a08afde614dccc, + 0xa2635b956a31b929, + 0xabe633c971efa045, + 0x9ac19f9706ca3cac, + 0xf62b426578c1e3fb, + }; + + for (seq) |s| { + var buf0: [8]u8 = undefined; + var buf1: [7]u8 = undefined; + std.mem.writeIntLittle(u64, &buf0, s); + Xoroshiro128.fill(&r.random, &buf1); + std.testing.expect(std.mem.eql(u8, buf0[0..7], buf1[0..])); + } +} diff --git a/lib/std/special/docs/index.html b/lib/std/special/docs/index.html index 66944e04de..98f1938cc6 100644 --- a/lib/std/special/docs/index.html +++ b/lib/std/special/docs/index.html @@ -515,6 +515,7 @@ +
These docs are experimental. Progress depends on the self-hosted compiler; consider reading the stdlib source in the meantime.