From 138afd5cbfbe17829082efa3084f63de88aa1c90 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 10 Jun 2021 20:13:43 -0700
Subject: [PATCH] zig fmt

---
 lib/std/array_hash_map.zig                 | 110 ++++++++++-----------
 lib/std/hash_map.zig                       | 104 +++++++++----------
 lib/std/heap/general_purpose_allocator.zig |  16 +--
 lib/std/heap/logging_allocator.zig         |   2 +-
 lib/std/math.zig                           |   4 +-
 lib/std/multi_array_list.zig               |  17 ++--
 lib/std/os.zig                             |   2 +-
 lib/std/zig/parse.zig                      |   8 +-
 src/codegen/spirv.zig                      |  15 ++-
 src/link/MachO.zig                         |   2 +-
 src/link/SpirV.zig                         |   5 +-
 test/behavior.zig                          |   1 -
 test/behavior/bugs/4769_c.zig              |   1 -
 test/behavior/fn.zig                       |   2 +-
 test/behavior/type.zig                     |   2 +-
 test/standalone/c_compiler/build.zig       |   2 +-
 tools/gen_spirv_spec.zig                   |  21 ++--
 tools/update_clang_options.zig             |   2 +-
 tools/update_spirv_features.zig            |  55 +++++------
 19 files changed, 174 insertions(+), 197 deletions(-)
 delete mode 100644 test/behavior/bugs/4769_c.zig

diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig
index 7acc65c66b..1ddbfce20c 100644
--- a/lib/std/array_hash_map.zig
+++ b/lib/std/array_hash_map.zig
@@ -124,7 +124,7 @@ pub fn ArrayHashMap(
         /// Create an ArrayHashMap instance which will use a specified allocator.
         pub fn init(allocator: *Allocator) Self {
             if (@sizeOf(Context) != 0)
-                @compileError("Cannot infer context "++@typeName(Context)++", call initContext instead.");
+                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call initContext instead.");
             return initContext(allocator, undefined);
         }
         pub fn initContext(allocator: *Allocator, ctx: Context) Self {
@@ -518,7 +518,7 @@ pub fn ArrayHashMapUnmanaged(
         /// the promoted map should no longer be used.
         pub fn promote(self: Self, allocator: *Allocator) Managed {
             if (@sizeOf(Context) != 0)
-                @compileError("Cannot infer context "++@typeName(Context)++", call promoteContext instead.");
+                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead.");
             return self.promoteContext(allocator, undefined);
         }
         pub fn promoteContext(self: Self, allocator: *Allocator, ctx: Context) Managed {
@@ -618,7 +618,7 @@ pub fn ArrayHashMapUnmanaged(
         /// the value (but not the key).
         pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult {
             if (@sizeOf(Context) != 0)
-                @compileError("Cannot infer context "++@typeName(Context)++", call getOrPutContext instead.");
+                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead.");
             return self.getOrPutContext(allocator, key, undefined);
         }
         pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult {
@@ -630,7 +630,7 @@ pub fn ArrayHashMapUnmanaged(
         }
         pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
             if (@sizeOf(Context) != 0)
-                @compileError("Cannot infer context "++@typeName(Context)++", call getOrPutContextAdapted instead.");
+                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead.");
             return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined);
         }
         pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
@@ -658,7 +658,7 @@ pub fn ArrayHashMapUnmanaged(
         /// is enough capacity to store it.
pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call getOrPutAssumeCapacityContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutAssumeCapacityContext instead."); return self.getOrPutAssumeCapacityContext(key, undefined); } pub fn getOrPutAssumeCapacityContext(self: *Self, key: K, ctx: Context) GetOrPutResult { @@ -716,7 +716,7 @@ pub fn ArrayHashMapUnmanaged( pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !GetOrPutResult { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call getOrPutValueContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead."); return self.getOrPutValueContext(allocator, key, value, undefined); } pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !GetOrPutResult { @@ -735,7 +735,7 @@ pub fn ArrayHashMapUnmanaged( /// `expected_count` will not cause an allocation, and therefore cannot fail. pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void { if (@sizeOf(ByIndexContext) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call ensureTotalCapacityContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead."); return self.ensureTotalCapacityContext(allocator, new_capacity, undefined); } pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_capacity: usize, ctx: Context) !void { @@ -769,7 +769,7 @@ pub fn ArrayHashMapUnmanaged( additional_capacity: usize, ) !void { if (@sizeOf(ByIndexContext) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call ensureTotalCapacityContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead."); return self.ensureUnusedCapacityContext(allocator, additional_capacity, undefined); } pub fn ensureUnusedCapacityContext( @@ -794,7 +794,7 @@ pub fn ArrayHashMapUnmanaged( /// existing data, see `getOrPut`. pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call putContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead."); return self.putContext(allocator, key, value, undefined); } pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void { @@ -806,7 +806,7 @@ pub fn ArrayHashMapUnmanaged( /// entry with the same key is already present pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call putNoClobberContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead."); return self.putNoClobberContext(allocator, key, value, undefined); } pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void { @@ -820,7 +820,7 @@ pub fn ArrayHashMapUnmanaged( /// existing data, see `getOrPutAssumeCapacity`. 
pub fn putAssumeCapacity(self: *Self, key: K, value: V) void { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call putAssumeCapacityContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putAssumeCapacityContext instead."); return self.putAssumeCapacityContext(key, value, undefined); } pub fn putAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) void { @@ -833,7 +833,7 @@ pub fn ArrayHashMapUnmanaged( /// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`. pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call putAssumeCapacityNoClobberContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putAssumeCapacityNoClobberContext instead."); return self.putAssumeCapacityNoClobberContext(key, value, undefined); } pub fn putAssumeCapacityNoClobberContext(self: *Self, key: K, value: V, ctx: Context) void { @@ -845,7 +845,7 @@ pub fn ArrayHashMapUnmanaged( /// Inserts a new `Entry` into the hash map, returning the previous one, if any. pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call fetchPutContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead."); return self.fetchPutContext(allocator, key, value, undefined); } pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV { @@ -865,7 +865,7 @@ pub fn ArrayHashMapUnmanaged( /// If insertion happens, asserts there is enough capacity without allocating. pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call fetchPutAssumeCapacityContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutAssumeCapacityContext instead."); return self.fetchPutAssumeCapacityContext(key, value, undefined); } pub fn fetchPutAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) ?KV { @@ -884,7 +884,7 @@ pub fn ArrayHashMapUnmanaged( /// Finds pointers to the key and value storage associated with a key. 
pub fn getEntry(self: Self, key: K) ?Entry { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call getEntryContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getEntryContext instead."); return self.getEntryContext(key, undefined); } pub fn getEntryContext(self: Self, key: K, ctx: Context) ?Entry { @@ -903,7 +903,7 @@ pub fn ArrayHashMapUnmanaged( /// Finds the index in the `entries` array where a key is stored pub fn getIndex(self: Self, key: K) ?usize { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call getIndexContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getIndexContext instead."); return self.getIndexContext(key, undefined); } pub fn getIndexContext(self: Self, key: K, ctx: Context) ?usize { @@ -938,7 +938,7 @@ pub fn ArrayHashMapUnmanaged( /// Find the value associated with a key pub fn get(self: Self, key: K) ?V { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call getContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getContext instead."); return self.getContext(key, undefined); } pub fn getContext(self: Self, key: K, ctx: Context) ?V { @@ -952,7 +952,7 @@ pub fn ArrayHashMapUnmanaged( /// Find a pointer to the value associated with a key pub fn getPtr(self: Self, key: K) ?*V { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call getPtrContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getPtrContext instead."); return self.getPtrContext(key, undefined); } pub fn getPtrContext(self: Self, key: K, ctx: Context) ?*V { @@ -967,7 +967,7 @@ pub fn ArrayHashMapUnmanaged( /// Check whether a key is stored in the map pub fn contains(self: Self, key: K) bool { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call containsContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call containsContext instead."); return self.containsContext(key, undefined); } pub fn containsContext(self: Self, key: K, ctx: Context) bool { @@ -983,7 +983,7 @@ pub fn ArrayHashMapUnmanaged( /// element. pub fn fetchSwapRemove(self: *Self, key: K) ?KV { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call fetchSwapRemoveContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchSwapRemoveContext instead."); return self.fetchSwapRemoveContext(key, undefined); } pub fn fetchSwapRemoveContext(self: *Self, key: K, ctx: Context) ?KV { @@ -991,7 +991,7 @@ pub fn ArrayHashMapUnmanaged( } pub fn fetchSwapRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV { if (@sizeOf(ByIndexContext) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call fetchSwapRemoveContextAdapted instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchSwapRemoveContextAdapted instead."); return self.fetchSwapRemoveContextAdapted(key, ctx, undefined); } pub fn fetchSwapRemoveContextAdapted(self: *Self, key: anytype, key_ctx: anytype, ctx: Context) ?KV { @@ -1004,7 +1004,7 @@ pub fn ArrayHashMapUnmanaged( /// thereby maintaining the current ordering. 
pub fn fetchOrderedRemove(self: *Self, key: K) ?KV { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call fetchOrderedRemoveContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchOrderedRemoveContext instead."); return self.fetchOrderedRemoveContext(key, undefined); } pub fn fetchOrderedRemoveContext(self: *Self, key: K, ctx: Context) ?KV { @@ -1012,7 +1012,7 @@ pub fn ArrayHashMapUnmanaged( } pub fn fetchOrderedRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV { if (@sizeOf(ByIndexContext) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call fetchOrderedRemoveContextAdapted instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchOrderedRemoveContextAdapted instead."); return self.fetchOrderedRemoveContextAdapted(key, ctx, undefined); } pub fn fetchOrderedRemoveContextAdapted(self: *Self, key: anytype, key_ctx: anytype, ctx: Context) ?KV { @@ -1025,7 +1025,7 @@ pub fn ArrayHashMapUnmanaged( /// was removed, false otherwise. pub fn swapRemove(self: *Self, key: K) bool { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call swapRemoveContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call swapRemoveContext instead."); return self.swapRemoveContext(key, undefined); } pub fn swapRemoveContext(self: *Self, key: K, ctx: Context) bool { @@ -1033,7 +1033,7 @@ pub fn ArrayHashMapUnmanaged( } pub fn swapRemoveAdapted(self: *Self, key: anytype, ctx: anytype) bool { if (@sizeOf(ByIndexContext) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call swapRemoveContextAdapted instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call swapRemoveContextAdapted instead."); return self.swapRemoveContextAdapted(key, ctx, undefined); } pub fn swapRemoveContextAdapted(self: *Self, key: anytype, key_ctx: anytype, ctx: Context) bool { @@ -1046,7 +1046,7 @@ pub fn ArrayHashMapUnmanaged( /// current ordering. Returns true if an entry was removed, false otherwise. pub fn orderedRemove(self: *Self, key: K) bool { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call orderedRemoveContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call orderedRemoveContext instead."); return self.orderedRemoveContext(key, undefined); } pub fn orderedRemoveContext(self: *Self, key: K, ctx: Context) bool { @@ -1054,7 +1054,7 @@ pub fn ArrayHashMapUnmanaged( } pub fn orderedRemoveAdapted(self: *Self, key: anytype, ctx: anytype) bool { if (@sizeOf(ByIndexContext) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call orderedRemoveContextAdapted instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call orderedRemoveContextAdapted instead."); return self.orderedRemoveContextAdapted(key, ctx, undefined); } pub fn orderedRemoveContextAdapted(self: *Self, key: anytype, key_ctx: anytype, ctx: Context) bool { @@ -1066,7 +1066,7 @@ pub fn ArrayHashMapUnmanaged( /// by swapping it with the last element. 
pub fn swapRemoveAt(self: *Self, index: usize) void { if (@sizeOf(ByIndexContext) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call swapRemoveAtContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call swapRemoveAtContext instead."); return self.swapRemoveAtContext(index, undefined); } pub fn swapRemoveAtContext(self: *Self, index: usize, ctx: Context) void { @@ -1079,7 +1079,7 @@ pub fn ArrayHashMapUnmanaged( /// current ordering. pub fn orderedRemoveAt(self: *Self, index: usize) void { if (@sizeOf(ByIndexContext) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call orderedRemoveAtContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call orderedRemoveAtContext instead."); return self.orderedRemoveAtContext(index, undefined); } pub fn orderedRemoveAtContext(self: *Self, index: usize, ctx: Context) void { @@ -1090,7 +1090,7 @@ pub fn ArrayHashMapUnmanaged( /// The copy uses the same context and allocator as this instance. pub fn clone(self: Self, allocator: *Allocator) !Self { if (@sizeOf(ByIndexContext) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call cloneContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead."); return self.cloneContext(allocator, undefined); } pub fn cloneContext(self: Self, allocator: *Allocator, ctx: Context) !Self { @@ -1110,7 +1110,7 @@ pub fn ArrayHashMapUnmanaged( /// can call `reIndex` to update the indexes to account for these new entries. pub fn reIndex(self: *Self, allocator: *Allocator) !void { if (@sizeOf(ByIndexContext) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call reIndexContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call reIndexContext instead."); return self.reIndexContext(allocator, undefined); } pub fn reIndexContext(self: *Self, allocator: *Allocator, ctx: Context) !void { @@ -1128,7 +1128,7 @@ pub fn ArrayHashMapUnmanaged( /// index entries. Keeps capacity the same. pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void { if (@sizeOf(ByIndexContext) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call shrinkRetainingCapacityContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call shrinkRetainingCapacityContext instead."); return self.shrinkRetainingCapacityContext(new_len, undefined); } pub fn shrinkRetainingCapacityContext(self: *Self, new_len: usize, ctx: Context) void { @@ -1147,7 +1147,7 @@ pub fn ArrayHashMapUnmanaged( /// index entries. Reduces allocated capacity. pub fn shrinkAndFree(self: *Self, allocator: *Allocator, new_len: usize) void { if (@sizeOf(ByIndexContext) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call shrinkAndFreeContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call shrinkAndFreeContext instead."); return self.shrinkAndFreeContext(allocator, new_len, undefined); } pub fn shrinkAndFreeContext(self: *Self, allocator: *Allocator, new_len: usize, ctx: Context) void { @@ -1165,13 +1165,13 @@ pub fn ArrayHashMapUnmanaged( /// Removes the last inserted `Entry` in the hash map and returns it. 
pub fn pop(self: *Self) KV { if (@sizeOf(ByIndexContext) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call popContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call popContext instead."); return self.popContext(undefined); } pub fn popContext(self: *Self, ctx: Context) KV { - const item = self.entries.get(self.entries.len-1); + const item = self.entries.get(self.entries.len - 1); if (self.index_header) |header| - self.removeFromIndexByIndex(self.entries.len-1, if (store_hash) {} else ctx, header); + self.removeFromIndexByIndex(self.entries.len - 1, if (store_hash) {} else ctx, header); self.entries.len -= 1; return .{ .key = item.key, @@ -1276,7 +1276,7 @@ pub fn ArrayHashMapUnmanaged( } fn removeFromArrayAndUpdateIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, indexes: []Index(I), comptime removal_type: RemovalType) void { - const last_index = self.entries.len-1; // overflow => remove from empty map + const last_index = self.entries.len - 1; // overflow => remove from empty map switch (removal_type) { .swap => { if (last_index != entry_index) { @@ -1358,8 +1358,7 @@ pub fn ArrayHashMapUnmanaged( fn getSlotByIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, indexes: []Index(I)) usize { const slice = self.entries.slice(); - const h = if (store_hash) slice.items(.hash)[entry_index] - else checkedHash(ctx, slice.items(.key)[entry_index]); + const h = if (store_hash) slice.items(.hash)[entry_index] else checkedHash(ctx, slice.items(.key)[entry_index]); const start_index = safeTruncate(usize, h); const end_index = start_index +% indexes.len; @@ -1569,30 +1568,30 @@ pub fn ArrayHashMapUnmanaged( } } - fn checkedHash(ctx: anytype, key: anytype) callconv(.Inline) u32 { + inline fn checkedHash(ctx: anytype, key: anytype) u32 { comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(key), K, u32); // If you get a compile error on the next line, it means that const hash = ctx.hash(key); // your generic hash function doesn't accept your key if (@TypeOf(hash) != u32) { - @compileError("Context "++@typeName(@TypeOf(ctx))++" has a generic hash function that returns the wrong type!\n"++ - @typeName(u32)++" was expected, but found "++@typeName(@TypeOf(hash))); + @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic hash function that returns the wrong type!\n" ++ + @typeName(u32) ++ " was expected, but found " ++ @typeName(@TypeOf(hash))); } return hash; } - fn checkedEql(ctx: anytype, a: anytype, b: K) callconv(.Inline) bool { + inline fn checkedEql(ctx: anytype, a: anytype, b: K) bool { comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(a), K, u32); // If you get a compile error on the next line, it means that const eql = ctx.eql(a, b); // your generic eql function doesn't accept (self, adapt key, K) if (@TypeOf(eql) != bool) { - @compileError("Context "++@typeName(@TypeOf(ctx))++" has a generic eql function that returns the wrong type!\n"++ - @typeName(bool)++" was expected, but found "++@typeName(@TypeOf(eql))); + @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic eql function that returns the wrong type!\n" ++ + @typeName(bool) ++ " was expected, but found " ++ @typeName(@TypeOf(eql))); } return eql; } fn dumpState(self: Self, comptime keyFmt: []const u8, comptime valueFmt: []const u8) void { if (@sizeOf(ByIndexContext) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call 
dumpStateContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call dumpStateContext instead."); self.dumpStateContext(keyFmt, valueFmt, undefined); } fn dumpStateContext(self: Self, comptime keyFmt: []const u8, comptime valueFmt: []const u8, ctx: Context) void { @@ -1600,21 +1599,20 @@ pub fn ArrayHashMapUnmanaged( p("{s}:\n", .{@typeName(Self)}); const slice = self.entries.slice(); const hash_status = if (store_hash) "stored" else "computed"; - p(" len={} capacity={} hashes {s}\n", .{slice.len, slice.capacity, hash_status}); + p(" len={} capacity={} hashes {s}\n", .{ slice.len, slice.capacity, hash_status }); var i: usize = 0; const mask: u32 = if (self.index_header) |header| header.mask() else ~@as(u32, 0); while (i < slice.len) : (i += 1) { - const hash = if (store_hash) slice.items(.hash)[i] - else checkedHash(ctx, slice.items(.key)[i]); + const hash = if (store_hash) slice.items(.hash)[i] else checkedHash(ctx, slice.items(.key)[i]); if (store_hash) { p( - " [{}]: key="++keyFmt++" value="++valueFmt++" hash=0x{x} slot=[0x{x}]\n", - .{i, slice.items(.key)[i], slice.items(.value)[i], hash, hash & mask}, + " [{}]: key=" ++ keyFmt ++ " value=" ++ valueFmt ++ " hash=0x{x} slot=[0x{x}]\n", + .{ i, slice.items(.key)[i], slice.items(.value)[i], hash, hash & mask }, ); } else { p( - " [{}]: key="++keyFmt++" value="++valueFmt++" slot=[0x{x}]\n", - .{i, slice.items(.key)[i], slice.items(.value)[i], hash & mask}, + " [{}]: key=" ++ keyFmt ++ " value=" ++ valueFmt ++ " slot=[0x{x}]\n", + .{ i, slice.items(.key)[i], slice.items(.value)[i], hash & mask }, ); } } @@ -1629,7 +1627,7 @@ pub fn ArrayHashMapUnmanaged( } fn dumpIndex(self: Self, header: *IndexHeader, comptime I: type) void { const p = std.debug.print; - p(" index len=0x{x} type={}\n", .{header.length(), header.capacityIndexType()}); + p(" index len=0x{x} type={}\n", .{ header.length(), header.capacityIndexType() }); const indexes = header.indexes(I); if (indexes.len == 0) return; var is_empty = false; @@ -1641,7 +1639,7 @@ pub fn ArrayHashMapUnmanaged( is_empty = false; p(" ...\n", .{}); } - p(" [0x{x}]: [{}] +{}\n", .{i, idx.entry_index, idx.distance_from_start_index}); + p(" [0x{x}]: [{}] +{}\n", .{ i, idx.entry_index, idx.distance_from_start_index }); } } if (is_empty) { @@ -1730,7 +1728,7 @@ const max_capacity = (1 << max_bit_index) - 1; const index_capacities = blk: { var caps: [max_bit_index + 1]u32 = undefined; for (caps[0..max_bit_index]) |*item, i| { - item.* = (1< {}, // Special-case .Opaque for a better error message - .Opaque => @compileError("Hash context must be a type with hash and eql member functions. Cannot use "++@typeName(Context)++" because it is opaque. Use a pointer instead."), + .Opaque => @compileError("Hash context must be a type with hash and eql member functions. Cannot use " ++ @typeName(Context) ++ " because it is opaque. Use a pointer instead."), .Pointer => |ptr| { if (ptr.size != .One) { - @compileError("Hash context must be a type with hash and eql member functions. Cannot use "++@typeName(Context)++" because it is not a single pointer."); + @compileError("Hash context must be a type with hash and eql member functions. Cannot use " ++ @typeName(Context) ++ " because it is not a single pointer."); } Context = ptr.child; allow_const_ptr = true; allow_mutable_ptr = !ptr.is_const; switch (@typeInfo(Context)) { .Struct, .Union, .Enum, .Opaque => {}, - else => @compileError("Hash context must be a type with hash and eql member functions. 
Cannot use "++@typeName(Context)), + else => @compileError("Hash context must be a type with hash and eql member functions. Cannot use " ++ @typeName(Context)), } }, - else => @compileError("Hash context must be a type with hash and eql member functions. Cannot use "++@typeName(Context)), + else => @compileError("Hash context must be a type with hash and eql member functions. Cannot use " ++ @typeName(Context)), } // Keep track of multiple errors so we can report them all. @@ -140,12 +140,12 @@ pub fn verifyContext(comptime RawContext: type, comptime PseudoKey: type, compti const lazy = struct { const prefix = "\n "; const deep_prefix = prefix ++ " "; - const hash_signature = "fn (self, "++@typeName(PseudoKey)++") "++@typeName(Hash); - const eql_signature = "fn (self, "++@typeName(PseudoKey)++", "++@typeName(Key)++") bool"; + const hash_signature = "fn (self, " ++ @typeName(PseudoKey) ++ ") " ++ @typeName(Hash); + const eql_signature = "fn (self, " ++ @typeName(PseudoKey) ++ ", " ++ @typeName(Key) ++ ") bool"; const err_invalid_hash_signature = prefix ++ @typeName(Context) ++ ".hash must be " ++ hash_signature ++ - deep_prefix ++ "but is actually " ++ @typeName(@TypeOf(Context.hash)); + deep_prefix ++ "but is actually " ++ @typeName(@TypeOf(Context.hash)); const err_invalid_eql_signature = prefix ++ @typeName(Context) ++ ".eql must be " ++ eql_signature ++ - deep_prefix ++ "but is actually " ++ @typeName(@TypeOf(Context.eql)); + deep_prefix ++ "but is actually " ++ @typeName(@TypeOf(Context.eql)); }; // Verify Context.hash(self, PseudoKey) => Hash @@ -167,7 +167,7 @@ pub fn verifyContext(comptime RawContext: type, comptime PseudoKey: type, compti errors = errors ++ lazy.err_invalid_hash_signature; emitted_signature = true; } - errors = errors ++ lazy.deep_prefix ++ "First parameter must be "++@typeName(Context)++", but is "++@typeName(Self); + errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ ", but is " ++ @typeName(Self); errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be a pointer because it is passed by value."; } } else if (Self == *Context) { @@ -177,10 +177,10 @@ pub fn verifyContext(comptime RawContext: type, comptime PseudoKey: type, compti emitted_signature = true; } if (!allow_const_ptr) { - errors = errors ++ lazy.deep_prefix ++ "First parameter must be "++@typeName(Context)++", but is "++@typeName(Self); + errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ ", but is " ++ @typeName(Self); errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be a pointer because it is passed by value."; } else { - errors = errors ++ lazy.deep_prefix ++ "First parameter must be "++@typeName(Context)++" or "++@typeName(*const Context)++", but is "++@typeName(Self); + errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ " or " ++ @typeName(*const Context) ++ ", but is " ++ @typeName(Self); errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be non-const because it is passed by const pointer."; } } @@ -189,14 +189,14 @@ pub fn verifyContext(comptime RawContext: type, comptime PseudoKey: type, compti errors = errors ++ lazy.err_invalid_hash_signature; emitted_signature = true; } - errors = errors ++ lazy.deep_prefix ++ "First parameter must be "++@typeName(Context); + errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context); if (allow_const_ptr) { - errors = errors++" or "++@typeName(*const Context); + errors = errors ++ " or " ++ @typeName(*const 
Context); if (allow_mutable_ptr) { - errors = errors++" or "++@typeName(*Context); + errors = errors ++ " or " ++ @typeName(*Context); } } - errors = errors++", but is "++@typeName(Self); + errors = errors ++ ", but is " ++ @typeName(Self); } } if (func.args[1].arg_type != null and func.args[1].arg_type.? != PseudoKey) { @@ -204,14 +204,14 @@ pub fn verifyContext(comptime RawContext: type, comptime PseudoKey: type, compti errors = errors ++ lazy.err_invalid_hash_signature; emitted_signature = true; } - errors = errors ++ lazy.deep_prefix ++ "Second parameter must be "++@typeName(PseudoKey)++", but is "++@typeName(func.args[1].arg_type.?); + errors = errors ++ lazy.deep_prefix ++ "Second parameter must be " ++ @typeName(PseudoKey) ++ ", but is " ++ @typeName(func.args[1].arg_type.?); } if (func.return_type != null and func.return_type.? != Hash) { if (!emitted_signature) { errors = errors ++ lazy.err_invalid_hash_signature; emitted_signature = true; } - errors = errors ++ lazy.deep_prefix ++ "Return type must be "++@typeName(Hash)++", but was "++@typeName(func.return_type.?); + errors = errors ++ lazy.deep_prefix ++ "Return type must be " ++ @typeName(Hash) ++ ", but was " ++ @typeName(func.return_type.?); } // If any of these are generic (null), we cannot verify them. // The call sites check the return type, but cannot check the @@ -243,7 +243,7 @@ pub fn verifyContext(comptime RawContext: type, comptime PseudoKey: type, compti errors = errors ++ lazy.err_invalid_eql_signature; emitted_signature = true; } - errors = errors ++ lazy.deep_prefix ++ "First parameter must be "++@typeName(Context)++", but is "++@typeName(Self); + errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ ", but is " ++ @typeName(Self); errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be a pointer because it is passed by value."; } } else if (Self == *Context) { @@ -253,10 +253,10 @@ pub fn verifyContext(comptime RawContext: type, comptime PseudoKey: type, compti emitted_signature = true; } if (!allow_const_ptr) { - errors = errors ++ lazy.deep_prefix ++ "First parameter must be "++@typeName(Context)++", but is "++@typeName(Self); + errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ ", but is " ++ @typeName(Self); errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be a pointer because it is passed by value."; } else { - errors = errors ++ lazy.deep_prefix ++ "First parameter must be "++@typeName(Context)++" or "++@typeName(*const Context)++", but is "++@typeName(Self); + errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ " or " ++ @typeName(*const Context) ++ ", but is " ++ @typeName(Self); errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be non-const because it is passed by const pointer."; } } @@ -265,14 +265,14 @@ pub fn verifyContext(comptime RawContext: type, comptime PseudoKey: type, compti errors = errors ++ lazy.err_invalid_eql_signature; emitted_signature = true; } - errors = errors ++ lazy.deep_prefix ++ "First parameter must be "++@typeName(Context); + errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context); if (allow_const_ptr) { - errors = errors++" or "++@typeName(*const Context); + errors = errors ++ " or " ++ @typeName(*const Context); if (allow_mutable_ptr) { - errors = errors++" or "++@typeName(*Context); + errors = errors ++ " or " ++ @typeName(*Context); } } - errors = errors++", but is "++@typeName(Self); + errors = errors ++ ", but is " ++ 
@typeName(Self); } } if (func.args[1].arg_type.? != PseudoKey) { @@ -280,21 +280,21 @@ pub fn verifyContext(comptime RawContext: type, comptime PseudoKey: type, compti errors = errors ++ lazy.err_invalid_eql_signature; emitted_signature = true; } - errors = errors ++ lazy.deep_prefix ++ "Second parameter must be "++@typeName(PseudoKey)++", but is "++@typeName(func.args[1].arg_type.?); + errors = errors ++ lazy.deep_prefix ++ "Second parameter must be " ++ @typeName(PseudoKey) ++ ", but is " ++ @typeName(func.args[1].arg_type.?); } if (func.args[2].arg_type.? != Key) { if (!emitted_signature) { errors = errors ++ lazy.err_invalid_eql_signature; emitted_signature = true; } - errors = errors ++ lazy.deep_prefix ++ "Third parameter must be "++@typeName(Key)++", but is "++@typeName(func.args[2].arg_type.?); + errors = errors ++ lazy.deep_prefix ++ "Third parameter must be " ++ @typeName(Key) ++ ", but is " ++ @typeName(func.args[2].arg_type.?); } if (func.return_type.? != bool) { if (!emitted_signature) { errors = errors ++ lazy.err_invalid_eql_signature; emitted_signature = true; } - errors = errors ++ lazy.deep_prefix ++ "Return type must be bool, but was "++@typeName(func.return_type.?); + errors = errors ++ lazy.deep_prefix ++ "Return type must be bool, but was " ++ @typeName(func.return_type.?); } // If any of these are generic (null), we cannot verify them. // The call sites check the return type, but cannot check the @@ -309,7 +309,7 @@ pub fn verifyContext(comptime RawContext: type, comptime PseudoKey: type, compti if (errors.len != 0) { // errors begins with a newline (from lazy.prefix) - @compileError("Problems found with hash context type "++@typeName(Context)++":"++errors); + @compileError("Problems found with hash context type " ++ @typeName(Context) ++ ":" ++ errors); } } } @@ -790,7 +790,7 @@ pub fn HashMapUnmanaged( pub fn promote(self: Self, allocator: *Allocator) Managed { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call promoteContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead."); return promoteContext(self, allocator, undefined); } @@ -819,7 +819,7 @@ pub fn HashMapUnmanaged( pub fn ensureCapacity(self: *Self, allocator: *Allocator, new_size: Size) !void { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call ensureCapacityContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureCapacityContext instead."); return ensureCapacityContext(self, allocator, new_size, undefined); } pub fn ensureCapacityContext(self: *Self, allocator: *Allocator, new_size: Size, ctx: Context) !void { @@ -902,7 +902,7 @@ pub fn HashMapUnmanaged( /// Insert an entry in the map. Assumes it is not already present. pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call putNoClobberContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead."); return self.putNoClobberContext(allocator, key, value, undefined); } pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void { @@ -917,7 +917,7 @@ pub fn HashMapUnmanaged( /// existing data, see `getOrPutAssumeCapacity`. 
pub fn putAssumeCapacity(self: *Self, key: K, value: V) void { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call putAssumeCapacityContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putAssumeCapacityContext instead."); return self.putAssumeCapacityContext(key, value, undefined); } pub fn putAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) void { @@ -929,7 +929,7 @@ pub fn HashMapUnmanaged( /// and that no allocation is needed. pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call putAssumeCapacityNoClobberContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putAssumeCapacityNoClobberContext instead."); return self.putAssumeCapacityNoClobberContext(key, value, undefined); } pub fn putAssumeCapacityNoClobberContext(self: *Self, key: K, value: V, ctx: Context) void { @@ -961,7 +961,7 @@ pub fn HashMapUnmanaged( /// Inserts a new `Entry` into the hash map, returning the previous one, if any. pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call fetchPutContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead."); return self.fetchPutContext(allocator, key, value, undefined); } pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV { @@ -981,7 +981,7 @@ pub fn HashMapUnmanaged( /// If insertion happens, asserts there is enough capacity without allocating. pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call fetchPutAssumeCapacityContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutAssumeCapacityContext instead."); return self.fetchPutAssumeCapacityContext(key, value, undefined); } pub fn fetchPutAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) ?KV { @@ -1001,7 +1001,7 @@ pub fn HashMapUnmanaged( /// the hash map, and then returned from this function. pub fn fetchRemove(self: *Self, key: K) ?KV { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call fetchRemoveContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchRemoveContext instead."); return self.fetchRemoveContext(key, undefined); } pub fn fetchRemoveContext(self: *Self, key: K, ctx: Context) ?KV { @@ -1033,7 +1033,7 @@ pub fn HashMapUnmanaged( /// fuse the basic blocks after the branch to the basic blocks /// from this function. To encourage that, this function is /// marked as inline. - fn getIndex(self: Self, key: anytype, ctx: anytype) callconv(.Inline) ?usize { + inline fn getIndex(self: Self, key: anytype, ctx: anytype) ?usize { comptime verifyContext(@TypeOf(ctx), @TypeOf(key), K, Hash); if (self.size == 0) { @@ -1046,7 +1046,7 @@ pub fn HashMapUnmanaged( // verifyContext can't verify the return type of generic hash functions, // so we need to double-check it here. if (@TypeOf(hash) != Hash) { - @compileError("Context "++@typeName(@TypeOf(ctx))++" has a generic hash function that returns the wrong type! 
"++@typeName(Hash)++" was expected, but found "++@typeName(@TypeOf(hash))); + @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic hash function that returns the wrong type! " ++ @typeName(Hash) ++ " was expected, but found " ++ @typeName(@TypeOf(hash))); } const mask = self.capacity() - 1; const fingerprint = Metadata.takeFingerprint(hash); @@ -1062,7 +1062,7 @@ pub fn HashMapUnmanaged( // verifyContext can't verify the return type of generic eql functions, // so we need to double-check it here. if (@TypeOf(eql) != bool) { - @compileError("Context "++@typeName(@TypeOf(ctx))++" has a generic eql function that returns the wrong type! bool was expected, but found "++@typeName(@TypeOf(eql))); + @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic eql function that returns the wrong type! bool was expected, but found " ++ @typeName(@TypeOf(eql))); } if (eql) { return idx; @@ -1078,7 +1078,7 @@ pub fn HashMapUnmanaged( pub fn getEntry(self: Self, key: K) ?Entry { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call getEntryContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getEntryContext instead."); return self.getEntryContext(key, undefined); } pub fn getEntryContext(self: Self, key: K, ctx: Context) ?Entry { @@ -1097,7 +1097,7 @@ pub fn HashMapUnmanaged( /// Insert an entry if the associated key is not already present, otherwise update preexisting value. pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call putContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead."); return self.putContext(allocator, key, value, undefined); } pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void { @@ -1108,7 +1108,7 @@ pub fn HashMapUnmanaged( /// Get an optional pointer to the value associated with key, if present. pub fn getPtr(self: Self, key: K) ?*V { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call getPtrContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getPtrContext instead."); return self.getPtrContext(key, undefined); } pub fn getPtrContext(self: Self, key: K, ctx: Context) ?*V { @@ -1124,7 +1124,7 @@ pub fn HashMapUnmanaged( /// Get a copy of the value associated with key, if present. 
pub fn get(self: Self, key: K) ?V { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call getContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getContext instead."); return self.getContext(key, undefined); } pub fn getContext(self: Self, key: K, ctx: Context) ?V { @@ -1139,7 +1139,7 @@ pub fn HashMapUnmanaged( pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call getOrPutContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead."); return self.getOrPutContext(allocator, key, undefined); } pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult { @@ -1151,7 +1151,7 @@ pub fn HashMapUnmanaged( } pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call getOrPutContextAdapted instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead."); return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined); } pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult { @@ -1171,7 +1171,7 @@ pub fn HashMapUnmanaged( pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call getOrPutAssumeCapacityContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutAssumeCapacityContext instead."); return self.getOrPutAssumeCapacityContext(key, undefined); } pub fn getOrPutAssumeCapacityContext(self: *Self, key: K, ctx: Context) GetOrPutResult { @@ -1190,7 +1190,7 @@ pub fn HashMapUnmanaged( // verifyContext can't verify the return type of generic hash functions, // so we need to double-check it here. if (@TypeOf(hash) != Hash) { - @compileError("Context "++@typeName(@TypeOf(ctx))++" has a generic hash function that returns the wrong type! "++@typeName(Hash)++" was expected, but found "++@typeName(@TypeOf(hash))); + @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic hash function that returns the wrong type! " ++ @typeName(Hash) ++ " was expected, but found " ++ @typeName(@TypeOf(hash))); } const mask = self.capacity() - 1; const fingerprint = Metadata.takeFingerprint(hash); @@ -1207,7 +1207,7 @@ pub fn HashMapUnmanaged( // verifyContext can't verify the return type of generic eql functions, // so we need to double-check it here. if (@TypeOf(eql) != bool) { - @compileError("Context "++@typeName(@TypeOf(ctx))++" has a generic eql function that returns the wrong type! bool was expected, but found "++@typeName(@TypeOf(eql))); + @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic eql function that returns the wrong type! 
bool was expected, but found " ++ @typeName(@TypeOf(eql))); } if (eql) { return GetOrPutResult{ @@ -1249,7 +1249,7 @@ pub fn HashMapUnmanaged( pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !Entry { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call getOrPutValueContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead."); return self.getOrPutValueContext(allocator, key, value, undefined); } pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !Entry { @@ -1264,7 +1264,7 @@ pub fn HashMapUnmanaged( /// Return true if there is a value associated with key in the map. pub fn contains(self: *const Self, key: K) bool { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call containsContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call containsContext instead."); return self.containsContext(key, undefined); } pub fn containsContext(self: *const Self, key: K, ctx: Context) bool { @@ -1279,7 +1279,7 @@ pub fn HashMapUnmanaged( /// function returns false. pub fn remove(self: *Self, key: K) bool { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call removeContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call removeContext instead."); return self.removeContext(key, undefined); } pub fn removeContext(self: *Self, key: K, ctx: Context) bool { @@ -1317,7 +1317,7 @@ pub fn HashMapUnmanaged( pub fn clone(self: Self, allocator: *Allocator) !Self { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context "++@typeName(Context)++", call cloneContext instead."); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead."); return self.cloneContext(allocator, @as(Context, undefined)); } pub fn cloneContext(self: Self, allocator: *Allocator, new_ctx: anytype) !HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) { diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig index 1b7477e806..006a41dae8 100644 --- a/lib/std/heap/general_purpose_allocator.zig +++ b/lib/std/heap/general_purpose_allocator.zig @@ -391,8 +391,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { var it = self.large_allocations.iterator(); while (it.next()) |large| { if (large.value_ptr.freed) { - _ = self.backing_allocator.resizeFn(self.backing_allocator, large.value_ptr.bytes, - large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable; + _ = self.backing_allocator.resizeFn(self.backing_allocator, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable; } } } @@ -532,10 +531,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { if (config.retain_metadata and entry.value_ptr.freed) { if (config.safety) { - reportDoubleFree(ret_addr, - entry.value_ptr.getStackTrace(.alloc), - entry.value_ptr.getStackTrace(.free) - ); + reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free)); if (new_size == 0) { // Recoverable. Restore self.total_requested_bytes if needed. 
if (config.enable_memory_limit) { @@ -564,8 +560,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { }); } - const result_len = if (config.never_unmap and new_size == 0) 0 else - try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align, ret_addr); + const result_len = if (config.never_unmap and new_size == 0) 0 else try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align, ret_addr); if (result_len == 0) { if (config.verbose_log) { @@ -659,10 +654,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0; if (!is_used) { if (config.safety) { - reportDoubleFree(ret_addr, - bucketStackTrace(bucket, size_class, slot_index, .alloc), - bucketStackTrace(bucket, size_class, slot_index, .free) - ); + reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free)); if (new_size == 0) { // Recoverable. Restore self.total_requested_bytes if needed, as we // don't return an error value so the errdefer above does not run. diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig index 2c82c3abf4..ea63a76b72 100644 --- a/lib/std/heap/logging_allocator.zig +++ b/lib/std/heap/logging_allocator.zig @@ -43,7 +43,7 @@ pub fn ScopedLoggingAllocator( } // This function is required as the `std.log.log` function is not public - fn logHelper(comptime log_level: std.log.Level, comptime format: []const u8, args: anytype) callconv(.Inline) void { + inline fn logHelper(comptime log_level: std.log.Level, comptime format: []const u8, args: anytype) void { switch (log_level) { .emerg => log.emerg(format, args), .alert => log.alert(format, args), diff --git a/lib/std/math.zig b/lib/std/math.zig index ac28cbb4e2..9f92e2e82b 100644 --- a/lib/std/math.zig +++ b/lib/std/math.zig @@ -1087,14 +1087,14 @@ fn testCeilPowerOfTwo() !void { pub fn log2_int(comptime T: type, x: T) Log2Int(T) { if (@typeInfo(T) != .Int or @typeInfo(T).Int.signedness != .unsigned) - @compileError("log2_int requires an unsigned integer, found "++@typeName(T)); + @compileError("log2_int requires an unsigned integer, found " ++ @typeName(T)); assert(x != 0); return @intCast(Log2Int(T), @typeInfo(T).Int.bits - 1 - @clz(T, x)); } pub fn log2_int_ceil(comptime T: type, x: T) Log2IntCeil(T) { if (@typeInfo(T) != .Int or @typeInfo(T).Int.signedness != .unsigned) - @compileError("log2_int_ceil requires an unsigned integer, found "++@typeName(T)); + @compileError("log2_int_ceil requires an unsigned integer, found " ++ @typeName(T)); assert(x != 0); if (x == 1) return 0; const log2_val: Log2IntCeil(T) = log2_int(T, x - 1); diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig index c96af48cb2..7a071a8928 100644 --- a/lib/std/multi_array_list.zig +++ b/lib/std/multi_array_list.zig @@ -46,8 +46,7 @@ pub fn MultiArrayList(comptime S: type) type { return &[_]F{}; } const byte_ptr = self.ptrs[@enumToInt(field)]; - const casted_ptr: [*]F = if (@sizeOf([*]F) == 0) undefined - else @ptrCast([*]F, @alignCast(@alignOf(F), byte_ptr)); + const casted_ptr: [*]F = if (@sizeOf([*]F) == 0) undefined else @ptrCast([*]F, @alignCast(@alignOf(F), byte_ptr)); return casted_ptr[0..self.len]; } @@ -197,7 +196,7 @@ pub fn MultiArrayList(comptime S: type) type { try self.ensureCapacity(gpa, self.len + 1); self.insertAssumeCapacity(index, elem); } - + /// Inserts an item into an ordered list 
which has room for it. /// Shifts all elements after and including the specified index /// back by one and sets the given index to the specified element. @@ -209,9 +208,9 @@ pub fn MultiArrayList(comptime S: type) type { const slices = self.slice(); inline for (fields) |field_info, field_index| { const field_slice = slices.items(@intToEnum(Field, field_index)); - var i: usize = self.len-1; + var i: usize = self.len - 1; while (i > index) : (i -= 1) { - field_slice[i] = field_slice[i-1]; + field_slice[i] = field_slice[i - 1]; } field_slice[index] = @field(elem, field_info.name); } @@ -224,8 +223,8 @@ pub fn MultiArrayList(comptime S: type) type { const slices = self.slice(); inline for (fields) |field_info, i| { const field_slice = slices.items(@intToEnum(Field, i)); - field_slice[index] = field_slice[self.len-1]; - field_slice[self.len-1] = undefined; + field_slice[index] = field_slice[self.len - 1]; + field_slice[self.len - 1] = undefined; } self.len -= 1; } @@ -237,8 +236,8 @@ pub fn MultiArrayList(comptime S: type) type { inline for (fields) |field_info, field_index| { const field_slice = slices.items(@intToEnum(Field, field_index)); var i = index; - while (i < self.len-1) : (i += 1) { - field_slice[i] = field_slice[i+1]; + while (i < self.len - 1) : (i += 1) { + field_slice[i] = field_slice[i + 1]; } field_slice[i] = undefined; } diff --git a/lib/std/os.zig b/lib/std/os.zig index ffd1bee5ab..4c2fa9f9d0 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -6121,7 +6121,7 @@ pub fn getrlimit(resource: rlimit_resource) GetrlimitError!rlimit { } } -pub const SetrlimitError = error{PermissionDenied, LimitTooBig} || UnexpectedError; +pub const SetrlimitError = error{ PermissionDenied, LimitTooBig } || UnexpectedError; pub fn setrlimit(resource: rlimit_resource, limits: rlimit) SetrlimitError!void { const setrlimit_sym = if (builtin.os.tag == .linux and builtin.link_libc) diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig index 508809f1de..78ef8e78cb 100644 --- a/lib/std/zig/parse.zig +++ b/lib/std/zig/parse.zig @@ -3553,7 +3553,7 @@ const Parser = struct { _ = try p.expectToken(.l_paren); const scratch_top = p.scratch.items.len; defer p.scratch.shrinkRetainingCapacity(scratch_top); - var varargs: union(enum){ none, seen, nonfinal: TokenIndex } = .none; + var varargs: union(enum) { none, seen, nonfinal: TokenIndex } = .none; while (true) { if (p.eatToken(.r_paren)) |_| break; if (varargs == .seen) varargs = .{ .nonfinal = p.tok_i }; @@ -3583,9 +3583,9 @@ const Parser = struct { } const params = p.scratch.items[scratch_top..]; return switch (params.len) { - 0 => SmallSpan { .zero_or_one = 0 }, - 1 => SmallSpan { .zero_or_one = params[0] }, - else => SmallSpan { .multi = try p.listToSpan(params) }, + 0 => SmallSpan{ .zero_or_one = 0 }, + 1 => SmallSpan{ .zero_or_one = params[0] }, + else => SmallSpan{ .multi = try p.listToSpan(params) }, }; } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 4ce5de7523..3bff09bd8d 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -885,10 +885,7 @@ pub const DeclGen = struct { if (inst.operand.ty.hasCodeGenBits()) { const operand_id = try self.resolve(inst.operand); // current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body. 
-            try target.incoming_blocks.append(self.spv.gpa, .{
-                .src_label_id = self.current_block_label_id,
-                .break_value_id = operand_id
-            });
+            try target.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
         }
 
         try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id});
@@ -936,9 +933,9 @@ pub const DeclGen = struct {
         const result_id = self.spv.allocResultId();
 
         const operands = if (inst.base.ty.isVolatilePtr())
-            &[_]Word{ result_type_id, result_id, operand_id, @bitCast(u32, spec.MemoryAccess{.Volatile = true}) }
+            &[_]Word{ result_type_id, result_id, operand_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) }
         else
-            &[_]Word{ result_type_id, result_id, operand_id};
+            &[_]Word{ result_type_id, result_id, operand_id };
 
         try writeInstruction(&self.code, .OpLoad, operands);
 
@@ -950,14 +947,14 @@ pub const DeclGen = struct {
         const loop_label_id = self.spv.allocResultId();
 
         // Jump to the loop entry point
-        try writeInstruction(&self.code, .OpBranch, &[_]Word{ loop_label_id });
+        try writeInstruction(&self.code, .OpBranch, &[_]Word{loop_label_id});
 
         // TODO: Look into OpLoopMerge.
         try self.beginSPIRVBlock(loop_label_id);
 
         try self.genBody(inst.body);
 
-        try writeInstruction(&self.code, .OpBranch, &[_]Word{ loop_label_id });
+        try writeInstruction(&self.code, .OpBranch, &[_]Word{loop_label_id});
     }
 
     fn genRet(self: *DeclGen, inst: *Inst.UnOp) !void {
@@ -976,7 +973,7 @@ pub const DeclGen = struct {
         const src_val_id = try self.resolve(inst.rhs);
 
         const operands = if (inst.lhs.ty.isVolatilePtr())
-            &[_]Word{ dst_ptr_id, src_val_id, @bitCast(u32, spec.MemoryAccess{.Volatile = true}) }
+            &[_]Word{ dst_ptr_id, src_val_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) }
         else
             &[_]Word{ dst_ptr_id, src_val_id };
 
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index d4d7de15e4..24dfa2c328 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -2517,7 +2517,7 @@ fn allocatedSizeLinkedit(self: *MachO, start: u64) u64 {
     return min_pos - start;
 }
 
-fn checkForCollision(start: u64, end: u64, off: u64, size: u64) callconv(.Inline) ?u64 {
+inline fn checkForCollision(start: u64, end: u64, off: u64, size: u64) ?u64 {
     const increased_size = padToIdeal(size);
     const test_end = off + increased_size;
     if (end > off and start < test_end) {
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index f80e4aec01..9d64245bbb 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -187,10 +187,7 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void {
     var iovc_buffers: [buffers.len]std.os.iovec_const = undefined;
     for (iovc_buffers) |*iovc, i| {
         const bytes = std.mem.sliceAsBytes(buffers[i]);
-        iovc.* = .{
-            .iov_base = bytes.ptr,
-            .iov_len = bytes.len
-        };
+        iovc.* = .{ .iov_base = bytes.ptr, .iov_len = bytes.len };
     }
 
     var file_size: u64 = 0;
diff --git a/test/behavior.zig b/test/behavior.zig
index 07b66fa618..479fe9b422 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -55,7 +55,6 @@ comptime {
     _ = @import("behavior/bugs/4560.zig");
     _ = @import("behavior/bugs/4769_a.zig");
     _ = @import("behavior/bugs/4769_b.zig");
-    _ = @import("behavior/bugs/4769_c.zig");
     _ = @import("behavior/bugs/4954.zig");
     _ = @import("behavior/bugs/5398.zig");
     _ = @import("behavior/bugs/5413.zig");
diff --git a/test/behavior/bugs/4769_c.zig b/test/behavior/bugs/4769_c.zig
deleted file mode 100644
index 4894ddf7e8..0000000000
--- a/test/behavior/bugs/4769_c.zig
+++ /dev/null
@@ -1 +0,0 @@
-///
\ No newline at end of file
diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig
index afd590b1da..75a1131072 100644
--- a/test/behavior/fn.zig
+++ b/test/behavior/fn.zig
@@ -114,7 +114,7 @@ test "assign inline fn to const variable" {
     a();
 }
 
-fn inlineFn() callconv(.Inline) void {}
+inline fn inlineFn() void {}
 
 test "pass by non-copying value" {
     try expect(addPointCoords(Point{ .x = 1, .y = 2 }) == 3);
diff --git a/test/behavior/type.zig b/test/behavior/type.zig
index 92756b9e66..5fd89ab43c 100644
--- a/test/behavior/type.zig
+++ b/test/behavior/type.zig
@@ -182,7 +182,7 @@ test "Type.Optional" {
 }
 
 test "Type.ErrorUnion" {
-    try testTypes(&[_]type{
+    try testTypes(&[_]type{
         error{}!void,
         error{Error}!void,
     });
diff --git a/test/standalone/c_compiler/build.zig b/test/standalone/c_compiler/build.zig
index 1026f2ca50..78c9c45dc0 100644
--- a/test/standalone/c_compiler/build.zig
+++ b/test/standalone/c_compiler/build.zig
@@ -30,7 +30,7 @@ pub fn build(b: *Builder) void {
     exe_cpp.linkSystemLibrary("c++");
 
     // disable broken LTO links:
-    switch(target.getOsTag()) {
+    switch (target.getOsTag()) {
         .windows => {
             exe_cpp.want_lto = false;
         },
diff --git a/tools/gen_spirv_spec.zig b/tools/gen_spirv_spec.zig
index 36b9d06a7c..f00bd1884b 100644
--- a/tools/gen_spirv_spec.zig
+++ b/tools/gen_spirv_spec.zig
@@ -15,7 +15,7 @@ pub fn main() !void {
     const spec = try std.fs.cwd().readFileAlloc(allocator, spec_path, std.math.maxInt(usize));
 
     var tokens = std.json.TokenStream.init(spec);
-    var registry = try std.json.parse(g.Registry, &tokens, .{.allocator = allocator});
+    var registry = try std.json.parse(g.Registry, &tokens, .{ .allocator = allocator });
 
     var bw = std.io.bufferedWriter(std.io.getStdOut().writer());
     try render(bw.writer(), registry);
@@ -36,7 +36,8 @@ fn render(writer: anytype, registry: g.Registry) !void {
                 \\pub const version = Version{{ .major = {}, .minor = {}, .patch = {} }};
                 \\pub const magic_number: u32 = {s};
                 \\
-            , .{ core_reg.major_version, core_reg.minor_version, core_reg.revision, core_reg.magic_number },
+            ,
+                .{ core_reg.major_version, core_reg.minor_version, core_reg.revision, core_reg.magic_number },
             );
             try renderOpcodes(writer, core_reg.instructions);
             try renderOperandKinds(writer, core_reg.operand_kinds);
@@ -45,11 +46,12 @@ fn render(writer: anytype, registry: g.Registry) !void {
             try writer.print(
                 \\pub const version = Version{{ .major = {}, .minor = 0, .patch = {} }};
                 \\
-            , .{ ext_reg.version, ext_reg.revision },
+            ,
+                .{ ext_reg.version, ext_reg.revision },
             );
             try renderOpcodes(writer, ext_reg.instructions);
             try renderOperandKinds(writer, ext_reg.operand_kinds);
-        }
+        },
     }
 }
 
@@ -72,7 +74,7 @@ fn renderOperandKinds(writer: anytype, kinds: []const g.OperandKind) !void {
 }
 
 fn renderValueEnum(writer: anytype, enumeration: g.OperandKind) !void {
-    try writer.print("pub const {s} = extern enum(u32) {{\n", .{ enumeration.kind });
+    try writer.print("pub const {s} = extern enum(u32) {{\n", .{enumeration.kind});
 
     const enumerants = enumeration.enumerants orelse return error.InvalidRegistry;
     for (enumerants) |enumerant| {
@@ -85,7 +87,7 @@ fn renderValueEnum(writer: anytype, enumeration: g.OperandKind) !void {
 }
 
 fn renderBitEnum(writer: anytype, enumeration: g.OperandKind) !void {
-    try writer.print("pub const {s} = packed struct {{\n", .{ enumeration.kind });
+    try writer.print("pub const {s} = packed struct {{\n", .{enumeration.kind});
 
     var flags_by_bitpos = [_]?[]const u8{null} ** 32;
     const enumerants = enumeration.enumerants orelse return error.InvalidRegistry;
@@ -97,7 +99,7 @@ fn renderBitEnum(writer: anytype, enumeration: g.OperandKind) !void {
         }
 
         var bitpos = std.math.log2_int(u32, value);
-        if (flags_by_bitpos[bitpos]) |*existing|{
+        if (flags_by_bitpos[bitpos]) |*existing| {
            // Keep the shortest
            if (enumerant.enumerant.len < existing.len)
                existing.* = enumerant.enumerant;
@@ -128,7 +130,7 @@ fn parseHexInt(text: []const u8) !u31 {
     const prefix = "0x";
     if (!std.mem.startsWith(u8, text, prefix))
         return error.InvalidHexInt;
-    return try std.fmt.parseInt(u31, text[prefix.len ..], 16);
+    return try std.fmt.parseInt(u31, text[prefix.len..], 16);
 }
 
 fn usageAndExit(file: std.fs.File, arg0: []const u8, code: u8) noreturn {
@@ -142,7 +144,6 @@ fn usageAndExit(file: std.fs.File, arg0: []const u8, code: u8) noreturn {
         \\The relevant specifications can be obtained from the SPIR-V registry:
         \\https://github.com/KhronosGroup/SPIRV-Headers/blob/master/include/spirv/unified1/
         \\
-    , .{arg0}
-    ) catch std.process.exit(1);
+    , .{arg0}) catch std.process.exit(1);
     std.process.exit(code);
 }
diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig
index 6f986ee6df..ad2a9ffe53 100644
--- a/tools/update_clang_options.zig
+++ b/tools/update_clang_options.zig
@@ -339,7 +339,7 @@ const known_options = [_]KnownOpt{
     .{
         .name = "mexec-model",
         .ident = "exec_model",
-    }
+    },
 };
 
 const blacklisted_options = [_][]const u8{};
diff --git a/tools/update_spirv_features.zig b/tools/update_spirv_features.zig
index 94fdb38229..da1cefcd9c 100644
--- a/tools/update_spirv_features.zig
+++ b/tools/update_spirv_features.zig
@@ -1,8 +1,3 @@
-const std = @import("std");
-const fs = std.fs;
-const Allocator = std.mem.Allocator;
-const g = @import("spirv/grammar.zig");
-
 //! This tool generates SPIR-V features from the grammar files in the SPIRV-Headers
 //! (https://github.com/KhronosGroup/SPIRV-Headers/) and SPIRV-Registry (https://github.com/KhronosGroup/SPIRV-Registry/)
 //! repositories. Currently it only generates a basic feature set definition consisting of versions, extensions and capabilities.
@@ -14,6 +9,11 @@ const g = @import("spirv/grammar.zig");
 //! from an intel project (https://github.com/intel/llvm/, https://github.com/intel/llvm/tree/sycl/sycl/doc/extensions/SPIRV),
 //! and so ONLY extensions in the SPIRV-Registry should be included.
 
+const std = @import("std");
+const fs = std.fs;
+const Allocator = std.mem.Allocator;
+const g = @import("spirv/grammar.zig");
+
 const Version = struct {
     major: u32,
     minor: u32,
@@ -38,9 +38,9 @@ const Version = struct {
 
     fn lessThan(ctx: void, a: Version, b: Version) bool {
         return if (a.major == b.major)
-        a.minor < b.minor
-        else
-        a.major < b.major;
+            a.minor < b.minor
+        else
+            a.major < b.major;
     }
 };
 
@@ -103,11 +103,11 @@ pub fn main() !void {
     }
 
     for (extensions) |ext| {
-        try w.print(" {},\n", .{ std.zig.fmtId(ext) });
+        try w.print(" {},\n", .{std.zig.fmtId(ext)});
     }
 
     for (capabilities) |cap| {
-        try w.print(" {},\n", .{ std.zig.fmtId(cap.enumerant) });
+        try w.print(" {},\n", .{std.zig.fmtId(cap.enumerant)});
     }
 
     try w.writeAll(
@@ -129,8 +129,7 @@ pub fn main() !void {
            \\ .llvm_name = null,
            \\ .description = "SPIR-V version {0}.{1}",
            \\
-        , .{ ver.major, ver.minor }
-        );
+        , .{ ver.major, ver.minor });
 
         if (i == 0) {
             try w.writeAll(
@@ -145,8 +144,7 @@ pub fn main() !void {
                \\ }}),
                \\ }};
                \\
-            , .{ versions[i - 1].major, versions[i - 1].minor }
-            );
+            , .{ versions[i - 1].major, versions[i - 1].minor });
         }
     }
 
@@ -159,11 +157,10 @@ pub fn main() !void {
            \\ .dependencies = featureSet(&[_]Feature{{}}),
           \\ }};
           \\
-        , .{
-            std.zig.fmtId(ext),
-            ext,
-        }
-        );
+        , .{
+            std.zig.fmtId(ext),
+            ext,
+        });
     }
 
     // TODO: Capability extension dependencies.
@@ -174,11 +171,10 @@ pub fn main() !void {
           \\ .description = "Enable SPIR-V capability {s}",
          \\ .dependencies = featureSet(&[_]Feature{{
          \\
-        , .{
-            std.zig.fmtId(cap.enumerant),
-            cap.enumerant,
-        }
-        );
+        , .{
+            std.zig.fmtId(cap.enumerant),
+            cap.enumerant,
+        });
 
         if (cap.version) |ver_str| {
             if (!std.mem.eql(u8, ver_str, "None")) {
@@ -188,7 +184,7 @@ pub fn main() !void {
         }
 
         for (cap.capabilities) |cap_dep| {
-            try w.print(" .{},\n", .{ std.zig.fmtId(cap_dep) });
+            try w.print(" .{},\n", .{std.zig.fmtId(cap_dep)});
         }
 
         try w.writeAll(
@@ -198,7 +194,7 @@ pub fn main() !void {
         );
     }
 
-    try w.writeAll(
+    try w.writeAll(
         \\ const ti = @typeInfo(Feature);
         \\ for (result) |*elem, i| {
         \\ elem.index = i;
@@ -217,7 +213,7 @@ pub fn main() !void {
 /// registered ones.
 /// TODO: Unfortunately, neither repository contains a machine-readable list of extension dependencies.
 fn gather_extensions(allocator: *Allocator, spirv_registry_root: []const u8) ![]const []const u8 {
-    const extensions_path = try fs.path.join(allocator, &.{spirv_registry_root, "extensions"});
+    const extensions_path = try fs.path.join(allocator, &.{ spirv_registry_root, "extensions" });
 
     var extensions_dir = try fs.cwd().openDir(extensions_path, .{ .iterate = true });
     defer extensions_dir.close();
@@ -262,7 +258,7 @@ fn gather_extensions(allocator: *Allocator, spirv_registry_root: []const u8) ![]
         }
 
         const ext_end = std.mem.indexOfScalarPos(u8, ext_spec, ext_start, '\n') orelse return error.InvalidRegistry;
-        const ext = ext_spec[ext_start .. ext_end];
+        const ext = ext_spec[ext_start..ext_end];
 
         std.debug.assert(std.mem.startsWith(u8, ext, "SPV_")); // Sanity check, all extensions should have a name like SPV_VENDOR_extension.
 
@@ -315,7 +311,6 @@ fn usageAndExit(file: fs.File, arg0: []const u8, code: u8) noreturn {
        \\SPIRV-Headers can be cloned from https://github.com/KhronosGroup/SPIRV-Headers,
        \\SPIRV-Registry can be cloned from https://github.com/KhronosGroup/SPIRV-Registry.
        \\
-    , .{arg0}
-    ) catch std.process.exit(1);
+    , .{arg0}) catch std.process.exit(1);
     std.process.exit(code);
 }
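The hunks above are mechanical zig fmt canonicalizations rather than behavioral changes: multi-member error sets and named-field literals gain inner brace padding (error{ PermissionDenied, LimitTooBig }, .{ .allocator = allocator }), single-element positional initializers lose it (&[_]Word{loop_label_id}), binary operators get surrounding spaces (self.len - 1), and callconv(.Inline) is spelled with the inline fn keyword. A minimal sketch of the resulting style follows; the names (DemoError, addOne, failingOp) are hypothetical and do not appear in this patch.

const std = @import("std");

// Hypothetical error set (not from the patch); zig fmt pads multi-member error sets.
const DemoError = error{ PermissionDenied, LimitTooBig };

// "inline fn" is the spelling that replaces the older callconv(.Inline) form.
inline fn addOne(x: u64) u64 {
    return x + 1;
}

// Hypothetical helper used only to exercise the error set above.
fn failingOp() DemoError!void {
    return error.PermissionDenied;
}

test "zig fmt canonical forms" {
    // Single positional element: no inner padding.
    const words = [_]u64{addOne(41)};
    // Named-field literal: padded on both sides.
    const pair = .{ .first = words[0], .second = words[0] + 1 };
    try std.testing.expect(pair.first == 42);
    try std.testing.expect(pair.second == 43);
    try std.testing.expectError(error.PermissionDenied, failingOp());
}

Running zig fmt over a file written in this style leaves it unchanged, which is the invariant the patch restores across the tree.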