diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6e89d87ca9..1c0218e305 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -801,31 +801,32 @@ endif()
install(TARGETS zig DESTINATION bin)
-set(ZIG_INSTALL_ARGS "build"
- --override-lib-dir "${CMAKE_SOURCE_DIR}/lib"
- "-Dlib-files-only"
- --prefix "${CMAKE_INSTALL_PREFIX}"
- "-Dconfig_h=${ZIG_CONFIG_H_OUT}"
- install
-)
+set(ZIG_SKIP_INSTALL_LIB_FILES off CACHE BOOL
+ "Disable copying lib/ files to install prefix during the build phase")
-# CODE has no effect with Visual Studio build system generator, therefore
-# when using Visual Studio build system generator we resort to running
-# `zig build install` during the build phase.
-if(MSVC)
- set(ZIG_SKIP_INSTALL_LIB_FILES off CACHE BOOL
- "Windows-only: Disable copying lib/ files to install prefix during the build phase")
- if(NOT ZIG_SKIP_INSTALL_LIB_FILES)
+if(NOT ZIG_SKIP_INSTALL_LIB_FILES)
+ set(ZIG_INSTALL_ARGS "build"
+ --override-lib-dir "${CMAKE_SOURCE_DIR}/lib"
+ "-Dlib-files-only"
+ --prefix "${CMAKE_INSTALL_PREFIX}"
+ "-Dconfig_h=${ZIG_CONFIG_H_OUT}"
+ install
+ )
+
+ # CODE has no effect with Visual Studio build system generator, therefore
+ # when using Visual Studio build system generator we resort to running
+ # `zig build install` during the build phase.
+ if(MSVC)
add_custom_target(zig_install_lib_files ALL
COMMAND zig ${ZIG_INSTALL_ARGS}
DEPENDS zig
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
)
+ else()
+ get_target_property(zig_BINARY_DIR zig BINARY_DIR)
+ install(CODE "set(zig_EXE \"${ZIG_EXECUTABLE}\")")
+ install(CODE "set(ZIG_INSTALL_ARGS \"${ZIG_INSTALL_ARGS}\")")
+ install(CODE "set(CMAKE_SOURCE_DIR \"${CMAKE_SOURCE_DIR}\")")
+ install(SCRIPT ${CMAKE_CURRENT_SOURCE_DIR}/cmake/install.cmake)
endif()
-else()
- get_target_property(zig_BINARY_DIR zig BINARY_DIR)
- install(CODE "set(zig_EXE \"${ZIG_EXECUTABLE}\")")
- install(CODE "set(ZIG_INSTALL_ARGS \"${ZIG_INSTALL_ARGS}\")")
- install(CODE "set(CMAKE_SOURCE_DIR \"${CMAKE_SOURCE_DIR}\")")
- install(SCRIPT ${CMAKE_CURRENT_SOURCE_DIR}/cmake/install.cmake)
endif()
diff --git a/doc/langref.html.in b/doc/langref.html.in
index a716336015..645c03dcbb 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -10447,13 +10447,40 @@ fn readU32Be() u32 {}
{#header_close#}
{#header_open|Source Encoding#}
Zig source code is encoded in UTF-8. An invalid UTF-8 byte sequence results in a compile error.
- Throughout all zig source code (including in comments), some codepoints are never allowed:
+ Throughout all zig source code (including in comments), some code points are never allowed:
- - Ascii control characters, except for U+000a (LF): U+0000 - U+0009, U+000b - U+0001f, U+007f. (Note that Windows line endings (CRLF) are not allowed, and hard tabs are not allowed.)
+ - Ascii control characters, except for U+000a (LF), U+000d (CR), and U+0009 (HT): U+0000 - U+0008, U+000b - U+000c, U+000e - U+001f, U+007f.
- Non-Ascii Unicode line endings: U+0085 (NEL), U+2028 (LS), U+2029 (PS).
- The codepoint U+000a (LF) (which is encoded as the single-byte value 0x0a) is the line terminator character. This character always terminates a line of zig source code (except possibly the last line of the file).
- For some discussion on the rationale behind these design decisions, see issue #663
+
+ LF (byte value 0x0a, code point U+000a, {#syntax#}'\n'{#endsyntax#}) is the line terminator in Zig source code.
+ This byte value terminates every line of zig source code except possibly the last line of the file.
+ It is recommended that non-empty source files end with an empty line, which means the last byte would be 0x0a (LF).
+
+
+ Each LF may be immediately preceded by a single CR (byte value 0x0d, code point U+000d, {#syntax#}'\r'{#endsyntax#})
+ to form a Windows style line ending, but this is discouraged.
+ A CR in any other context is not allowed.
+
+
+ HT hard tabs (byte value 0x09, code point U+0009, {#syntax#}'\t'{#endsyntax#}) are interchangeable with
+ SP spaces (byte value 0x20, code point U+0020, {#syntax#}' '{#endsyntax#}) as a token separator,
+ but use of hard tabs is discouraged. See {#link|Grammar#}.
+
+
+ Note that running zig fmt on a source file will apply all of the recommendations mentioned here.
+ Note also that the stage1 compiler does not yet support CR or HT control characters.
+
+
+ Note that a tool reading Zig source code can make assumptions if the source code is assumed to be correct Zig code.
+ For example, when identifying the ends of lines, a tool can use a naive search such as /\n/,
+ or an advanced
+ search such as /\r\n?|[\n\u0085\u2028\u2029]/, and in either case line endings will be correctly identified.
+ For another example, when identifying the whitespace before the first token on a line,
+ a tool can either use a naive search such as /[ \t]/,
+ or an advanced search such as /\s/,
+ and in either case whitespace will be correctly identified.
+
{#header_close#}
{#header_open|Keyword Reference#}
@@ -11373,6 +11400,7 @@ ExprList <- (Expr COMMA)* Expr?
# *** Tokens ***
eof <- !.
+eol <- ('\r'? '\n') | eof
hex <- [0-9a-fA-F]
hex_ <- ('_'/hex)
dec <- [0-9]
@@ -11382,39 +11410,39 @@ dec_int <- dec (dec_* dec)?
hex_int <- hex (hex_* dec)?
char_escape
- <- "\\x" hex hex
- / "\\u{" hex+ "}"
- / "\\" [nr\\t'"]
+ <- '\\x' hex hex
+ / '\\u{' hex+ '}'
+ / '\\' [nr\\t'"]
char_char
<- char_escape
- / [^\\'\n]
+ / [^\\'\r\n]
string_char
<- char_escape
- / [^\\"\n]
+ / [^\\"\r\n]
-line_comment <- '//'[^\n]*
-line_string <- ("\\\\" [^\n]* [ \n]*)+
-skip <- ([ \n] / line_comment)*
+line_comment <- '//'[^\r\n]* eol
+line_string <- ('\\\\' [^\r\n]* eol skip)+
+skip <- ([ \t] / eol / line_comment)*
CHAR_LITERAL <- "'" char_char "'" skip
FLOAT
- <- "0x" hex_* hex "." hex_int ([pP] [-+]? hex_int)? skip
- / dec_int "." dec_int ([eE] [-+]? dec_int)? skip
- / "0x" hex_* hex "."? [pP] [-+]? hex_int skip
- / dec_int "."? [eE] [-+]? dec_int skip
+ <- '0x' hex_* hex '.' hex_int ([pP] [-+]? hex_int)? skip
+ / dec_int '.' dec_int ([eE] [-+]? dec_int)? skip
+ / '0x' hex_* hex '.'? [pP] [-+]? hex_int skip
+ / dec_int '.'? [eE] [-+]? dec_int skip
INTEGER
- <- "0b" [_01]* [01] skip
- / "0o" [_0-7]* [0-7] skip
- / "0x" hex_* hex skip
+ <- '0b' [_01]* [01] skip
+ / '0o' [_0-7]* [0-7] skip
+ / '0x' hex_* hex skip
/ dec_int skip
-STRINGLITERALSINGLE <- "\"" string_char* "\"" skip
+STRINGLITERALSINGLE <- '"' string_char* '"' skip
STRINGLITERAL
<- STRINGLITERALSINGLE
- / line_string skip
+ / line_string skip
IDENTIFIER
<- !keyword [A-Za-z_] [A-Za-z0-9_]* skip
- / "@\"" string_char* "\"" skip
-BUILTINIDENTIFIER <- "@"[A-Za-z_][A-Za-z0-9_]* skip
+ / '@"' string_char* '"' skip
+BUILTINIDENTIFIER <- '@'[A-Za-z_][A-Za-z0-9_]* skip
AMPERSAND <- '&' ![=] skip
diff --git a/lib/std/bit_set.zig b/lib/std/bit_set.zig
new file mode 100644
index 0000000000..29ad0d7963
--- /dev/null
+++ b/lib/std/bit_set.zig
@@ -0,0 +1,1255 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2021 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+
+//! This file defines several variants of bit sets. A bit set
+//! is a densely stored set of integers with a known maximum,
+//! in which each integer gets a single bit. Bit sets have very
+//! fast presence checks, update operations, and union and intersection
+//! operations. However, if the number of possible items is very
+//! large and the number of actual items in a given set is usually
+//! small, they may be less memory efficient than an array set.
+//!
+//! There are five variants defined here:
+//!
+//! IntegerBitSet:
+//! A bit set with static size, which is backed by a single integer.
+//! This set is good for sets with a small size, but may generate
+//! inefficient code for larger sets, especially in debug mode.
+//!
+//! ArrayBitSet:
+//! A bit set with static size, which is backed by an array of usize.
+//! This set is good for sets with a larger size, but may use
+//! more bytes than necessary if your set is small.
+//!
+//! StaticBitSet:
+//! Picks either IntegerBitSet or ArrayBitSet depending on the requested
+//! size. The interfaces of these two types match exactly, except for fields.
+//!
+//! DynamicBitSet:
+//! A bit set with runtime known size, backed by an allocated slice
+//! of usize.
+//!
+//! DynamicBitSetUnmanaged:
+//! A variant of DynamicBitSet which does not store a pointer to its
+//! allocator, in order to save space.
+
+const std = @import("std");
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+
+/// Returns the optimal static bit set type for the specified number
+/// of elements. The returned type will perform no allocations,
+/// can be copied by value, and does not require deinitialization.
+/// Both possible implementations fulfill the same interface.
+pub fn StaticBitSet(comptime size: usize) type {
+ if (size <= @bitSizeOf(usize)) {
+ return IntegerBitSet(size);
+ } else {
+ return ArrayBitSet(usize, size);
+ }
+}
+
+/// A bit set with static size, which is backed by a single integer.
+/// This set is good for sets with a small size, but may generate
+/// inefficient code for larger sets, especially in debug mode.
+pub fn IntegerBitSet(comptime size: u16) type {
+ return struct {
+ const Self = @This();
+
+ // TODO: Make this a comptime field once those are fixed
+ /// The number of items in this bit set
+ pub const bit_length: usize = size;
+
+ /// The integer type used to represent a mask in this bit set
+ pub const MaskInt = std.meta.Int(.unsigned, size);
+
+ /// The integer type used to shift a mask in this bit set
+ pub const ShiftInt = std.math.Log2Int(MaskInt);
+
+ /// The bit mask, as a single integer
+ mask: MaskInt,
+
+ /// Creates a bit set with no elements present.
+ pub fn initEmpty() Self {
+ return .{ .mask = 0 };
+ }
+
+ /// Creates a bit set with all elements present.
+ pub fn initFull() Self {
+ return .{ .mask = ~@as(MaskInt, 0) };
+ }
+
+ /// Returns the number of bits in this bit set
+ pub fn capacity(self: Self) callconv(.Inline) usize {
+ return bit_length;
+ }
+
+ /// Returns true if the bit at the specified index
+ /// is present in the set, false otherwise.
+ pub fn isSet(self: Self, index: usize) bool {
+ assert(index < bit_length);
+ return (self.mask & maskBit(index)) != 0;
+ }
+
+ /// Returns the total number of set bits in this bit set.
+ pub fn count(self: Self) usize {
+ return @popCount(MaskInt, self.mask);
+ }
+
+ /// Changes the value of the specified bit of the bit
+ /// set to match the passed boolean.
+ pub fn setValue(self: *Self, index: usize, value: bool) void {
+ assert(index < bit_length);
+ if (MaskInt == u0) return;
+ const bit = maskBit(index);
+ const new_bit = bit & std.math.boolMask(MaskInt, value);
+ self.mask = (self.mask & ~bit) | new_bit;
+ }
+
+ /// Adds a specific bit to the bit set
+ pub fn set(self: *Self, index: usize) void {
+ assert(index < bit_length);
+ self.mask |= maskBit(index);
+ }
+
+ /// Removes a specific bit from the bit set
+ pub fn unset(self: *Self, index: usize) void {
+ assert(index < bit_length);
+ // Workaround for #7953
+ if (MaskInt == u0) return;
+ self.mask &= ~maskBit(index);
+ }
+
+ /// Flips a specific bit in the bit set
+ pub fn toggle(self: *Self, index: usize) void {
+ assert(index < bit_length);
+ self.mask ^= maskBit(index);
+ }
+
+ /// Flips all bits in this bit set which are present
+ /// in the toggles bit set.
+ pub fn toggleSet(self: *Self, toggles: Self) void {
+ self.mask ^= toggles.mask;
+ }
+
+ /// Flips every bit in the bit set.
+ pub fn toggleAll(self: *Self) void {
+ self.mask = ~self.mask;
+ }
+
+ /// Performs a union of two bit sets, and stores the
+ /// result in the first one. Bits in the result are
+ /// set if the corresponding bits were set in either input.
+ pub fn setUnion(self: *Self, other: Self) void {
+ self.mask |= other.mask;
+ }
+
+ /// Performs an intersection of two bit sets, and stores
+ /// the result in the first one. Bits in the result are
+ /// set if the corresponding bits were set in both inputs.
+ pub fn setIntersection(self: *Self, other: Self) void {
+ self.mask &= other.mask;
+ }
+
+ /// Finds the index of the first set bit.
+ /// If no bits are set, returns null.
+ pub fn findFirstSet(self: Self) ?usize {
+ const mask = self.mask;
+ if (mask == 0) return null;
+ return @ctz(MaskInt, mask);
+ }
+
+ /// Finds the index of the first set bit, and unsets it.
+ /// If no bits are set, returns null.
+ pub fn toggleFirstSet(self: *Self) ?usize {
+ const mask = self.mask;
+ if (mask == 0) return null;
+ const index = @ctz(MaskInt, mask);
+ self.mask = mask & (mask - 1);
+ return index;
+ }
+
+ /// Iterates through the items in the set, according to the options.
+ /// The default options (.{}) will iterate indices of set bits in
+ /// ascending order. Modifications to the underlying bit set may
+ /// or may not be observed by the iterator.
+ pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options.direction) {
+ return .{
+ .bits_remain = switch (options.kind) {
+ .set => self.mask,
+ .unset => ~self.mask,
+ },
+ };
+ }
+
+ fn Iterator(comptime direction: IteratorOptions.Direction) type {
+ return struct {
+ const IterSelf = @This();
+ // all bits which have not yet been iterated over
+ bits_remain: MaskInt,
+
+ /// Returns the index of the next unvisited set bit
+ /// in the bit set, in ascending order.
+ pub fn next(self: *IterSelf) ?usize {
+ if (self.bits_remain == 0) return null;
+
+ switch (direction) {
+ .forward => {
+ const next_index = @ctz(MaskInt, self.bits_remain);
+ self.bits_remain &= self.bits_remain - 1;
+ return next_index;
+ },
+ .reverse => {
+ const leading_zeroes = @clz(MaskInt, self.bits_remain);
+ const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes;
+ self.bits_remain &= (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1;
+ return top_bit;
+ },
+ }
+ }
+ };
+ }
+
+ fn maskBit(index: usize) MaskInt {
+ if (MaskInt == u0) return 0;
+ return @as(MaskInt, 1) << @intCast(ShiftInt, index);
+ }
+ fn boolMaskBit(index: usize, value: bool) MaskInt {
+ if (MaskInt == u0) return 0;
+ return @as(MaskInt, @boolToInt(value)) << @intCast(ShiftInt, index);
+ }
+ };
+}
+
+/// A bit set with static size, which is backed by an array of usize.
+/// This set is good for sets with a larger size, but may use
+/// more bytes than necessary if your set is small.
+pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
+ const mask_info: std.builtin.TypeInfo = @typeInfo(MaskIntType);
+
+ // Make sure the mask int is indeed an int
+ if (mask_info != .Int) @compileError("ArrayBitSet can only operate on integer masks, but was passed " ++ @typeName(MaskIntType));
+
+ // It must also be unsigned.
+ if (mask_info.Int.signedness != .unsigned) @compileError("ArrayBitSet requires an unsigned integer mask type, but was passed " ++ @typeName(MaskIntType));
+
+ // And it must not be empty.
+ if (MaskIntType == u0)
+ @compileError("ArrayBitSet requires a sized integer for its mask int. u0 does not work.");
+
+ const byte_size = std.mem.byte_size_in_bits;
+
+ // We use shift and truncate to decompose indices into mask indices and bit indices.
+ // This operation requires that the mask has an exact power of two number of bits.
+ if (!std.math.isPowerOfTwo(@bitSizeOf(MaskIntType))) {
+ var desired_bits = std.math.ceilPowerOfTwoAssert(usize, @bitSizeOf(MaskIntType));
+ if (desired_bits < byte_size) desired_bits = byte_size;
+ const FixedMaskType = std.meta.Int(.unsigned, desired_bits);
+ @compileError("ArrayBitSet was passed integer type " ++ @typeName(MaskIntType) ++
+ ", which is not a power of two. Please round this up to a power of two integer size (i.e. " ++ @typeName(FixedMaskType) ++ ").");
+ }
+
+ // Make sure the integer has no padding bits.
+ // Those would be wasteful here and are probably a mistake by the user.
+ // This case may be hit with small powers of two, like u4.
+ if (@bitSizeOf(MaskIntType) != @sizeOf(MaskIntType) * byte_size) {
+ var desired_bits = @sizeOf(MaskIntType) * byte_size;
+ desired_bits = std.math.ceilPowerOfTwoAssert(usize, desired_bits);
+ const FixedMaskType = std.meta.Int(.unsigned, desired_bits);
+ @compileError("ArrayBitSet was passed integer type " ++ @typeName(MaskIntType) ++
+ ", which contains padding bits. Please round this up to an unpadded integer size (i.e. " ++ @typeName(FixedMaskType) ++ ").");
+ }
+
+ return struct {
+ const Self = @This();
+
+ // TODO: Make this a comptime field once those are fixed
+ /// The number of items in this bit set
+ pub const bit_length: usize = size;
+
+ /// The integer type used to represent a mask in this bit set
+ pub const MaskInt = MaskIntType;
+
+ /// The integer type used to shift a mask in this bit set
+ pub const ShiftInt = std.math.Log2Int(MaskInt);
+
+ // bits in one mask
+ const mask_len = @bitSizeOf(MaskInt);
+ // total number of masks
+ const num_masks = (size + mask_len - 1) / mask_len;
+ // padding bits in the last mask (may be 0)
+ const last_pad_bits = mask_len * num_masks - size;
+ // Mask of valid bits in the last mask.
+ // All functions will ensure that the invalid
+ // bits in the last mask are zero.
+ pub const last_item_mask = ~@as(MaskInt, 0) >> last_pad_bits;
+
+ /// The bit masks, ordered with lower indices first.
+ /// Padding bits at the end are undefined.
+ masks: [num_masks]MaskInt,
+
+ /// Creates a bit set with no elements present.
+ pub fn initEmpty() Self {
+ return .{ .masks = [_]MaskInt{0} ** num_masks };
+ }
+
+ /// Creates a bit set with all elements present.
+ pub fn initFull() Self {
+ if (num_masks == 0) {
+ return .{ .masks = .{} };
+ } else {
+ return .{ .masks = [_]MaskInt{~@as(MaskInt, 0)} ** (num_masks - 1) ++ [_]MaskInt{last_item_mask} };
+ }
+ }
+
+ /// Returns the number of bits in this bit set
+ pub fn capacity(self: Self) callconv(.Inline) usize {
+ return bit_length;
+ }
+
+ /// Returns true if the bit at the specified index
+ /// is present in the set, false otherwise.
+ pub fn isSet(self: Self, index: usize) bool {
+ assert(index < bit_length);
+ if (num_masks == 0) return false; // doesn't compile in this case
+ return (self.masks[maskIndex(index)] & maskBit(index)) != 0;
+ }
+
+ /// Returns the total number of set bits in this bit set.
+ pub fn count(self: Self) usize {
+ var total: usize = 0;
+ for (self.masks) |mask| {
+ total += @popCount(MaskInt, mask);
+ }
+ return total;
+ }
+
+ /// Changes the value of the specified bit of the bit
+ /// set to match the passed boolean.
+ pub fn setValue(self: *Self, index: usize, value: bool) void {
+ assert(index < bit_length);
+ if (num_masks == 0) return; // doesn't compile in this case
+ const bit = maskBit(index);
+ const mask_index = maskIndex(index);
+ const new_bit = bit & std.math.boolMask(MaskInt, value);
+ self.masks[mask_index] = (self.masks[mask_index] & ~bit) | new_bit;
+ }
+
+ /// Adds a specific bit to the bit set
+ pub fn set(self: *Self, index: usize) void {
+ assert(index < bit_length);
+ if (num_masks == 0) return; // doesn't compile in this case
+ self.masks[maskIndex(index)] |= maskBit(index);
+ }
+
+ /// Removes a specific bit from the bit set
+ pub fn unset(self: *Self, index: usize) void {
+ assert(index < bit_length);
+ if (num_masks == 0) return; // doesn't compile in this case
+ self.masks[maskIndex(index)] &= ~maskBit(index);
+ }
+
+ /// Flips a specific bit in the bit set
+ pub fn toggle(self: *Self, index: usize) void {
+ assert(index < bit_length);
+ if (num_masks == 0) return; // doesn't compile in this case
+ self.masks[maskIndex(index)] ^= maskBit(index);
+ }
+
+ /// Flips all bits in this bit set which are present
+ /// in the toggles bit set.
+ pub fn toggleSet(self: *Self, toggles: Self) void {
+ for (self.masks) |*mask, i| {
+ mask.* ^= toggles.masks[i];
+ }
+ }
+
+ /// Flips every bit in the bit set.
+ pub fn toggleAll(self: *Self) void {
+ for (self.masks) |*mask, i| {
+ mask.* = ~mask.*;
+ }
+
+ // Zero the padding bits
+ if (num_masks > 0) {
+ self.masks[num_masks - 1] &= last_item_mask;
+ }
+ }
+
+ /// Performs a union of two bit sets, and stores the
+ /// result in the first one. Bits in the result are
+ /// set if the corresponding bits were set in either input.
+ pub fn setUnion(self: *Self, other: Self) void {
+ for (self.masks) |*mask, i| {
+ mask.* |= other.masks[i];
+ }
+ }
+
+ /// Performs an intersection of two bit sets, and stores
+ /// the result in the first one. Bits in the result are
+ /// set if the corresponding bits were set in both inputs.
+ pub fn setIntersection(self: *Self, other: Self) void {
+ for (self.masks) |*mask, i| {
+ mask.* &= other.masks[i];
+ }
+ }
+
+ /// Finds the index of the first set bit.
+ /// If no bits are set, returns null.
+ pub fn findFirstSet(self: Self) ?usize {
+ var offset: usize = 0;
+ const mask = for (self.masks) |mask| {
+ if (mask != 0) break mask;
+ offset += @bitSizeOf(MaskInt);
+ } else return null;
+ return offset + @ctz(MaskInt, mask);
+ }
+
+ /// Finds the index of the first set bit, and unsets it.
+ /// If no bits are set, returns null.
+ pub fn toggleFirstSet(self: *Self) ?usize {
+ var offset: usize = 0;
+ const mask = for (self.masks) |*mask| {
+ if (mask.* != 0) break mask;
+ offset += @bitSizeOf(MaskInt);
+ } else return null;
+ const index = @ctz(MaskInt, mask.*);
+ mask.* &= (mask.* - 1);
+ return offset + index;
+ }
+
+ /// Iterates through the items in the set, according to the options.
+ /// The default options (.{}) will iterate indices of set bits in
+ /// ascending order. Modifications to the underlying bit set may
+ /// or may not be observed by the iterator.
+ pub fn iterator(self: *const Self, comptime options: IteratorOptions) BitSetIterator(MaskInt, options) {
+ return BitSetIterator(MaskInt, options).init(&self.masks, last_item_mask);
+ }
+
+ fn maskBit(index: usize) MaskInt {
+ return @as(MaskInt, 1) << @truncate(ShiftInt, index);
+ }
+ fn maskIndex(index: usize) usize {
+ return index >> @bitSizeOf(ShiftInt);
+ }
+ fn boolMaskBit(index: usize, value: bool) MaskInt {
+ return @as(MaskInt, @boolToInt(value)) << @intCast(ShiftInt, index);
+ }
+ };
+}
+
+/// A bit set with runtime known size, backed by an allocated slice
+/// of usize. The allocator must be tracked externally by the user.
+pub const DynamicBitSetUnmanaged = struct {
+ const Self = @This();
+
+ /// The integer type used to represent a mask in this bit set
+ pub const MaskInt = usize;
+
+ /// The integer type used to shift a mask in this bit set
+ pub const ShiftInt = std.math.Log2Int(MaskInt);
+
+ /// The number of valid items in this bit set
+ bit_length: usize = 0,
+
+ /// The bit masks, ordered with lower indices first.
+ /// Padding bits at the end must be zeroed.
+ masks: [*]MaskInt = empty_masks_ptr,
+ // This pointer is one usize after the actual allocation.
+ // That slot holds the size of the true allocation, which
+ // is needed by Zig's allocator interface in case a shrink
+ // fails.
+
+ // Don't modify this value. Ideally it would go in const data so
+ // modifications would cause a bus error, but the only way
+ // to discard a const qualifier is through ptrToInt, which
+ // cannot currently round trip at comptime.
+ var empty_masks_data = [_]MaskInt{ 0, undefined };
+ const empty_masks_ptr = empty_masks_data[1..2];
+
+ /// Creates a bit set with no elements present.
+ /// If bit_length is not zero, deinit must eventually be called.
+ pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self {
+ var self = Self{};
+ try self.resize(bit_length, false, allocator);
+ return self;
+ }
+
+ /// Creates a bit set with all elements present.
+ /// If bit_length is not zero, deinit must eventually be called.
+ pub fn initFull(bit_length: usize, allocator: *Allocator) !Self {
+ var self = Self{};
+ try self.resize(bit_length, true, allocator);
+ return self;
+ }
+
+ /// Resizes to a new bit_length. If the new length is larger
+ /// than the old length, fills any added bits with `fill`.
+ /// If new_len is not zero, deinit must eventually be called.
+ pub fn resize(self: *@This(), new_len: usize, fill: bool, allocator: *Allocator) !void {
+ const old_len = self.bit_length;
+
+ const old_masks = numMasks(old_len);
+ const new_masks = numMasks(new_len);
+
+ const old_allocation = (self.masks - 1)[0..(self.masks - 1)[0]];
+
+ if (new_masks == 0) {
+ assert(new_len == 0);
+ allocator.free(old_allocation);
+ self.masks = empty_masks_ptr;
+ self.bit_length = 0;
+ return;
+ }
+
+ if (old_allocation.len != new_masks + 1) realloc: {
+ // If realloc fails, it may mean one of two things.
+ // If we are growing, it means we are out of memory.
+ // If we are shrinking, it means the allocator doesn't
+ // want to move the allocation. This means we need to
+ // hold on to the extra 8 bytes required to be able to free
+ // this allocation properly.
+ const new_allocation = allocator.realloc(old_allocation, new_masks + 1) catch |err| {
+ if (new_masks + 1 > old_allocation.len) return err;
+ break :realloc;
+ };
+
+ new_allocation[0] = new_allocation.len;
+ self.masks = new_allocation.ptr + 1;
+ }
+
+ // If we increased in size, we need to set any new bits
+ // to the fill value.
+ if (new_len > old_len) {
+ // set the padding bits in the old last item to 1
+ if (fill and old_masks > 0) {
+ const old_padding_bits = old_masks * @bitSizeOf(MaskInt) - old_len;
+ const old_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, old_padding_bits);
+ self.masks[old_masks - 1] |= ~old_mask;
+ }
+
+ // fill in any new masks
+ if (new_masks > old_masks) {
+ const fill_value = std.math.boolMask(MaskInt, fill);
+ std.mem.set(MaskInt, self.masks[old_masks..new_masks], fill_value);
+ }
+ }
+
+ // Zero out the padding bits
+ if (new_len > 0) {
+ const padding_bits = new_masks * @bitSizeOf(MaskInt) - new_len;
+ const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits);
+ self.masks[new_masks - 1] &= last_item_mask;
+ }
+
+ // And finally, save the new length.
+ self.bit_length = new_len;
+ }
+
+ /// deinitializes the array and releases its memory.
+ /// The passed allocator must be the same one used for
+ /// init* or resize in the past.
+ pub fn deinit(self: *Self, allocator: *Allocator) void {
+ self.resize(0, false, allocator) catch unreachable;
+ }
+
+ /// Creates a duplicate of this bit set, using the new allocator.
+ pub fn clone(self: *const Self, new_allocator: *Allocator) !Self {
+ const num_masks = numMasks(self.bit_length);
+ var copy = Self{};
+ try copy.resize(self.bit_length, false, new_allocator);
+ std.mem.copy(MaskInt, copy.masks[0..num_masks], self.masks[0..num_masks]);
+ return copy;
+ }
+
+ /// Returns the number of bits in this bit set
+ pub fn capacity(self: Self) callconv(.Inline) usize {
+ return self.bit_length;
+ }
+
+ /// Returns true if the bit at the specified index
+ /// is present in the set, false otherwise.
+ pub fn isSet(self: Self, index: usize) bool {
+ assert(index < self.bit_length);
+ return (self.masks[maskIndex(index)] & maskBit(index)) != 0;
+ }
+
+ /// Returns the total number of set bits in this bit set.
+ pub fn count(self: Self) usize {
+ const num_masks = (self.bit_length + (@bitSizeOf(MaskInt) - 1)) / @bitSizeOf(MaskInt);
+ var total: usize = 0;
+ for (self.masks[0..num_masks]) |mask| {
+ // Note: This is where we depend on padding bits being zero
+ total += @popCount(MaskInt, mask);
+ }
+ return total;
+ }
+
+ /// Changes the value of the specified bit of the bit
+ /// set to match the passed boolean.
+ pub fn setValue(self: *Self, index: usize, value: bool) void {
+ assert(index < self.bit_length);
+ const bit = maskBit(index);
+ const mask_index = maskIndex(index);
+ const new_bit = bit & std.math.boolMask(MaskInt, value);
+ self.masks[mask_index] = (self.masks[mask_index] & ~bit) | new_bit;
+ }
+
+ /// Adds a specific bit to the bit set
+ pub fn set(self: *Self, index: usize) void {
+ assert(index < self.bit_length);
+ self.masks[maskIndex(index)] |= maskBit(index);
+ }
+
+ /// Removes a specific bit from the bit set
+ pub fn unset(self: *Self, index: usize) void {
+ assert(index < self.bit_length);
+ self.masks[maskIndex(index)] &= ~maskBit(index);
+ }
+
+ /// Flips a specific bit in the bit set
+ pub fn toggle(self: *Self, index: usize) void {
+ assert(index < self.bit_length);
+ self.masks[maskIndex(index)] ^= maskBit(index);
+ }
+
+ /// Flips all bits in this bit set which are present
+ /// in the toggles bit set. Both sets must have the
+ /// same bit_length.
+ pub fn toggleSet(self: *Self, toggles: Self) void {
+ assert(toggles.bit_length == self.bit_length);
+ const num_masks = numMasks(self.bit_length);
+ for (self.masks[0..num_masks]) |*mask, i| {
+ mask.* ^= toggles.masks[i];
+ }
+ }
+
+ /// Flips every bit in the bit set.
+ pub fn toggleAll(self: *Self) void {
+ const bit_length = self.bit_length;
+ // avoid underflow if bit_length is zero
+ if (bit_length == 0) return;
+
+ const num_masks = numMasks(self.bit_length);
+ for (self.masks[0..num_masks]) |*mask, i| {
+ mask.* = ~mask.*;
+ }
+
+ const padding_bits = num_masks * @bitSizeOf(MaskInt) - bit_length;
+ const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits);
+ self.masks[num_masks - 1] &= last_item_mask;
+ }
+
+ /// Performs a union of two bit sets, and stores the
+ /// result in the first one. Bits in the result are
+ /// set if the corresponding bits were set in either input.
+ /// The two sets must both be the same bit_length.
+ pub fn setUnion(self: *Self, other: Self) void {
+ assert(other.bit_length == self.bit_length);
+ const num_masks = numMasks(self.bit_length);
+ for (self.masks[0..num_masks]) |*mask, i| {
+ mask.* |= other.masks[i];
+ }
+ }
+
+ /// Performs an intersection of two bit sets, and stores
+ /// the result in the first one. Bits in the result are
+ /// set if the corresponding bits were set in both inputs.
+ /// The two sets must both be the same bit_length.
+ pub fn setIntersection(self: *Self, other: Self) void {
+ assert(other.bit_length == self.bit_length);
+ const num_masks = numMasks(self.bit_length);
+ for (self.masks[0..num_masks]) |*mask, i| {
+ mask.* &= other.masks[i];
+ }
+ }
+
+ /// Finds the index of the first set bit.
+ /// If no bits are set, returns null.
+ pub fn findFirstSet(self: Self) ?usize {
+ var offset: usize = 0;
+ var mask = self.masks;
+ while (offset < self.bit_length) {
+ if (mask[0] != 0) break;
+ mask += 1;
+ offset += @bitSizeOf(MaskInt);
+ } else return null;
+ return offset + @ctz(MaskInt, mask[0]);
+ }
+
+ /// Finds the index of the first set bit, and unsets it.
+ /// If no bits are set, returns null.
+ pub fn toggleFirstSet(self: *Self) ?usize {
+ var offset: usize = 0;
+ var mask = self.masks;
+ while (offset < self.bit_length) {
+ if (mask[0] != 0) break;
+ mask += 1;
+ offset += @bitSizeOf(MaskInt);
+ } else return null;
+ const index = @ctz(MaskInt, mask[0]);
+ mask[0] &= (mask[0] - 1);
+ return offset + index;
+ }
+
+ /// Iterates through the items in the set, according to the options.
+ /// The default options (.{}) will iterate indices of set bits in
+ /// ascending order. Modifications to the underlying bit set may
+ /// or may not be observed by the iterator. Resizing the underlying
+ /// bit set invalidates the iterator.
+ pub fn iterator(self: *const Self, comptime options: IteratorOptions) BitSetIterator(MaskInt, options) {
+ const num_masks = numMasks(self.bit_length);
+ const padding_bits = num_masks * @bitSizeOf(MaskInt) - self.bit_length;
+ const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits);
+ return BitSetIterator(MaskInt, options).init(self.masks[0..num_masks], last_item_mask);
+ }
+
+ fn maskBit(index: usize) MaskInt {
+ return @as(MaskInt, 1) << @truncate(ShiftInt, index);
+ }
+ fn maskIndex(index: usize) usize {
+ return index >> @bitSizeOf(ShiftInt);
+ }
+ fn boolMaskBit(index: usize, value: bool) MaskInt {
+ return @as(MaskInt, @boolToInt(value)) << @intCast(ShiftInt, index);
+ }
+ fn numMasks(bit_length: usize) usize {
+ return (bit_length + (@bitSizeOf(MaskInt) - 1)) / @bitSizeOf(MaskInt);
+ }
+};
+
+/// A bit set with runtime known size, backed by an allocated slice
+/// of usize. Thin wrapper around DynamicBitSetUnmanaged which keeps
+/// track of the allocator instance.
+pub const DynamicBitSet = struct {
+ const Self = @This();
+
+ /// The integer type used to represent a mask in this bit set
+ pub const MaskInt = usize;
+
+ /// The integer type used to shift a mask in this bit set
+ pub const ShiftInt = std.math.Log2Int(MaskInt);
+
+ /// The allocator used by this bit set
+ allocator: *Allocator,
+
+ /// The unmanaged bit set providing the underlying storage
+ unmanaged: DynamicBitSetUnmanaged = .{},
+
+ /// Creates a bit set with no elements present.
+ pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self {
+ return Self{
+ .unmanaged = try DynamicBitSetUnmanaged.initEmpty(bit_length, allocator),
+ .allocator = allocator,
+ };
+ }
+
+ /// Creates a bit set with all elements present.
+ pub fn initFull(bit_length: usize, allocator: *Allocator) !Self {
+ return Self{
+ .unmanaged = try DynamicBitSetUnmanaged.initFull(bit_length, allocator),
+ .allocator = allocator,
+ };
+ }
+
+ /// Resizes to a new length. If the new length is larger
+ /// than the old length, fills any added bits with `fill`.
+ pub fn resize(self: *@This(), new_len: usize, fill: bool) !void {
+ try self.unmanaged.resize(new_len, fill, self.allocator);
+ }
+
+ /// Deinitializes the array and releases its memory.
+ /// The passed allocator must be the same one used for
+ /// init* or resize in the past.
+ pub fn deinit(self: *Self) void {
+ self.unmanaged.deinit(self.allocator);
+ }
+
+ /// Creates a duplicate of this bit set, using the new allocator.
+ pub fn clone(self: *const Self, new_allocator: *Allocator) !Self {
+ return Self{
+ .unmanaged = try self.unmanaged.clone(new_allocator),
+ .allocator = new_allocator,
+ };
+ }
+
+ /// Returns the number of bits in this bit set
+ pub fn capacity(self: Self) callconv(.Inline) usize {
+ return self.unmanaged.capacity();
+ }
+
+ /// Returns true if the bit at the specified index
+ /// is present in the set, false otherwise.
+ pub fn isSet(self: Self, index: usize) bool {
+ return self.unmanaged.isSet(index);
+ }
+
+ /// Returns the total number of set bits in this bit set.
+ pub fn count(self: Self) usize {
+ return self.unmanaged.count();
+ }
+
+ /// Changes the value of the specified bit of the bit
+ /// set to match the passed boolean.
+ pub fn setValue(self: *Self, index: usize, value: bool) void {
+ self.unmanaged.setValue(index, value);
+ }
+
+ /// Adds a specific bit to the bit set
+ pub fn set(self: *Self, index: usize) void {
+ self.unmanaged.set(index);
+ }
+
+ /// Removes a specific bit from the bit set
+ pub fn unset(self: *Self, index: usize) void {
+ self.unmanaged.unset(index);
+ }
+
+ /// Flips a specific bit in the bit set
+ pub fn toggle(self: *Self, index: usize) void {
+ self.unmanaged.toggle(index);
+ }
+
+ /// Flips all bits in this bit set which are present
+ /// in the toggles bit set. Both sets must have the
+ /// same bit_length.
+ pub fn toggleSet(self: *Self, toggles: Self) void {
+ self.unmanaged.toggleSet(toggles.unmanaged);
+ }
+
+ /// Flips every bit in the bit set.
+ pub fn toggleAll(self: *Self) void {
+ self.unmanaged.toggleAll();
+ }
+
+ /// Performs a union of two bit sets, and stores the
+ /// result in the first one. Bits in the result are
+ /// set if the corresponding bits were set in either input.
+ /// The two sets must both be the same bit_length.
+ pub fn setUnion(self: *Self, other: Self) void {
+ self.unmanaged.setUnion(other.unmanaged);
+ }
+
+ /// Performs an intersection of two bit sets, and stores
+ /// the result in the first one. Bits in the result are
+ /// set if the corresponding bits were set in both inputs.
+ /// The two sets must both be the same bit_length.
+ pub fn setIntersection(self: *Self, other: Self) void {
+ self.unmanaged.setIntersection(other.unmanaged);
+ }
+
+ /// Finds the index of the first set bit.
+ /// If no bits are set, returns null.
+ pub fn findFirstSet(self: Self) ?usize {
+ return self.unmanaged.findFirstSet();
+ }
+
+ /// Finds the index of the first set bit, and unsets it.
+ /// If no bits are set, returns null.
+ pub fn toggleFirstSet(self: *Self) ?usize {
+ return self.unmanaged.toggleFirstSet();
+ }
+
+ /// Iterates through the items in the set, according to the options.
+ /// The default options (.{}) will iterate indices of set bits in
+ /// ascending order. Modifications to the underlying bit set may
+ /// or may not be observed by the iterator. Resizing the underlying
+ /// bit set invalidates the iterator.
+ pub fn iterator(self: *Self, comptime options: IteratorOptions) BitSetIterator(MaskInt, options) {
+ return self.unmanaged.iterator(options);
+ }
+};
+
+/// Options for configuring an iterator over a bit set
+pub const IteratorOptions = struct {
+ /// determines which bits should be visited
+ kind: Type = .set,
+ /// determines the order in which bit indices should be visited
+ direction: Direction = .forward,
+
+ pub const Type = enum {
+ /// visit indices of set bits
+ set,
+ /// visit indices of unset bits
+ unset,
+ };
+
+ pub const Direction = enum {
+ /// visit indices in ascending order
+ forward,
+ /// visit indices in descending order.
+ /// Note that this may be slightly more expensive than forward iteration.
+ reverse,
+ };
+};
+
+// The iterator is reusable between several bit set types
+fn BitSetIterator(comptime MaskInt: type, comptime options: IteratorOptions) type {
+ const ShiftInt = std.math.Log2Int(MaskInt);
+ const kind = options.kind;
+ const direction = options.direction;
+ return struct {
+ const Self = @This();
+
+ // all bits which have not yet been iterated over
+ bits_remain: MaskInt,
+ // all words which have not yet been iterated over
+ words_remain: []const MaskInt,
+ // the offset of the current word
+ bit_offset: usize,
+ // the mask of the last word
+ last_word_mask: MaskInt,
+
+ fn init(masks: []const MaskInt, last_word_mask: MaskInt) Self {
+ if (masks.len == 0) {
+ return Self{
+ .bits_remain = 0,
+ .words_remain = &[_]MaskInt{},
+ .last_word_mask = last_word_mask,
+ .bit_offset = 0,
+ };
+ } else {
+ var result = Self{
+ .bits_remain = 0,
+ .words_remain = masks,
+ .last_word_mask = last_word_mask,
+ .bit_offset = if (direction == .forward) 0 else (masks.len - 1) * @bitSizeOf(MaskInt),
+ };
+ result.nextWord(true);
+ return result;
+ }
+ }
+
+ /// Returns the index of the next unvisited bit matching the
+ /// iterator options, or null if all matching bits have been visited.
+ pub fn next(self: *Self) ?usize {
+ while (self.bits_remain == 0) {
+ if (self.words_remain.len == 0) return null;
+ self.nextWord(false);
+ switch (direction) {
+ .forward => self.bit_offset += @bitSizeOf(MaskInt),
+ .reverse => self.bit_offset -= @bitSizeOf(MaskInt),
+ }
+ }
+
+ switch (direction) {
+ .forward => {
+ const next_index = @ctz(MaskInt, self.bits_remain) + self.bit_offset;
+ self.bits_remain &= self.bits_remain - 1;
+ return next_index;
+ },
+ .reverse => {
+ const leading_zeroes = @clz(MaskInt, self.bits_remain);
+ const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes;
+ const no_top_bit_mask = (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1;
+ self.bits_remain &= no_top_bit_mask;
+ return top_bit + self.bit_offset;
+ },
+ }
+ }
+
+ // Load the next word. Don't call this if there
+ // isn't a next word. If the next word is the
+ // last word, mask off the padding bits so we
+ // don't visit them.
+ fn nextWord(self: *Self, comptime is_first_word: bool) callconv(.Inline) void {
+ var word = switch (direction) {
+ .forward => self.words_remain[0],
+ .reverse => self.words_remain[self.words_remain.len - 1],
+ };
+ switch (kind) {
+ .set => {},
+ .unset => {
+ word = ~word;
+ if ((direction == .reverse and is_first_word) or
+ (direction == .forward and self.words_remain.len == 1))
+ {
+ word &= self.last_word_mask;
+ }
+ },
+ }
+ switch (direction) {
+ .forward => self.words_remain = self.words_remain[1..],
+ .reverse => self.words_remain.len -= 1,
+ }
+ self.bits_remain = word;
+ }
+ };
+}
+
+// ---------------- Tests -----------------
+
+const testing = std.testing;
+
+fn testBitSet(a: anytype, b: anytype, len: usize) void {
+ testing.expectEqual(len, a.capacity());
+ testing.expectEqual(len, b.capacity());
+
+ {
+ var i: usize = 0;
+ while (i < len) : (i += 1) {
+ a.setValue(i, i & 1 == 0);
+ b.setValue(i, i & 2 == 0);
+ }
+ }
+
+ testing.expectEqual((len + 1) / 2, a.count());
+ testing.expectEqual((len + 3) / 4 + (len + 2) / 4, b.count());
+
+ {
+ var iter = a.iterator(.{});
+ var i: usize = 0;
+ while (i < len) : (i += 2) {
+ testing.expectEqual(@as(?usize, i), iter.next());
+ }
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ }
+ a.toggleAll();
+ {
+ var iter = a.iterator(.{});
+ var i: usize = 1;
+ while (i < len) : (i += 2) {
+ testing.expectEqual(@as(?usize, i), iter.next());
+ }
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ }
+
+ {
+ var iter = b.iterator(.{ .kind = .unset });
+ var i: usize = 2;
+ while (i < len) : (i += 4) {
+ testing.expectEqual(@as(?usize, i), iter.next());
+ if (i + 1 < len) {
+ testing.expectEqual(@as(?usize, i + 1), iter.next());
+ }
+ }
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ }
+
+ {
+ var i: usize = 0;
+ while (i < len) : (i += 1) {
+ testing.expectEqual(i & 1 != 0, a.isSet(i));
+ testing.expectEqual(i & 2 == 0, b.isSet(i));
+ }
+ }
+
+ a.setUnion(b.*);
+ {
+ var i: usize = 0;
+ while (i < len) : (i += 1) {
+ testing.expectEqual(i & 1 != 0 or i & 2 == 0, a.isSet(i));
+ testing.expectEqual(i & 2 == 0, b.isSet(i));
+ }
+
+ i = len;
+ var set = a.iterator(.{ .direction = .reverse });
+ var unset = a.iterator(.{ .kind = .unset, .direction = .reverse });
+ while (i > 0) {
+ i -= 1;
+ if (i & 1 != 0 or i & 2 == 0) {
+ testing.expectEqual(@as(?usize, i), set.next());
+ } else {
+ testing.expectEqual(@as(?usize, i), unset.next());
+ }
+ }
+ testing.expectEqual(@as(?usize, null), set.next());
+ testing.expectEqual(@as(?usize, null), set.next());
+ testing.expectEqual(@as(?usize, null), set.next());
+ testing.expectEqual(@as(?usize, null), unset.next());
+ testing.expectEqual(@as(?usize, null), unset.next());
+ testing.expectEqual(@as(?usize, null), unset.next());
+ }
+
+ a.toggleSet(b.*);
+ {
+ testing.expectEqual(len / 4, a.count());
+
+ var i: usize = 0;
+ while (i < len) : (i += 1) {
+ testing.expectEqual(i & 1 != 0 and i & 2 != 0, a.isSet(i));
+ testing.expectEqual(i & 2 == 0, b.isSet(i));
+ if (i & 1 == 0) {
+ a.set(i);
+ } else {
+ a.unset(i);
+ }
+ }
+ }
+
+ a.setIntersection(b.*);
+ {
+ testing.expectEqual((len + 3) / 4, a.count());
+
+ var i: usize = 0;
+ while (i < len) : (i += 1) {
+ testing.expectEqual(i & 1 == 0 and i & 2 == 0, a.isSet(i));
+ testing.expectEqual(i & 2 == 0, b.isSet(i));
+ }
+ }
+
+ a.toggleSet(a.*);
+ {
+ var iter = a.iterator(.{});
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(usize, 0), a.count());
+ }
+ {
+ var iter = a.iterator(.{ .direction = .reverse });
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(usize, 0), a.count());
+ }
+
+ const test_bits = [_]usize{
+ 0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 22, 31, 32, 63, 64,
+ 66, 95, 127, 160, 192, 1000,
+ };
+ for (test_bits) |i| {
+ if (i < a.capacity()) {
+ a.set(i);
+ }
+ }
+
+ for (test_bits) |i| {
+ if (i < a.capacity()) {
+ testing.expectEqual(@as(?usize, i), a.findFirstSet());
+ testing.expectEqual(@as(?usize, i), a.toggleFirstSet());
+ }
+ }
+ testing.expectEqual(@as(?usize, null), a.findFirstSet());
+ testing.expectEqual(@as(?usize, null), a.toggleFirstSet());
+ testing.expectEqual(@as(?usize, null), a.findFirstSet());
+ testing.expectEqual(@as(?usize, null), a.toggleFirstSet());
+ testing.expectEqual(@as(usize, 0), a.count());
+}
+
+fn testStaticBitSet(comptime Set: type) void {
+ var a = Set.initEmpty();
+ var b = Set.initFull();
+ testing.expectEqual(@as(usize, 0), a.count());
+ testing.expectEqual(@as(usize, Set.bit_length), b.count());
+
+ testBitSet(&a, &b, Set.bit_length);
+}
+
+test "IntegerBitSet" {
+ testStaticBitSet(IntegerBitSet(0));
+ testStaticBitSet(IntegerBitSet(1));
+ testStaticBitSet(IntegerBitSet(2));
+ testStaticBitSet(IntegerBitSet(5));
+ testStaticBitSet(IntegerBitSet(8));
+ testStaticBitSet(IntegerBitSet(32));
+ testStaticBitSet(IntegerBitSet(64));
+ testStaticBitSet(IntegerBitSet(127));
+}
+
+test "ArrayBitSet" {
+ inline for (.{ 0, 1, 2, 31, 32, 33, 63, 64, 65, 254, 500, 3000 }) |size| {
+ testStaticBitSet(ArrayBitSet(u8, size));
+ testStaticBitSet(ArrayBitSet(u16, size));
+ testStaticBitSet(ArrayBitSet(u32, size));
+ testStaticBitSet(ArrayBitSet(u64, size));
+ testStaticBitSet(ArrayBitSet(u128, size));
+ }
+}
+
+test "DynamicBitSetUnmanaged" {
+ const allocator = std.testing.allocator;
+ var a = try DynamicBitSetUnmanaged.initEmpty(300, allocator);
+ testing.expectEqual(@as(usize, 0), a.count());
+ a.deinit(allocator);
+
+ a = try DynamicBitSetUnmanaged.initEmpty(0, allocator);
+ defer a.deinit(allocator);
+ for ([_]usize{ 1, 2, 31, 32, 33, 0, 65, 64, 63, 500, 254, 3000 }) |size| {
+ const old_len = a.capacity();
+
+ var tmp = try a.clone(allocator);
+ defer tmp.deinit(allocator);
+ testing.expectEqual(old_len, tmp.capacity());
+ var i: usize = 0;
+ while (i < old_len) : (i += 1) {
+ testing.expectEqual(a.isSet(i), tmp.isSet(i));
+ }
+
+ a.toggleSet(a); // zero a
+ tmp.toggleSet(tmp);
+
+ try a.resize(size, true, allocator);
+ try tmp.resize(size, false, allocator);
+
+ if (size > old_len) {
+ testing.expectEqual(size - old_len, a.count());
+ } else {
+ testing.expectEqual(@as(usize, 0), a.count());
+ }
+ testing.expectEqual(@as(usize, 0), tmp.count());
+
+ var b = try DynamicBitSetUnmanaged.initFull(size, allocator);
+ defer b.deinit(allocator);
+ testing.expectEqual(@as(usize, size), b.count());
+
+ testBitSet(&a, &b, size);
+ }
+}
+
+test "DynamicBitSet" {
+ const allocator = std.testing.allocator;
+ var a = try DynamicBitSet.initEmpty(300, allocator);
+ testing.expectEqual(@as(usize, 0), a.count());
+ a.deinit();
+
+ a = try DynamicBitSet.initEmpty(0, allocator);
+ defer a.deinit();
+ for ([_]usize{ 1, 2, 31, 32, 33, 0, 65, 64, 63, 500, 254, 3000 }) |size| {
+ const old_len = a.capacity();
+
+ var tmp = try a.clone(allocator);
+ defer tmp.deinit();
+ testing.expectEqual(old_len, tmp.capacity());
+ var i: usize = 0;
+ while (i < old_len) : (i += 1) {
+ testing.expectEqual(a.isSet(i), tmp.isSet(i));
+ }
+
+ a.toggleSet(a); // zero a
+ tmp.toggleSet(tmp); // zero tmp
+
+ try a.resize(size, true);
+ try tmp.resize(size, false);
+
+ if (size > old_len) {
+ testing.expectEqual(size - old_len, a.count());
+ } else {
+ testing.expectEqual(@as(usize, 0), a.count());
+ }
+ testing.expectEqual(@as(usize, 0), tmp.count());
+
+ var b = try DynamicBitSet.initFull(size, allocator);
+ defer b.deinit();
+ testing.expectEqual(@as(usize, size), b.count());
+
+ testBitSet(&a, &b, size);
+ }
+}
+
+test "StaticBitSet" {
+ testing.expectEqual(IntegerBitSet(0), StaticBitSet(0));
+ testing.expectEqual(IntegerBitSet(5), StaticBitSet(5));
+ testing.expectEqual(IntegerBitSet(@bitSizeOf(usize)), StaticBitSet(@bitSizeOf(usize)));
+ testing.expectEqual(ArrayBitSet(usize, @bitSizeOf(usize) + 1), StaticBitSet(@bitSizeOf(usize) + 1));
+ testing.expectEqual(ArrayBitSet(usize, 500), StaticBitSet(500));
+}
diff --git a/lib/std/build/emit_raw.zig b/lib/std/build/emit_raw.zig
index 721b38b7a2..0932e117fe 100644
--- a/lib/std/build/emit_raw.zig
+++ b/lib/std/build/emit_raw.zig
@@ -51,9 +51,9 @@ const BinaryElfOutput = struct {
.segments = ArrayList(*BinaryElfSegment).init(allocator),
.sections = ArrayList(*BinaryElfSection).init(allocator),
};
- const elf_hdr = try std.elf.readHeader(elf_file);
+ const elf_hdr = try std.elf.Header.read(&elf_file);
- var section_headers = elf_hdr.section_header_iterator(elf_file);
+ var section_headers = elf_hdr.section_header_iterator(&elf_file);
while (try section_headers.next()) |section| {
if (sectionValidForOutput(section)) {
const newSection = try allocator.create(BinaryElfSection);
@@ -67,7 +67,7 @@ const BinaryElfOutput = struct {
}
}
- var program_headers = elf_hdr.program_header_iterator(elf_file);
+ var program_headers = elf_hdr.program_header_iterator(&elf_file);
while (try program_headers.next()) |phdr| {
if (phdr.p_type == elf.PT_LOAD) {
const newSegment = try allocator.create(BinaryElfSegment);
diff --git a/lib/std/c/tokenizer.zig b/lib/std/c/tokenizer.zig
index 2e1969e269..4399d3dc6c 100644
--- a/lib/std/c/tokenizer.zig
+++ b/lib/std/c/tokenizer.zig
@@ -401,7 +401,9 @@ pub const Tokenizer = struct {
Zero,
IntegerLiteralOct,
IntegerLiteralBinary,
+ IntegerLiteralBinaryFirst,
IntegerLiteralHex,
+ IntegerLiteralHexFirst,
IntegerLiteral,
IntegerSuffix,
IntegerSuffixU,
@@ -1046,10 +1048,10 @@ pub const Tokenizer = struct {
state = .IntegerLiteralOct;
},
'b', 'B' => {
- state = .IntegerLiteralBinary;
+ state = .IntegerLiteralBinaryFirst;
},
'x', 'X' => {
- state = .IntegerLiteralHex;
+ state = .IntegerLiteralHexFirst;
},
'.' => {
state = .FloatFraction;
@@ -1066,6 +1068,13 @@ pub const Tokenizer = struct {
self.index -= 1;
},
},
+ .IntegerLiteralBinaryFirst => switch (c) {
+ '0'...'7' => state = .IntegerLiteralBinary,
+ else => {
+ result.id = .Invalid;
+ break;
+ },
+ },
.IntegerLiteralBinary => switch (c) {
'0', '1' => {},
else => {
@@ -1073,6 +1082,19 @@ pub const Tokenizer = struct {
self.index -= 1;
},
},
+ .IntegerLiteralHexFirst => switch (c) {
+ '0'...'9', 'a'...'f', 'A'...'F' => state = .IntegerLiteralHex,
+ '.' => {
+ state = .FloatFractionHex;
+ },
+ 'p', 'P' => {
+ state = .FloatExponent;
+ },
+ else => {
+ result.id = .Invalid;
+ break;
+ },
+ },
.IntegerLiteralHex => switch (c) {
'0'...'9', 'a'...'f', 'A'...'F' => {},
'.' => {
@@ -1238,6 +1260,8 @@ pub const Tokenizer = struct {
.MultiLineCommentAsterisk,
.FloatExponent,
.MacroString,
+ .IntegerLiteralBinaryFirst,
+ .IntegerLiteralHexFirst,
=> result.id = .Invalid,
.FloatExponentDigits => result.id = if (counter == 0) .Invalid else .{ .FloatLiteral = .none },
@@ -1523,6 +1547,7 @@ test "num suffixes" {
\\ 1.0f 1.0L 1.0 .0 1.
\\ 0l 0lu 0ll 0llu 0
\\ 1u 1ul 1ull 1
+ \\ 0x 0b
\\
, &[_]Token.Id{
.{ .FloatLiteral = .f },
@@ -1542,6 +1567,9 @@ test "num suffixes" {
.{ .IntegerLiteral = .llu },
.{ .IntegerLiteral = .none },
.Nl,
+ .Invalid,
+ .Invalid,
+ .Nl,
});
}
diff --git a/lib/std/crypto/25519/curve25519.zig b/lib/std/crypto/25519/curve25519.zig
index 765ffa1629..e01b024360 100644
--- a/lib/std/crypto/25519/curve25519.zig
+++ b/lib/std/crypto/25519/curve25519.zig
@@ -115,9 +115,9 @@ test "curve25519" {
const p = try Curve25519.basePoint.clampedMul(s);
try p.rejectIdentity();
var buf: [128]u8 = undefined;
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{p.toBytes()}), "E6F2A4D1C28EE5C7AD0329268255A468AD407D2672824C0C0EB30EA6EF450145");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&p.toBytes())}), "E6F2A4D1C28EE5C7AD0329268255A468AD407D2672824C0C0EB30EA6EF450145");
const q = try p.clampedMul(s);
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{q.toBytes()}), "3614E119FFE55EC55B87D6B19971A9F4CBC78EFE80BEC55B96392BABCC712537");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&q.toBytes())}), "3614E119FFE55EC55B87D6B19971A9F4CBC78EFE80BEC55B96392BABCC712537");
try Curve25519.rejectNonCanonical(s);
s[31] |= 0x80;
diff --git a/lib/std/crypto/25519/ed25519.zig b/lib/std/crypto/25519/ed25519.zig
index 5c7ec0cdac..06a4826f58 100644
--- a/lib/std/crypto/25519/ed25519.zig
+++ b/lib/std/crypto/25519/ed25519.zig
@@ -210,8 +210,8 @@ test "ed25519 key pair creation" {
_ = try fmt.hexToBytes(seed[0..], "8052030376d47112be7f73ed7a019293dd12ad910b654455798b4667d73de166");
const key_pair = try Ed25519.KeyPair.create(seed);
var buf: [256]u8 = undefined;
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{key_pair.secret_key}), "8052030376D47112BE7F73ED7A019293DD12AD910B654455798B4667D73DE1662D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{key_pair.public_key}), "2D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&key_pair.secret_key)}), "8052030376D47112BE7F73ED7A019293DD12AD910B654455798B4667D73DE1662D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&key_pair.public_key)}), "2D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
}
test "ed25519 signature" {
@@ -221,7 +221,7 @@ test "ed25519 signature" {
const sig = try Ed25519.sign("test", key_pair, null);
var buf: [128]u8 = undefined;
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{sig}), "10A442B4A80CC4225B154F43BEF28D2472CA80221951262EB8E0DF9091575E2687CC486E77263C3418C757522D54F84B0359236ABBBD4ACD20DC297FDCA66808");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&sig)}), "10A442B4A80CC4225B154F43BEF28D2472CA80221951262EB8E0DF9091575E2687CC486E77263C3418C757522D54F84B0359236ABBBD4ACD20DC297FDCA66808");
try Ed25519.verify(sig, "test", key_pair.public_key);
std.testing.expectError(error.InvalidSignature, Ed25519.verify(sig, "TEST", key_pair.public_key));
}
diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig
index d4238f87bb..8d9922d80c 100644
--- a/lib/std/crypto/25519/edwards25519.zig
+++ b/lib/std/crypto/25519/edwards25519.zig
@@ -450,7 +450,7 @@ test "edwards25519 packing/unpacking" {
var b = Edwards25519.basePoint;
const pk = try b.mul(s);
var buf: [128]u8 = undefined;
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{pk.toBytes()}), "074BC7E0FCBD587FDBC0969444245FADC562809C8F6E97E949AF62484B5B81A6");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&pk.toBytes())}), "074BC7E0FCBD587FDBC0969444245FADC562809C8F6E97E949AF62484B5B81A6");
const small_order_ss: [7][32]u8 = .{
.{
diff --git a/lib/std/crypto/25519/ristretto255.zig b/lib/std/crypto/25519/ristretto255.zig
index df85422f65..46bb9697e2 100644
--- a/lib/std/crypto/25519/ristretto255.zig
+++ b/lib/std/crypto/25519/ristretto255.zig
@@ -170,21 +170,21 @@ pub const Ristretto255 = struct {
test "ristretto255" {
const p = Ristretto255.basePoint;
var buf: [256]u8 = undefined;
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{p.toBytes()}), "E2F2AE0A6ABC4E71A884A961C500515F58E30B6AA582DD8DB6A65945E08D2D76");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&p.toBytes())}), "E2F2AE0A6ABC4E71A884A961C500515F58E30B6AA582DD8DB6A65945E08D2D76");
var r: [Ristretto255.encoded_length]u8 = undefined;
_ = try fmt.hexToBytes(r[0..], "6a493210f7499cd17fecb510ae0cea23a110e8d5b901f8acadd3095c73a3b919");
var q = try Ristretto255.fromBytes(r);
q = q.dbl().add(p);
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{q.toBytes()}), "E882B131016B52C1D3337080187CF768423EFCCBB517BB495AB812C4160FF44E");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&q.toBytes())}), "E882B131016B52C1D3337080187CF768423EFCCBB517BB495AB812C4160FF44E");
const s = [_]u8{15} ++ [_]u8{0} ** 31;
const w = try p.mul(s);
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{w.toBytes()}), "E0C418F7C8D9C4CDD7395B93EA124F3AD99021BB681DFC3302A9D99A2E53E64E");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&w.toBytes())}), "E0C418F7C8D9C4CDD7395B93EA124F3AD99021BB681DFC3302A9D99A2E53E64E");
std.testing.expect(p.dbl().dbl().dbl().dbl().equivalent(w.add(p)));
const h = [_]u8{69} ** 32 ++ [_]u8{42} ** 32;
const ph = Ristretto255.fromUniform(h);
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{ph.toBytes()}), "DCCA54E037A4311EFBEEF413ACD21D35276518970B7A61DC88F8587B493D5E19");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&ph.toBytes())}), "DCCA54E037A4311EFBEEF413ACD21D35276518970B7A61DC88F8587B493D5E19");
}
diff --git a/lib/std/crypto/25519/scalar.zig b/lib/std/crypto/25519/scalar.zig
index ceff153bff..e4fb277807 100644
--- a/lib/std/crypto/25519/scalar.zig
+++ b/lib/std/crypto/25519/scalar.zig
@@ -771,10 +771,10 @@ test "scalar25519" {
var y = x.toBytes();
try rejectNonCanonical(y);
var buf: [128]u8 = undefined;
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{y}), "1E979B917937F3DE71D18077F961F6CEFF01030405060708010203040506070F");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&y)}), "1E979B917937F3DE71D18077F961F6CEFF01030405060708010203040506070F");
const reduced = reduce(field_size);
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{reduced}), "0000000000000000000000000000000000000000000000000000000000000000");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&reduced)}), "0000000000000000000000000000000000000000000000000000000000000000");
}
test "non-canonical scalar25519" {
@@ -788,5 +788,5 @@ test "mulAdd overflow check" {
const c: [32]u8 = [_]u8{0xff} ** 32;
const x = mulAdd(a, b, c);
var buf: [128]u8 = undefined;
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{x}), "D14DF91389432C25AD60FF9791B9FD1D67BEF517D273ECCE3D9A307C1B419903");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&x)}), "D14DF91389432C25AD60FF9791B9FD1D67BEF517D273ECCE3D9A307C1B419903");
}
diff --git a/lib/std/crypto/chacha20.zig b/lib/std/crypto/chacha20.zig
index 0f79707279..e01888e793 100644
--- a/lib/std/crypto/chacha20.zig
+++ b/lib/std/crypto/chacha20.zig
@@ -876,7 +876,7 @@ test "crypto.xchacha20" {
var ciphertext: [input.len]u8 = undefined;
XChaCha20IETF.xor(ciphertext[0..], input[0..], 0, key, nonce);
var buf: [2 * ciphertext.len]u8 = undefined;
- testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{ciphertext}), "E0A1BCF939654AFDBDC1746EC49832647C19D891F0D1A81FC0C1703B4514BDEA584B512F6908C2C5E9DD18D5CBC1805DE5803FE3B9CA5F193FB8359E91FAB0C3BB40309A292EB1CF49685C65C4A3ADF4F11DB0CD2B6B67FBC174BC2E860E8F769FD3565BBFAD1C845E05A0FED9BE167C240D");
+ testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&ciphertext)}), "E0A1BCF939654AFDBDC1746EC49832647C19D891F0D1A81FC0C1703B4514BDEA584B512F6908C2C5E9DD18D5CBC1805DE5803FE3B9CA5F193FB8359E91FAB0C3BB40309A292EB1CF49685C65C4A3ADF4F11DB0CD2B6B67FBC174BC2E860E8F769FD3565BBFAD1C845E05A0FED9BE167C240D");
}
{
const data = "Additional data";
@@ -885,7 +885,7 @@ test "crypto.xchacha20" {
var out: [input.len]u8 = undefined;
try xchacha20poly1305Open(out[0..], ciphertext[0..], data, key, nonce);
var buf: [2 * ciphertext.len]u8 = undefined;
- testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{ciphertext}), "994D2DD32333F48E53650C02C7A2ABB8E018B0836D7175AEC779F52E961780768F815C58F1AA52D211498DB89B9216763F569C9433A6BBFCEFB4D4A49387A4C5207FBB3B5A92B5941294DF30588C6740D39DC16FA1F0E634F7246CF7CDCB978E44347D89381B7A74EB7084F754B90BDE9AAF5A94B8F2A85EFD0B50692AE2D425E234");
+ testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&ciphertext)}), "994D2DD32333F48E53650C02C7A2ABB8E018B0836D7175AEC779F52E961780768F815C58F1AA52D211498DB89B9216763F569C9433A6BBFCEFB4D4A49387A4C5207FBB3B5A92B5941294DF30588C6740D39DC16FA1F0E634F7246CF7CDCB978E44347D89381B7A74EB7084F754B90BDE9AAF5A94B8F2A85EFD0B50692AE2D425E234");
testing.expectEqualSlices(u8, out[0..], input);
ciphertext[0] += 1;
testing.expectError(error.AuthenticationFailed, xchacha20poly1305Open(out[0..], ciphertext[0..], data, key, nonce));
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index f32c1a6156..74fb95ffa8 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -360,14 +360,24 @@ pub const StackIterator = struct {
};
}
- // Negative offset of the saved BP wrt the frame pointer.
+ // Offset of the saved BP wrt the frame pointer.
const fp_offset = if (builtin.arch.isRISCV())
// On RISC-V the frame pointer points to the top of the saved register
// area, on pretty much every other architecture it points to the stack
// slot where the previous frame pointer is saved.
2 * @sizeOf(usize)
+ else if (builtin.arch.isSPARC())
+ // On SPARC the previous frame pointer is stored at 14 slots past %fp+BIAS.
+ 14 * @sizeOf(usize)
else
0;
+
+ const fp_bias = if (builtin.arch.isSPARC())
+ // On SPARC frame pointers are biased by a constant.
+ 2047
+ else
+ 0;
+
// Positive offset of the saved PC wrt the frame pointer.
const pc_offset = if (builtin.arch == .powerpc64le)
2 * @sizeOf(usize)
@@ -388,13 +398,17 @@ pub const StackIterator = struct {
}
fn next_internal(self: *StackIterator) ?usize {
- const fp = math.sub(usize, self.fp, fp_offset) catch return null;
+ const fp = if (builtin.arch.isSPARC())
+ // On SPARC the offset is positive. (!)
+ math.add(usize, self.fp, fp_offset) catch return null
+ else
+ math.sub(usize, self.fp, fp_offset) catch return null;
// Sanity check.
if (fp == 0 or !mem.isAligned(fp, @alignOf(usize)))
return null;
- const new_fp = @intToPtr(*const usize, fp).*;
+ const new_fp = math.add(usize, @intToPtr(*const usize, fp).*, fp_bias) catch return null;
// Sanity check: the stack grows down thus all the parent frames must be
// be at addresses that are greater (or equal) than the previous one.
diff --git a/lib/std/elf.zig b/lib/std/elf.zig
index cfb6b448c0..e644c6631a 100644
--- a/lib/std/elf.zig
+++ b/lib/std/elf.zig
@@ -335,7 +335,7 @@ pub const ET = extern enum(u16) {
};
/// All integers are native endian.
-const Header = struct {
+pub const Header = struct {
endian: builtin.Endian,
is_64: bool,
entry: u64,
@@ -347,187 +347,200 @@ const Header = struct {
shnum: u16,
shstrndx: u16,
- pub fn program_header_iterator(self: Header, file: File) ProgramHeaderIterator {
- return .{
+ pub fn program_header_iterator(self: Header, parse_source: anytype) ProgramHeaderIterator(@TypeOf(parse_source)) {
+ return ProgramHeaderIterator(@TypeOf(parse_source)){
.elf_header = self,
- .file = file,
+ .parse_source = parse_source,
};
}
- pub fn section_header_iterator(self: Header, file: File) SectionHeaderIterator {
- return .{
+ pub fn section_header_iterator(self: Header, parse_source: anytype) SectionHeaderIterator(@TypeOf(parse_source)) {
+ return SectionHeaderIterator(@TypeOf(parse_source)){
.elf_header = self,
- .file = file,
+ .parse_source = parse_source,
};
}
+
+ pub fn read(parse_source: anytype) !Header {
+ var hdr_buf: [@sizeOf(Elf64_Ehdr)]u8 align(@alignOf(Elf64_Ehdr)) = undefined;
+ try parse_source.seekableStream().seekTo(0);
+ try parse_source.reader().readNoEof(&hdr_buf);
+ return Header.parse(&hdr_buf);
+ }
+
+ pub fn parse(hdr_buf: *align(@alignOf(Elf64_Ehdr)) const [@sizeOf(Elf64_Ehdr)]u8) !Header {
+ const hdr32 = @ptrCast(*const Elf32_Ehdr, hdr_buf);
+ const hdr64 = @ptrCast(*const Elf64_Ehdr, hdr_buf);
+ if (!mem.eql(u8, hdr32.e_ident[0..4], "\x7fELF")) return error.InvalidElfMagic;
+ if (hdr32.e_ident[EI_VERSION] != 1) return error.InvalidElfVersion;
+
+ const endian: std.builtin.Endian = switch (hdr32.e_ident[EI_DATA]) {
+ ELFDATA2LSB => .Little,
+ ELFDATA2MSB => .Big,
+ else => return error.InvalidElfEndian,
+ };
+ const need_bswap = endian != std.builtin.endian;
+
+ const is_64 = switch (hdr32.e_ident[EI_CLASS]) {
+ ELFCLASS32 => false,
+ ELFCLASS64 => true,
+ else => return error.InvalidElfClass,
+ };
+
+ return @as(Header, .{
+ .endian = endian,
+ .is_64 = is_64,
+ .entry = int(is_64, need_bswap, hdr32.e_entry, hdr64.e_entry),
+ .phoff = int(is_64, need_bswap, hdr32.e_phoff, hdr64.e_phoff),
+ .shoff = int(is_64, need_bswap, hdr32.e_shoff, hdr64.e_shoff),
+ .phentsize = int(is_64, need_bswap, hdr32.e_phentsize, hdr64.e_phentsize),
+ .phnum = int(is_64, need_bswap, hdr32.e_phnum, hdr64.e_phnum),
+ .shentsize = int(is_64, need_bswap, hdr32.e_shentsize, hdr64.e_shentsize),
+ .shnum = int(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum),
+ .shstrndx = int(is_64, need_bswap, hdr32.e_shstrndx, hdr64.e_shstrndx),
+ });
+ }
};
-pub fn readHeader(file: File) !Header {
- var hdr_buf: [@sizeOf(Elf64_Ehdr)]u8 align(@alignOf(Elf64_Ehdr)) = undefined;
- try preadNoEof(file, &hdr_buf, 0);
- const hdr32 = @ptrCast(*Elf32_Ehdr, &hdr_buf);
- const hdr64 = @ptrCast(*Elf64_Ehdr, &hdr_buf);
- if (!mem.eql(u8, hdr32.e_ident[0..4], "\x7fELF")) return error.InvalidElfMagic;
- if (hdr32.e_ident[EI_VERSION] != 1) return error.InvalidElfVersion;
+pub fn ProgramHeaderIterator(ParseSource: anytype) type {
+ return struct {
+ elf_header: Header,
+ parse_source: ParseSource,
+ index: usize = 0,
- const endian: std.builtin.Endian = switch (hdr32.e_ident[EI_DATA]) {
- ELFDATA2LSB => .Little,
- ELFDATA2MSB => .Big,
- else => return error.InvalidElfEndian,
+ pub fn next(self: *@This()) !?Elf64_Phdr {
+ if (self.index >= self.elf_header.phnum) return null;
+ defer self.index += 1;
+
+ if (self.elf_header.is_64) {
+ var phdr: Elf64_Phdr = undefined;
+ const offset = self.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * self.index;
+ try self.parse_source.seekableStream().seekTo(offset);
+ try self.parse_source.reader().readNoEof(mem.asBytes(&phdr));
+
+ // ELF endianness matches native endianness.
+ if (self.elf_header.endian == std.builtin.endian) return phdr;
+
+ // Convert fields to native endianness.
+ return Elf64_Phdr{
+ .p_type = @byteSwap(@TypeOf(phdr.p_type), phdr.p_type),
+ .p_offset = @byteSwap(@TypeOf(phdr.p_offset), phdr.p_offset),
+ .p_vaddr = @byteSwap(@TypeOf(phdr.p_vaddr), phdr.p_vaddr),
+ .p_paddr = @byteSwap(@TypeOf(phdr.p_paddr), phdr.p_paddr),
+ .p_filesz = @byteSwap(@TypeOf(phdr.p_filesz), phdr.p_filesz),
+ .p_memsz = @byteSwap(@TypeOf(phdr.p_memsz), phdr.p_memsz),
+ .p_flags = @byteSwap(@TypeOf(phdr.p_flags), phdr.p_flags),
+ .p_align = @byteSwap(@TypeOf(phdr.p_align), phdr.p_align),
+ };
+ }
+
+ var phdr: Elf32_Phdr = undefined;
+ const offset = self.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * self.index;
+ try self.parse_source.seekableStream().seekTo(offset);
+ try self.parse_source.reader().readNoEof(mem.asBytes(&phdr));
+
+ // ELF endianness does NOT match native endianness.
+ if (self.elf_header.endian != std.builtin.endian) {
+ // Convert fields to native endianness.
+ phdr = .{
+ .p_type = @byteSwap(@TypeOf(phdr.p_type), phdr.p_type),
+ .p_offset = @byteSwap(@TypeOf(phdr.p_offset), phdr.p_offset),
+ .p_vaddr = @byteSwap(@TypeOf(phdr.p_vaddr), phdr.p_vaddr),
+ .p_paddr = @byteSwap(@TypeOf(phdr.p_paddr), phdr.p_paddr),
+ .p_filesz = @byteSwap(@TypeOf(phdr.p_filesz), phdr.p_filesz),
+ .p_memsz = @byteSwap(@TypeOf(phdr.p_memsz), phdr.p_memsz),
+ .p_flags = @byteSwap(@TypeOf(phdr.p_flags), phdr.p_flags),
+ .p_align = @byteSwap(@TypeOf(phdr.p_align), phdr.p_align),
+ };
+ }
+
+ // Convert 32-bit header to 64-bit.
+ return Elf64_Phdr{
+ .p_type = phdr.p_type,
+ .p_offset = phdr.p_offset,
+ .p_vaddr = phdr.p_vaddr,
+ .p_paddr = phdr.p_paddr,
+ .p_filesz = phdr.p_filesz,
+ .p_memsz = phdr.p_memsz,
+ .p_flags = phdr.p_flags,
+ .p_align = phdr.p_align,
+ };
+ }
};
- const need_bswap = endian != std.builtin.endian;
-
- const is_64 = switch (hdr32.e_ident[EI_CLASS]) {
- ELFCLASS32 => false,
- ELFCLASS64 => true,
- else => return error.InvalidElfClass,
- };
-
- return @as(Header, .{
- .endian = endian,
- .is_64 = is_64,
- .entry = int(is_64, need_bswap, hdr32.e_entry, hdr64.e_entry),
- .phoff = int(is_64, need_bswap, hdr32.e_phoff, hdr64.e_phoff),
- .shoff = int(is_64, need_bswap, hdr32.e_shoff, hdr64.e_shoff),
- .phentsize = int(is_64, need_bswap, hdr32.e_phentsize, hdr64.e_phentsize),
- .phnum = int(is_64, need_bswap, hdr32.e_phnum, hdr64.e_phnum),
- .shentsize = int(is_64, need_bswap, hdr32.e_shentsize, hdr64.e_shentsize),
- .shnum = int(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum),
- .shstrndx = int(is_64, need_bswap, hdr32.e_shstrndx, hdr64.e_shstrndx),
- });
}
-pub const ProgramHeaderIterator = struct {
- elf_header: Header,
- file: File,
- index: usize = 0,
+pub fn SectionHeaderIterator(ParseSource: anytype) type {
+ return struct {
+ elf_header: Header,
+ parse_source: ParseSource,
+ index: usize = 0,
- pub fn next(self: *ProgramHeaderIterator) !?Elf64_Phdr {
- if (self.index >= self.elf_header.phnum) return null;
- defer self.index += 1;
+ pub fn next(self: *@This()) !?Elf64_Shdr {
+ if (self.index >= self.elf_header.shnum) return null;
+ defer self.index += 1;
- if (self.elf_header.is_64) {
- var phdr: Elf64_Phdr = undefined;
- const offset = self.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * self.index;
- try preadNoEof(self.file, mem.asBytes(&phdr), offset);
+ if (self.elf_header.is_64) {
+ var shdr: Elf64_Shdr = undefined;
+ const offset = self.elf_header.shoff + @sizeOf(@TypeOf(shdr)) * self.index;
+ try self.parse_source.seekableStream().seekTo(offset);
+ try self.parse_source.reader().readNoEof(mem.asBytes(&shdr));
- // ELF endianness matches native endianness.
- if (self.elf_header.endian == std.builtin.endian) return phdr;
+ // ELF endianness matches native endianness.
+ if (self.elf_header.endian == std.builtin.endian) return shdr;
- // Convert fields to native endianness.
- return Elf64_Phdr{
- .p_type = @byteSwap(@TypeOf(phdr.p_type), phdr.p_type),
- .p_offset = @byteSwap(@TypeOf(phdr.p_offset), phdr.p_offset),
- .p_vaddr = @byteSwap(@TypeOf(phdr.p_vaddr), phdr.p_vaddr),
- .p_paddr = @byteSwap(@TypeOf(phdr.p_paddr), phdr.p_paddr),
- .p_filesz = @byteSwap(@TypeOf(phdr.p_filesz), phdr.p_filesz),
- .p_memsz = @byteSwap(@TypeOf(phdr.p_memsz), phdr.p_memsz),
- .p_flags = @byteSwap(@TypeOf(phdr.p_flags), phdr.p_flags),
- .p_align = @byteSwap(@TypeOf(phdr.p_align), phdr.p_align),
- };
- }
+ // Convert fields to native endianness.
+ return Elf64_Shdr{
+ .sh_name = @byteSwap(@TypeOf(shdr.sh_name), shdr.sh_name),
+ .sh_type = @byteSwap(@TypeOf(shdr.sh_type), shdr.sh_type),
+ .sh_flags = @byteSwap(@TypeOf(shdr.sh_flags), shdr.sh_flags),
+ .sh_addr = @byteSwap(@TypeOf(shdr.sh_addr), shdr.sh_addr),
+ .sh_offset = @byteSwap(@TypeOf(shdr.sh_offset), shdr.sh_offset),
+ .sh_size = @byteSwap(@TypeOf(shdr.sh_size), shdr.sh_size),
+ .sh_link = @byteSwap(@TypeOf(shdr.sh_link), shdr.sh_link),
+ .sh_info = @byteSwap(@TypeOf(shdr.sh_info), shdr.sh_info),
+ .sh_addralign = @byteSwap(@TypeOf(shdr.sh_addralign), shdr.sh_addralign),
+ .sh_entsize = @byteSwap(@TypeOf(shdr.sh_entsize), shdr.sh_entsize),
+ };
+ }
- var phdr: Elf32_Phdr = undefined;
- const offset = self.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * self.index;
- try preadNoEof(self.file, mem.asBytes(&phdr), offset);
-
- // ELF endianness does NOT match native endianness.
- if (self.elf_header.endian != std.builtin.endian) {
- // Convert fields to native endianness.
- phdr = .{
- .p_type = @byteSwap(@TypeOf(phdr.p_type), phdr.p_type),
- .p_offset = @byteSwap(@TypeOf(phdr.p_offset), phdr.p_offset),
- .p_vaddr = @byteSwap(@TypeOf(phdr.p_vaddr), phdr.p_vaddr),
- .p_paddr = @byteSwap(@TypeOf(phdr.p_paddr), phdr.p_paddr),
- .p_filesz = @byteSwap(@TypeOf(phdr.p_filesz), phdr.p_filesz),
- .p_memsz = @byteSwap(@TypeOf(phdr.p_memsz), phdr.p_memsz),
- .p_flags = @byteSwap(@TypeOf(phdr.p_flags), phdr.p_flags),
- .p_align = @byteSwap(@TypeOf(phdr.p_align), phdr.p_align),
- };
- }
-
- // Convert 32-bit header to 64-bit.
- return Elf64_Phdr{
- .p_type = phdr.p_type,
- .p_offset = phdr.p_offset,
- .p_vaddr = phdr.p_vaddr,
- .p_paddr = phdr.p_paddr,
- .p_filesz = phdr.p_filesz,
- .p_memsz = phdr.p_memsz,
- .p_flags = phdr.p_flags,
- .p_align = phdr.p_align,
- };
- }
-};
-
-pub const SectionHeaderIterator = struct {
- elf_header: Header,
- file: File,
- index: usize = 0,
-
- pub fn next(self: *SectionHeaderIterator) !?Elf64_Shdr {
- if (self.index >= self.elf_header.shnum) return null;
- defer self.index += 1;
-
- if (self.elf_header.is_64) {
- var shdr: Elf64_Shdr = undefined;
+ var shdr: Elf32_Shdr = undefined;
const offset = self.elf_header.shoff + @sizeOf(@TypeOf(shdr)) * self.index;
- try preadNoEof(self.file, mem.asBytes(&shdr), offset);
+ try self.parse_source.seekableStream().seekTo(offset);
+ try self.parse_source.reader().readNoEof(mem.asBytes(&shdr));
- // ELF endianness matches native endianness.
- if (self.elf_header.endian == std.builtin.endian) return shdr;
+ // ELF endianness does NOT match native endianness.
+ if (self.elf_header.endian != std.builtin.endian) {
+ // Convert fields to native endianness.
+ shdr = .{
+ .sh_name = @byteSwap(@TypeOf(shdr.sh_name), shdr.sh_name),
+ .sh_type = @byteSwap(@TypeOf(shdr.sh_type), shdr.sh_type),
+ .sh_flags = @byteSwap(@TypeOf(shdr.sh_flags), shdr.sh_flags),
+ .sh_addr = @byteSwap(@TypeOf(shdr.sh_addr), shdr.sh_addr),
+ .sh_offset = @byteSwap(@TypeOf(shdr.sh_offset), shdr.sh_offset),
+ .sh_size = @byteSwap(@TypeOf(shdr.sh_size), shdr.sh_size),
+ .sh_link = @byteSwap(@TypeOf(shdr.sh_link), shdr.sh_link),
+ .sh_info = @byteSwap(@TypeOf(shdr.sh_info), shdr.sh_info),
+ .sh_addralign = @byteSwap(@TypeOf(shdr.sh_addralign), shdr.sh_addralign),
+ .sh_entsize = @byteSwap(@TypeOf(shdr.sh_entsize), shdr.sh_entsize),
+ };
+ }
- // Convert fields to native endianness.
+ // Convert 32-bit header to 64-bit.
return Elf64_Shdr{
- .sh_name = @byteSwap(@TypeOf(shdr.sh_name), shdr.sh_name),
- .sh_type = @byteSwap(@TypeOf(shdr.sh_type), shdr.sh_type),
- .sh_flags = @byteSwap(@TypeOf(shdr.sh_flags), shdr.sh_flags),
- .sh_addr = @byteSwap(@TypeOf(shdr.sh_addr), shdr.sh_addr),
- .sh_offset = @byteSwap(@TypeOf(shdr.sh_offset), shdr.sh_offset),
- .sh_size = @byteSwap(@TypeOf(shdr.sh_size), shdr.sh_size),
- .sh_link = @byteSwap(@TypeOf(shdr.sh_link), shdr.sh_link),
- .sh_info = @byteSwap(@TypeOf(shdr.sh_info), shdr.sh_info),
- .sh_addralign = @byteSwap(@TypeOf(shdr.sh_addralign), shdr.sh_addralign),
- .sh_entsize = @byteSwap(@TypeOf(shdr.sh_entsize), shdr.sh_entsize),
+ .sh_name = shdr.sh_name,
+ .sh_type = shdr.sh_type,
+ .sh_flags = shdr.sh_flags,
+ .sh_addr = shdr.sh_addr,
+ .sh_offset = shdr.sh_offset,
+ .sh_size = shdr.sh_size,
+ .sh_link = shdr.sh_link,
+ .sh_info = shdr.sh_info,
+ .sh_addralign = shdr.sh_addralign,
+ .sh_entsize = shdr.sh_entsize,
};
}
-
- var shdr: Elf32_Shdr = undefined;
- const offset = self.elf_header.shoff + @sizeOf(@TypeOf(shdr)) * self.index;
- try preadNoEof(self.file, mem.asBytes(&shdr), offset);
-
- // ELF endianness does NOT match native endianness.
- if (self.elf_header.endian != std.builtin.endian) {
- // Convert fields to native endianness.
- shdr = .{
- .sh_name = @byteSwap(@TypeOf(shdr.sh_name), shdr.sh_name),
- .sh_type = @byteSwap(@TypeOf(shdr.sh_type), shdr.sh_type),
- .sh_flags = @byteSwap(@TypeOf(shdr.sh_flags), shdr.sh_flags),
- .sh_addr = @byteSwap(@TypeOf(shdr.sh_addr), shdr.sh_addr),
- .sh_offset = @byteSwap(@TypeOf(shdr.sh_offset), shdr.sh_offset),
- .sh_size = @byteSwap(@TypeOf(shdr.sh_size), shdr.sh_size),
- .sh_link = @byteSwap(@TypeOf(shdr.sh_link), shdr.sh_link),
- .sh_info = @byteSwap(@TypeOf(shdr.sh_info), shdr.sh_info),
- .sh_addralign = @byteSwap(@TypeOf(shdr.sh_addralign), shdr.sh_addralign),
- .sh_entsize = @byteSwap(@TypeOf(shdr.sh_entsize), shdr.sh_entsize),
- };
- }
-
- // Convert 32-bit header to 64-bit.
- return Elf64_Shdr{
- .sh_name = shdr.sh_name,
- .sh_type = shdr.sh_type,
- .sh_flags = shdr.sh_flags,
- .sh_addr = shdr.sh_addr,
- .sh_offset = shdr.sh_offset,
- .sh_size = shdr.sh_size,
- .sh_link = shdr.sh_link,
- .sh_info = shdr.sh_info,
- .sh_addralign = shdr.sh_addralign,
- .sh_entsize = shdr.sh_entsize,
- };
- }
-};
+ };
+}
pub fn int(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @TypeOf(int_64) {
if (is_64) {
@@ -549,28 +562,6 @@ pub fn int32(need_bswap: bool, int_32: anytype, comptime Int64: anytype) Int64 {
}
}
-fn preadNoEof(file: std.fs.File, buf: []u8, offset: u64) !void {
- var i: usize = 0;
- while (i < buf.len) {
- const len = file.pread(buf[i .. buf.len - i], offset + i) catch |err| switch (err) {
- error.SystemResources => return error.SystemResources,
- error.IsDir => return error.UnableToReadElfFile,
- error.OperationAborted => return error.UnableToReadElfFile,
- error.BrokenPipe => return error.UnableToReadElfFile,
- error.Unseekable => return error.UnableToReadElfFile,
- error.ConnectionResetByPeer => return error.UnableToReadElfFile,
- error.ConnectionTimedOut => return error.UnableToReadElfFile,
- error.InputOutput => return error.FileSystem,
- error.Unexpected => return error.Unexpected,
- error.WouldBlock => return error.Unexpected,
- error.NotOpenForReading => return error.Unexpected,
- error.AccessDenied => return error.Unexpected,
- };
- if (len == 0) return error.UnexpectedEndOfFile;
- i += len;
- }
-}
-
pub const EI_NIDENT = 16;
pub const EI_CLASS = 4;
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index fca21000cf..1f924bf00c 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -709,6 +709,87 @@ fn formatFloatValue(
return formatBuf(buf_stream.getWritten(), options, writer);
}
+fn formatSliceHexImpl(comptime uppercase: bool) type {
+ const charset = "0123456789" ++ if (uppercase) "ABCDEF" else "abcdef";
+
+ return struct {
+ pub fn f(
+ bytes: []const u8,
+ comptime fmt: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+ ) !void {
+ var buf: [2]u8 = undefined;
+
+ for (bytes) |c| {
+ buf[0] = charset[c >> 4];
+ buf[1] = charset[c & 15];
+ try writer.writeAll(&buf);
+ }
+ }
+ };
+}
+
+const formatSliceHexLower = formatSliceHexImpl(false).f;
+const formatSliceHexUpper = formatSliceHexImpl(true).f;
+
+/// Return a Formatter for a []const u8 where every byte is formatted as a pair
+/// of lowercase hexadecimal digits.
+pub fn fmtSliceHexLower(bytes: []const u8) std.fmt.Formatter(formatSliceHexLower) {
+ return .{ .data = bytes };
+}
+
+/// Return a Formatter for a []const u8 where every byte is formatted as a pair
+/// of uppercase hexadecimal digits.
+pub fn fmtSliceHexUpper(bytes: []const u8) std.fmt.Formatter(formatSliceHexUpper) {
+ return .{ .data = bytes };
+}
+
+fn formatSliceEscapeImpl(comptime uppercase: bool) type {
+ const charset = "0123456789" ++ if (uppercase) "ABCDEF" else "abcdef";
+
+ return struct {
+ pub fn f(
+ bytes: []const u8,
+ comptime fmt: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+ ) !void {
+ var buf: [4]u8 = undefined;
+
+ buf[0] = '\\';
+ buf[1] = 'x';
+
+ for (bytes) |c| {
+ if (std.ascii.isPrint(c)) {
+ try writer.writeByte(c);
+ } else {
+ buf[2] = charset[c >> 4];
+ buf[3] = charset[c & 15];
+ try writer.writeAll(&buf);
+ }
+ }
+ }
+ };
+}
+
+const formatSliceEscapeLower = formatSliceEscapeImpl(false).f;
+const formatSliceEscapeUpper = formatSliceEscapeImpl(true).f;
+
+/// Return a Formatter for a []const u8 where every non-printable ASCII
+/// character is escaped as \xNN, where NN is the character in lowercase
+/// hexadecimal notation.
+pub fn fmtSliceEscapeLower(bytes: []const u8) std.fmt.Formatter(formatSliceEscapeLower) {
+ return .{ .data = bytes };
+}
+
+/// Return a Formatter for a []const u8 where every non-printable ASCII
+/// character is escaped as \xNN, where NN is the character in uppercase
+/// hexadecimal notation.
+pub fn fmtSliceEscapeUpper(bytes: []const u8) std.fmt.Formatter(formatSliceEscapeUpper) {
+ return .{ .data = bytes };
+}
+
pub fn formatText(
bytes: []const u8,
comptime fmt: []const u8,
@@ -717,21 +798,18 @@ pub fn formatText(
) !void {
if (comptime std.mem.eql(u8, fmt, "s")) {
return formatBuf(bytes, options, writer);
- } else if (comptime (std.mem.eql(u8, fmt, "x") or std.mem.eql(u8, fmt, "X"))) {
- for (bytes) |c| {
- try formatInt(c, 16, fmt[0] == 'X', FormatOptions{ .width = 2, .fill = '0' }, writer);
- }
- return;
- } else if (comptime (std.mem.eql(u8, fmt, "e") or std.mem.eql(u8, fmt, "E"))) {
- for (bytes) |c| {
- if (std.ascii.isPrint(c)) {
- try writer.writeByte(c);
- } else {
- try writer.writeAll("\\x");
- try formatInt(c, 16, fmt[0] == 'E', FormatOptions{ .width = 2, .fill = '0' }, writer);
- }
- }
- return;
+ } else if (comptime (std.mem.eql(u8, fmt, "x"))) {
+ @compileError("specifier 'x' has been deprecated, wrap your argument in std.fmt.fmtSliceHexLower instead");
+ } else if (comptime (std.mem.eql(u8, fmt, "X"))) {
+ @compileError("specifier 'X' has been deprecated, wrap your argument in std.fmt.fmtSliceHexUpper instead");
+ } else if (comptime (std.mem.eql(u8, fmt, "e"))) {
+ @compileError("specifier 'e' has been deprecated, wrap your argument in std.fmt.fmtSliceEscapeLower instead");
+ } else if (comptime (std.mem.eql(u8, fmt, "E"))) {
+ @compileError("specifier 'E' has been deprecated, wrap your argument in std.fmt.fmtSliceEscapeUpper instead");
+ } else if (comptime std.mem.eql(u8, fmt, "z")) {
+ @compileError("specifier 'z' has been deprecated, wrap your argument in std.zig.fmtId instead");
+ } else if (comptime std.mem.eql(u8, fmt, "Z")) {
+ @compileError("specifier 'Z' has been deprecated, wrap your argument in std.zig.fmtEscapes instead");
} else {
@compileError("Unsupported format string '" ++ fmt ++ "' for type '" ++ @typeName(@TypeOf(value)) ++ "'");
}
@@ -1693,9 +1771,9 @@ test "slice" {
}
test "escape non-printable" {
- try expectFmt("abc", "{e}", .{"abc"});
- try expectFmt("ab\\xffc", "{e}", .{"ab\xffc"});
- try expectFmt("ab\\xFFc", "{E}", .{"ab\xffc"});
+ try expectFmt("abc", "{s}", .{fmtSliceEscapeLower("abc")});
+ try expectFmt("ab\\xffc", "{s}", .{fmtSliceEscapeLower("ab\xffc")});
+ try expectFmt("ab\\xFFc", "{s}", .{fmtSliceEscapeUpper("ab\xffc")});
}
test "pointer" {
@@ -1968,13 +2046,13 @@ test "struct.zero-size" {
test "bytes.hex" {
const some_bytes = "\xCA\xFE\xBA\xBE";
- try expectFmt("lowercase: cafebabe\n", "lowercase: {x}\n", .{some_bytes});
- try expectFmt("uppercase: CAFEBABE\n", "uppercase: {X}\n", .{some_bytes});
+ try expectFmt("lowercase: cafebabe\n", "lowercase: {x}\n", .{fmtSliceHexLower(some_bytes)});
+ try expectFmt("uppercase: CAFEBABE\n", "uppercase: {X}\n", .{fmtSliceHexUpper(some_bytes)});
//Test Slices
- try expectFmt("uppercase: CAFE\n", "uppercase: {X}\n", .{some_bytes[0..2]});
- try expectFmt("lowercase: babe\n", "lowercase: {x}\n", .{some_bytes[2..]});
+ try expectFmt("uppercase: CAFE\n", "uppercase: {X}\n", .{fmtSliceHexUpper(some_bytes[0..2])});
+ try expectFmt("lowercase: babe\n", "lowercase: {x}\n", .{fmtSliceHexLower(some_bytes[2..])});
const bytes_with_zeros = "\x00\x0E\xBA\xBE";
- try expectFmt("lowercase: 000ebabe\n", "lowercase: {x}\n", .{bytes_with_zeros});
+ try expectFmt("lowercase: 000ebabe\n", "lowercase: {x}\n", .{fmtSliceHexLower(bytes_with_zeros)});
}
pub const trim = @compileError("deprecated; use std.mem.trim with std.ascii.spaces instead");
@@ -2002,9 +2080,9 @@ pub fn hexToBytes(out: []u8, input: []const u8) ![]u8 {
test "hexToBytes" {
var buf: [32]u8 = undefined;
- try expectFmt("90" ** 32, "{X}", .{try hexToBytes(&buf, "90" ** 32)});
- try expectFmt("ABCD", "{X}", .{try hexToBytes(&buf, "ABCD")});
- try expectFmt("", "{X}", .{try hexToBytes(&buf, "")});
+ try expectFmt("90" ** 32, "{s}", .{fmtSliceHexUpper(try hexToBytes(&buf, "90" ** 32))});
+ try expectFmt("ABCD", "{s}", .{fmtSliceHexUpper(try hexToBytes(&buf, "ABCD"))});
+ try expectFmt("", "{s}", .{fmtSliceHexUpper(try hexToBytes(&buf, ""))});
std.testing.expectError(error.InvalidCharacter, hexToBytes(&buf, "012Z"));
std.testing.expectError(error.InvalidLength, hexToBytes(&buf, "AAA"));
std.testing.expectError(error.NoSpaceLeft, hexToBytes(buf[0..1], "ABAB"));
diff --git a/lib/std/fs/get_app_data_dir.zig b/lib/std/fs/get_app_data_dir.zig
index 18f8458eb2..02c36f736a 100644
--- a/lib/std/fs/get_app_data_dir.zig
+++ b/lib/std/fs/get_app_data_dir.zig
@@ -60,7 +60,7 @@ pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataD
var dir_path_ptr: [*:0]u8 = undefined;
// TODO look into directory_which
const be_user_settings = 0xbbe;
- const rc = os.system.find_directory(be_user_settings, -1, true, dir_path_ptr, 1) ;
+ const rc = os.system.find_directory(be_user_settings, -1, true, dir_path_ptr, 1);
const settings_dir = try allocator.dupeZ(u8, mem.spanZ(dir_path_ptr));
defer allocator.free(settings_dir);
switch (rc) {
diff --git a/lib/std/io/writer.zig b/lib/std/io/writer.zig
index 0a9edb425a..6f9386b8de 100644
--- a/lib/std/io/writer.zig
+++ b/lib/std/io/writer.zig
@@ -4,6 +4,7 @@
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("../std.zig");
+const assert = std.debug.assert;
const builtin = std.builtin;
const mem = std.mem;
@@ -86,5 +87,11 @@ pub fn Writer(
mem.writeInt(T, &bytes, value, endian);
return self.writeAll(&bytes);
}
+
+ pub fn writeStruct(self: Self, value: anytype) Error!void {
+ // Only extern and packed structs have defined in-memory layout.
+ comptime assert(@typeInfo(@TypeOf(value)).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto);
+ return self.writeAll(mem.asBytes(&value));
+ }
};
}
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 6e7c5c0915..d71cafe5ef 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -1330,3 +1330,59 @@ test "math.comptime" {
comptime const v = sin(@as(f32, 1)) + ln(@as(f32, 5));
testing.expect(v == sin(@as(f32, 1)) + ln(@as(f32, 5)));
}
+
+/// Returns a mask of all ones if value is true,
+/// and a mask of all zeroes if value is false.
+/// Compiles to one instruction for register sized integers.
+pub fn boolMask(comptime MaskInt: type, value: bool) callconv(.Inline) MaskInt {
+ if (@typeInfo(MaskInt) != .Int)
+ @compileError("boolMask requires an integer mask type.");
+
+ if (MaskInt == u0 or MaskInt == i0)
+ @compileError("boolMask cannot convert to u0 or i0, they are too small.");
+
+ // The u1 and i1 cases tend to overflow,
+ // so we special case them here.
+ if (MaskInt == u1) return @boolToInt(value);
+ if (MaskInt == i1) {
+ // The @as here is a workaround for #7950
+ return @bitCast(i1, @as(u1, @boolToInt(value)));
+ }
+
+ // At comptime, -% is disallowed on unsigned values.
+ // So we need to jump through some hoops in that case.
+ // This is a workaround for #7951
+ if (@typeInfo(@TypeOf(.{value})).Struct.fields[0].is_comptime) {
+ // Since it's comptime, we don't need this to generate nice code.
+ // We can just do a branch here.
+ return if (value) ~@as(MaskInt, 0) else 0;
+ }
+
+ return -%@intCast(MaskInt, @boolToInt(value));
+}
+
+test "boolMask" {
+ const runTest = struct {
+ fn runTest() void {
+ testing.expectEqual(@as(u1, 0), boolMask(u1, false));
+ testing.expectEqual(@as(u1, 1), boolMask(u1, true));
+
+ testing.expectEqual(@as(i1, 0), boolMask(i1, false));
+ testing.expectEqual(@as(i1, -1), boolMask(i1, true));
+
+ testing.expectEqual(@as(u13, 0), boolMask(u13, false));
+ testing.expectEqual(@as(u13, 0x1FFF), boolMask(u13, true));
+
+ testing.expectEqual(@as(i13, 0), boolMask(i13, false));
+ testing.expectEqual(@as(i13, -1), boolMask(i13, true));
+
+ testing.expectEqual(@as(u32, 0), boolMask(u32, false));
+ testing.expectEqual(@as(u32, 0xFFFF_FFFF), boolMask(u32, true));
+
+ testing.expectEqual(@as(i32, 0), boolMask(i32, false));
+ testing.expectEqual(@as(i32, -1), boolMask(i32, true));
+ }
+ }.runTest;
+ runTest();
+ comptime runTest();
+}
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 5f23a10401..581fb16e6c 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -25,6 +25,14 @@ pub const page_size = switch (builtin.arch) {
else => 4 * 1024,
};
+/// The standard library currently thoroughly depends on byte size
+/// being 8 bits. (see the use of u8 throughout allocation code as
+/// the "byte" type.) Code which depends on this can reference this
+/// declaration. If we ever try to port the standard library to a
+/// non-8-bit-byte platform, this will allow us to search for things
+/// which need to be updated.
+pub const byte_size_in_bits = 8;
+
pub const Allocator = @import("mem/Allocator.zig");
/// Detects and asserts if the std.mem.Allocator interface is violated by the caller
diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig
index 3306fd3ef0..99a9fff7f0 100644
--- a/lib/std/multi_array_list.zig
+++ b/lib/std/multi_array_list.zig
@@ -136,7 +136,7 @@ pub fn MultiArrayList(comptime S: type) type {
const slices = self.slice();
var result: S = undefined;
inline for (fields) |field_info, i| {
- @field(elem, field_info.name) = slices.items(@intToEnum(Field, i))[index];
+ @field(result, field_info.name) = slices.items(@intToEnum(Field, i))[index];
}
return result;
}
diff --git a/lib/std/os/bits/haiku.zig b/lib/std/os/bits/haiku.zig
index 59631fd40e..32093570d7 100644
--- a/lib/std/os/bits/haiku.zig
+++ b/lib/std/os/bits/haiku.zig
@@ -180,8 +180,8 @@ pub const dirent = extern struct {
};
pub const image_info = extern struct {
- id: u32, //image_id
- type: u32, // image_type
+ id: u32,
+ type: u32,
sequence: i32,
init_order: i32,
init_routine: *c_void,
@@ -806,17 +806,16 @@ pub const Sigaction = extern struct {
pub const _SIG_WORDS = 4;
pub const _SIG_MAXSIG = 128;
-
-pub inline fn _SIG_IDX(sig: usize) usize {
+pub fn _SIG_IDX(sig: usize) callconv(.Inline) usize {
return sig - 1;
}
-pub inline fn _SIG_WORD(sig: usize) usize {
+pub fn _SIG_WORD(sig: usize) callconv(.Inline) usize {
return_SIG_IDX(sig) >> 5;
}
-pub inline fn _SIG_BIT(sig: usize) usize {
+pub fn _SIG_BIT(sig: usize) callconv(.Inline) usize {
return 1 << (_SIG_IDX(sig) & 31);
}
-pub inline fn _SIG_VALID(sig: usize) usize {
+pub fn _SIG_VALID(sig: usize) callconv(.Inline) bool {
return sig <= _SIG_MAXSIG and sig > 0;
}
diff --git a/lib/std/os/bits/linux.zig b/lib/std/os/bits/linux.zig
index 8d3d5c49a3..21fa058aef 100644
--- a/lib/std/os/bits/linux.zig
+++ b/lib/std/os/bits/linux.zig
@@ -2244,3 +2244,8 @@ pub const MADV_COLD = 20;
pub const MADV_PAGEOUT = 21;
pub const MADV_HWPOISON = 100;
pub const MADV_SOFT_OFFLINE = 101;
+
+pub const __kernel_timespec = extern struct {
+ tv_sec: i64,
+ tv_nsec: i64,
+};
diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig
index b47d4c7b32..340020cf9b 100644
--- a/lib/std/os/linux/io_uring.zig
+++ b/lib/std/os/linux/io_uring.zig
@@ -526,7 +526,7 @@ pub const IO_Uring = struct {
pub fn timeout(
self: *IO_Uring,
user_data: u64,
- ts: *const os.timespec,
+ ts: *const os.__kernel_timespec,
count: u32,
flags: u32,
) !*io_uring_sqe {
@@ -884,7 +884,7 @@ pub fn io_uring_prep_close(sqe: *io_uring_sqe, fd: os.fd_t) void {
pub fn io_uring_prep_timeout(
sqe: *io_uring_sqe,
- ts: *const os.timespec,
+ ts: *const os.__kernel_timespec,
count: u32,
flags: u32,
) void {
@@ -1339,7 +1339,7 @@ test "timeout (after a relative time)" {
const ms = 10;
const margin = 5;
- const ts = os.timespec{ .tv_sec = 0, .tv_nsec = ms * 1000000 };
+ const ts = os.__kernel_timespec{ .tv_sec = 0, .tv_nsec = ms * 1000000 };
const started = std.time.milliTimestamp();
const sqe = try ring.timeout(0x55555555, &ts, 0, 0);
@@ -1366,7 +1366,7 @@ test "timeout (after a number of completions)" {
};
defer ring.deinit();
- const ts = os.timespec{ .tv_sec = 3, .tv_nsec = 0 };
+ const ts = os.__kernel_timespec{ .tv_sec = 3, .tv_nsec = 0 };
const count_completions: u64 = 1;
const sqe_timeout = try ring.timeout(0x66666666, &ts, count_completions, 0);
testing.expectEqual(linux.IORING_OP.TIMEOUT, sqe_timeout.opcode);
@@ -1399,7 +1399,7 @@ test "timeout_remove" {
};
defer ring.deinit();
- const ts = os.timespec{ .tv_sec = 3, .tv_nsec = 0 };
+ const ts = os.__kernel_timespec{ .tv_sec = 3, .tv_nsec = 0 };
const sqe_timeout = try ring.timeout(0x88888888, &ts, 0, 0);
testing.expectEqual(linux.IORING_OP.TIMEOUT, sqe_timeout.opcode);
testing.expectEqual(@as(u64, 0x88888888), sqe_timeout.user_data);
diff --git a/lib/std/std.zig b/lib/std/std.zig
index c0d97a9d9c..a7e5bcb682 100644
--- a/lib/std/std.zig
+++ b/lib/std/std.zig
@@ -18,6 +18,8 @@ pub const BufSet = @import("buf_set.zig").BufSet;
pub const ChildProcess = @import("child_process.zig").ChildProcess;
pub const ComptimeStringMap = @import("comptime_string_map.zig").ComptimeStringMap;
pub const DynLib = @import("dynamic_library.zig").DynLib;
+pub const DynamicBitSet = bit_set.DynamicBitSet;
+pub const DynamicBitSetUnmanaged = bit_set.DynamicBitSetUnmanaged;
pub const HashMap = hash_map.HashMap;
pub const HashMapUnmanaged = hash_map.HashMapUnmanaged;
pub const MultiArrayList = @import("multi_array_list.zig").MultiArrayList;
@@ -29,6 +31,7 @@ pub const PriorityQueue = @import("priority_queue.zig").PriorityQueue;
pub const Progress = @import("Progress.zig");
pub const SemanticVersion = @import("SemanticVersion.zig");
pub const SinglyLinkedList = @import("linked_list.zig").SinglyLinkedList;
+pub const StaticBitSet = bit_set.StaticBitSet;
pub const StringHashMap = hash_map.StringHashMap;
pub const StringHashMapUnmanaged = hash_map.StringHashMapUnmanaged;
pub const StringArrayHashMap = array_hash_map.StringArrayHashMap;
@@ -40,6 +43,7 @@ pub const Thread = @import("Thread.zig");
pub const array_hash_map = @import("array_hash_map.zig");
pub const atomic = @import("atomic.zig");
pub const base64 = @import("base64.zig");
+pub const bit_set = @import("bit_set.zig");
pub const build = @import("build.zig");
pub const builtin = @import("builtin.zig");
pub const c = @import("c.zig");
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
index 7a6404fbb2..9b755c2033 100644
--- a/lib/std/zig/parse.zig
+++ b/lib/std/zig/parse.zig
@@ -3714,7 +3714,6 @@ const Parser = struct {
if (p.eatToken(.r_paren)) |_| {
return SmallSpan{ .zero_or_one = 0 };
}
- continue;
},
.r_paren => return SmallSpan{ .zero_or_one = 0 },
else => {
@@ -3728,14 +3727,7 @@ const Parser = struct {
const param_two = while (true) {
switch (p.token_tags[p.nextToken()]) {
- .comma => {
- if (p.eatToken(.r_paren)) |_| {
- return SmallSpan{ .zero_or_one = param_one };
- }
- const param = try p.expectParamDecl();
- if (param != 0) break param;
- continue;
- },
+ .comma => {},
.r_paren => return SmallSpan{ .zero_or_one = param_one },
.colon, .r_brace, .r_bracket => {
p.tok_i -= 1;
@@ -3748,6 +3740,11 @@ const Parser = struct {
try p.warnExpected(.comma);
},
}
+ if (p.eatToken(.r_paren)) |_| {
+ return SmallSpan{ .zero_or_one = param_one };
+ }
+ const param = try p.expectParamDecl();
+ if (param != 0) break param;
} else unreachable;
var list = std.ArrayList(Node.Index).init(p.gpa);
@@ -3757,17 +3754,7 @@ const Parser = struct {
while (true) {
switch (p.token_tags[p.nextToken()]) {
- .comma => {
- if (p.token_tags[p.tok_i] == .r_paren) {
- p.tok_i += 1;
- return SmallSpan{ .multi = list.toOwnedSlice() };
- }
- const param = try p.expectParamDecl();
- if (param != 0) {
- try list.append(param);
- }
- continue;
- },
+ .comma => {},
.r_paren => return SmallSpan{ .multi = list.toOwnedSlice() },
.colon, .r_brace, .r_bracket => {
p.tok_i -= 1;
@@ -3780,6 +3767,11 @@ const Parser = struct {
try p.warnExpected(.comma);
},
}
+ if (p.eatToken(.r_paren)) |_| {
+ return SmallSpan{ .multi = list.toOwnedSlice() };
+ }
+ const param = try p.expectParamDecl();
+ if (param != 0) try list.append(param);
}
}
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index 2b9e3fb03c..c083d23932 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -1108,6 +1108,25 @@ test "zig fmt: comment to disable/enable zig fmt first" {
);
}
+test "zig fmt: 'zig fmt: (off|on)' can be surrounded by arbitrary whitespace" {
+ try testTransform(
+ \\// Test trailing comma syntax
+ \\// zig fmt: off
+ \\
+ \\const struct_trailing_comma = struct { x: i32, y: i32, };
+ \\
+ \\// zig fmt: on
+ ,
+ \\// Test trailing comma syntax
+ \\// zig fmt: off
+ \\
+ \\const struct_trailing_comma = struct { x: i32, y: i32, };
+ \\
+ \\// zig fmt: on
+ \\
+ );
+}
+
test "zig fmt: comment to disable/enable zig fmt" {
try testTransform(
\\const a = b;
@@ -4549,6 +4568,18 @@ test "recovery: missing for payload" {
});
}
+test "recovery: missing comma in params" {
+ try testError(
+ \\fn foo(comptime bool what what) void { }
+ \\fn bar(a: i32, b: i32 c) void { }
+ \\
+ , &[_]Error{
+ .expected_token,
+ .expected_token,
+ .expected_token,
+ });
+}
+
const std = @import("std");
const mem = std.mem;
const warn = std.debug.warn;
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index e12f7bc733..069b62af79 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -2352,18 +2352,24 @@ fn renderComments(ais: *Ais, tree: ast.Tree, start: usize, end: usize) Error!boo
}
}
- try ais.writer().print("{s}\n", .{trimmed_comment});
- index = 1 + (newline orelse return true);
+ index = 1 + (newline orelse end - 1);
- if (ais.disabled_offset) |disabled_offset| {
- if (mem.eql(u8, trimmed_comment, "// zig fmt: on")) {
- // write the source for which formatting was disabled directly
- // to the underlying writer, fixing up invaild whitespace
- try writeFixingWhitespace(ais.underlying_writer, tree.source[disabled_offset..index]);
- ais.disabled_offset = null;
- }
- } else if (mem.eql(u8, trimmed_comment, "// zig fmt: off")) {
+ const comment_content = mem.trimLeft(u8, trimmed_comment["//".len..], &std.ascii.spaces);
+ if (ais.disabled_offset != null and mem.eql(u8, comment_content, "zig fmt: on")) {
+ // Write the source for which formatting was disabled directly
+ // to the underlying writer, fixing up invalid whitespace.
+ const disabled_source = tree.source[ais.disabled_offset.?..comment_start];
+ try writeFixingWhitespace(ais.underlying_writer, disabled_source);
+ ais.disabled_offset = null;
+ // Write with the canonical single space.
+ try ais.writer().writeAll("// zig fmt: on\n");
+ } else if (ais.disabled_offset == null and mem.eql(u8, comment_content, "zig fmt: off")) {
+ // Write with the canonical single space.
+ try ais.writer().writeAll("// zig fmt: off\n");
ais.disabled_offset = index;
+ } else {
+ // Write the comment minus trailing whitespace.
+ try ais.writer().print("{s}\n", .{trimmed_comment});
}
}
diff --git a/src/Cache.zig b/src/Cache.zig
index 57ff9227fa..f2fdafff9b 100644
--- a/src/Cache.zig
+++ b/src/Cache.zig
@@ -153,7 +153,11 @@ pub const HashHelper = struct {
hh.hasher.final(&bin_digest);
var out_digest: [hex_digest_len]u8 = undefined;
- _ = std.fmt.bufPrint(&out_digest, "{x}", .{bin_digest}) catch unreachable;
+ _ = std.fmt.bufPrint(
+ &out_digest,
+ "{s}",
+ .{std.fmt.fmtSliceHexLower(&bin_digest)},
+ ) catch unreachable;
return out_digest;
}
};
@@ -250,7 +254,11 @@ pub const Manifest = struct {
var bin_digest: BinDigest = undefined;
self.hash.hasher.final(&bin_digest);
- _ = std.fmt.bufPrint(&self.hex_digest, "{x}", .{bin_digest}) catch unreachable;
+ _ = std.fmt.bufPrint(
+ &self.hex_digest,
+ "{s}",
+ .{std.fmt.fmtSliceHexLower(&bin_digest)},
+ ) catch unreachable;
self.hash.hasher = hasher_init;
self.hash.hasher.update(&bin_digest);
@@ -549,7 +557,11 @@ pub const Manifest = struct {
self.hash.hasher.final(&bin_digest);
var out_digest: [hex_digest_len]u8 = undefined;
- _ = std.fmt.bufPrint(&out_digest, "{x}", .{bin_digest}) catch unreachable;
+ _ = std.fmt.bufPrint(
+ &out_digest,
+ "{s}",
+ .{std.fmt.fmtSliceHexLower(&bin_digest)},
+ ) catch unreachable;
return out_digest;
}
@@ -565,7 +577,11 @@ pub const Manifest = struct {
var encoded_digest: [hex_digest_len]u8 = undefined;
for (self.files.items) |file| {
- _ = std.fmt.bufPrint(&encoded_digest, "{x}", .{file.bin_digest}) catch unreachable;
+ _ = std.fmt.bufPrint(
+ &encoded_digest,
+ "{s}",
+ .{std.fmt.fmtSliceHexLower(&file.bin_digest)},
+ ) catch unreachable;
try writer.print("{d} {d} {d} {s} {s}\n", .{
file.stat.size,
file.stat.inode,
diff --git a/src/Module.zig b/src/Module.zig
index 3a72f1272b..e6d509ace5 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -4083,15 +4083,15 @@ pub fn namedFieldPtr(
const child_type = try val.toType(scope.arena());
switch (child_type.zigTypeTag()) {
.ErrorSet => {
+ var name: []const u8 = undefined;
// TODO resolve inferred error sets
- const entry = if (val.castTag(.error_set)) |payload|
- (payload.data.fields.getEntry(field_name) orelse
- return mod.fail(scope, src, "no error named '{s}' in '{}'", .{ field_name, child_type })).*
+ if (val.castTag(.error_set)) |payload|
+ name = (payload.data.fields.getEntry(field_name) orelse return mod.fail(scope, src, "no error named '{s}' in '{}'", .{ field_name, child_type })).key
else
- try mod.getErrorValue(field_name);
+ name = (try mod.getErrorValue(field_name)).key;
const result_type = if (child_type.tag() == .anyerror)
- try Type.Tag.error_set_single.create(scope.arena(), entry.key)
+ try Type.Tag.error_set_single.create(scope.arena(), name)
else
child_type;
@@ -4100,7 +4100,7 @@ pub fn namedFieldPtr(
.val = try Value.Tag.ref_val.create(
scope.arena(),
try Value.Tag.@"error".create(scope.arena(), .{
- .name = entry.key,
+ .name = name,
}),
),
});
diff --git a/src/astgen.zig b/src/astgen.zig
index 63184e641b..aaf38ed1ea 100644
--- a/src/astgen.zig
+++ b/src/astgen.zig
@@ -453,13 +453,23 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) In
return rvalue(mod, scope, rl, result);
},
.unwrap_optional => {
- const operand = try expr(mod, scope, rl, node_datas[node].lhs);
- const op: zir.Inst.Tag = switch (rl) {
- .ref => .optional_payload_safe_ptr,
- else => .optional_payload_safe,
- };
const src = token_starts[main_tokens[node]];
- return addZIRUnOp(mod, scope, src, op, operand);
+ switch (rl) {
+ .ref => return addZIRUnOp(
+ mod,
+ scope,
+ src,
+ .optional_payload_safe_ptr,
+ try expr(mod, scope, .ref, node_datas[node].lhs),
+ ),
+ else => return rvalue(mod, scope, rl, try addZIRUnOp(
+ mod,
+ scope,
+ src,
+ .optional_payload_safe,
+ try expr(mod, scope, .none, node_datas[node].lhs),
+ )),
+ }
},
.block_two, .block_two_semicolon => {
const statements = [2]ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs };
@@ -1645,7 +1655,7 @@ fn errorSetDecl(
switch (token_tags[tok_i]) {
.doc_comment, .comma => {},
.identifier => count += 1,
- .r_paren => break :count count,
+ .r_brace => break :count count,
else => unreachable,
}
} else unreachable; // TODO should not need else unreachable here
@@ -1662,7 +1672,7 @@ fn errorSetDecl(
fields[field_i] = try mod.identifierTokenString(scope, tok_i);
field_i += 1;
},
- .r_paren => break,
+ .r_brace => break,
else => unreachable,
}
}
@@ -1699,9 +1709,13 @@ fn orelseCatchExpr(
setBlockResultLoc(&block_scope, rl);
defer block_scope.instructions.deinit(mod.gpa);
- // This could be a pointer or value depending on the `rl` parameter.
+ // This could be a pointer or value depending on the `operand_rl` parameter.
+ // We cannot use `block_scope.break_result_loc` because that has the bare
+ // type, whereas this expression has the optional type. Later we make
+ // up for this fact by calling rvalue on the else branch.
block_scope.break_count += 1;
- const operand = try expr(mod, &block_scope.base, block_scope.break_result_loc, lhs);
+ const operand_rl = try makeOptionalTypeResultLoc(mod, &block_scope.base, src, block_scope.break_result_loc);
+ const operand = try expr(mod, &block_scope.base, operand_rl, lhs);
const cond = try addZIRUnOp(mod, &block_scope.base, src, cond_op, operand);
const condbr = try addZIRInstSpecial(mod, &block_scope.base, src, zir.Inst.CondBr, .{
@@ -1753,6 +1767,10 @@ fn orelseCatchExpr(
// This could be a pointer or value depending on `unwrap_op`.
const unwrapped_payload = try addZIRUnOp(mod, &else_scope.base, src, unwrap_op, operand);
+ const else_result = switch (rl) {
+ .ref => unwrapped_payload,
+ else => try rvalue(mod, &else_scope.base, block_scope.break_result_loc, unwrapped_payload),
+ };
return finishThenElseBlock(
mod,
@@ -1766,7 +1784,7 @@ fn orelseCatchExpr(
src,
src,
then_result,
- unwrapped_payload,
+ else_result,
block,
block,
);
@@ -3955,6 +3973,25 @@ fn rlStrategy(rl: ResultLoc, block_scope: *Scope.GenZIR) ResultLoc.Strategy {
}
}
+/// If the input ResultLoc is ref, returns ResultLoc.ref. Otherwise:
+/// Returns ResultLoc.ty, where the type is determined by the input
+/// ResultLoc type, wrapped in an optional type. If the input ResultLoc
+/// has no type, .none is returned.
+fn makeOptionalTypeResultLoc(mod: *Module, scope: *Scope, src: usize, rl: ResultLoc) !ResultLoc {
+ switch (rl) {
+ .ref => return ResultLoc.ref,
+ .discard, .none, .block_ptr, .inferred_ptr, .bitcasted_ptr => return ResultLoc.none,
+ .ty => |elem_ty| {
+ const wrapped_ty = try addZIRUnOp(mod, scope, src, .optional_type, elem_ty);
+ return ResultLoc{ .ty = wrapped_ty };
+ },
+ .ptr => |ptr_ty| {
+ const wrapped_ty = try addZIRUnOp(mod, scope, src, .optional_type_from_ptr_elem, ptr_ty);
+ return ResultLoc{ .ty = wrapped_ty };
+ },
+ }
+}
+
fn setBlockResultLoc(block_scope: *Scope.GenZIR, parent_rl: ResultLoc) void {
// Depending on whether the result location is a pointer or value, different
// ZIR needs to be generated. In the former case we rely on storing to the
diff --git a/src/clang_options_data.zig b/src/clang_options_data.zig
index 82800afd4b..8e4ab92894 100644
--- a/src/clang_options_data.zig
+++ b/src/clang_options_data.zig
@@ -46,7 +46,7 @@ flagpd1("M"),
.{
.name = "MM",
.syntax = .flag,
- .zig_equivalent = .dep_file,
+ .zig_equivalent = .dep_file_mm,
.pd1 = true,
.pd2 = false,
.psl = false,
@@ -1870,7 +1870,7 @@ flagpsl("MT"),
.{
.name = "print-missing-file-dependencies",
.syntax = .flag,
- .zig_equivalent = .other,
+ .zig_equivalent = .dep_file,
.pd1 = false,
.pd2 = true,
.psl = false,
@@ -1990,7 +1990,7 @@ flagpsl("MT"),
.{
.name = "user-dependencies",
.syntax = .flag,
- .zig_equivalent = .other,
+ .zig_equivalent = .dep_file_mm,
.pd1 = false,
.pd2 = true,
.psl = false,
@@ -2014,7 +2014,7 @@ flagpsl("MT"),
.{
.name = "write-dependencies",
.syntax = .flag,
- .zig_equivalent = .other,
+ .zig_equivalent = .dep_file,
.pd1 = false,
.pd2 = true,
.psl = false,
@@ -2022,7 +2022,7 @@ flagpsl("MT"),
.{
.name = "write-user-dependencies",
.syntax = .flag,
- .zig_equivalent = .other,
+ .zig_equivalent = .dep_file,
.pd1 = false,
.pd2 = true,
.psl = false,
diff --git a/src/codegen.zig b/src/codegen.zig
index 57fd732b42..c3cd64cf73 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -899,6 +899,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.load => return self.genLoad(inst.castTag(.load).?),
.loop => return self.genLoop(inst.castTag(.loop).?),
.not => return self.genNot(inst.castTag(.not).?),
+ .mul => return self.genMul(inst.castTag(.mul).?),
.ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?),
.ref => return self.genRef(inst.castTag(.ref).?),
.ret => return self.genRet(inst.castTag(.ret).?),
@@ -1128,6 +1129,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
+ fn genMul(self: *Self, inst: *ir.Inst.BinOp) !MCValue {
+ // No side effects, so if it's unreferenced, do nothing.
+ if (inst.base.isUnused())
+ return MCValue.dead;
+ switch (arch) {
+ .arm, .armeb => return try self.genArmMul(&inst.base, inst.lhs, inst.rhs),
+ else => return self.fail(inst.base.src, "TODO implement mul for {}", .{self.target.cpu.arch}),
+ }
+ }
+
fn genBitAnd(self: *Self, inst: *ir.Inst.BinOp) !MCValue {
// No side effects, so if it's unreferenced, do nothing.
if (inst.base.isUnused())
@@ -1478,6 +1489,38 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
+ fn genArmMul(self: *Self, inst: *ir.Inst, op_lhs: *ir.Inst, op_rhs: *ir.Inst) !MCValue {
+ const lhs = try self.resolveInst(op_lhs);
+ const rhs = try self.resolveInst(op_rhs);
+
+ // Destination must be a register
+ // LHS must be a register
+ // RHS must be a register
+ var dst_mcv: MCValue = undefined;
+ var lhs_mcv: MCValue = undefined;
+ var rhs_mcv: MCValue = undefined;
+ if (self.reuseOperand(inst, 0, lhs)) {
+ // LHS is the destination
+ lhs_mcv = if (lhs != .register) try self.copyToNewRegister(inst, lhs) else lhs;
+ rhs_mcv = if (rhs != .register) try self.copyToNewRegister(inst, rhs) else rhs;
+ dst_mcv = lhs_mcv;
+ } else if (self.reuseOperand(inst, 1, rhs)) {
+ // RHS is the destination
+ lhs_mcv = if (lhs != .register) try self.copyToNewRegister(inst, lhs) else lhs;
+ rhs_mcv = if (rhs != .register) try self.copyToNewRegister(inst, rhs) else rhs;
+ dst_mcv = rhs_mcv;
+ } else {
+ // TODO save 1 copy instruction by directly allocating the destination register
+ // LHS is the destination
+ lhs_mcv = try self.copyToNewRegister(inst, lhs);
+ rhs_mcv = if (rhs != .register) try self.copyToNewRegister(inst, rhs) else rhs;
+ dst_mcv = lhs_mcv;
+ }
+
+ writeInt(u32, try self.code.addManyAsArray(4), Instruction.mul(.al, dst_mcv.register, lhs_mcv.register, rhs_mcv.register).toU32());
+ return dst_mcv;
+ }
+
/// ADD, SUB, XOR, OR, AND
fn genX8664BinMath(self: *Self, inst: *ir.Inst, op_lhs: *ir.Inst, op_rhs: *ir.Inst, opx: u8, mr: u8) !MCValue {
try self.code.ensureCapacity(self.code.items.len + 8);
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index f34b74bce8..8e38145be7 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -400,6 +400,7 @@ pub const LLVMIRModule = struct {
.block => try self.genBlock(inst.castTag(.block).?),
.br => try self.genBr(inst.castTag(.br).?),
.breakpoint => try self.genBreakpoint(inst.castTag(.breakpoint).?),
+ .br_void => try self.genBrVoid(inst.castTag(.br_void).?),
.call => try self.genCall(inst.castTag(.call).?),
.cmp_eq => try self.genCmp(inst.castTag(.cmp_eq).?, .eq),
.cmp_gt => try self.genCmp(inst.castTag(.cmp_gt).?, .gt),
@@ -409,6 +410,10 @@ pub const LLVMIRModule = struct {
.cmp_neq => try self.genCmp(inst.castTag(.cmp_neq).?, .neq),
.condbr => try self.genCondBr(inst.castTag(.condbr).?),
.intcast => try self.genIntCast(inst.castTag(.intcast).?),
+ .is_non_null => try self.genIsNonNull(inst.castTag(.is_non_null).?, false),
+ .is_non_null_ptr => try self.genIsNonNull(inst.castTag(.is_non_null_ptr).?, true),
+ .is_null => try self.genIsNull(inst.castTag(.is_null).?, false),
+ .is_null_ptr => try self.genIsNull(inst.castTag(.is_null_ptr).?, true),
.load => try self.genLoad(inst.castTag(.load).?),
.loop => try self.genLoop(inst.castTag(.loop).?),
.not => try self.genNot(inst.castTag(.not).?),
@@ -417,6 +422,8 @@ pub const LLVMIRModule = struct {
.store => try self.genStore(inst.castTag(.store).?),
.sub => try self.genSub(inst.castTag(.sub).?),
.unreach => self.genUnreach(inst.castTag(.unreach).?),
+ .optional_payload => try self.genOptionalPayload(inst.castTag(.optional_payload).?, false),
+ .optional_payload_ptr => try self.genOptionalPayload(inst.castTag(.optional_payload_ptr).?, true),
.dbg_stmt => blk: {
// TODO: implement debug info
break :blk null;
@@ -537,21 +544,29 @@ pub const LLVMIRModule = struct {
}
fn genBr(self: *LLVMIRModule, inst: *Inst.Br) !?*const llvm.Value {
- // Get the block that we want to break to.
var block = self.blocks.get(inst.block).?;
- _ = self.builder.buildBr(block.parent_bb);
// If the break doesn't break a value, then we don't have to add
// the values to the lists.
- if (!inst.operand.ty.hasCodeGenBits()) return null;
+ if (!inst.operand.ty.hasCodeGenBits()) {
+ // TODO: in astgen these instructions should turn into `br_void` instructions.
+ _ = self.builder.buildBr(block.parent_bb);
+ } else {
+ const val = try self.resolveInst(inst.operand);
- // For the phi node, we need the basic blocks and the values of the
- // break instructions.
- try block.break_bbs.append(self.gpa, self.builder.getInsertBlock());
+ // For the phi node, we need the basic blocks and the values of the
+ // break instructions.
+ try block.break_bbs.append(self.gpa, self.builder.getInsertBlock());
+ try block.break_vals.append(self.gpa, val);
- const val = try self.resolveInst(inst.operand);
- try block.break_vals.append(self.gpa, val);
+ _ = self.builder.buildBr(block.parent_bb);
+ }
+ return null;
+ }
+ fn genBrVoid(self: *LLVMIRModule, inst: *Inst.BrVoid) !?*const llvm.Value {
+ var block = self.blocks.get(inst.block).?;
+ _ = self.builder.buildBr(block.parent_bb);
return null;
}
@@ -594,6 +609,44 @@ pub const LLVMIRModule = struct {
return null;
}
+ fn genIsNonNull(self: *LLVMIRModule, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
+ const operand = try self.resolveInst(inst.operand);
+
+ if (operand_is_ptr) {
+ const index_type = self.context.intType(32);
+
+ var indices: [2]*const llvm.Value = .{
+ index_type.constNull(),
+ index_type.constInt(1, false),
+ };
+
+ return self.builder.buildLoad(self.builder.buildInBoundsGEP(operand, &indices, 2, ""), "");
+ } else {
+ return self.builder.buildExtractValue(operand, 1, "");
+ }
+ }
+
+ fn genIsNull(self: *LLVMIRModule, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
+ return self.builder.buildNot((try self.genIsNonNull(inst, operand_is_ptr)).?, "");
+ }
+
+ fn genOptionalPayload(self: *LLVMIRModule, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
+ const operand = try self.resolveInst(inst.operand);
+
+ if (operand_is_ptr) {
+ const index_type = self.context.intType(32);
+
+ var indices: [2]*const llvm.Value = .{
+ index_type.constNull(),
+ index_type.constNull(),
+ };
+
+ return self.builder.buildInBoundsGEP(operand, &indices, 2, "");
+ } else {
+ return self.builder.buildExtractValue(operand, 0, "");
+ }
+ }
+
fn genAdd(self: *LLVMIRModule, inst: *Inst.BinOp) !?*const llvm.Value {
const lhs = try self.resolveInst(inst.lhs);
const rhs = try self.resolveInst(inst.rhs);
@@ -754,6 +807,13 @@ pub const LLVMIRModule = struct {
// TODO: consider using buildInBoundsGEP2 for opaque pointers
return self.builder.buildInBoundsGEP(val, &indices, 2, "");
},
+ .ref_val => {
+ const elem_value = tv.val.castTag(.ref_val).?.data;
+ const elem_type = tv.ty.castPointer().?.data;
+ const alloca = self.buildAlloca(try self.getLLVMType(elem_type, src));
+ _ = self.builder.buildStore(try self.genTypedValue(src, .{ .ty = elem_type, .val = elem_value }), alloca);
+ return alloca;
+ },
else => return self.fail(src, "TODO implement const of pointer type '{}'", .{tv.ty}),
},
.Array => {
@@ -768,6 +828,29 @@ pub const LLVMIRModule = struct {
return self.fail(src, "TODO handle more array values", .{});
}
},
+ .Optional => {
+ if (!tv.ty.isPtrLikeOptional()) {
+ var buf: Type.Payload.ElemType = undefined;
+ const child_type = tv.ty.optionalChild(&buf);
+ const llvm_child_type = try self.getLLVMType(child_type, src);
+
+ if (tv.val.tag() == .null_value) {
+ var optional_values: [2]*const llvm.Value = .{
+ llvm_child_type.constNull(),
+ self.context.intType(1).constNull(),
+ };
+ return self.context.constStruct(&optional_values, 2, false);
+ } else {
+ var optional_values: [2]*const llvm.Value = .{
+ try self.genTypedValue(src, .{ .ty = child_type, .val = tv.val }),
+ self.context.intType(1).constAllOnes(),
+ };
+ return self.context.constStruct(&optional_values, 2, false);
+ }
+ } else {
+ return self.fail(src, "TODO implement const of optional pointer", .{});
+ }
+ },
else => return self.fail(src, "TODO implement const of type '{}'", .{tv.ty}),
}
}
@@ -793,6 +876,20 @@ pub const LLVMIRModule = struct {
const elem_type = try self.getLLVMType(t.elemType(), src);
return elem_type.arrayType(@intCast(c_uint, t.abiSize(self.module.getTarget())));
},
+ .Optional => {
+ if (!t.isPtrLikeOptional()) {
+ var buf: Type.Payload.ElemType = undefined;
+ const child_type = t.optionalChild(&buf);
+
+ var optional_types: [2]*const llvm.Type = .{
+ try self.getLLVMType(child_type, src),
+ self.context.intType(1),
+ };
+ return self.context.structType(&optional_types, 2, false);
+ } else {
+ return self.fail(src, "TODO implement optional pointers as actual pointers", .{});
+ }
+ },
else => return self.fail(src, "TODO implement getLLVMType for type '{}'", .{t}),
}
}
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index 79ac833aac..223d006d5c 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -21,9 +21,15 @@ pub const Context = opaque {
pub const voidType = LLVMVoidTypeInContext;
extern fn LLVMVoidTypeInContext(C: *const Context) *const Type;
+ pub const structType = LLVMStructTypeInContext;
+ extern fn LLVMStructTypeInContext(C: *const Context, ElementTypes: [*]*const Type, ElementCount: c_uint, Packed: LLVMBool) *const Type;
+
pub const constString = LLVMConstStringInContext;
extern fn LLVMConstStringInContext(C: *const Context, Str: [*]const u8, Length: c_uint, DontNullTerminate: LLVMBool) *const Value;
+ pub const constStruct = LLVMConstStructInContext;
+ extern fn LLVMConstStructInContext(C: *const Context, ConstantVals: [*]*const Value, Count: c_uint, Packed: LLVMBool) *const Value;
+
pub const createBasicBlock = LLVMCreateBasicBlockInContext;
extern fn LLVMCreateBasicBlockInContext(C: *const Context, Name: [*:0]const u8) *const BasicBlock;
@@ -204,6 +210,9 @@ pub const Builder = opaque {
pub const buildPhi = LLVMBuildPhi;
extern fn LLVMBuildPhi(*const Builder, Ty: *const Type, Name: [*:0]const u8) *const Value;
+
+ pub const buildExtractValue = LLVMBuildExtractValue;
+ extern fn LLVMBuildExtractValue(*const Builder, AggVal: *const Value, Index: c_uint, Name: [*:0]const u8) *const Value;
};
pub const IntPredicate = extern enum {
diff --git a/src/ir.zig b/src/ir.zig
index eddc885d14..996f3b9782 100644
--- a/src/ir.zig
+++ b/src/ir.zig
@@ -106,6 +106,7 @@ pub const Inst = struct {
store,
sub,
unreach,
+ mul,
not,
floatcast,
intcast,
@@ -165,6 +166,7 @@ pub const Inst = struct {
.add,
.sub,
+ .mul,
.cmp_lt,
.cmp_lte,
.cmp_eq,
diff --git a/src/link.zig b/src/link.zig
index 0a4cde0284..db3e973f84 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -550,11 +550,11 @@ pub const File = struct {
id_symlink_basename,
&prev_digest_buf,
) catch |err| b: {
- log.debug("archive new_digest={x} readFile error: {s}", .{ digest, @errorName(err) });
+ log.debug("archive new_digest={s} readFile error: {s}", .{ std.fmt.fmtSliceHexLower(&digest), @errorName(err) });
break :b prev_digest_buf[0..0];
};
if (mem.eql(u8, prev_digest, &digest)) {
- log.debug("archive digest={x} match - skipping invocation", .{digest});
+ log.debug("archive digest={s} match - skipping invocation", .{std.fmt.fmtSliceHexLower(&digest)});
base.lock = man.toOwnedLock();
return;
}
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 2eee19b4f6..a73b8aaf9c 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -892,17 +892,17 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
- log.debug("COFF LLD new_digest={x} error: {s}", .{ digest, @errorName(err) });
+ log.debug("COFF LLD new_digest={s} error: {s}", .{ std.fmt.fmtSliceHexLower(&digest), @errorName(err) });
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
if (mem.eql(u8, prev_digest, &digest)) {
- log.debug("COFF LLD digest={x} match - skipping invocation", .{digest});
+ log.debug("COFF LLD digest={s} match - skipping invocation", .{std.fmt.fmtSliceHexLower(&digest)});
// Hot diggity dog! The output binary is already there.
self.base.lock = man.toOwnedLock();
return;
}
- log.debug("COFF LLD prev_digest={x} new_digest={x}", .{ prev_digest, digest });
+ log.debug("COFF LLD prev_digest={s} new_digest={s}", .{ std.fmt.fmtSliceHexLower(prev_digest), std.fmt.fmtSliceHexLower(&digest) });
// We are about to change the output file to be different, so we invalidate the build hash now.
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 1b6fbb0f0f..bfef2cd12c 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1365,17 +1365,17 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
- log.debug("ELF LLD new_digest={x} error: {s}", .{ digest, @errorName(err) });
+ log.debug("ELF LLD new_digest={s} error: {s}", .{ std.fmt.fmtSliceHexLower(&digest), @errorName(err) });
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
if (mem.eql(u8, prev_digest, &digest)) {
- log.debug("ELF LLD digest={x} match - skipping invocation", .{digest});
+ log.debug("ELF LLD digest={s} match - skipping invocation", .{std.fmt.fmtSliceHexLower(&digest)});
// Hot diggity dog! The output binary is already there.
self.base.lock = man.toOwnedLock();
return;
}
- log.debug("ELF LLD prev_digest={x} new_digest={x}", .{ prev_digest, digest });
+ log.debug("ELF LLD prev_digest={s} new_digest={s}", .{ std.fmt.fmtSliceHexLower(prev_digest), std.fmt.fmtSliceHexLower(&digest) });
// We are about to change the output file to be different, so we invalidate the build hash now.
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 139a9b8940..0f76925618 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -556,17 +556,17 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
- log.debug("MachO LLD new_digest={x} error: {s}", .{ digest, @errorName(err) });
+ log.debug("MachO LLD new_digest={s} error: {s}", .{ std.fmt.fmtSliceHexLower(&digest), @errorName(err) });
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
if (mem.eql(u8, prev_digest, &digest)) {
- log.debug("MachO LLD digest={x} match - skipping invocation", .{digest});
+ log.debug("MachO LLD digest={s} match - skipping invocation", .{std.fmt.fmtSliceHexLower(&digest)});
// Hot diggity dog! The output binary is already there.
self.base.lock = man.toOwnedLock();
return;
}
- log.debug("MachO LLD prev_digest={x} new_digest={x}", .{ prev_digest, digest });
+ log.debug("MachO LLD prev_digest={s} new_digest={s}", .{ std.fmt.fmtSliceHexLower(prev_digest), std.fmt.fmtSliceHexLower(&digest) });
// We are about to change the output file to be different, so we invalidate the build hash now.
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index e0e10ad88d..71cb171d98 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -391,17 +391,17 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
- log.debug("WASM LLD new_digest={x} error: {s}", .{ digest, @errorName(err) });
+ log.debug("WASM LLD new_digest={s} error: {s}", .{ std.fmt.fmtSliceHexLower(&digest), @errorName(err) });
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
if (mem.eql(u8, prev_digest, &digest)) {
- log.debug("WASM LLD digest={x} match - skipping invocation", .{digest});
+ log.debug("WASM LLD digest={s} match - skipping invocation", .{std.fmt.fmtSliceHexLower(&digest)});
// Hot diggity dog! The output binary is already there.
self.base.lock = man.toOwnedLock();
return;
}
- log.debug("WASM LLD prev_digest={x} new_digest={x}", .{ prev_digest, digest });
+ log.debug("WASM LLD prev_digest={s} new_digest={s}", .{ std.fmt.fmtSliceHexLower(prev_digest), std.fmt.fmtSliceHexLower(&digest) });
// We are about to change the output file to be different, so we invalidate the build hash now.
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
diff --git a/src/main.zig b/src/main.zig
index bfac976c5c..011b9edf76 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -448,6 +448,15 @@ const Emit = union(enum) {
}
};
+fn optionalBoolEnvVar(arena: *Allocator, name: []const u8) !bool {
+ if (std.process.getEnvVarOwned(arena, name)) |value| {
+ return true;
+ } else |err| switch (err) {
+ error.EnvironmentVariableNotFound => return false,
+ else => |e| return e,
+ }
+}
+
fn optionalStringEnvVar(arena: *Allocator, name: []const u8) !?[]const u8 {
if (std.process.getEnvVarOwned(arena, name)) |value| {
return value;
@@ -482,8 +491,8 @@ fn buildOutputType(
var single_threaded = false;
var function_sections = false;
var watch = false;
- var verbose_link = false;
- var verbose_cc = false;
+ var verbose_link = try optionalBoolEnvVar(arena, "ZIG_VERBOSE_LINK");
+ var verbose_cc = try optionalBoolEnvVar(arena, "ZIG_VERBOSE_CC");
var verbose_tokenize = false;
var verbose_ast = false;
var verbose_ir = false;
@@ -1183,6 +1192,12 @@ fn buildOutputType(
disable_c_depfile = true;
try clang_argv.appendSlice(it.other_args);
},
+ .dep_file_mm => { // -MM
+ // "Like -MMD, but also implies -E and writes to stdout by default"
+ c_out_mode = .preprocessor;
+ disable_c_depfile = true;
+ try clang_argv.appendSlice(it.other_args);
+ },
.framework_dir => try framework_dirs.append(it.only_arg),
.framework => try frameworks.append(it.only_arg),
.nostdlibinc => want_native_include_dirs = false,
@@ -3046,6 +3061,7 @@ pub const ClangArgIterator = struct {
lib_dir,
mcpu,
dep_file,
+ dep_file_mm,
framework_dir,
framework,
nostdlibinc,
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index 1338f58e30..ec953dfa13 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -4126,7 +4126,15 @@ static LLVMValueRef gen_frame_size(CodeGen *g, LLVMValueRef fn_val) {
LLVMValueRef casted_fn_val = LLVMBuildBitCast(g->builder, fn_val, ptr_usize_llvm_type, "");
LLVMValueRef negative_one = LLVMConstInt(LLVMInt32Type(), -1, true);
LLVMValueRef prefix_ptr = LLVMBuildInBoundsGEP(g->builder, casted_fn_val, &negative_one, 1, "");
- return LLVMBuildLoad(g->builder, prefix_ptr, "");
+ LLVMValueRef load_inst = LLVMBuildLoad(g->builder, prefix_ptr, "");
+
+ // Some architectures (e.g. SPARCv9) have different alignment requirements between a
+ // function/usize pointer and also require all loads to be aligned.
+ // On those architectures, not explicitly setting the alignment will lead to @frameSize
+ // generating usize-aligned load instruction that could crash if the function pointer
+ // happens to be not usize-aligned.
+ LLVMSetAlignment(load_inst, 1);
+ return load_inst;
}
static void gen_init_stack_trace(CodeGen *g, LLVMValueRef trace_field_ptr, LLVMValueRef addrs_field_ptr) {
diff --git a/src/test.zig b/src/test.zig
index a28787e952..d2c368d8ea 100644
--- a/src/test.zig
+++ b/src/test.zig
@@ -1030,8 +1030,8 @@ pub const TestContext = struct {
var file = try tmp_dir.openFile(bin_name, .{ .read = true });
defer file.close();
- const header = try std.elf.readHeader(file);
- var iterator = header.program_header_iterator(file);
+ const header = try std.elf.Header.read(&file);
+ var iterator = header.program_header_iterator(&file);
var none_loaded = true;
diff --git a/src/translate_c.zig b/src/translate_c.zig
index e7a756be0a..bb38e8ad55 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -4608,7 +4608,7 @@ fn parseCPrimaryExprInner(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!N
if (slice[0] != '\'' or slice[1] == '\\' or slice.len == 3) {
return Tag.char_literal.create(c.arena, try zigifyEscapeSequences(c, m));
} else {
- const str = try std.fmt.allocPrint(c.arena, "0x{x}", .{slice[1 .. slice.len - 1]});
+ const str = try std.fmt.allocPrint(c.arena, "0x{s}", .{std.fmt.fmtSliceHexLower(slice[1 .. slice.len - 1])});
return Tag.integer_literal.create(c.arena, str);
}
},
diff --git a/src/value.zig b/src/value.zig
index ae94e4b424..3fd2889fc8 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -2144,7 +2144,7 @@ pub const Value = extern union {
base: Payload = .{ .tag = base_tag },
data: struct {
/// TODO revisit this when we have the concept of the error tag type
- fields: std.StringHashMapUnmanaged(u16),
+ fields: std.StringHashMapUnmanaged(void),
decl: *Module.Decl,
},
};
diff --git a/src/zir.zig b/src/zir.zig
index d013f25ee5..1331f26dc7 100644
--- a/src/zir.zig
+++ b/src/zir.zig
@@ -299,6 +299,9 @@ pub const Inst = struct {
xor,
/// Create an optional type '?T'
optional_type,
+ /// Create an optional type '?T'. The operand is a pointer value. The optional type will
+ /// be the type of the pointer element, wrapped in an optional.
+ optional_type_from_ptr_elem,
/// Create a union type.
union_type,
/// ?T => T with safety.
@@ -397,6 +400,7 @@ pub const Inst = struct {
.mut_slice_type,
.const_slice_type,
.optional_type,
+ .optional_type_from_ptr_elem,
.optional_payload_safe,
.optional_payload_unsafe,
.optional_payload_safe_ptr,
@@ -597,6 +601,7 @@ pub const Inst = struct {
.typeof,
.xor,
.optional_type,
+ .optional_type_from_ptr_elem,
.optional_payload_safe,
.optional_payload_unsafe,
.optional_payload_safe_ptr,
@@ -1649,6 +1654,7 @@ const DumpTzir = struct {
.add,
.sub,
+ .mul,
.cmp_lt,
.cmp_lte,
.cmp_eq,
@@ -1771,6 +1777,7 @@ const DumpTzir = struct {
.add,
.sub,
+ .mul,
.cmp_lt,
.cmp_lte,
.cmp_eq,
diff --git a/src/zir_sema.zig b/src/zir_sema.zig
index 864f766f54..27e31c6197 100644
--- a/src/zir_sema.zig
+++ b/src/zir_sema.zig
@@ -131,6 +131,7 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!
.typeof => return zirTypeof(mod, scope, old_inst.castTag(.typeof).?),
.typeof_peer => return zirTypeofPeer(mod, scope, old_inst.castTag(.typeof_peer).?),
.optional_type => return zirOptionalType(mod, scope, old_inst.castTag(.optional_type).?),
+ .optional_type_from_ptr_elem => return zirOptionalTypeFromPtrElem(mod, scope, old_inst.castTag(.optional_type_from_ptr_elem).?),
.optional_payload_safe => return zirOptionalPayload(mod, scope, old_inst.castTag(.optional_payload_safe).?, true),
.optional_payload_unsafe => return zirOptionalPayload(mod, scope, old_inst.castTag(.optional_payload_unsafe).?, false),
.optional_payload_safe_ptr => return zirOptionalPayloadPtr(mod, scope, old_inst.castTag(.optional_payload_safe_ptr).?, true),
@@ -1093,6 +1094,16 @@ fn zirOptionalType(mod: *Module, scope: *Scope, optional: *zir.Inst.UnOp) InnerE
return mod.constType(scope, optional.base.src, try mod.optionalType(scope, child_type));
}
+fn zirOptionalTypeFromPtrElem(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const ptr = try resolveInst(mod, scope, inst.positionals.operand);
+ const elem_ty = ptr.ty.elemType();
+
+ return mod.constType(scope, inst.base.src, try mod.optionalType(scope, elem_ty));
+}
+
fn zirArrayType(mod: *Module, scope: *Scope, array: *zir.Inst.BinOp) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -1154,7 +1165,7 @@ fn zirErrorSet(mod: *Module, scope: *Scope, inst: *zir.Inst.ErrorSet) InnerError
for (inst.positionals.fields) |field_name| {
const entry = try mod.getErrorValue(field_name);
- if (payload.data.fields.fetchPutAssumeCapacity(entry.key, entry.value)) |prev| {
+ if (payload.data.fields.fetchPutAssumeCapacity(entry.key, {})) |_| {
return mod.fail(scope, inst.base.src, "duplicate error: '{s}'", .{field_name});
}
}
@@ -1185,7 +1196,79 @@ fn zirErrorValue(mod: *Module, scope: *Scope, inst: *zir.Inst.ErrorValue) InnerE
fn zirMergeErrorSets(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
- return mod.fail(scope, inst.base.src, "TODO implement merge_error_sets", .{});
+
+ const rhs_ty = try resolveType(mod, scope, inst.positionals.rhs);
+ const lhs_ty = try resolveType(mod, scope, inst.positionals.lhs);
+ if (rhs_ty.zigTypeTag() != .ErrorSet)
+ return mod.fail(scope, inst.positionals.rhs.src, "expected error set type, found {}", .{rhs_ty});
+ if (lhs_ty.zigTypeTag() != .ErrorSet)
+ return mod.fail(scope, inst.positionals.lhs.src, "expected error set type, found {}", .{lhs_ty});
+
+ // anything merged with anyerror is anyerror
+ if (lhs_ty.tag() == .anyerror or rhs_ty.tag() == .anyerror)
+ return mod.constInst(scope, inst.base.src, .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.anyerror_type),
+ });
+ // The declarations arena will store the hashmap.
+ var new_decl_arena = std.heap.ArenaAllocator.init(mod.gpa);
+ errdefer new_decl_arena.deinit();
+
+ const payload = try new_decl_arena.allocator.create(Value.Payload.ErrorSet);
+ payload.* = .{
+ .base = .{ .tag = .error_set },
+ .data = .{
+ .fields = .{},
+ .decl = undefined, // populated below
+ },
+ };
+ try payload.data.fields.ensureCapacity(&new_decl_arena.allocator, @intCast(u32, switch (rhs_ty.tag()) {
+ .error_set_single => 1,
+ .error_set => rhs_ty.castTag(.error_set).?.data.typed_value.most_recent.typed_value.val.castTag(.error_set).?.data.fields.size,
+ else => unreachable,
+ } + switch (lhs_ty.tag()) {
+ .error_set_single => 1,
+ .error_set => lhs_ty.castTag(.error_set).?.data.typed_value.most_recent.typed_value.val.castTag(.error_set).?.data.fields.size,
+ else => unreachable,
+ }));
+
+ switch (lhs_ty.tag()) {
+ .error_set_single => {
+ const name = lhs_ty.castTag(.error_set_single).?.data;
+ payload.data.fields.putAssumeCapacity(name, {});
+ },
+ .error_set => {
+ var multiple = lhs_ty.castTag(.error_set).?.data.typed_value.most_recent.typed_value.val.castTag(.error_set).?.data.fields;
+ var it = multiple.iterator();
+ while (it.next()) |entry| {
+ payload.data.fields.putAssumeCapacity(entry.key, entry.value);
+ }
+ },
+ else => unreachable,
+ }
+
+ switch (rhs_ty.tag()) {
+ .error_set_single => {
+ const name = rhs_ty.castTag(.error_set_single).?.data;
+ payload.data.fields.putAssumeCapacity(name, {});
+ },
+ .error_set => {
+ var multiple = rhs_ty.castTag(.error_set).?.data.typed_value.most_recent.typed_value.val.castTag(.error_set).?.data.fields;
+ var it = multiple.iterator();
+ while (it.next()) |entry| {
+ payload.data.fields.putAssumeCapacity(entry.key, entry.value);
+ }
+ },
+ else => unreachable,
+ }
+ // TODO create name in format "error:line:column"
+ const new_decl = try mod.createAnonymousDecl(scope, &new_decl_arena, .{
+ .ty = Type.initTag(.type),
+ .val = Value.initPayload(&payload.base),
+ });
+ payload.data.decl = new_decl;
+
+ return mod.analyzeDeclVal(scope, inst.base.src, new_decl);
}
fn zirEnumLiteral(mod: *Module, scope: *Scope, inst: *zir.Inst.EnumLiteral) InnerError!*Inst {
@@ -2075,6 +2158,7 @@ fn zirArithmetic(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!
const ir_tag = switch (inst.base.tag) {
.add => Inst.Tag.add,
.sub => Inst.Tag.sub,
+ .mul => Inst.Tag.mul,
else => return mod.fail(scope, inst.base.src, "TODO implement arithmetic for operand '{s}''", .{@tagName(inst.base.tag)}),
};
diff --git a/test/run_translated_c.zig b/test/run_translated_c.zig
index 4eb0f1e50a..07e449733f 100644
--- a/test/run_translated_c.zig
+++ b/test/run_translated_c.zig
@@ -27,6 +27,8 @@ pub fn addCases(cases: *tests.RunTranslatedCContext) void {
\\#define FOO =
\\#define PtrToPtr64(p) ((void *POINTER_64) p)
\\#define STRUC_ALIGNED_STACK_COPY(t,s) ((CONST t *)(s))
+ \\#define bar = 0x
+ \\#define baz = 0b
\\int main(void) {}
, "");
diff --git a/test/stage1/behavior.zig b/test/stage1/behavior.zig
index f82631cd9e..db6e596291 100644
--- a/test/stage1/behavior.zig
+++ b/test/stage1/behavior.zig
@@ -141,5 +141,5 @@ comptime {
_ = @import("behavior/while.zig");
_ = @import("behavior/widening.zig");
_ = @import("behavior/src.zig");
- // _ = @import("behavior/translate_c_macros.zig");
+ _ = @import("behavior/translate_c_macros.zig");
}
diff --git a/test/stage2/arm.zig b/test/stage2/arm.zig
index 06e359ccff..1bc3f23058 100644
--- a/test/stage2/arm.zig
+++ b/test/stage2/arm.zig
@@ -344,4 +344,38 @@ pub fn addCases(ctx: *TestContext) !void {
"",
);
}
+
+ {
+ var case = ctx.exe("integer multiplication", linux_arm);
+ // Simple u32 integer multiplication
+ case.addCompareOutput(
+ \\export fn _start() noreturn {
+ \\ assert(mul(1, 1) == 1);
+ \\ assert(mul(42, 1) == 42);
+ \\ assert(mul(1, 42) == 42);
+ \\ assert(mul(123, 42) == 5166);
+ \\ exit();
+ \\}
+ \\
+ \\fn mul(x: u32, y: u32) u32 {
+ \\ return x * y;
+ \\}
+ \\
+ \\fn assert(ok: bool) void {
+ \\ if (!ok) unreachable;
+ \\}
+ \\
+ \\fn exit() noreturn {
+ \\ asm volatile ("svc #0"
+ \\ :
+ \\ : [number] "{r7}" (1),
+ \\ [arg1] "{r0}" (0)
+ \\ : "memory"
+ \\ );
+ \\ unreachable;
+ \\}
+ ,
+ "",
+ );
+ }
}
diff --git a/test/stage2/llvm.zig b/test/stage2/llvm.zig
index f52ccecb68..4b00ed124c 100644
--- a/test/stage2/llvm.zig
+++ b/test/stage2/llvm.zig
@@ -132,4 +132,72 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
, "");
}
+
+ {
+ var case = ctx.exeUsingLlvmBackend("optionals", linux_x64);
+
+ case.addCompareOutput(
+ \\fn assert(ok: bool) void {
+ \\ if (!ok) unreachable;
+ \\}
+ \\
+ \\export fn main() c_int {
+ \\ var opt_val: ?i32 = 10;
+ \\ var null_val: ?i32 = null;
+ \\
+ \\ var val1: i32 = opt_val.?;
+ \\ const val1_1: i32 = opt_val.?;
+ \\ var ptr_val1 = &(opt_val.?);
+ \\ const ptr_val1_1 = &(opt_val.?);
+ \\
+ \\ var val2: i32 = null_val orelse 20;
+ \\ const val2_2: i32 = null_val orelse 20;
+ \\
+ \\ var value: i32 = 20;
+ \\ var ptr_val2 = &(null_val orelse value);
+ \\
+ \\ const val3 = opt_val orelse 30;
+ \\ var val3_var = opt_val orelse 30;
+ \\
+ \\ assert(val1 == 10);
+ \\ assert(val1_1 == 10);
+ \\ assert(ptr_val1.* == 10);
+ \\ assert(ptr_val1_1.* == 10);
+ \\
+ \\ assert(val2 == 20);
+ \\ assert(val2_2 == 20);
+ \\ assert(ptr_val2.* == 20);
+ \\
+ \\ assert(val3 == 10);
+ \\ assert(val3_var == 10);
+ \\
+ \\ (null_val orelse val2) = 1234;
+ \\ assert(val2 == 1234);
+ \\
+ \\ (opt_val orelse val2) = 5678;
+ \\ assert(opt_val.? == 5678);
+ \\
+ \\ return 0;
+ \\}
+ , "");
+ }
+
+ {
+ var case = ctx.exeUsingLlvmBackend("for loop", linux_x64);
+
+ case.addCompareOutput(
+ \\fn assert(ok: bool) void {
+ \\ if (!ok) unreachable;
+ \\}
+ \\
+ \\export fn main() c_int {
+ \\ var x: u32 = 0;
+ \\ for ("hello") |_| {
+ \\ x += 1;
+ \\ }
+ \\ assert("hello".len == x);
+ \\ return 0;
+ \\}
+ , "");
+ }
}
diff --git a/test/stage2/test.zig b/test/stage2/test.zig
index 9bd5655d22..d475f5dff0 100644
--- a/test/stage2/test.zig
+++ b/test/stage2/test.zig
@@ -985,7 +985,7 @@ pub fn addCases(ctx: *TestContext) !void {
"Hello, World!\n",
);
try case.files.append(.{
- .src =
+ .src =
\\pub fn print() void {
\\ asm volatile ("syscall"
\\ :
@@ -1525,4 +1525,37 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
, "");
}
+ {
+ var case = ctx.exe("merge error sets", linux_x64);
+
+ case.addCompareOutput(
+ \\export fn _start() noreturn {
+ \\ const E = error{ A, B, D } || error { A, B, C };
+ \\ const a = E.A;
+ \\ const b = E.B;
+ \\ const c = E.C;
+ \\ const d = E.D;
+ \\ const E2 = error { X, Y } || @TypeOf(error.Z);
+ \\ const x = E2.X;
+ \\ const y = E2.Y;
+ \\ const z = E2.Z;
+ \\ assert(anyerror || error { Z } == anyerror);
+ \\ exit();
+ \\}
+ \\fn assert(b: bool) void {
+ \\ if (!b) unreachable;
+ \\}
+ \\fn exit() noreturn {
+ \\ asm volatile ("syscall"
+ \\ :
+ \\ : [number] "{rax}" (231),
+ \\ [arg1] "{rdi}" (0)
+ \\ : "rcx", "r11", "memory"
+ \\ );
+ \\ unreachable;
+ \\}
+ ,
+ "",
+ );
+ }
}
diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig
index 1cb134a731..f0dfe1639d 100644
--- a/tools/update_clang_options.zig
+++ b/tools/update_clang_options.zig
@@ -268,6 +268,10 @@ const known_options = [_]KnownOpt{
.name = "MD",
.ident = "dep_file",
},
+ .{
+ .name = "write-dependencies",
+ .ident = "dep_file",
+ },
.{
.name = "MV",
.ident = "dep_file",
@@ -284,18 +288,30 @@ const known_options = [_]KnownOpt{
.name = "MG",
.ident = "dep_file",
},
+ .{
+ .name = "print-missing-file-dependencies",
+ .ident = "dep_file",
+ },
.{
.name = "MJ",
.ident = "dep_file",
},
.{
.name = "MM",
- .ident = "dep_file",
+ .ident = "dep_file_mm",
+ },
+ .{
+ .name = "user-dependencies",
+ .ident = "dep_file_mm",
},
.{
.name = "MMD",
.ident = "dep_file",
},
+ .{
+ .name = "write-user-dependencies",
+ .ident = "dep_file",
+ },
.{
.name = "MP",
.ident = "dep_file",