Merge remote-tracking branch 'origin/master' into zir-memory-layout

Wanted to make sure those new test cases still pass.

Also grab that CI fix so we can get those green check marks.
This commit is contained in:
Andrew Kelley 2021-03-28 19:42:08 -07:00
commit 281a7baaea
32 changed files with 1088 additions and 503 deletions

View File

@ -89,6 +89,7 @@ set(ZIG_TARGET_MCPU "baseline" CACHE STRING "-mcpu parameter to output binaries
set(ZIG_EXECUTABLE "" CACHE STRING "(when cross compiling) path to already-built zig binary")
set(ZIG_SINGLE_THREADED off CACHE BOOL "limit the zig compiler to use only 1 thread")
set(ZIG_OMIT_STAGE2 off CACHE BOOL "omit the stage2 backend from stage1")
set(ZIG_ENABLE_LOGGING off CACHE BOOL "enable logging")
if("${ZIG_TARGET_TRIPLE}" STREQUAL "native")
set(ZIG_USE_LLVM_CONFIG ON CACHE BOOL "use llvm-config to find LLVM libraries")
@ -607,6 +608,12 @@ else()
set(ZIG_OMIT_STAGE2_BOOL "false")
endif()
if(ZIG_ENABLE_LOGGING)
set(ZIG_ENABLE_LOGGING_BOOL "true")
else()
set(ZIG_ENABLE_LOGGING_BOOL "false")
endif()
configure_file (
"${CMAKE_SOURCE_DIR}/src/stage1/config.h.in"
"${ZIG_CONFIG_H_OUT}"
@ -735,12 +742,14 @@ if(MSVC OR MINGW)
target_link_libraries(zigstage1 LINK_PUBLIC version)
endif()
add_executable(zig0 ${ZIG0_SOURCES})
set_target_properties(zig0 PROPERTIES
COMPILE_FLAGS ${EXE_CFLAGS}
LINK_FLAGS ${EXE_LDFLAGS}
)
target_link_libraries(zig0 zigstage1)
if("${ZIG_EXECUTABLE}" STREQUAL "")
add_executable(zig0 ${ZIG0_SOURCES})
set_target_properties(zig0 PROPERTIES
COMPILE_FLAGS ${EXE_CFLAGS}
LINK_FLAGS ${EXE_LDFLAGS}
)
target_link_libraries(zig0 zigstage1)
endif()
if(MSVC)
set(ZIG1_OBJECT "${CMAKE_BINARY_DIR}/zig1.obj")
@ -789,7 +798,6 @@ if("${ZIG_EXECUTABLE}" STREQUAL "")
else()
add_custom_command(
OUTPUT "${ZIG1_OBJECT}"
BYPRODUCTS "${ZIG1_OBJECT}"
COMMAND "${ZIG_EXECUTABLE}" "build-obj" ${BUILD_ZIG1_ARGS}
DEPENDS ${ZIG_STAGE2_SOURCES}
COMMENT STATUS "Building self-hosted component ${ZIG1_OBJECT}"

132
ci/azure/macos_arm64_script Executable file
View File

@ -0,0 +1,132 @@
#!/bin/sh
# Azure CI: build zig for the x86_64 host, then cross-compile a macOS arm64
# release tarball with it; on non-PR builds, publish the tarball to s3 and
# export pipeline variables for downstream jobs.
set -x
set -e
brew install s3cmd ninja gnu-tar
ZIGDIR="$(pwd)"
ARCH="aarch64"
# {product}-{os}{sdk_version}-{arch}-{llvm_version}-{cmake_build_type}
CACHE_HOST_BASENAME="llvm-macos10.15-x86_64-11.0.1-release"
CACHE_ARM64_BASENAME="llvm-macos11.0-arm64-11.0.1-release"
PREFIX_HOST="$HOME/$CACHE_HOST_BASENAME"
PREFIX_ARM64="$HOME/$CACHE_ARM64_BASENAME"
JOBS="-j2"
# Start from a clean slate before re-extracting the LLVM caches.
# Fixed: this previously removed "$PREFIX", a variable never set in this
# script (copied from macos_script), making the cleanup a silent no-op.
rm -rf "$PREFIX_HOST" "$PREFIX_ARM64"
cd "$HOME"
wget -nv "https://ziglang.org/deps/$CACHE_HOST_BASENAME.tar.xz"
wget -nv "https://ziglang.org/deps/$CACHE_ARM64_BASENAME.tar.xz"
gtar xf "$CACHE_HOST_BASENAME.tar.xz"
gtar xf "$CACHE_ARM64_BASENAME.tar.xz"
cd "$ZIGDIR"
# Make the `zig version` number consistent.
# This will affect the cmake command below.
git config core.abbrev 9
git fetch --unshallow || true
git fetch --tags
# Select xcode: latest version found on vmImage macOS-10.15 .
# Fixed: must be exported, otherwise cmake/ninja child processes never see it
# and the default (older) Xcode would be used.
export DEVELOPER_DIR=/Applications/Xcode_12.4.app
export ZIG_LOCAL_CACHE_DIR="$ZIGDIR/zig-cache"
export ZIG_GLOBAL_CACHE_DIR="$ZIGDIR/zig-cache"
# Build zig for host and use `Debug` type to make builds a little faster.
cd "$ZIGDIR"
mkdir build.host
cd build.host
cmake -G "Ninja" .. \
  -DCMAKE_INSTALL_PREFIX="$(pwd)/release" \
  -DCMAKE_PREFIX_PATH="$PREFIX_HOST" \
  -DCMAKE_BUILD_TYPE="Debug" \
  -DZIG_STATIC="OFF"
# Build but do not install.
ninja $JOBS
ZIG_EXE="$ZIGDIR/build.host/zig"
# Build zig for arm64 target.
# - use `Release` type for published tarballs
# - ad-hoc codesign with linker
# - note: apple quarantine of downloads (eg. via safari) still apply
cd "$ZIGDIR"
mkdir build.arm64
cd build.arm64
cmake -G "Ninja" .. \
  -DCMAKE_INSTALL_PREFIX="$(pwd)/release" \
  -DCMAKE_PREFIX_PATH="$PREFIX_ARM64" \
  -DCMAKE_BUILD_TYPE="Release" \
  -DCMAKE_CROSSCOMPILING="True" \
  -DCMAKE_SYSTEM_NAME="Darwin" \
  -DCMAKE_C_FLAGS="-arch arm64" \
  -DCMAKE_CXX_FLAGS="-arch arm64" \
  -DCMAKE_EXE_LINKER_FLAGS="-lz -Xlinker -adhoc_codesign" \
  -DZIG_USE_LLVM_CONFIG="OFF" \
  -DZIG_EXECUTABLE="$ZIG_EXE" \
  -DZIG_TARGET_TRIPLE="${ARCH}-macos" \
  -DZIG_STATIC="OFF"
ninja $JOBS install
# Disable test because binary is foreign arch.
#release/bin/zig build test
if [ "${BUILD_REASON}" != "PullRequest" ]; then
  mv ../LICENSE release/
  # We do not run test suite but still need langref.
  mkdir -p release/docs
  "$ZIG_EXE" run ../doc/docgen.zig -- "$ZIG_EXE" ../doc/langref.html.in release/docs/langref.html
  # Produce the experimental std lib documentation.
  mkdir -p release/docs/std
  "$ZIG_EXE" test ../lib/std/std.zig \
    --override-lib-dir ../lib \
    -femit-docs=release/docs/std \
    -fno-emit-bin
  # Remove the unnecessary bin dir in $prefix/bin/zig
  mv release/bin/zig release/
  rmdir release/bin
  # Remove the unnecessary zig dir in $prefix/lib/zig/std/std.zig
  mv release/lib/zig release/lib2
  rmdir release/lib
  mv release/lib2 release/lib
  VERSION=$("$ZIG_EXE" version)
  DIRNAME="zig-macos-$ARCH-$VERSION"
  TARBALL="$DIRNAME.tar.xz"
  # gnu-tar: deterministic archive contents (owner/sort), rename top dir.
  gtar cJf "$TARBALL" release/ --owner=root --sort=name --transform="s,^release,${DIRNAME},"
  ln "$TARBALL" "$BUILD_ARTIFACTSTAGINGDIRECTORY/."
  # DownloadSecureFile@1 places the s3 credentials; move into s3cmd's default path.
  mv "$DOWNLOADSECUREFILE_SECUREFILEPATH" "$HOME/.s3cfg"
  s3cmd put -P --add-header="cache-control: public, max-age=31536000, immutable" "$TARBALL" s3://ziglang.org/builds/
  SHASUM=$(shasum -a 256 "$TARBALL" | cut '-d ' -f1)
  BYTESIZE=$(wc -c < "$TARBALL")
  JSONFILE="macos-$GITBRANCH.json"
  touch "$JSONFILE"
  echo "{\"tarball\": \"$TARBALL\"," >>"$JSONFILE"
  echo "\"shasum\": \"$SHASUM\"," >>"$JSONFILE"
  echo "\"size\": \"$BYTESIZE\"}" >>"$JSONFILE"
  s3cmd put -P --add-header="Cache-Control: max-age=0, must-revalidate" "$JSONFILE" "s3://ziglang.org/builds/$JSONFILE"
  s3cmd put -P "$JSONFILE" "s3://ziglang.org/builds/$ARCH-macos-$VERSION.json"
  # `set -x` causes these variables to be mangled.
  # See https://developercommunity.visualstudio.com/content/problem/375679/pipeline-variable-incorrectly-inserts-single-quote.html
  set +x
  echo "##vso[task.setvariable variable=tarball;isOutput=true]$TARBALL"
  echo "##vso[task.setvariable variable=shasum;isOutput=true]$SHASUM"
  echo "##vso[task.setvariable variable=bytesize;isOutput=true]$BYTESIZE"
fi

View File

@ -12,6 +12,19 @@ jobs:
- script: ci/azure/macos_script
name: main
displayName: 'Build and test'
- job: BuildMacOS_arm64
pool:
vmImage: 'macOS-10.15'
timeoutInMinutes: 60
steps:
- task: DownloadSecureFile@1
inputs:
secureFile: s3cfg
- script: ci/azure/macos_arm64_script
name: main
displayName: 'Build and cross-compile'
- job: BuildLinux
pool:
vmImage: 'ubuntu-18.04'
@ -31,7 +44,7 @@ jobs:
timeoutInMinutes: 360
steps:
- powershell: |
(New-Object Net.WebClient).DownloadFile("https://github.com/msys2/msys2-installer/releases/download/2021-01-05/msys2-base-x86_64-20210105.sfx.exe", "sfx.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/msys2/msys2-installer/releases/download/2021-02-28/msys2-base-x86_64-20210228.sfx.exe", "sfx.exe")
.\sfx.exe -y -o\
del sfx.exe
displayName: Download/Extract/Install MSYS2

View File

@ -3,7 +3,7 @@
set -x
set -e
pacman -Su --needed --noconfirm
pacman -Suy --needed --noconfirm
pacman -S --needed --noconfirm wget p7zip python3-pip tar xz
pip install s3cmd

View File

@ -9952,9 +9952,9 @@ export fn decode_base_64(
) usize {
const src = source_ptr[0..source_len];
const dest = dest_ptr[0..dest_len];
const base64_decoder = base64.standard_decoder_unsafe;
const decoded_size = base64_decoder.calcSize(src);
base64_decoder.decode(dest[0..decoded_size], src);
const base64_decoder = base64.standard.Decoder;
const decoded_size = base64_decoder.calcSizeForSlice(src) catch unreachable;
base64_decoder.decode(dest[0..decoded_size], src) catch unreachable;
return decoded_size;
}
{#code_end#}

View File

@ -687,8 +687,9 @@ pub fn ArrayHashMapUnmanaged(
/// Removes the last inserted `Entry` in the hash map and returns it.
pub fn pop(self: *Self) Entry {
const top = self.entries.pop();
const top = self.entries.items[self.entries.items.len - 1];
_ = self.removeWithHash(top.key, top.hash, .index_only);
self.entries.items.len -= 1;
return top;
}
@ -1258,19 +1259,18 @@ test "pop" {
var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
testing.expect((try map.fetchPut(1, 11)) == null);
testing.expect((try map.fetchPut(2, 22)) == null);
testing.expect((try map.fetchPut(3, 33)) == null);
testing.expect((try map.fetchPut(4, 44)) == null);
// Insert just enough entries so that the map expands. Afterwards,
// pop all entries out of the map.
const pop1 = map.pop();
testing.expect(pop1.key == 4 and pop1.value == 44);
const pop2 = map.pop();
testing.expect(pop2.key == 3 and pop2.value == 33);
const pop3 = map.pop();
testing.expect(pop3.key == 2 and pop3.value == 22);
const pop4 = map.pop();
testing.expect(pop4.key == 1 and pop4.value == 11);
var i: i32 = 0;
while (i < 9) : (i += 1) {
testing.expect((try map.fetchPut(i, i)) == null);
}
while (i > 0) : (i -= 1) {
const pop = map.pop();
testing.expect(pop.key == i - 1 and pop.value == i - 1);
}
}
test "reIndex" {

View File

@ -8,454 +8,452 @@ const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
pub const standard_alphabet_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
pub const standard_pad_char = '=';
pub const standard_encoder = Base64Encoder.init(standard_alphabet_chars, standard_pad_char);
pub const Error = error{
InvalidCharacter,
InvalidPadding,
NoSpaceLeft,
};
/// Base64 codecs
pub const Codecs = struct {
alphabet_chars: [64]u8,
pad_char: ?u8,
decoderWithIgnore: fn (ignore: []const u8) Base64DecoderWithIgnore,
Encoder: Base64Encoder,
Decoder: Base64Decoder,
};
pub const standard_alphabet_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".*;
fn standardBase64DecoderWithIgnore(ignore: []const u8) Base64DecoderWithIgnore {
return Base64DecoderWithIgnore.init(standard_alphabet_chars, '=', ignore);
}
/// Standard Base64 codecs, with padding
pub const standard = Codecs{
.alphabet_chars = standard_alphabet_chars,
.pad_char = '=',
.decoderWithIgnore = standardBase64DecoderWithIgnore,
.Encoder = Base64Encoder.init(standard_alphabet_chars, '='),
.Decoder = Base64Decoder.init(standard_alphabet_chars, '='),
};
/// Standard Base64 codecs, without padding
pub const standard_no_pad = Codecs{
.alphabet_chars = standard_alphabet_chars,
.pad_char = null,
.decoderWithIgnore = standardBase64DecoderWithIgnore,
.Encoder = Base64Encoder.init(standard_alphabet_chars, null),
.Decoder = Base64Decoder.init(standard_alphabet_chars, null),
};
pub const url_safe_alphabet_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_".*;
fn urlSafeBase64DecoderWithIgnore(ignore: []const u8) Base64DecoderWithIgnore {
return Base64DecoderWithIgnore.init(url_safe_alphabet_chars, null, ignore);
}
/// URL-safe Base64 codecs, with padding
pub const url_safe = Codecs{
.alphabet_chars = url_safe_alphabet_chars,
.pad_char = '=',
.decoderWithIgnore = urlSafeBase64DecoderWithIgnore,
.Encoder = Base64Encoder.init(url_safe_alphabet_chars, '='),
.Decoder = Base64Decoder.init(url_safe_alphabet_chars, '='),
};
/// URL-safe Base64 codecs, without padding
pub const url_safe_no_pad = Codecs{
.alphabet_chars = url_safe_alphabet_chars,
.pad_char = null,
.decoderWithIgnore = urlSafeBase64DecoderWithIgnore,
.Encoder = Base64Encoder.init(url_safe_alphabet_chars, null),
.Decoder = Base64Decoder.init(url_safe_alphabet_chars, null),
};
// Backwards compatibility
/// Deprecated - Use `standard.pad_char`
pub const standard_pad_char = standard.pad_char;
/// Deprecated - Use `standard.Encoder`
pub const standard_encoder = standard.Encoder;
/// Deprecated - Use `standard.Decoder`
pub const standard_decoder = standard.Decoder;
pub const Base64Encoder = struct {
alphabet_chars: []const u8,
pad_char: u8,
alphabet_chars: [64]u8,
pad_char: ?u8,
/// a bunch of assertions, then simply pass the data right through.
pub fn init(alphabet_chars: []const u8, pad_char: u8) Base64Encoder {
/// A bunch of assertions, then simply pass the data right through.
pub fn init(alphabet_chars: [64]u8, pad_char: ?u8) Base64Encoder {
assert(alphabet_chars.len == 64);
var char_in_alphabet = [_]bool{false} ** 256;
for (alphabet_chars) |c| {
assert(!char_in_alphabet[c]);
assert(c != pad_char);
assert(pad_char == null or c != pad_char.?);
char_in_alphabet[c] = true;
}
return Base64Encoder{
.alphabet_chars = alphabet_chars,
.pad_char = pad_char,
};
}
/// ceil(source_len * 4/3)
pub fn calcSize(source_len: usize) usize {
return @divTrunc(source_len + 2, 3) * 4;
/// Compute the encoded length
pub fn calcSize(encoder: *const Base64Encoder, source_len: usize) usize {
if (encoder.pad_char != null) {
return @divTrunc(source_len + 2, 3) * 4;
} else {
const leftover = source_len % 3;
return @divTrunc(source_len, 3) * 4 + @divTrunc(leftover * 4 + 2, 3);
}
}
/// dest.len must be what you get from ::calcSize.
/// dest.len must at least be what you get from ::calcSize.
pub fn encode(encoder: *const Base64Encoder, dest: []u8, source: []const u8) []const u8 {
assert(dest.len >= Base64Encoder.calcSize(source.len));
const out_len = encoder.calcSize(source.len);
assert(dest.len >= out_len);
var i: usize = 0;
var out_index: usize = 0;
while (i + 2 < source.len) : (i += 3) {
dest[out_index] = encoder.alphabet_chars[(source[i] >> 2) & 0x3f];
out_index += 1;
const nibbles = source.len / 3;
const leftover = source.len - 3 * nibbles;
dest[out_index] = encoder.alphabet_chars[((source[i] & 0x3) << 4) | ((source[i + 1] & 0xf0) >> 4)];
out_index += 1;
dest[out_index] = encoder.alphabet_chars[((source[i + 1] & 0xf) << 2) | ((source[i + 2] & 0xc0) >> 6)];
out_index += 1;
dest[out_index] = encoder.alphabet_chars[source[i + 2] & 0x3f];
out_index += 1;
}
if (i < source.len) {
dest[out_index] = encoder.alphabet_chars[(source[i] >> 2) & 0x3f];
out_index += 1;
if (i + 1 == source.len) {
dest[out_index] = encoder.alphabet_chars[(source[i] & 0x3) << 4];
out_index += 1;
dest[out_index] = encoder.pad_char;
out_index += 1;
} else {
dest[out_index] = encoder.alphabet_chars[((source[i] & 0x3) << 4) | ((source[i + 1] & 0xf0) >> 4)];
out_index += 1;
dest[out_index] = encoder.alphabet_chars[(source[i + 1] & 0xf) << 2];
out_index += 1;
var acc: u12 = 0;
var acc_len: u4 = 0;
var out_idx: usize = 0;
for (source) |v| {
acc = (acc << 8) + v;
acc_len += 8;
while (acc_len >= 6) {
acc_len -= 6;
dest[out_idx] = encoder.alphabet_chars[@truncate(u6, (acc >> acc_len))];
out_idx += 1;
}
dest[out_index] = encoder.pad_char;
out_index += 1;
}
return dest[0..out_index];
if (acc_len > 0) {
dest[out_idx] = encoder.alphabet_chars[@truncate(u6, (acc << 6 - acc_len))];
out_idx += 1;
}
if (encoder.pad_char) |pad_char| {
for (dest[out_idx..]) |*pad| {
pad.* = pad_char;
}
}
return dest[0..out_len];
}
};
pub const standard_decoder = Base64Decoder.init(standard_alphabet_chars, standard_pad_char);
pub const Base64Decoder = struct {
const invalid_char: u8 = 0xff;
/// e.g. 'A' => 0.
/// undefined for any value not in the 64 alphabet chars.
/// `invalid_char` for any value not in the 64 alphabet chars.
char_to_index: [256]u8,
pad_char: ?u8,
/// true only for the 64 chars in the alphabet, not the pad char.
char_in_alphabet: [256]bool,
pad_char: u8,
pub fn init(alphabet_chars: []const u8, pad_char: u8) Base64Decoder {
assert(alphabet_chars.len == 64);
pub fn init(alphabet_chars: [64]u8, pad_char: ?u8) Base64Decoder {
var result = Base64Decoder{
.char_to_index = undefined,
.char_in_alphabet = [_]bool{false} ** 256,
.char_to_index = [_]u8{invalid_char} ** 256,
.pad_char = pad_char,
};
var char_in_alphabet = [_]bool{false} ** 256;
for (alphabet_chars) |c, i| {
assert(!result.char_in_alphabet[c]);
assert(c != pad_char);
assert(!char_in_alphabet[c]);
assert(pad_char == null or c != pad_char.?);
result.char_to_index[c] = @intCast(u8, i);
result.char_in_alphabet[c] = true;
char_in_alphabet[c] = true;
}
return result;
}
/// If the encoded buffer is detected to be invalid, returns error.InvalidPadding.
pub fn calcSize(decoder: *const Base64Decoder, source: []const u8) !usize {
if (source.len % 4 != 0) return error.InvalidPadding;
return calcDecodedSizeExactUnsafe(source, decoder.pad_char);
/// Return the maximum possible decoded size for a given input length - The actual length may be less if the input includes padding.
/// `InvalidPadding` is returned if the input length is not valid.
pub fn calcSizeUpperBound(decoder: *const Base64Decoder, source_len: usize) Error!usize {
var result = source_len / 4 * 3;
const leftover = source_len % 4;
if (decoder.pad_char != null) {
if (leftover % 4 != 0) return error.InvalidPadding;
} else {
if (leftover % 4 == 1) return error.InvalidPadding;
result += leftover * 3 / 4;
}
return result;
}
/// Return the exact decoded size for a slice.
/// `InvalidPadding` is returned if the input length is not valid.
pub fn calcSizeForSlice(decoder: *const Base64Decoder, source: []const u8) Error!usize {
const source_len = source.len;
var result = try decoder.calcSizeUpperBound(source_len);
if (decoder.pad_char) |pad_char| {
if (source_len >= 1 and source[source_len - 1] == pad_char) result -= 1;
if (source_len >= 2 and source[source_len - 2] == pad_char) result -= 1;
}
return result;
}
/// dest.len must be what you get from ::calcSize.
/// invalid characters result in error.InvalidCharacter.
/// invalid padding results in error.InvalidPadding.
pub fn decode(decoder: *const Base64Decoder, dest: []u8, source: []const u8) !void {
assert(dest.len == (decoder.calcSize(source) catch unreachable));
assert(source.len % 4 == 0);
var src_cursor: usize = 0;
var dest_cursor: usize = 0;
while (src_cursor < source.len) : (src_cursor += 4) {
if (!decoder.char_in_alphabet[source[src_cursor + 0]]) return error.InvalidCharacter;
if (!decoder.char_in_alphabet[source[src_cursor + 1]]) return error.InvalidCharacter;
if (src_cursor < source.len - 4 or source[src_cursor + 3] != decoder.pad_char) {
// common case
if (!decoder.char_in_alphabet[source[src_cursor + 2]]) return error.InvalidCharacter;
if (!decoder.char_in_alphabet[source[src_cursor + 3]]) return error.InvalidCharacter;
dest[dest_cursor + 0] = decoder.char_to_index[source[src_cursor + 0]] << 2 | decoder.char_to_index[source[src_cursor + 1]] >> 4;
dest[dest_cursor + 1] = decoder.char_to_index[source[src_cursor + 1]] << 4 | decoder.char_to_index[source[src_cursor + 2]] >> 2;
dest[dest_cursor + 2] = decoder.char_to_index[source[src_cursor + 2]] << 6 | decoder.char_to_index[source[src_cursor + 3]];
dest_cursor += 3;
} else if (source[src_cursor + 2] != decoder.pad_char) {
// one pad char
if (!decoder.char_in_alphabet[source[src_cursor + 2]]) return error.InvalidCharacter;
dest[dest_cursor + 0] = decoder.char_to_index[source[src_cursor + 0]] << 2 | decoder.char_to_index[source[src_cursor + 1]] >> 4;
dest[dest_cursor + 1] = decoder.char_to_index[source[src_cursor + 1]] << 4 | decoder.char_to_index[source[src_cursor + 2]] >> 2;
if (decoder.char_to_index[source[src_cursor + 2]] << 6 != 0) return error.InvalidPadding;
dest_cursor += 2;
} else {
// two pad chars
dest[dest_cursor + 0] = decoder.char_to_index[source[src_cursor + 0]] << 2 | decoder.char_to_index[source[src_cursor + 1]] >> 4;
if (decoder.char_to_index[source[src_cursor + 1]] << 4 != 0) return error.InvalidPadding;
dest_cursor += 1;
pub fn decode(decoder: *const Base64Decoder, dest: []u8, source: []const u8) Error!void {
if (decoder.pad_char != null and source.len % 4 != 0) return error.InvalidPadding;
var acc: u12 = 0;
var acc_len: u4 = 0;
var dest_idx: usize = 0;
var leftover_idx: ?usize = null;
for (source) |c, src_idx| {
const d = decoder.char_to_index[c];
if (d == invalid_char) {
if (decoder.pad_char == null or c != decoder.pad_char.?) return error.InvalidCharacter;
leftover_idx = src_idx;
break;
}
acc = (acc << 6) + d;
acc_len += 6;
if (acc_len >= 8) {
acc_len -= 8;
dest[dest_idx] = @truncate(u8, acc >> acc_len);
dest_idx += 1;
}
}
assert(src_cursor == source.len);
assert(dest_cursor == dest.len);
if (acc_len > 4 or (acc & (@as(u12, 1) << acc_len) - 1) != 0) {
return error.InvalidPadding;
}
if (leftover_idx == null) return;
var leftover = source[leftover_idx.?..];
if (decoder.pad_char) |pad_char| {
const padding_len = acc_len / 2;
var padding_chars: usize = 0;
var i: usize = 0;
for (leftover) |c| {
if (c != pad_char) {
return if (c == Base64Decoder.invalid_char) error.InvalidCharacter else error.InvalidPadding;
}
padding_chars += 1;
}
if (padding_chars != padding_len) return error.InvalidPadding;
}
}
};
pub const Base64DecoderWithIgnore = struct {
decoder: Base64Decoder,
char_is_ignored: [256]bool,
pub fn init(alphabet_chars: []const u8, pad_char: u8, ignore_chars: []const u8) Base64DecoderWithIgnore {
pub fn init(alphabet_chars: [64]u8, pad_char: ?u8, ignore_chars: []const u8) Base64DecoderWithIgnore {
var result = Base64DecoderWithIgnore{
.decoder = Base64Decoder.init(alphabet_chars, pad_char),
.char_is_ignored = [_]bool{false} ** 256,
};
for (ignore_chars) |c| {
assert(!result.decoder.char_in_alphabet[c]);
assert(result.decoder.char_to_index[c] == Base64Decoder.invalid_char);
assert(!result.char_is_ignored[c]);
assert(result.decoder.pad_char != c);
result.char_is_ignored[c] = true;
}
return result;
}
/// If no characters end up being ignored or padding, this will be the exact decoded size.
pub fn calcSizeUpperBound(encoded_len: usize) usize {
return @divTrunc(encoded_len, 4) * 3;
/// Return the maximum possible decoded size for a given input length - The actual length may be less if the input includes padding
/// `InvalidPadding` is returned if the input length is not valid.
pub fn calcSizeUpperBound(decoder_with_ignore: *const Base64DecoderWithIgnore, source_len: usize) Error!usize {
var result = source_len / 4 * 3;
if (decoder_with_ignore.decoder.pad_char == null) {
const leftover = source_len % 4;
result += leftover * 3 / 4;
}
return result;
}
/// Invalid characters that are not ignored result in error.InvalidCharacter.
/// Invalid padding results in error.InvalidPadding.
/// Decoding more data than can fit in dest results in error.OutputTooSmall. See also ::calcSizeUpperBound.
/// Decoding more data than can fit in dest results in error.NoSpaceLeft. See also ::calcSizeUpperBound.
/// Returns the number of bytes written to dest.
pub fn decode(decoder_with_ignore: *const Base64DecoderWithIgnore, dest: []u8, source: []const u8) !usize {
pub fn decode(decoder_with_ignore: *const Base64DecoderWithIgnore, dest: []u8, source: []const u8) Error!usize {
const decoder = &decoder_with_ignore.decoder;
var src_cursor: usize = 0;
var dest_cursor: usize = 0;
while (true) {
// get the next 4 chars, if available
var next_4_chars: [4]u8 = undefined;
var available_chars: usize = 0;
var pad_char_count: usize = 0;
while (available_chars < 4 and src_cursor < source.len) {
var c = source[src_cursor];
src_cursor += 1;
if (decoder.char_in_alphabet[c]) {
// normal char
next_4_chars[available_chars] = c;
available_chars += 1;
} else if (decoder_with_ignore.char_is_ignored[c]) {
// we're told to skip this one
continue;
} else if (c == decoder.pad_char) {
// the padding has begun. count the pad chars.
pad_char_count += 1;
while (src_cursor < source.len) {
c = source[src_cursor];
src_cursor += 1;
if (c == decoder.pad_char) {
pad_char_count += 1;
if (pad_char_count > 2) return error.InvalidCharacter;
} else if (decoder_with_ignore.char_is_ignored[c]) {
// we can even ignore chars during the padding
continue;
} else return error.InvalidCharacter;
}
break;
} else return error.InvalidCharacter;
var acc: u12 = 0;
var acc_len: u4 = 0;
var dest_idx: usize = 0;
var leftover_idx: ?usize = null;
for (source) |c, src_idx| {
if (decoder_with_ignore.char_is_ignored[c]) continue;
const d = decoder.char_to_index[c];
if (d == Base64Decoder.invalid_char) {
if (decoder.pad_char == null or c != decoder.pad_char.?) return error.InvalidCharacter;
leftover_idx = src_idx;
break;
}
switch (available_chars) {
4 => {
// common case
if (dest_cursor + 3 > dest.len) return error.OutputTooSmall;
assert(pad_char_count == 0);
dest[dest_cursor + 0] = decoder.char_to_index[next_4_chars[0]] << 2 | decoder.char_to_index[next_4_chars[1]] >> 4;
dest[dest_cursor + 1] = decoder.char_to_index[next_4_chars[1]] << 4 | decoder.char_to_index[next_4_chars[2]] >> 2;
dest[dest_cursor + 2] = decoder.char_to_index[next_4_chars[2]] << 6 | decoder.char_to_index[next_4_chars[3]];
dest_cursor += 3;
continue;
},
3 => {
if (dest_cursor + 2 > dest.len) return error.OutputTooSmall;
if (pad_char_count != 1) return error.InvalidPadding;
dest[dest_cursor + 0] = decoder.char_to_index[next_4_chars[0]] << 2 | decoder.char_to_index[next_4_chars[1]] >> 4;
dest[dest_cursor + 1] = decoder.char_to_index[next_4_chars[1]] << 4 | decoder.char_to_index[next_4_chars[2]] >> 2;
if (decoder.char_to_index[next_4_chars[2]] << 6 != 0) return error.InvalidPadding;
dest_cursor += 2;
break;
},
2 => {
if (dest_cursor + 1 > dest.len) return error.OutputTooSmall;
if (pad_char_count != 2) return error.InvalidPadding;
dest[dest_cursor + 0] = decoder.char_to_index[next_4_chars[0]] << 2 | decoder.char_to_index[next_4_chars[1]] >> 4;
if (decoder.char_to_index[next_4_chars[1]] << 4 != 0) return error.InvalidPadding;
dest_cursor += 1;
break;
},
1 => {
return error.InvalidPadding;
},
0 => {
if (pad_char_count != 0) return error.InvalidPadding;
break;
},
else => unreachable,
acc = (acc << 6) + d;
acc_len += 6;
if (acc_len >= 8) {
if (dest_idx == dest.len) return error.NoSpaceLeft;
acc_len -= 8;
dest[dest_idx] = @truncate(u8, acc >> acc_len);
dest_idx += 1;
}
}
assert(src_cursor == source.len);
return dest_cursor;
if (acc_len > 4 or (acc & (@as(u12, 1) << acc_len) - 1) != 0) {
return error.InvalidPadding;
}
const padding_len = acc_len / 2;
if (leftover_idx == null) {
if (decoder.pad_char != null and padding_len != 0) return error.InvalidPadding;
return dest_idx;
}
var leftover = source[leftover_idx.?..];
if (decoder.pad_char) |pad_char| {
var padding_chars: usize = 0;
var i: usize = 0;
for (leftover) |c| {
if (decoder_with_ignore.char_is_ignored[c]) continue;
if (c != pad_char) {
return if (c == Base64Decoder.invalid_char) error.InvalidCharacter else error.InvalidPadding;
}
padding_chars += 1;
}
if (padding_chars != padding_len) return error.InvalidPadding;
}
return dest_idx;
}
};
pub const standard_decoder_unsafe = Base64DecoderUnsafe.init(standard_alphabet_chars, standard_pad_char);
pub const Base64DecoderUnsafe = struct {
/// e.g. 'A' => 0.
/// undefined for any value not in the 64 alphabet chars.
char_to_index: [256]u8,
pad_char: u8,
pub fn init(alphabet_chars: []const u8, pad_char: u8) Base64DecoderUnsafe {
assert(alphabet_chars.len == 64);
var result = Base64DecoderUnsafe{
.char_to_index = undefined,
.pad_char = pad_char,
};
for (alphabet_chars) |c, i| {
assert(c != pad_char);
result.char_to_index[c] = @intCast(u8, i);
}
return result;
}
/// The source buffer must be valid.
pub fn calcSize(decoder: *const Base64DecoderUnsafe, source: []const u8) usize {
return calcDecodedSizeExactUnsafe(source, decoder.pad_char);
}
/// dest.len must be what you get from ::calcDecodedSizeExactUnsafe.
/// invalid characters or padding will result in undefined values.
pub fn decode(decoder: *const Base64DecoderUnsafe, dest: []u8, source: []const u8) void {
assert(dest.len == decoder.calcSize(source));
var src_index: usize = 0;
var dest_index: usize = 0;
var in_buf_len: usize = source.len;
while (in_buf_len > 0 and source[in_buf_len - 1] == decoder.pad_char) {
in_buf_len -= 1;
}
while (in_buf_len > 4) {
dest[dest_index] = decoder.char_to_index[source[src_index + 0]] << 2 | decoder.char_to_index[source[src_index + 1]] >> 4;
dest_index += 1;
dest[dest_index] = decoder.char_to_index[source[src_index + 1]] << 4 | decoder.char_to_index[source[src_index + 2]] >> 2;
dest_index += 1;
dest[dest_index] = decoder.char_to_index[source[src_index + 2]] << 6 | decoder.char_to_index[source[src_index + 3]];
dest_index += 1;
src_index += 4;
in_buf_len -= 4;
}
if (in_buf_len > 1) {
dest[dest_index] = decoder.char_to_index[source[src_index + 0]] << 2 | decoder.char_to_index[source[src_index + 1]] >> 4;
dest_index += 1;
}
if (in_buf_len > 2) {
dest[dest_index] = decoder.char_to_index[source[src_index + 1]] << 4 | decoder.char_to_index[source[src_index + 2]] >> 2;
dest_index += 1;
}
if (in_buf_len > 3) {
dest[dest_index] = decoder.char_to_index[source[src_index + 2]] << 6 | decoder.char_to_index[source[src_index + 3]];
dest_index += 1;
}
}
};
fn calcDecodedSizeExactUnsafe(source: []const u8, pad_char: u8) usize {
if (source.len == 0) return 0;
var result = @divExact(source.len, 4) * 3;
if (source[source.len - 1] == pad_char) {
result -= 1;
if (source[source.len - 2] == pad_char) {
result -= 1;
}
}
return result;
}
test "base64" {
@setEvalBranchQuota(8000);
testBase64() catch unreachable;
comptime (testBase64() catch unreachable);
comptime testAllApis(standard, "comptime", "Y29tcHRpbWU=") catch unreachable;
}
test "base64 url_safe_no_pad" {
@setEvalBranchQuota(8000);
testBase64UrlSafeNoPad() catch unreachable;
comptime testAllApis(url_safe_no_pad, "comptime", "Y29tcHRpbWU") catch unreachable;
}
fn testBase64() !void {
try testAllApis("", "");
try testAllApis("f", "Zg==");
try testAllApis("fo", "Zm8=");
try testAllApis("foo", "Zm9v");
try testAllApis("foob", "Zm9vYg==");
try testAllApis("fooba", "Zm9vYmE=");
try testAllApis("foobar", "Zm9vYmFy");
const codecs = standard;
try testDecodeIgnoreSpace("", " ");
try testDecodeIgnoreSpace("f", "Z g= =");
try testDecodeIgnoreSpace("fo", " Zm8=");
try testDecodeIgnoreSpace("foo", "Zm9v ");
try testDecodeIgnoreSpace("foob", "Zm9vYg = = ");
try testDecodeIgnoreSpace("fooba", "Zm9v YmE=");
try testDecodeIgnoreSpace("foobar", " Z m 9 v Y m F y ");
try testAllApis(codecs, "", "");
try testAllApis(codecs, "f", "Zg==");
try testAllApis(codecs, "fo", "Zm8=");
try testAllApis(codecs, "foo", "Zm9v");
try testAllApis(codecs, "foob", "Zm9vYg==");
try testAllApis(codecs, "fooba", "Zm9vYmE=");
try testAllApis(codecs, "foobar", "Zm9vYmFy");
try testDecodeIgnoreSpace(codecs, "", " ");
try testDecodeIgnoreSpace(codecs, "f", "Z g= =");
try testDecodeIgnoreSpace(codecs, "fo", " Zm8=");
try testDecodeIgnoreSpace(codecs, "foo", "Zm9v ");
try testDecodeIgnoreSpace(codecs, "foob", "Zm9vYg = = ");
try testDecodeIgnoreSpace(codecs, "fooba", "Zm9v YmE=");
try testDecodeIgnoreSpace(codecs, "foobar", " Z m 9 v Y m F y ");
// test getting some api errors
try testError("A", error.InvalidPadding);
try testError("AA", error.InvalidPadding);
try testError("AAA", error.InvalidPadding);
try testError("A..A", error.InvalidCharacter);
try testError("AA=A", error.InvalidCharacter);
try testError("AA/=", error.InvalidPadding);
try testError("A/==", error.InvalidPadding);
try testError("A===", error.InvalidCharacter);
try testError("====", error.InvalidCharacter);
try testError(codecs, "A", error.InvalidPadding);
try testError(codecs, "AA", error.InvalidPadding);
try testError(codecs, "AAA", error.InvalidPadding);
try testError(codecs, "A..A", error.InvalidCharacter);
try testError(codecs, "AA=A", error.InvalidPadding);
try testError(codecs, "AA/=", error.InvalidPadding);
try testError(codecs, "A/==", error.InvalidPadding);
try testError(codecs, "A===", error.InvalidPadding);
try testError(codecs, "====", error.InvalidPadding);
try testOutputTooSmallError("AA==");
try testOutputTooSmallError("AAA=");
try testOutputTooSmallError("AAAA");
try testOutputTooSmallError("AAAAAA==");
try testNoSpaceLeftError(codecs, "AA==");
try testNoSpaceLeftError(codecs, "AAA=");
try testNoSpaceLeftError(codecs, "AAAA");
try testNoSpaceLeftError(codecs, "AAAAAA==");
}
fn testAllApis(expected_decoded: []const u8, expected_encoded: []const u8) !void {
fn testBase64UrlSafeNoPad() !void {
const codecs = url_safe_no_pad;
try testAllApis(codecs, "", "");
try testAllApis(codecs, "f", "Zg");
try testAllApis(codecs, "fo", "Zm8");
try testAllApis(codecs, "foo", "Zm9v");
try testAllApis(codecs, "foob", "Zm9vYg");
try testAllApis(codecs, "fooba", "Zm9vYmE");
try testAllApis(codecs, "foobar", "Zm9vYmFy");
try testDecodeIgnoreSpace(codecs, "", " ");
try testDecodeIgnoreSpace(codecs, "f", "Z g ");
try testDecodeIgnoreSpace(codecs, "fo", " Zm8");
try testDecodeIgnoreSpace(codecs, "foo", "Zm9v ");
try testDecodeIgnoreSpace(codecs, "foob", "Zm9vYg ");
try testDecodeIgnoreSpace(codecs, "fooba", "Zm9v YmE");
try testDecodeIgnoreSpace(codecs, "foobar", " Z m 9 v Y m F y ");
// test getting some api errors
try testError(codecs, "A", error.InvalidPadding);
try testError(codecs, "AAA=", error.InvalidCharacter);
try testError(codecs, "A..A", error.InvalidCharacter);
try testError(codecs, "AA=A", error.InvalidCharacter);
try testError(codecs, "AA/=", error.InvalidCharacter);
try testError(codecs, "A/==", error.InvalidCharacter);
try testError(codecs, "A===", error.InvalidCharacter);
try testError(codecs, "====", error.InvalidCharacter);
try testNoSpaceLeftError(codecs, "AA");
try testNoSpaceLeftError(codecs, "AAA");
try testNoSpaceLeftError(codecs, "AAAA");
try testNoSpaceLeftError(codecs, "AAAAAA");
}
fn testAllApis(codecs: Codecs, expected_decoded: []const u8, expected_encoded: []const u8) !void {
// Base64Encoder
{
var buffer: [0x100]u8 = undefined;
const encoded = standard_encoder.encode(&buffer, expected_decoded);
const encoded = codecs.Encoder.encode(&buffer, expected_decoded);
testing.expectEqualSlices(u8, expected_encoded, encoded);
}
// Base64Decoder
{
var buffer: [0x100]u8 = undefined;
var decoded = buffer[0..try standard_decoder.calcSize(expected_encoded)];
try standard_decoder.decode(decoded, expected_encoded);
var decoded = buffer[0..try codecs.Decoder.calcSizeForSlice(expected_encoded)];
try codecs.Decoder.decode(decoded, expected_encoded);
testing.expectEqualSlices(u8, expected_decoded, decoded);
}
// Base64DecoderWithIgnore
{
const standard_decoder_ignore_nothing = Base64DecoderWithIgnore.init(standard_alphabet_chars, standard_pad_char, "");
const decoder_ignore_nothing = codecs.decoderWithIgnore("");
var buffer: [0x100]u8 = undefined;
var decoded = buffer[0..Base64DecoderWithIgnore.calcSizeUpperBound(expected_encoded.len)];
var written = try standard_decoder_ignore_nothing.decode(decoded, expected_encoded);
var decoded = buffer[0..try decoder_ignore_nothing.calcSizeUpperBound(expected_encoded.len)];
var written = try decoder_ignore_nothing.decode(decoded, expected_encoded);
testing.expect(written <= decoded.len);
testing.expectEqualSlices(u8, expected_decoded, decoded[0..written]);
}
// Base64DecoderUnsafe
{
var buffer: [0x100]u8 = undefined;
var decoded = buffer[0..standard_decoder_unsafe.calcSize(expected_encoded)];
standard_decoder_unsafe.decode(decoded, expected_encoded);
testing.expectEqualSlices(u8, expected_decoded, decoded);
}
}
fn testDecodeIgnoreSpace(expected_decoded: []const u8, encoded: []const u8) !void {
const standard_decoder_ignore_space = Base64DecoderWithIgnore.init(standard_alphabet_chars, standard_pad_char, " ");
fn testDecodeIgnoreSpace(codecs: Codecs, expected_decoded: []const u8, encoded: []const u8) !void {
const decoder_ignore_space = codecs.decoderWithIgnore(" ");
var buffer: [0x100]u8 = undefined;
var decoded = buffer[0..Base64DecoderWithIgnore.calcSizeUpperBound(encoded.len)];
var written = try standard_decoder_ignore_space.decode(decoded, encoded);
var decoded = buffer[0..try decoder_ignore_space.calcSizeUpperBound(encoded.len)];
var written = try decoder_ignore_space.decode(decoded, encoded);
testing.expectEqualSlices(u8, expected_decoded, decoded[0..written]);
}
fn testError(encoded: []const u8, expected_err: anyerror) !void {
const standard_decoder_ignore_space = Base64DecoderWithIgnore.init(standard_alphabet_chars, standard_pad_char, " ");
fn testError(codecs: Codecs, encoded: []const u8, expected_err: anyerror) !void {
const decoder_ignore_space = codecs.decoderWithIgnore(" ");
var buffer: [0x100]u8 = undefined;
if (standard_decoder.calcSize(encoded)) |decoded_size| {
if (codecs.Decoder.calcSizeForSlice(encoded)) |decoded_size| {
var decoded = buffer[0..decoded_size];
if (standard_decoder.decode(decoded, encoded)) |_| {
if (codecs.Decoder.decode(decoded, encoded)) |_| {
return error.ExpectedError;
} else |err| if (err != expected_err) return err;
} else |err| if (err != expected_err) return err;
if (standard_decoder_ignore_space.decode(buffer[0..], encoded)) |_| {
if (decoder_ignore_space.decode(buffer[0..], encoded)) |_| {
return error.ExpectedError;
} else |err| if (err != expected_err) return err;
}
fn testOutputTooSmallError(encoded: []const u8) !void {
const standard_decoder_ignore_space = Base64DecoderWithIgnore.init(standard_alphabet_chars, standard_pad_char, " ");
fn testNoSpaceLeftError(codecs: Codecs, encoded: []const u8) !void {
const decoder_ignore_space = codecs.decoderWithIgnore(" ");
var buffer: [0x100]u8 = undefined;
var decoded = buffer[0 .. calcDecodedSizeExactUnsafe(encoded, standard_pad_char) - 1];
if (standard_decoder_ignore_space.decode(decoded, encoded)) |_| {
var decoded = buffer[0 .. (try codecs.Decoder.calcSizeForSlice(encoded)) - 1];
if (decoder_ignore_space.decode(decoded, encoded)) |_| {
return error.ExpectedError;
} else |err| if (err != error.OutputTooSmall) return err;
} else |err| if (err != error.NoSpaceLeft) return err;
}

View File

@ -51,7 +51,7 @@ pub const Builder = struct {
default_step: *Step,
env_map: *BufMap,
top_level_steps: ArrayList(*TopLevelStep),
install_prefix: ?[]const u8,
install_prefix: []const u8,
dest_dir: ?[]const u8,
lib_dir: []const u8,
exe_dir: []const u8,
@ -156,7 +156,7 @@ pub const Builder = struct {
.default_step = undefined,
.env_map = env_map,
.search_prefixes = ArrayList([]const u8).init(allocator),
.install_prefix = null,
.install_prefix = undefined,
.lib_dir = undefined,
.exe_dir = undefined,
.h_dir = undefined,
@ -190,22 +190,13 @@ pub const Builder = struct {
}
/// This function is intended to be called by std/special/build_runner.zig, not a build.zig file.
pub fn setInstallPrefix(self: *Builder, optional_prefix: ?[]const u8) void {
self.install_prefix = optional_prefix;
}
/// This function is intended to be called by std/special/build_runner.zig, not a build.zig file.
pub fn resolveInstallPrefix(self: *Builder) void {
pub fn resolveInstallPrefix(self: *Builder, install_prefix: ?[]const u8) void {
if (self.dest_dir) |dest_dir| {
const install_prefix = self.install_prefix orelse "/usr";
self.install_path = fs.path.join(self.allocator, &[_][]const u8{ dest_dir, install_prefix }) catch unreachable;
self.install_prefix = install_prefix orelse "/usr";
self.install_path = fs.path.join(self.allocator, &[_][]const u8{ dest_dir, self.install_prefix }) catch unreachable;
} else {
const install_prefix = self.install_prefix orelse blk: {
const p = self.cache_root;
self.install_prefix = p;
break :blk p;
};
self.install_path = install_prefix;
self.install_prefix = install_prefix orelse self.cache_root;
self.install_path = self.install_prefix;
}
self.lib_dir = fs.path.join(self.allocator, &[_][]const u8{ self.install_path, "lib" }) catch unreachable;
self.exe_dir = fs.path.join(self.allocator, &[_][]const u8{ self.install_path, "bin" }) catch unreachable;

View File

@ -1250,9 +1250,9 @@ fn formatDuration(ns: u64, comptime fmt: []const u8, options: std.fmt.FormatOpti
const kunits = ns_remaining * 1000 / unit.ns;
if (kunits >= 1000) {
try formatInt(kunits / 1000, 10, false, .{}, writer);
if (kunits > 1000) {
const frac = kunits % 1000;
if (frac > 0) {
// Write up to 3 decimal places
const frac = kunits % 1000;
var buf = [_]u8{ '.', 0, 0, 0 };
_ = formatIntBuf(buf[1..], frac, 10, false, .{ .fill = '0', .width = 3 });
var end: usize = 4;
@ -1286,9 +1286,14 @@ test "fmtDuration" {
.{ .s = "1us", .d = std.time.ns_per_us },
.{ .s = "1.45us", .d = 1450 },
.{ .s = "1.5us", .d = 3 * std.time.ns_per_us / 2 },
.{ .s = "14.5us", .d = 14500 },
.{ .s = "145us", .d = 145000 },
.{ .s = "999.999us", .d = std.time.ns_per_ms - 1 },
.{ .s = "1ms", .d = std.time.ns_per_ms + 1 },
.{ .s = "1.5ms", .d = 3 * std.time.ns_per_ms / 2 },
.{ .s = "1.11ms", .d = 1110000 },
.{ .s = "1.111ms", .d = 1111000 },
.{ .s = "1.111ms", .d = 1111100 },
.{ .s = "999.999ms", .d = std.time.ns_per_s - 1 },
.{ .s = "1s", .d = std.time.ns_per_s },
.{ .s = "59.999s", .d = std.time.ns_per_min - 1 },

View File

@ -50,13 +50,13 @@ pub const MAX_PATH_BYTES = switch (builtin.os.tag) {
else => @compileError("Unsupported OS"),
};
pub const base64_alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
pub const base64_alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_".*;
/// Base64 encoder, replacing the standard `+/` with `-_` so that it can be used in a file name on any filesystem.
pub const base64_encoder = base64.Base64Encoder.init(base64_alphabet, base64.standard_pad_char);
pub const base64_encoder = base64.Base64Encoder.init(base64_alphabet, null);
/// Base64 decoder, replacing the standard `+/` with `-_` so that it can be used in a file name on any filesystem.
pub const base64_decoder = base64.Base64Decoder.init(base64_alphabet, base64.standard_pad_char);
pub const base64_decoder = base64.Base64Decoder.init(base64_alphabet, null);
/// Whether or not async file system syscalls need a dedicated thread because the operating
/// system does not support non-blocking I/O on the file system.
@ -77,7 +77,7 @@ pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path:
const dirname = path.dirname(new_path) orelse ".";
var rand_buf: [AtomicFile.RANDOM_BYTES]u8 = undefined;
const tmp_path = try allocator.alloc(u8, dirname.len + 1 + base64.Base64Encoder.calcSize(rand_buf.len));
const tmp_path = try allocator.alloc(u8, dirname.len + 1 + base64_encoder.calcSize(rand_buf.len));
defer allocator.free(tmp_path);
mem.copy(u8, tmp_path[0..], dirname);
tmp_path[dirname.len] = path.sep;
@ -142,7 +142,7 @@ pub const AtomicFile = struct {
const InitError = File.OpenError;
const RANDOM_BYTES = 12;
const TMP_PATH_LEN = base64.Base64Encoder.calcSize(RANDOM_BYTES);
const TMP_PATH_LEN = base64_encoder.calcSize(RANDOM_BYTES);
/// Note that the `Dir.atomicFile` API may be more handy than this lower-level function.
pub fn init(

View File

@ -95,7 +95,7 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
.EnumLiteral,
.Frame,
.Float,
=> @compileError("cannot hash this type"),
=> @compileError("unable to hash type " ++ @typeName(Key)),
// Help the optimizer see that hashing an int is easy by inlining!
// TODO Check if the situation is better after #561 is resolved.

View File

@ -1373,6 +1373,20 @@ test "mem.tokenize (multibyte)" {
testing.expect(it.next() == null);
}
test "mem.tokenize (reset)" {
var it = tokenize(" abc def ghi ", " ");
testing.expect(eql(u8, it.next().?, "abc"));
testing.expect(eql(u8, it.next().?, "def"));
testing.expect(eql(u8, it.next().?, "ghi"));
it.reset();
testing.expect(eql(u8, it.next().?, "abc"));
testing.expect(eql(u8, it.next().?, "def"));
testing.expect(eql(u8, it.next().?, "ghi"));
testing.expect(it.next() == null);
}
/// Returns an iterator that iterates over the slices of `buffer` that
/// are separated by bytes in `delimiter`.
/// split("abc|def||ghi", "|")
@ -1471,6 +1485,11 @@ pub const TokenIterator = struct {
return self.buffer[index..];
}
/// Resets the iterator to the initial token.
pub fn reset(self: *TokenIterator) void {
self.index = 0;
}
fn isSplitByte(self: TokenIterator, byte: u8) bool {
for (self.delimiter_bytes) |delimiter_byte| {
if (byte == delimiter_byte) {

View File

@ -5610,6 +5610,7 @@ pub fn recvfrom(
EAGAIN => return error.WouldBlock,
ENOMEM => return error.SystemResources,
ECONNREFUSED => return error.ConnectionRefused,
ECONNRESET => return error.ConnectionResetByPeer,
else => |err| return unexpectedErrno(err),
}
}

View File

@ -115,6 +115,9 @@ pub fn syscall5(number: SYS, arg1: usize, arg2: usize, arg3: usize, arg4: usize,
);
}
// NOTE: The o32 calling convention requires the callee to reserve 16 bytes for
// the first four arguments even though they're passed in $a0-$a3.
pub fn syscall6(
number: SYS,
arg1: usize,
@ -146,6 +149,40 @@ pub fn syscall6(
);
}
pub fn syscall7(
number: SYS,
arg1: usize,
arg2: usize,
arg3: usize,
arg4: usize,
arg5: usize,
arg6: usize,
arg7: usize,
) usize {
return asm volatile (
\\ .set noat
\\ subu $sp, $sp, 32
\\ sw %[arg5], 16($sp)
\\ sw %[arg6], 20($sp)
\\ sw %[arg7], 24($sp)
\\ syscall
\\ addu $sp, $sp, 32
\\ blez $7, 1f
\\ subu $2, $0, $2
\\ 1:
: [ret] "={$2}" (-> usize)
: [number] "{$2}" (@enumToInt(number)),
[arg1] "{$4}" (arg1),
[arg2] "{$5}" (arg2),
[arg3] "{$6}" (arg3),
[arg4] "{$7}" (arg4),
[arg5] "r" (arg5),
[arg6] "r" (arg6),
[arg7] "r" (arg7)
: "memory", "cc", "$7"
);
}
/// This matches the libc clone function.
pub extern fn clone(func: fn (arg: usize) callconv(.C) u8, stack: usize, flags: u32, arg: usize, ptid: *i32, tls: usize, ctid: *i32) usize;

View File

@ -78,7 +78,8 @@ pub const BootServices = extern struct {
/// Returns an array of handles that support a specified protocol.
locateHandle: fn (LocateSearchType, ?*align(8) const Guid, ?*const c_void, *usize, [*]Handle) callconv(.C) Status,
locateDevicePath: Status, // TODO
/// Locates the handle to a device on the device path that supports the specified protocol
locateDevicePath: fn (*align(8) const Guid, **const DevicePathProtocol, *?Handle) callconv(.C) Status,
installConfigurationTable: Status, // TODO
/// Loads an EFI image into memory.

View File

@ -373,7 +373,7 @@ pub fn createWindowExA(dwExStyle: u32, lpClassName: [*:0]const u8, lpWindowName:
}
pub extern "user32" fn CreateWindowExW(dwExStyle: DWORD, lpClassName: [*:0]const u16, lpWindowName: [*:0]const u16, dwStyle: DWORD, X: i32, Y: i32, nWidth: i32, nHeight: i32, hWindParent: ?HWND, hMenu: ?HMENU, hInstance: HINSTANCE, lpParam: ?LPVOID) callconv(WINAPI) ?HWND;
pub var pfnCreateWindowExW: @TypeOf(RegisterClassExW) = undefined;
pub var pfnCreateWindowExW: @TypeOf(CreateWindowExW) = undefined;
pub fn createWindowExW(dwExStyle: u32, lpClassName: [*:0]const u16, lpWindowName: [*:0]const u16, dwStyle: u32, X: i32, Y: i32, nWidth: i32, nHeight: i32, hWindParent: ?HWND, hMenu: ?HMENU, hInstance: HINSTANCE, lpParam: ?*c_void) !HWND {
const function = selectSymbol(CreateWindowExW, pfnCreateWindowExW, .win2k);
const window = function(dwExStyle, lpClassName, lpWindowName, dwStyle, X, Y, nWidth, nHeight, hWindParent, hMenu, hInstance, lpParam);

View File

@ -60,6 +60,7 @@ pub fn main() !void {
const stderr_stream = io.getStdErr().writer();
const stdout_stream = io.getStdOut().writer();
var install_prefix: ?[]const u8 = null;
while (nextArg(args, &arg_idx)) |arg| {
if (mem.startsWith(u8, arg, "-D")) {
const option_contents = arg[2..];
@ -82,7 +83,7 @@ pub fn main() !void {
} else if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
return usage(builder, false, stdout_stream);
} else if (mem.eql(u8, arg, "--prefix")) {
builder.install_prefix = nextArg(args, &arg_idx) orelse {
install_prefix = nextArg(args, &arg_idx) orelse {
warn("Expected argument after --prefix\n\n", .{});
return usageAndErr(builder, false, stderr_stream);
};
@ -134,7 +135,7 @@ pub fn main() !void {
}
}
builder.resolveInstallPrefix();
builder.resolveInstallPrefix(install_prefix);
try runBuild(builder);
if (builder.validateUserInputDidItFail())
@ -162,8 +163,7 @@ fn runBuild(builder: *Builder) anyerror!void {
fn usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void {
// run the build script to collect the options
if (!already_ran_build) {
builder.setInstallPrefix(null);
builder.resolveInstallPrefix();
builder.resolveInstallPrefix(null);
try runBuild(builder);
}

View File

@ -298,7 +298,7 @@ pub const TmpDir = struct {
sub_path: [sub_path_len]u8,
const random_bytes_count = 12;
const sub_path_len = std.base64.Base64Encoder.calcSize(random_bytes_count);
const sub_path_len = std.fs.base64_encoder.calcSize(random_bytes_count);
pub fn cleanup(self: *TmpDir) void {
self.dir.close();

View File

@ -3188,7 +3188,11 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
log.debug("stage1 {s} new_digest={} error: {s}", .{ mod.root_pkg.root_src_path, digest, @errorName(err) });
log.debug("stage1 {s} new_digest={s} error: {s}", .{
mod.root_pkg.root_src_path,
std.fmt.fmtSliceHexLower(&digest),
@errorName(err),
});
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
@ -3196,10 +3200,13 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
if (!mem.eql(u8, prev_digest[0..digest.len], &digest))
break :hit;
log.debug("stage1 {s} digest={} match - skipping invocation", .{ mod.root_pkg.root_src_path, digest });
log.debug("stage1 {s} digest={s} match - skipping invocation", .{
mod.root_pkg.root_src_path,
std.fmt.fmtSliceHexLower(&digest),
});
var flags_bytes: [1]u8 = undefined;
_ = std.fmt.hexToBytes(&flags_bytes, prev_digest[digest.len..]) catch {
log.warn("bad cache stage1 digest: '{s}'", .{prev_digest});
log.warn("bad cache stage1 digest: '{s}'", .{std.fmt.fmtSliceHexLower(prev_digest)});
break :hit;
};
@ -3219,7 +3226,11 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
mod.stage1_flags = @bitCast(@TypeOf(mod.stage1_flags), flags_bytes[0]);
return;
}
log.debug("stage1 {s} prev_digest={} new_digest={}", .{ mod.root_pkg.root_src_path, prev_digest, digest });
log.debug("stage1 {s} prev_digest={s} new_digest={s}", .{
mod.root_pkg.root_src_path,
std.fmt.fmtSliceHexLower(prev_digest),
std.fmt.fmtSliceHexLower(&digest),
});
man.unhit(prev_hash_state, input_file_count);
}
@ -3366,8 +3377,8 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
// Update the small file with the digest. If it fails we can continue; it only
// means that the next invocation will have an unnecessary cache miss.
const stage1_flags_byte = @bitCast(u8, mod.stage1_flags);
log.debug("stage1 {s} final digest={} flags={x}", .{
mod.root_pkg.root_src_path, digest, stage1_flags_byte,
log.debug("stage1 {s} final digest={s} flags={x}", .{
mod.root_pkg.root_src_path, std.fmt.fmtSliceHexLower(&digest), stage1_flags_byte,
});
var digest_plus_flags: [digest.len + 2]u8 = undefined;
digest_plus_flags[0..digest.len].* = digest;

View File

@ -94,7 +94,7 @@ pub const Context = struct {
return switch (ty.tag()) {
.f32 => wasm.valtype(.f32),
.f64 => wasm.valtype(.f64),
.u32, .i32 => wasm.valtype(.i32),
.u32, .i32, .bool => wasm.valtype(.i32),
.u64, .i64 => wasm.valtype(.i64),
else => self.fail(src, "TODO - Wasm genValtype for type '{s}'", .{ty.tag()}),
};
@ -207,6 +207,7 @@ pub const Context = struct {
.alloc => self.genAlloc(inst.castTag(.alloc).?),
.arg => self.genArg(inst.castTag(.arg).?),
.block => self.genBlock(inst.castTag(.block).?),
.breakpoint => self.genBreakpoint(inst.castTag(.breakpoint).?),
.br => self.genBr(inst.castTag(.br).?),
.call => self.genCall(inst.castTag(.call).?),
.cmp_eq => self.genCmp(inst.castTag(.cmp_eq).?, .eq),
@ -220,9 +221,11 @@ pub const Context = struct {
.dbg_stmt => WValue.none,
.load => self.genLoad(inst.castTag(.load).?),
.loop => self.genLoop(inst.castTag(.loop).?),
.not => self.genNot(inst.castTag(.not).?),
.ret => self.genRet(inst.castTag(.ret).?),
.retvoid => WValue.none,
.store => self.genStore(inst.castTag(.store).?),
.unreach => self.genUnreachable(inst.castTag(.unreach).?),
else => self.fail(inst.src, "TODO: Implement wasm inst: {s}", .{inst.tag}),
};
}
@ -328,7 +331,7 @@ pub const Context = struct {
try writer.writeByte(wasm.opcode(.i32_const));
try leb.writeILEB128(writer, inst.val.toUnsignedInt());
},
.i32 => {
.i32, .bool => {
try writer.writeByte(wasm.opcode(.i32_const));
try leb.writeILEB128(writer, inst.val.toSignedInt());
},
@ -413,7 +416,14 @@ pub const Context = struct {
// insert blocks at the position of `offset` so
// the condition can jump to it
const offset = condition.code_offset;
const offset = switch (condition) {
.code_offset => |offset| offset,
else => blk: {
const offset = self.code.items.len;
try self.emitWValue(condition);
break :blk offset;
},
};
const block_ty = try self.genBlockType(condbr.base.src, condbr.base.ty);
try self.startBlock(.block, block_ty, offset);
@ -522,4 +532,32 @@ pub const Context = struct {
return .none;
}
fn genNot(self: *Context, not: *Inst.UnOp) InnerError!WValue {
const offset = self.code.items.len;
const operand = self.resolveInst(not.operand);
try self.emitWValue(operand);
// wasm does not have booleans nor the `not` instruction, therefore compare with 0
// to create the same logic
const writer = self.code.writer();
try writer.writeByte(wasm.opcode(.i32_const));
try leb.writeILEB128(writer, @as(i32, 0));
try writer.writeByte(wasm.opcode(.i32_eq));
return WValue{ .code_offset = offset };
}
fn genBreakpoint(self: *Context, breakpoint: *Inst.NoOp) InnerError!WValue {
// unsupported by wasm itself. Can be implemented once we support DWARF
// for wasm
return .none;
}
fn genUnreachable(self: *Context, unreach: *Inst.NoOp) InnerError!WValue {
try self.code.append(wasm.opcode(.@"unreachable"));
return .none;
}
};

View File

@ -1,7 +1,7 @@
pub const have_llvm = true;
pub const version: [:0]const u8 = "@ZIG_VERSION@";
pub const semver = try @import("std").SemanticVersion.parse(version);
pub const enable_logging: bool = false;
pub const enable_logging: bool = @ZIG_ENABLE_LOGGING_BOOL@;
pub const enable_tracy = false;
pub const is_stage1 = true;
pub const skip_non_native = false;

View File

@ -61,6 +61,14 @@ pub fn findZigLibDirFromSelfExe(
/// Caller owns returned memory.
pub fn resolveGlobalCacheDir(allocator: *mem.Allocator) ![]u8 {
if (std.process.getEnvVarOwned(allocator, "ZIG_GLOBAL_CACHE_DIR")) |value| {
if (value.len > 0) {
return value;
} else {
allocator.free(value);
}
} else |_| {}
const appname = "zig";
if (std.Target.current.os.tag != .windows) {

View File

@ -20,6 +20,11 @@ name: []u8,
objects: std.ArrayListUnmanaged(Object) = .{},
/// Parsed table of contents.
/// Each symbol name points to a list of all definition
/// sites within the current static archive.
toc: std.StringArrayHashMapUnmanaged(std.ArrayListUnmanaged(u32)) = .{},
// Archive files start with the ARMAG identifying string. Then follows a
// `struct ar_hdr', and as many bytes of member file data as its `ar_size'
// member indicates, for each member file.
@ -88,6 +93,11 @@ pub fn deinit(self: *Archive) void {
object.deinit();
}
self.objects.deinit(self.allocator);
for (self.toc.items()) |*entry| {
self.allocator.free(entry.key);
entry.value.deinit(self.allocator);
}
self.toc.deinit(self.allocator);
self.file.close();
}
@ -159,8 +169,20 @@ fn readTableOfContents(self: *Archive, reader: anytype) ![]u32 {
};
const object_offset = try symtab_reader.readIntLittle(u32);
// TODO Store the table of contents for later reuse.
const sym_name = mem.spanZ(@ptrCast([*:0]const u8, strtab.ptr + n_strx));
const owned_name = try self.allocator.dupe(u8, sym_name);
const res = try self.toc.getOrPut(self.allocator, owned_name);
defer if (res.found_existing) self.allocator.free(owned_name);
if (!res.found_existing) {
res.entry.value = .{};
}
try res.entry.value.append(self.allocator, object_offset);
// TODO This will go once we properly use archive's TOC to pick
// an object which defines a missing symbol rather than pasting in
// all of the objects always.
// Here, we assume that symbols are NOT sorted in any way, and
// they point to objects in sequence.
if (object_offsets.items[last] != object_offset) {
@ -248,8 +270,8 @@ fn getName(allocator: *Allocator, header: ar_hdr, reader: anytype) ![]u8 {
var n = try allocator.alloc(u8, len);
defer allocator.free(n);
try reader.readNoEof(n);
const actual_len = mem.indexOfScalar(u8, n, @as(u8, 0));
name = try allocator.dupe(u8, n[0..actual_len.?]);
const actual_len = mem.indexOfScalar(u8, n, @as(u8, 0)) orelse n.len;
name = try allocator.dupe(u8, n[0..actual_len]);
},
}
return name;

View File

@ -164,6 +164,7 @@ pub fn readLoadCommands(self: *Object, reader: anytype, offset: ReadOffset) !voi
},
macho.LC_DATA_IN_CODE => {
self.data_in_code_cmd_index = i;
cmd.LinkeditData.dataoff += offset_mod;
},
else => {
log.debug("Unknown load command detected: 0x{x}.", .{cmd.cmd()});

View File

@ -60,13 +60,17 @@ stub_helper_section_index: ?u16 = null,
text_const_section_index: ?u16 = null,
cstring_section_index: ?u16 = null,
// __DATA segment sections
// __DATA_CONST segment sections
got_section_index: ?u16 = null,
mod_init_func_section_index: ?u16 = null,
mod_term_func_section_index: ?u16 = null,
data_const_section_index: ?u16 = null,
// __DATA segment sections
tlv_section_index: ?u16 = null,
tlv_data_section_index: ?u16 = null,
tlv_bss_section_index: ?u16 = null,
la_symbol_ptr_section_index: ?u16 = null,
data_const_section_index: ?u16 = null,
data_section_index: ?u16 = null,
bss_section_index: ?u16 = null,
@ -448,6 +452,46 @@ fn updateMetadata(self: *Zld, object_id: u16) !void {
.reserved3 = 0,
});
},
macho.S_MOD_INIT_FUNC_POINTERS => {
if (!mem.eql(u8, segname, "__DATA")) continue;
if (self.mod_init_func_section_index != null) continue;
self.mod_init_func_section_index = @intCast(u16, data_const_seg.sections.items.len);
try data_const_seg.addSection(self.allocator, .{
.sectname = makeStaticString("__mod_init_func"),
.segname = makeStaticString("__DATA_CONST"),
.addr = 0,
.size = 0,
.offset = 0,
.@"align" = 0,
.reloff = 0,
.nreloc = 0,
.flags = macho.S_MOD_INIT_FUNC_POINTERS,
.reserved1 = 0,
.reserved2 = 0,
.reserved3 = 0,
});
},
macho.S_MOD_TERM_FUNC_POINTERS => {
if (!mem.eql(u8, segname, "__DATA")) continue;
if (self.mod_term_func_section_index != null) continue;
self.mod_term_func_section_index = @intCast(u16, data_const_seg.sections.items.len);
try data_const_seg.addSection(self.allocator, .{
.sectname = makeStaticString("__mod_term_func"),
.segname = makeStaticString("__DATA_CONST"),
.addr = 0,
.size = 0,
.offset = 0,
.@"align" = 0,
.reloff = 0,
.nreloc = 0,
.flags = macho.S_MOD_TERM_FUNC_POINTERS,
.reserved1 = 0,
.reserved2 = 0,
.reserved3 = 0,
});
},
macho.S_ZEROFILL => {
if (!mem.eql(u8, segname, "__DATA")) continue;
if (self.bss_section_index != null) continue;
@ -583,6 +627,18 @@ fn getMatchingSection(self: *Zld, section: macho.section_64) ?MatchingSection {
.sect = self.cstring_section_index.?,
};
},
macho.S_MOD_INIT_FUNC_POINTERS => {
break :blk .{
.seg = self.data_const_segment_cmd_index.?,
.sect = self.mod_init_func_section_index.?,
};
},
macho.S_MOD_TERM_FUNC_POINTERS => {
break :blk .{
.seg = self.data_const_segment_cmd_index.?,
.sect = self.mod_term_func_section_index.?,
};
},
macho.S_ZEROFILL => {
break :blk .{
.seg = self.data_segment_cmd_index.?,
@ -684,6 +740,8 @@ fn sortSections(self: *Zld) !void {
const indices = &[_]*?u16{
&self.got_section_index,
&self.mod_init_func_section_index,
&self.mod_term_func_section_index,
&self.data_const_section_index,
};
for (indices) |maybe_index| {
@ -2471,6 +2529,42 @@ fn writeRebaseInfoTable(self: *Zld) !void {
}
}
if (self.mod_init_func_section_index) |idx| {
// TODO audit and investigate this.
const seg = self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
const sect = seg.sections.items[idx];
const npointers = sect.size * @sizeOf(u64);
const base_offset = sect.addr - seg.inner.vmaddr;
const segment_id = @intCast(u16, self.data_const_segment_cmd_index.?);
try pointers.ensureCapacity(pointers.items.len + npointers);
var i: usize = 0;
while (i < npointers) : (i += 1) {
pointers.appendAssumeCapacity(.{
.offset = base_offset + i * @sizeOf(u64),
.segment_id = segment_id,
});
}
}
if (self.mod_term_func_section_index) |idx| {
// TODO audit and investigate this.
const seg = self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
const sect = seg.sections.items[idx];
const npointers = sect.size * @sizeOf(u64);
const base_offset = sect.addr - seg.inner.vmaddr;
const segment_id = @intCast(u16, self.data_const_segment_cmd_index.?);
try pointers.ensureCapacity(pointers.items.len + npointers);
var i: usize = 0;
while (i < npointers) : (i += 1) {
pointers.appendAssumeCapacity(.{
.offset = base_offset + i * @sizeOf(u64),
.segment_id = segment_id,
});
}
}
if (self.la_symbol_ptr_section_index) |idx| {
try pointers.ensureCapacity(pointers.items.len + self.lazy_imports.items().len);
const seg = self.load_commands.items[self.data_segment_cmd_index.?].Segment;
@ -2707,7 +2801,15 @@ fn writeDebugInfo(self: *Zld) !void {
};
defer debug_info.deinit(self.allocator);
const compile_unit = try debug_info.inner.findCompileUnit(0x0); // We assume there is only one CU.
// We assume there is only one CU.
const compile_unit = debug_info.inner.findCompileUnit(0x0) catch |err| switch (err) {
error.MissingDebugInfo => {
// TODO audit cases with missing debug info and audit our dwarf.zig module.
log.debug("invalid or missing debug info in {s}; skipping", .{object.name});
continue;
},
else => |e| return e,
};
const name = try compile_unit.die.getAttrString(&debug_info.inner, dwarf.AT_name);
const comp_dir = try compile_unit.die.getAttrString(&debug_info.inner, dwarf.AT_comp_dir);

View File

@ -557,7 +557,7 @@ fn buildOutputType(
var test_filter: ?[]const u8 = null;
var test_name_prefix: ?[]const u8 = null;
var override_local_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LOCAL_CACHE_DIR");
var override_global_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_GLOBAL_CACHE_DIR");
var override_global_cache_dir: ?[]const u8 = null;
var override_lib_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LIB_DIR");
var main_pkg_path: ?[]const u8 = null;
var clang_preprocessor_mode: Compilation.ClangPreprocessorMode = .no;

View File

@ -2139,10 +2139,6 @@ struct CodeGen {
Buf llvm_ir_file_output_path;
Buf analysis_json_output_path;
Buf docs_output_path;
Buf *cache_dir;
Buf *c_artifact_dir;
const char **libc_include_dir_list;
size_t libc_include_dir_len;
Buf *builtin_zig_path;
Buf *zig_std_special_dir; // Cannot be overridden; derived from zig_lib_dir.

View File

@ -270,7 +270,10 @@ pub const Context = struct {
global_scope: *Scope.Root,
clang_context: *clang.ASTContext,
mangle_count: u32 = 0,
/// Table of record decls that have been demoted to opaques.
opaque_demotes: std.AutoHashMapUnmanaged(usize, void) = .{},
/// Table of unnamed enums and records that are child types of typedefs.
unnamed_typedefs: std.AutoHashMapUnmanaged(usize, []const u8) = .{},
/// This one is different than the root scope's name table. This contains
/// a list of names that we found by visiting all the top level decls without
@ -338,6 +341,7 @@ pub fn translate(
context.alias_list.deinit();
context.global_names.deinit(gpa);
context.opaque_demotes.deinit(gpa);
context.unnamed_typedefs.deinit(gpa);
context.global_scope.deinit();
}
@ -401,6 +405,51 @@ fn declVisitorNamesOnly(c: *Context, decl: *const clang.Decl) Error!void {
if (decl.castToNamedDecl()) |named_decl| {
const decl_name = try c.str(named_decl.getName_bytes_begin());
try c.global_names.put(c.gpa, decl_name, {});
// Check for typedefs with unnamed enum/record child types.
if (decl.getKind() == .Typedef) {
const typedef_decl = @ptrCast(*const clang.TypedefNameDecl, decl);
var child_ty = typedef_decl.getUnderlyingType().getTypePtr();
const addr: usize = while (true) switch (child_ty.getTypeClass()) {
.Enum => {
const enum_ty = @ptrCast(*const clang.EnumType, child_ty);
const enum_decl = enum_ty.getDecl();
// check if this decl is unnamed
if (@ptrCast(*const clang.NamedDecl, enum_decl).getName_bytes_begin()[0] != 0) return;
break @ptrToInt(enum_decl.getCanonicalDecl());
},
.Record => {
const record_ty = @ptrCast(*const clang.RecordType, child_ty);
const record_decl = record_ty.getDecl();
// check if this decl is unnamed
if (@ptrCast(*const clang.NamedDecl, record_decl).getName_bytes_begin()[0] != 0) return;
break @ptrToInt(record_decl.getCanonicalDecl());
},
.Elaborated => {
const elaborated_ty = @ptrCast(*const clang.ElaboratedType, child_ty);
child_ty = elaborated_ty.getNamedType().getTypePtr();
},
.Decayed => {
const decayed_ty = @ptrCast(*const clang.DecayedType, child_ty);
child_ty = decayed_ty.getDecayedType().getTypePtr();
},
.Attributed => {
const attributed_ty = @ptrCast(*const clang.AttributedType, child_ty);
child_ty = attributed_ty.getEquivalentType().getTypePtr();
},
.MacroQualified => {
const macroqualified_ty = @ptrCast(*const clang.MacroQualifiedType, child_ty);
child_ty = macroqualified_ty.getModifiedType().getTypePtr();
},
else => return,
} else unreachable;
// TODO https://github.com/ziglang/zig/issues/3756
// TODO https://github.com/ziglang/zig/issues/1802
const name = if (isZigPrimitiveType(decl_name)) try std.fmt.allocPrint(c.arena, "{s}_{d}", .{ decl_name, c.getMangle() }) else decl_name;
try c.unnamed_typedefs.putNoClobber(c.gpa, addr, name);
// Put this typedef in the decl_table to avoid redefinitions.
try c.decl_table.putNoClobber(c.gpa, @ptrToInt(typedef_decl.getCanonicalDecl()), name);
}
}
}
@ -752,17 +801,10 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordD
const toplevel = scope.id == .root;
const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined;
var bare_name = try c.str(@ptrCast(*const clang.NamedDecl, record_decl).getName_bytes_begin());
var is_unnamed = false;
// Record declarations such as `struct {...} x` have no name but they're not
// anonymous hence here isAnonymousStructOrUnion is not needed
if (bare_name.len == 0) {
bare_name = try std.fmt.allocPrint(c.arena, "unnamed_{d}", .{c.getMangle()});
is_unnamed = true;
}
var container_kind_name: []const u8 = undefined;
var is_union = false;
var container_kind_name: []const u8 = undefined;
var bare_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, record_decl).getName_bytes_begin());
if (record_decl.isUnion()) {
container_kind_name = "union";
is_union = true;
@ -773,7 +815,20 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordD
return failDecl(c, record_loc, bare_name, "record {s} is not a struct or union", .{bare_name});
}
var name: []const u8 = try std.fmt.allocPrint(c.arena, "{s}_{s}", .{ container_kind_name, bare_name });
var is_unnamed = false;
var name = bare_name;
if (c.unnamed_typedefs.get(@ptrToInt(record_decl.getCanonicalDecl()))) |typedef_name| {
bare_name = typedef_name;
name = typedef_name;
} else {
// Record declarations such as `struct {...} x` have no name but they're not
// anonymous hence here isAnonymousStructOrUnion is not needed
if (bare_name.len == 0) {
bare_name = try std.fmt.allocPrint(c.arena, "unnamed_{d}", .{c.getMangle()});
is_unnamed = true;
}
name = try std.fmt.allocPrint(c.arena, "{s}_{s}", .{ container_kind_name, bare_name });
}
if (!toplevel) name = try bs.makeMangledName(c, name);
try c.decl_table.putNoClobber(c.gpa, @ptrToInt(record_decl.getCanonicalDecl()), name);
@ -874,14 +929,19 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const clang.EnumDecl) E
const toplevel = scope.id == .root;
const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined;
var bare_name = try c.str(@ptrCast(*const clang.NamedDecl, enum_decl).getName_bytes_begin());
var is_unnamed = false;
if (bare_name.len == 0) {
bare_name = try std.fmt.allocPrint(c.arena, "unnamed_{d}", .{c.getMangle()});
is_unnamed = true;
var bare_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, enum_decl).getName_bytes_begin());
var name = bare_name;
if (c.unnamed_typedefs.get(@ptrToInt(enum_decl.getCanonicalDecl()))) |typedef_name| {
bare_name = typedef_name;
name = typedef_name;
} else {
if (bare_name.len == 0) {
bare_name = try std.fmt.allocPrint(c.arena, "unnamed_{d}", .{c.getMangle()});
is_unnamed = true;
}
name = try std.fmt.allocPrint(c.arena, "enum_{s}", .{bare_name});
}
var name: []const u8 = try std.fmt.allocPrint(c.arena, "enum_{s}", .{bare_name});
if (!toplevel) _ = try bs.makeMangledName(c, name);
try c.decl_table.putNoClobber(c.gpa, @ptrToInt(enum_decl.getCanonicalDecl()), name);
@ -1063,6 +1123,7 @@ fn transStmt(
const gen_sel = @ptrCast(*const clang.GenericSelectionExpr, stmt);
return transExpr(c, scope, gen_sel.getResultExpr(), result_used);
},
// When adding new cases here, see comment for maybeBlockify()
else => {
return fail(c, error.UnsupportedTranslation, stmt.getBeginLoc(), "TODO implement translation of stmt class {s}", .{@tagName(sc)});
},
@ -2242,6 +2303,35 @@ fn transImplicitValueInitExpr(
return transZeroInitExpr(c, scope, source_loc, ty);
}
/// If a statement can possibly translate to a Zig assignment (either directly because it's
/// an assignment in C or indirectly via result assignment to `_`) AND it's the sole statement
/// in the body of an if statement or loop, then we need to put the statement into its own block.
/// The `else` case here corresponds to statements that could result in an assignment. If a statement
/// class never needs a block, add its enum to the top prong.
/// Returns either the translated statement itself (for classes known never to need a
/// block) or a single-statement block wrapping the translation.
fn maybeBlockify(c: *Context, scope: *Scope, stmt: *const clang.Stmt) TransError!Node {
    switch (stmt.getStmtClass()) {
        // These statement classes can never translate to a bare Zig assignment,
        // so they are safe to emit directly without a wrapping block.
        .BreakStmtClass,
        .CompoundStmtClass,
        .ContinueStmtClass,
        .DeclRefExprClass,
        .DeclStmtClass,
        .DoStmtClass,
        .ForStmtClass,
        .IfStmtClass,
        .ReturnStmtClass,
        .NullStmtClass,
        .WhileStmtClass,
        => return transStmt(c, scope, stmt, .unused),
        else => {
            // Any other class might become an assignment (e.g. `_ = expr;`), so
            // translate it inside a fresh block scope and return the whole block.
            var block_scope = try Scope.Block.init(c, scope, false);
            defer block_scope.deinit();
            const result = try transStmt(c, &block_scope.base, stmt, .unused);
            try block_scope.statements.append(result);
            return block_scope.complete(c);
        },
    }
}
fn transIfStmt(
c: *Context,
scope: *Scope,
@ -2259,9 +2349,10 @@ fn transIfStmt(
const cond_expr = @ptrCast(*const clang.Expr, stmt.getCond());
const cond = try transBoolExpr(c, &cond_scope.base, cond_expr, .used);
const then_body = try transStmt(c, scope, stmt.getThen(), .unused);
const then_body = try maybeBlockify(c, scope, stmt.getThen());
const else_body = if (stmt.getElse()) |expr|
try transStmt(c, scope, expr, .unused)
try maybeBlockify(c, scope, expr)
else
null;
return Tag.@"if".create(c.arena, .{ .cond = cond, .then = then_body, .@"else" = else_body });
@ -2286,7 +2377,7 @@ fn transWhileLoop(
.parent = scope,
.id = .loop,
};
const body = try transStmt(c, &loop_scope, stmt.getBody(), .unused);
const body = try maybeBlockify(c, &loop_scope, stmt.getBody());
return Tag.@"while".create(c.arena, .{ .cond = cond, .body = body, .cont_expr = null });
}
@ -2312,7 +2403,7 @@ fn transDoWhileLoop(
const if_not_break = switch (cond.tag()) {
.false_literal => return transStmt(c, scope, stmt.getBody(), .unused),
.true_literal => {
const body_node = try transStmt(c, scope, stmt.getBody(), .unused);
const body_node = try maybeBlockify(c, scope, stmt.getBody());
return Tag.while_true.create(c.arena, body_node);
},
else => try Tag.if_not_break.create(c.arena, cond),
@ -2388,7 +2479,7 @@ fn transForLoop(
else
null;
const body = try transStmt(c, &loop_scope, stmt.getBody(), .unused);
const body = try maybeBlockify(c, &loop_scope, stmt.getBody());
const while_node = try Tag.@"while".create(c.arena, .{ .cond = cond, .body = body, .cont_expr = cont_expr });
if (block_scope) |*bs| {
try bs.statements.append(while_node);
@ -3106,43 +3197,34 @@ fn transCreateCompoundAssign(
const requires_int_cast = blk: {
const are_integers = cIsInteger(lhs_qt) and cIsInteger(rhs_qt);
const are_same_sign = cIsSignedInteger(lhs_qt) == cIsSignedInteger(rhs_qt);
break :blk are_integers and !are_same_sign;
break :blk are_integers and !(are_same_sign and cIntTypeCmp(lhs_qt, rhs_qt) == .eq);
};
if (used == .unused) {
// common case
// c: lhs += rhs
// zig: lhs += rhs
const lhs_node = try transExpr(c, scope, lhs, .used);
var rhs_node = try transExpr(c, scope, rhs, .used);
if (is_ptr_op_signed) rhs_node = try usizeCastForWrappingPtrArithmetic(c.arena, rhs_node);
if ((is_mod or is_div) and is_signed) {
const lhs_node = try transExpr(c, scope, lhs, .used);
const rhs_node = try transExpr(c, scope, rhs, .used);
if (requires_int_cast) rhs_node = try transCCast(c, scope, loc, lhs_qt, rhs_qt, rhs_node);
const operands = .{ .lhs = lhs_node, .rhs = rhs_node };
const builtin = if (is_mod)
try Tag.rem.create(c.arena, .{ .lhs = lhs_node, .rhs = rhs_node })
try Tag.rem.create(c.arena, operands)
else
try Tag.div_trunc.create(c.arena, .{ .lhs = lhs_node, .rhs = rhs_node });
try Tag.div_trunc.create(c.arena, operands);
return transCreateNodeInfixOp(c, scope, .assign, lhs_node, builtin, .used);
}
const lhs_node = try transExpr(c, scope, lhs, .used);
var rhs_node = if (is_shift or requires_int_cast)
try transExprCoercing(c, scope, rhs, .used)
else
try transExpr(c, scope, rhs, .used);
if (is_ptr_op_signed) {
rhs_node = try usizeCastForWrappingPtrArithmetic(c.arena, rhs_node);
}
if (is_shift or requires_int_cast) {
// @intCast(rhs)
const cast_to_type = if (is_shift)
try qualTypeToLog2IntRef(c, scope, getExprQualType(c, rhs), loc)
else
try transQualType(c, scope, getExprQualType(c, lhs), loc);
if (is_shift) {
const cast_to_type = try qualTypeToLog2IntRef(c, scope, rhs_qt, loc);
rhs_node = try Tag.int_cast.create(c.arena, .{ .lhs = cast_to_type, .rhs = rhs_node });
} else if (requires_int_cast) {
rhs_node = try transCCast(c, scope, loc, lhs_qt, rhs_qt, rhs_node);
}
return transCreateNodeInfixOp(c, scope, op, lhs_node, rhs_node, .used);
}
// worst case
@ -3164,29 +3246,24 @@ fn transCreateCompoundAssign(
const lhs_node = try Tag.identifier.create(c.arena, ref);
const ref_node = try Tag.deref.create(c.arena, lhs_node);
var rhs_node = try transExpr(c, &block_scope.base, rhs, .used);
if (is_ptr_op_signed) rhs_node = try usizeCastForWrappingPtrArithmetic(c.arena, rhs_node);
if ((is_mod or is_div) and is_signed) {
const rhs_node = try transExpr(c, &block_scope.base, rhs, .used);
if (requires_int_cast) rhs_node = try transCCast(c, scope, loc, lhs_qt, rhs_qt, rhs_node);
const operands = .{ .lhs = ref_node, .rhs = rhs_node };
const builtin = if (is_mod)
try Tag.rem.create(c.arena, .{ .lhs = ref_node, .rhs = rhs_node })
try Tag.rem.create(c.arena, operands)
else
try Tag.div_trunc.create(c.arena, .{ .lhs = ref_node, .rhs = rhs_node });
try Tag.div_trunc.create(c.arena, operands);
const assign = try transCreateNodeInfixOp(c, &block_scope.base, .assign, ref_node, builtin, .used);
try block_scope.statements.append(assign);
} else {
var rhs_node = try transExpr(c, &block_scope.base, rhs, .used);
if (is_shift or requires_int_cast) {
// @intCast(rhs)
const cast_to_type = if (is_shift)
try qualTypeToLog2IntRef(c, scope, getExprQualType(c, rhs), loc)
else
try transQualType(c, scope, getExprQualType(c, lhs), loc);
if (is_shift) {
const cast_to_type = try qualTypeToLog2IntRef(c, &block_scope.base, rhs_qt, loc);
rhs_node = try Tag.int_cast.create(c.arena, .{ .lhs = cast_to_type, .rhs = rhs_node });
}
if (is_ptr_op_signed) {
rhs_node = try usizeCastForWrappingPtrArithmetic(c.arena, rhs_node);
} else if (requires_int_cast) {
rhs_node = try transCCast(c, &block_scope.base, loc, lhs_qt, rhs_qt, rhs_node);
}
const assign = try transCreateNodeInfixOp(c, &block_scope.base, op, ref_node, rhs_node, .used);

View File

@ -1244,4 +1244,68 @@ pub fn addCases(cases: *tests.RunTranslatedCContext) void {
\\ return 0;
\\}
, "");
cases.add("convert single-statement bodies into blocks for if/else/for/while. issue #8159",
\\#include <stdlib.h>
\\int foo() { return 1; }
\\int main(void) {
\\ int i = 0;
\\ if (i == 0) if (i == 0) if (i != 0) i = 1;
\\ if (i != 0) i = 1; else if (i == 0) if (i == 0) i += 1;
\\ for (; i < 10;) for (; i < 10;) i++;
\\ while (i == 100) while (i == 100) foo();
\\ if (0) do do "string"; while(1); while(1);
\\ return 0;
\\}
, "");
cases.add("cast RHS of compound assignment if necessary, unused result",
\\#include <stdlib.h>
\\int main(void) {
\\ signed short val = -1;
\\ val += 1; if (val != 0) abort();
\\ val -= 1; if (val != -1) abort();
\\ val *= 2; if (val != -2) abort();
\\ val /= 2; if (val != -1) abort();
\\ val %= 2; if (val != -1) abort();
\\ val <<= 1; if (val != -2) abort();
\\ val >>= 1; if (val != -1) abort();
\\ val += 100000000; // compile error if @truncate() not inserted
\\ unsigned short uval = 1;
\\ uval += 1; if (uval != 2) abort();
\\ uval -= 1; if (uval != 1) abort();
\\ uval *= 2; if (uval != 2) abort();
\\ uval /= 2; if (uval != 1) abort();
\\ uval %= 2; if (uval != 1) abort();
\\ uval <<= 1; if (uval != 2) abort();
\\ uval >>= 1; if (uval != 1) abort();
\\ uval += 100000000; // compile error if @truncate() not inserted
\\}
, "");
cases.add("cast RHS of compound assignment if necessary, used result",
\\#include <stdlib.h>
\\int main(void) {
\\ signed short foo;
\\ signed short val = -1;
\\ foo = (val += 1); if (foo != 0) abort();
\\ foo = (val -= 1); if (foo != -1) abort();
\\ foo = (val *= 2); if (foo != -2) abort();
\\ foo = (val /= 2); if (foo != -1) abort();
\\ foo = (val %= 2); if (foo != -1) abort();
\\ foo = (val <<= 1); if (foo != -2) abort();
\\ foo = (val >>= 1); if (foo != -1) abort();
\\ foo = (val += 100000000); // compile error if @truncate() not inserted
\\ unsigned short ufoo;
\\ unsigned short uval = 1;
\\ ufoo = (uval += 1); if (ufoo != 2) abort();
\\ ufoo = (uval -= 1); if (ufoo != 1) abort();
\\ ufoo = (uval *= 2); if (ufoo != 2) abort();
\\ ufoo = (uval /= 2); if (ufoo != 1) abort();
\\ ufoo = (uval %= 2); if (ufoo != 1) abort();
\\ ufoo = (uval <<= 1); if (ufoo != 2) abort();
\\ ufoo = (uval >>= 1); if (ufoo != 1) abort();
\\ ufoo = (uval += 100000000); // compile error if @truncate() not inserted
\\}
, "");
}

View File

@ -175,6 +175,41 @@ pub fn addCases(ctx: *TestContext) !void {
\\ return i;
\\}
, "31\n");
case.addCompareOutput(
\\export fn _start() void {
\\ assert(foo(true) != @as(i32, 30));
\\}
\\
\\fn assert(ok: bool) void {
\\ if (!ok) unreachable;
\\}
\\
\\fn foo(ok: bool) i32 {
\\ const x = if(ok) @as(i32, 20) else @as(i32, 10);
\\ return x;
\\}
, "");
case.addCompareOutput(
\\export fn _start() void {
\\ assert(foo(false) == @as(i32, 20));
\\ assert(foo(true) == @as(i32, 30));
\\}
\\
\\fn assert(ok: bool) void {
\\ if (!ok) unreachable;
\\}
\\
\\fn foo(ok: bool) i32 {
\\ const val: i32 = blk: {
\\ var x: i32 = 1;
\\ if (!ok) break :blk x + @as(i32, 9);
\\ break :blk x + @as(i32, 19);
\\ };
\\ return val + 10;
\\}
, "");
}
{

View File

@ -3,9 +3,9 @@ const base64 = @import("std").base64;
export fn decode_base_64(dest_ptr: [*]u8, dest_len: usize, source_ptr: [*]const u8, source_len: usize) usize {
const src = source_ptr[0..source_len];
const dest = dest_ptr[0..dest_len];
const base64_decoder = base64.standard_decoder_unsafe;
const decoded_size = base64_decoder.calcSize(src);
base64_decoder.decode(dest[0..decoded_size], src);
const base64_decoder = base64.standard.Decoder;
const decoded_size = base64_decoder.calcSizeForSlice(src) catch unreachable;
base64_decoder.decode(dest[0..decoded_size], src) catch unreachable;
return decoded_size;
}

View File

@ -3,6 +3,28 @@ const std = @import("std");
const CrossTarget = std.zig.CrossTarget;
pub fn addCases(cases: *tests.TranslateCContext) void {
cases.add("unnamed child types of typedef receive typedef's name",
\\typedef enum {
\\ FooA,
\\ FooB,
\\} Foo;
\\typedef struct {
\\ int a, b;
\\} Bar;
, &[_][]const u8{
\\pub const Foo = extern enum(c_int) {
\\ A,
\\ B,
\\ _,
\\};
\\pub const FooA = @enumToInt(Foo.A);
\\pub const FooB = @enumToInt(Foo.B);
\\pub const Bar = extern struct {
\\ a: c_int,
\\ b: c_int,
\\};
});
cases.add("if as while stmt has semicolon",
\\void foo() {
\\ while (1) if (1) {
@ -218,9 +240,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\} Bar;
, &[_][]const u8{
\\source.h:1:9: warning: struct demoted to opaque type - unable to translate type of field foo
\\const struct_unnamed_1 = opaque {};
\\pub const Foo = struct_unnamed_1;
\\const struct_unnamed_2 = extern struct {
\\pub const Foo = opaque {};
\\pub const Bar = extern struct {
\\ bar: ?*Foo,
\\};
});
@ -519,17 +540,16 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\} outer;
\\void foo(outer *x) { x->y = x->x; }
, &[_][]const u8{
\\const struct_unnamed_3 = extern struct {
\\const struct_unnamed_2 = extern struct {
\\ y: c_int,
\\};
\\const union_unnamed_2 = extern union {
\\const union_unnamed_1 = extern union {
\\ x: u8,
\\ unnamed_0: struct_unnamed_3,
\\ unnamed_0: struct_unnamed_2,
\\};
\\const struct_unnamed_1 = extern struct {
\\ unnamed_0: union_unnamed_2,
\\pub const outer = extern struct {
\\ unnamed_0: union_unnamed_1,
\\};
\\pub const outer = struct_unnamed_1;
\\pub export fn foo(arg_x: [*c]outer) void {
\\ var x = arg_x;
\\ x.*.unnamed_0.unnamed_0.y = @bitCast(c_int, @as(c_uint, x.*.unnamed_0.x));
@ -565,21 +585,20 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\struct {int x,y;} s2 = {.y = 2, .x=1};
\\foo s3 = { 123 };
, &[_][]const u8{
\\const struct_unnamed_1 = extern struct {
\\pub const foo = extern struct {
\\ x: c_int,
\\};
\\pub const foo = struct_unnamed_1;
\\const struct_unnamed_2 = extern struct {
\\const struct_unnamed_1 = extern struct {
\\ x: f64,
\\ y: f64,
\\ z: f64,
\\};
\\pub export var s0: struct_unnamed_2 = struct_unnamed_2{
\\pub export var s0: struct_unnamed_1 = struct_unnamed_1{
\\ .x = 1.2,
\\ .y = 1.3,
\\ .z = 0,
\\};
\\const struct_unnamed_3 = extern struct {
\\const struct_unnamed_2 = extern struct {
\\ sec: c_int,
\\ min: c_int,
\\ hour: c_int,
@ -587,7 +606,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ mon: c_int,
\\ year: c_int,
\\};
\\pub export var s1: struct_unnamed_3 = struct_unnamed_3{
\\pub export var s1: struct_unnamed_2 = struct_unnamed_2{
\\ .sec = @as(c_int, 30),
\\ .min = @as(c_int, 15),
\\ .hour = @as(c_int, 17),
@ -595,11 +614,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ .mon = @as(c_int, 12),
\\ .year = @as(c_int, 2014),
\\};
\\const struct_unnamed_4 = extern struct {
\\const struct_unnamed_3 = extern struct {
\\ x: c_int,
\\ y: c_int,
\\};
\\pub export var s2: struct_unnamed_4 = struct_unnamed_4{
\\pub export var s2: struct_unnamed_3 = struct_unnamed_3{
\\ .x = @as(c_int, 1),
\\ .y = @as(c_int, 2),
\\};
@ -1639,37 +1658,36 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ p,
\\};
, &[_][]const u8{
\\const enum_unnamed_1 = extern enum(c_int) {
\\pub const d = extern enum(c_int) {
\\ a,
\\ b,
\\ c,
\\ _,
\\};
\\pub const a = @enumToInt(enum_unnamed_1.a);
\\pub const b = @enumToInt(enum_unnamed_1.b);
\\pub const c = @enumToInt(enum_unnamed_1.c);
\\pub const d = enum_unnamed_1;
\\const enum_unnamed_2 = extern enum(c_int) {
\\pub const a = @enumToInt(d.a);
\\pub const b = @enumToInt(d.b);
\\pub const c = @enumToInt(d.c);
\\const enum_unnamed_1 = extern enum(c_int) {
\\ e = 0,
\\ f = 4,
\\ g = 5,
\\ _,
\\};
\\pub const e = @enumToInt(enum_unnamed_2.e);
\\pub const f = @enumToInt(enum_unnamed_2.f);
\\pub const g = @enumToInt(enum_unnamed_2.g);
\\pub export var h: enum_unnamed_2 = @intToEnum(enum_unnamed_2, e);
\\const enum_unnamed_3 = extern enum(c_int) {
\\pub const e = @enumToInt(enum_unnamed_1.e);
\\pub const f = @enumToInt(enum_unnamed_1.f);
\\pub const g = @enumToInt(enum_unnamed_1.g);
\\pub export var h: enum_unnamed_1 = @intToEnum(enum_unnamed_1, e);
\\const enum_unnamed_2 = extern enum(c_int) {
\\ i,
\\ j,
\\ k,
\\ _,
\\};
\\pub const i = @enumToInt(enum_unnamed_3.i);
\\pub const j = @enumToInt(enum_unnamed_3.j);
\\pub const k = @enumToInt(enum_unnamed_3.k);
\\pub const i = @enumToInt(enum_unnamed_2.i);
\\pub const j = @enumToInt(enum_unnamed_2.j);
\\pub const k = @enumToInt(enum_unnamed_2.k);
\\pub const struct_Baz = extern struct {
\\ l: enum_unnamed_3,
\\ l: enum_unnamed_2,
\\ m: d,
\\};
\\pub const enum_i = extern enum(c_int) {
@ -1934,7 +1952,9 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
, &[_][]const u8{
\\pub export fn foo() c_int {
\\ var a: c_int = 5;
\\ while (true) a = 2;
\\ while (true) {
\\ a = 2;
\\ }
\\ while (true) {
\\ var a_1: c_int = 4;
\\ a_1 = 9;
@ -1947,7 +1967,9 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ var a_1: c_int = 2;
\\ a_1 = 12;
\\ }
\\ while (true) a = 7;
\\ while (true) {
\\ a = 7;
\\ }
\\ return 0;
\\}
});
@ -2008,7 +2030,9 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\}
, &[_][]const u8{
\\pub export fn bar() c_int {
\\ if ((if (true) @as(c_int, 5) else if (true) @as(c_int, 4) else @as(c_int, 6)) != 0) _ = @as(c_int, 2);
\\ if ((if (true) @as(c_int, 5) else if (true) @as(c_int, 4) else @as(c_int, 6)) != 0) {
\\ _ = @as(c_int, 2);
\\ }
\\ return if (true) @as(c_int, 5) else if (true) @as(c_int, 4) else @as(c_int, 6);
\\}
});
@ -2389,7 +2413,9 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub const yes = [*c]u8;
\\pub export fn foo() void {
\\ var a: yes = undefined;
\\ if (a != null) _ = @as(c_int, 2);
\\ if (a != null) {
\\ _ = @as(c_int, 2);
\\ }
\\}
});
@ -2740,7 +2766,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ var a = arg_a;
\\ var i: c_int = 0;
\\ while (a > @bitCast(c_uint, @as(c_int, 0))) {
\\ a >>= @intCast(@import("std").math.Log2Int(c_int), 1);
\\ a >>= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1));
\\ }
\\ return i;
\\}
@ -2760,7 +2786,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ var a = arg_a;
\\ var i: c_int = 0;
\\ while (a > @bitCast(c_uint, @as(c_int, 0))) {
\\ a >>= @intCast(@import("std").math.Log2Int(c_int), 1);
\\ a >>= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1));
\\ }
\\ return i;
\\}