Merge remote-tracking branch 'origin/master' into llvm11

The changes to install_files.h needed to be put into src/libcxx.zig.
Andrew Kelley 2020-09-30 02:55:41 -07:00
commit 7067764ed3
200 changed files with 21864 additions and 21854 deletions


@ -51,11 +51,11 @@ message("Configuring zig version ${ZIG_VERSION}")
set(ZIG_STATIC off CACHE BOOL "Attempt to build a static zig executable (not compatible with glibc)") set(ZIG_STATIC off CACHE BOOL "Attempt to build a static zig executable (not compatible with glibc)")
set(ZIG_STATIC_LLVM off CACHE BOOL "Prefer linking against static LLVM libraries") set(ZIG_STATIC_LLVM off CACHE BOOL "Prefer linking against static LLVM libraries")
set(ZIG_ENABLE_MEM_PROFILE off CACHE BOOL "Activate memory usage instrumentation")
set(ZIG_PREFER_CLANG_CPP_DYLIB off CACHE BOOL "Try to link against -lclang-cpp") set(ZIG_PREFER_CLANG_CPP_DYLIB off CACHE BOOL "Try to link against -lclang-cpp")
set(ZIG_WORKAROUND_4799 off CACHE BOOL "workaround for https://github.com/ziglang/zig/issues/4799") set(ZIG_WORKAROUND_4799 off CACHE BOOL "workaround for https://github.com/ziglang/zig/issues/4799")
set(ZIG_WORKAROUND_POLLY_SO off CACHE STRING "workaround for https://github.com/ziglang/zig/issues/4799") set(ZIG_WORKAROUND_POLLY_SO off CACHE STRING "workaround for https://github.com/ziglang/zig/issues/4799")
set(ZIG_USE_CCACHE off CACHE BOOL "Use ccache if available") set(ZIG_USE_CCACHE off CACHE BOOL "Use ccache if available")
set(ZIG_WORKAROUND_6087 off CACHE BOOL "workaround for https://github.com/ziglang/zig/issues/6087")
if(CCACHE_PROGRAM AND ZIG_USE_CCACHE) if(CCACHE_PROGRAM AND ZIG_USE_CCACHE)
SET_PROPERTY(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${CCACHE_PROGRAM}") SET_PROPERTY(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${CCACHE_PROGRAM}")
@ -71,11 +71,6 @@ string(REGEX REPLACE "\\\\" "\\\\\\\\" ZIG_LIBC_INCLUDE_DIR_ESCAPED "${ZIG_LIBC_
option(ZIG_TEST_COVERAGE "Build Zig with test coverage instrumentation" OFF) option(ZIG_TEST_COVERAGE "Build Zig with test coverage instrumentation" OFF)
# Zig no longer has embedded LLD. This option is kept for package maintainers
# so that they don't have to update their scripts in case we ever re-introduce
# LLD to the tree. This option does nothing.
option(ZIG_FORCE_EXTERNAL_LLD "does nothing" OFF)
set(ZIG_TARGET_TRIPLE "native" CACHE STRING "arch-os-abi to output binaries for") set(ZIG_TARGET_TRIPLE "native" CACHE STRING "arch-os-abi to output binaries for")
set(ZIG_TARGET_MCPU "baseline" CACHE STRING "-mcpu parameter to output binaries for") set(ZIG_TARGET_MCPU "baseline" CACHE STRING "-mcpu parameter to output binaries for")
set(ZIG_EXECUTABLE "" CACHE STRING "(when cross compiling) path to already-built zig binary") set(ZIG_EXECUTABLE "" CACHE STRING "(when cross compiling) path to already-built zig binary")
@ -90,12 +85,17 @@ if(APPLE AND ZIG_STATIC)
list(APPEND LLVM_LIBRARIES "${ZLIB}") list(APPEND LLVM_LIBRARIES "${ZLIB}")
endif() endif()
if(APPLE AND ZIG_WORKAROUND_6087)
list(REMOVE_ITEM LLVM_LIBRARIES "-llibxml2.tbd")
list(APPEND LLVM_LIBRARIES "-lxml2")
endif()
if(APPLE AND ZIG_WORKAROUND_4799) if(APPLE AND ZIG_WORKAROUND_4799)
# eg: ${CMAKE_PREFIX_PATH} could be /usr/local/opt/llvm/ # eg: ${CMAKE_PREFIX_PATH} could be /usr/local/opt/llvm/
list(APPEND LLVM_LIBRARIES "-Wl,${CMAKE_PREFIX_PATH}/lib/libPolly.a" "-Wl,${CMAKE_PREFIX_PATH}/lib/libPollyPPCG.a" "-Wl,${CMAKE_PREFIX_PATH}/lib/libPollyISL.a") list(APPEND LLVM_LIBRARIES "-Wl,${CMAKE_PREFIX_PATH}/lib/libPolly.a" "-Wl,${CMAKE_PREFIX_PATH}/lib/libPollyPPCG.a" "-Wl,${CMAKE_PREFIX_PATH}/lib/libPollyISL.a")
endif() endif()
set(ZIG_CPP_LIB_DIR "${CMAKE_BINARY_DIR}/zig_cpp") set(ZIG_CPP_LIB_DIR "${CMAKE_BINARY_DIR}/zigcpp")
# Handle multi-config builds and place each into a common lib. The VS generator # Handle multi-config builds and place each into a common lib. The VS generator
# for example will append a Debug folder by default if not explicitly specified. # for example will append a Debug folder by default if not explicitly specified.
@ -261,53 +261,45 @@ include_directories("${CMAKE_SOURCE_DIR}/deps/dbg-macro")
find_package(Threads) find_package(Threads)
# CMake doesn't let us create an empty executable, so we hang on to this one separately. # This is our shim which will be replaced by stage1.zig.
set(ZIG_MAIN_SRC "${CMAKE_SOURCE_DIR}/src/main.cpp") set(ZIG0_SOURCES
"${CMAKE_SOURCE_DIR}/src/stage1/zig0.cpp"
)
# This is our shim which will be replaced by libstage2 written in Zig. set(STAGE1_SOURCES
set(ZIG0_SHIM_SRC "${CMAKE_SOURCE_DIR}/src/stage2.cpp") "${CMAKE_SOURCE_DIR}/src/stage1/analyze.cpp"
"${CMAKE_SOURCE_DIR}/src/stage1/ast_render.cpp"
if(ZIG_ENABLE_MEM_PROFILE) "${CMAKE_SOURCE_DIR}/src/stage1/bigfloat.cpp"
set(ZIG_SOURCES_MEM_PROFILE "${CMAKE_SOURCE_DIR}/src/mem_profile.cpp") "${CMAKE_SOURCE_DIR}/src/stage1/bigint.cpp"
endif() "${CMAKE_SOURCE_DIR}/src/stage1/buffer.cpp"
"${CMAKE_SOURCE_DIR}/src/stage1/codegen.cpp"
set(ZIG_SOURCES "${CMAKE_SOURCE_DIR}/src/stage1/dump_analysis.cpp"
"${CMAKE_SOURCE_DIR}/src/analyze.cpp" "${CMAKE_SOURCE_DIR}/src/stage1/errmsg.cpp"
"${CMAKE_SOURCE_DIR}/src/ast_render.cpp" "${CMAKE_SOURCE_DIR}/src/stage1/error.cpp"
"${CMAKE_SOURCE_DIR}/src/bigfloat.cpp" "${CMAKE_SOURCE_DIR}/src/stage1/heap.cpp"
"${CMAKE_SOURCE_DIR}/src/bigint.cpp" "${CMAKE_SOURCE_DIR}/src/stage1/ir.cpp"
"${CMAKE_SOURCE_DIR}/src/buffer.cpp" "${CMAKE_SOURCE_DIR}/src/stage1/ir_print.cpp"
"${CMAKE_SOURCE_DIR}/src/cache_hash.cpp" "${CMAKE_SOURCE_DIR}/src/stage1/mem.cpp"
"${CMAKE_SOURCE_DIR}/src/codegen.cpp" "${CMAKE_SOURCE_DIR}/src/stage1/os.cpp"
"${CMAKE_SOURCE_DIR}/src/compiler.cpp" "${CMAKE_SOURCE_DIR}/src/stage1/parser.cpp"
"${CMAKE_SOURCE_DIR}/src/dump_analysis.cpp" "${CMAKE_SOURCE_DIR}/src/stage1/range_set.cpp"
"${CMAKE_SOURCE_DIR}/src/errmsg.cpp" "${CMAKE_SOURCE_DIR}/src/stage1/stage1.cpp"
"${CMAKE_SOURCE_DIR}/src/error.cpp" "${CMAKE_SOURCE_DIR}/src/stage1/target.cpp"
"${CMAKE_SOURCE_DIR}/src/glibc.cpp" "${CMAKE_SOURCE_DIR}/src/stage1/tokenizer.cpp"
"${CMAKE_SOURCE_DIR}/src/heap.cpp" "${CMAKE_SOURCE_DIR}/src/stage1/util.cpp"
"${CMAKE_SOURCE_DIR}/src/ir.cpp" "${CMAKE_SOURCE_DIR}/src/stage1/softfloat_ext.cpp"
"${CMAKE_SOURCE_DIR}/src/ir_print.cpp"
"${CMAKE_SOURCE_DIR}/src/link.cpp"
"${CMAKE_SOURCE_DIR}/src/mem.cpp"
"${CMAKE_SOURCE_DIR}/src/os.cpp"
"${CMAKE_SOURCE_DIR}/src/parser.cpp"
"${CMAKE_SOURCE_DIR}/src/range_set.cpp"
"${CMAKE_SOURCE_DIR}/src/target.cpp"
"${CMAKE_SOURCE_DIR}/src/tokenizer.cpp"
"${CMAKE_SOURCE_DIR}/src/util.cpp"
"${CMAKE_SOURCE_DIR}/src/softfloat_ext.cpp"
"${ZIG_SOURCES_MEM_PROFILE}"
) )
set(OPTIMIZED_C_SOURCES set(OPTIMIZED_C_SOURCES
"${CMAKE_SOURCE_DIR}/src/blake2b.c" "${CMAKE_SOURCE_DIR}/src/stage1/parse_f128.c"
"${CMAKE_SOURCE_DIR}/src/parse_f128.c"
) )
set(ZIG_CPP_SOURCES set(ZIG_CPP_SOURCES
# These are planned to stay even when we are self-hosted.
"${CMAKE_SOURCE_DIR}/src/zig_llvm.cpp" "${CMAKE_SOURCE_DIR}/src/zig_llvm.cpp"
"${CMAKE_SOURCE_DIR}/src/zig_clang.cpp" "${CMAKE_SOURCE_DIR}/src/zig_clang.cpp"
"${CMAKE_SOURCE_DIR}/src/zig_clang_driver.cpp" "${CMAKE_SOURCE_DIR}/src/zig_clang_driver.cpp"
"${CMAKE_SOURCE_DIR}/src/zig_clang_cc1_main.cpp" "${CMAKE_SOURCE_DIR}/src/zig_clang_cc1_main.cpp"
"${CMAKE_SOURCE_DIR}/src/zig_clang_cc1as_main.cpp" "${CMAKE_SOURCE_DIR}/src/zig_clang_cc1as_main.cpp"
# https://github.com/ziglang/zig/issues/6363
"${CMAKE_SOURCE_DIR}/src/windows_sdk.cpp" "${CMAKE_SOURCE_DIR}/src/windows_sdk.cpp"
) )
@ -328,7 +320,7 @@ set(ZIG_STD_DEST "${ZIG_LIB_DIR}/std")
set(ZIG_CONFIG_H_OUT "${CMAKE_BINARY_DIR}/config.h") set(ZIG_CONFIG_H_OUT "${CMAKE_BINARY_DIR}/config.h")
set(ZIG_CONFIG_ZIG_OUT "${CMAKE_BINARY_DIR}/config.zig") set(ZIG_CONFIG_ZIG_OUT "${CMAKE_BINARY_DIR}/config.zig")
configure_file ( configure_file (
"${CMAKE_SOURCE_DIR}/src/config.h.in" "${CMAKE_SOURCE_DIR}/src/stage1/config.h.in"
"${ZIG_CONFIG_H_OUT}" "${ZIG_CONFIG_H_OUT}"
) )
configure_file ( configure_file (
@ -340,6 +332,7 @@ include_directories(
${CMAKE_SOURCE_DIR} ${CMAKE_SOURCE_DIR}
${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}
"${CMAKE_SOURCE_DIR}/src" "${CMAKE_SOURCE_DIR}/src"
"${CMAKE_SOURCE_DIR}/src/stage1"
) )
# These have to go before the -Wno- flags # These have to go before the -Wno- flags
@ -405,18 +398,19 @@ if(ZIG_TEST_COVERAGE)
set(EXE_LDFLAGS "${EXE_LDFLAGS} -fprofile-arcs -ftest-coverage") set(EXE_LDFLAGS "${EXE_LDFLAGS} -fprofile-arcs -ftest-coverage")
endif() endif()
add_library(zig_cpp STATIC ${ZIG_CPP_SOURCES}) add_library(zigcpp STATIC ${ZIG_CPP_SOURCES})
set_target_properties(zig_cpp PROPERTIES set_target_properties(zigcpp PROPERTIES
COMPILE_FLAGS ${EXE_CFLAGS} COMPILE_FLAGS ${EXE_CFLAGS}
) )
target_link_libraries(zig_cpp LINK_PUBLIC target_link_libraries(zigcpp LINK_PUBLIC
${CLANG_LIBRARIES} ${CLANG_LIBRARIES}
${LLD_LIBRARIES} ${LLD_LIBRARIES}
${LLVM_LIBRARIES} ${LLVM_LIBRARIES}
${CMAKE_THREAD_LIBS_INIT}
) )
if(ZIG_WORKAROUND_POLLY_SO) if(ZIG_WORKAROUND_POLLY_SO)
target_link_libraries(zig_cpp LINK_PUBLIC "-Wl,${ZIG_WORKAROUND_POLLY_SO}") target_link_libraries(zigcpp LINK_PUBLIC "-Wl,${ZIG_WORKAROUND_POLLY_SO}")
endif() endif()
add_library(opt_c_util STATIC ${OPTIMIZED_C_SOURCES}) add_library(opt_c_util STATIC ${OPTIMIZED_C_SOURCES})
@ -424,68 +418,67 @@ set_target_properties(opt_c_util PROPERTIES
COMPILE_FLAGS "${OPTIMIZED_C_FLAGS}" COMPILE_FLAGS "${OPTIMIZED_C_FLAGS}"
) )
add_library(zigcompiler STATIC ${ZIG_SOURCES}) add_library(zigstage1 STATIC ${STAGE1_SOURCES})
set_target_properties(zigcompiler PROPERTIES set_target_properties(zigstage1 PROPERTIES
COMPILE_FLAGS ${EXE_CFLAGS} COMPILE_FLAGS ${EXE_CFLAGS}
LINK_FLAGS ${EXE_LDFLAGS} LINK_FLAGS ${EXE_LDFLAGS}
) )
target_link_libraries(zigcompiler LINK_PUBLIC target_link_libraries(zigstage1 LINK_PUBLIC
zig_cpp
opt_c_util opt_c_util
${SOFTFLOAT_LIBRARIES} ${SOFTFLOAT_LIBRARIES}
${CMAKE_THREAD_LIBS_INIT} zigcpp
) )
if(NOT MSVC) if(NOT MSVC)
target_link_libraries(zigcompiler LINK_PUBLIC ${LIBXML2}) target_link_libraries(zigstage1 LINK_PUBLIC ${LIBXML2})
endif() endif()
if(ZIG_DIA_GUIDS_LIB) if(ZIG_DIA_GUIDS_LIB)
target_link_libraries(zigcompiler LINK_PUBLIC ${ZIG_DIA_GUIDS_LIB}) target_link_libraries(zigstage1 LINK_PUBLIC ${ZIG_DIA_GUIDS_LIB})
endif() endif()
if(MSVC OR MINGW) if(MSVC OR MINGW)
target_link_libraries(zigcompiler LINK_PUBLIC version) target_link_libraries(zigstage1 LINK_PUBLIC version)
endif() endif()
add_executable(zig0 "${ZIG_MAIN_SRC}" "${ZIG0_SHIM_SRC}") add_executable(zig0 ${ZIG0_SOURCES})
set_target_properties(zig0 PROPERTIES set_target_properties(zig0 PROPERTIES
COMPILE_FLAGS ${EXE_CFLAGS} COMPILE_FLAGS ${EXE_CFLAGS}
LINK_FLAGS ${EXE_LDFLAGS} LINK_FLAGS ${EXE_LDFLAGS}
) )
target_link_libraries(zig0 zigcompiler) target_link_libraries(zig0 zigstage1)
if(MSVC) if(MSVC)
set(LIBSTAGE2 "${CMAKE_BINARY_DIR}/zigstage2.lib") set(ZIG1_OBJECT "${CMAKE_BINARY_DIR}/zig1.obj")
else() else()
set(LIBSTAGE2 "${CMAKE_BINARY_DIR}/libzigstage2.a") set(ZIG1_OBJECT "${CMAKE_BINARY_DIR}/zig1.o")
endif() endif()
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
set(LIBSTAGE2_RELEASE_ARG "") set(ZIG1_RELEASE_ARG "")
else() else()
set(LIBSTAGE2_RELEASE_ARG --release-fast --strip) set(ZIG1_RELEASE_ARG -OReleaseFast --strip)
endif() endif()
set(BUILD_LIBSTAGE2_ARGS "build-lib" set(BUILD_ZIG1_ARGS
"src-self-hosted/stage2.zig" "src/stage1.zig"
-target "${ZIG_TARGET_TRIPLE}" -target "${ZIG_TARGET_TRIPLE}"
"-mcpu=${ZIG_TARGET_MCPU}" "-mcpu=${ZIG_TARGET_MCPU}"
--name zigstage2 --name zig1
--override-lib-dir "${CMAKE_SOURCE_DIR}/lib" --override-lib-dir "${CMAKE_SOURCE_DIR}/lib"
--cache on "-femit-bin=${ZIG1_OBJECT}"
--output-dir "${CMAKE_BINARY_DIR}" "${ZIG1_RELEASE_ARG}"
${LIBSTAGE2_RELEASE_ARG}
--bundle-compiler-rt
-fPIC
-lc -lc
--pkg-begin build_options "${ZIG_CONFIG_ZIG_OUT}" --pkg-begin build_options "${ZIG_CONFIG_ZIG_OUT}"
--pkg-end --pkg-end
--pkg-begin compiler_rt "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt.zig"
--pkg-end
) )
if("${ZIG_TARGET_TRIPLE}" STREQUAL "native") if("${ZIG_TARGET_TRIPLE}" STREQUAL "native")
add_custom_target(zig_build_libstage2 ALL add_custom_target(zig_build_zig1 ALL
COMMAND zig0 ${BUILD_LIBSTAGE2_ARGS} COMMAND zig0 ${BUILD_ZIG1_ARGS}
DEPENDS zig0 DEPENDS zig0
BYPRODUCTS "${LIBSTAGE2}" BYPRODUCTS "${ZIG1_OBJECT}"
COMMENT STATUS "Building self-hosted component ${ZIG1_OBJECT}"
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}" WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
) )
set(ZIG_EXECUTABLE "${zig_BINARY_DIR}/zig") set(ZIG_EXECUTABLE "${zig_BINARY_DIR}/zig")
@ -493,26 +486,28 @@ if("${ZIG_TARGET_TRIPLE}" STREQUAL "native")
set(ZIG_EXECUTABLE "${ZIG_EXECUTABLE}.exe") set(ZIG_EXECUTABLE "${ZIG_EXECUTABLE}.exe")
endif() endif()
else() else()
add_custom_target(zig_build_libstage2 ALL add_custom_target(zig_build_zig1 ALL
COMMAND "${ZIG_EXECUTABLE}" ${BUILD_LIBSTAGE2_ARGS} COMMAND "${ZIG_EXECUTABLE}" ${BUILD_ZIG1_ARGS}
BYPRODUCTS "${LIBSTAGE2}" BYPRODUCTS "${ZIG1_OBJECT}"
COMMENT STATUS "Building self-hosted component ${ZIG1_OBJECT}"
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}" WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
) )
endif() endif()
add_executable(zig "${ZIG_MAIN_SRC}") # cmake won't let us configure an executable without C sources.
add_executable(zig "${CMAKE_SOURCE_DIR}/src/stage1/empty.cpp")
set_target_properties(zig PROPERTIES set_target_properties(zig PROPERTIES
COMPILE_FLAGS ${EXE_CFLAGS} COMPILE_FLAGS ${EXE_CFLAGS}
LINK_FLAGS ${EXE_LDFLAGS} LINK_FLAGS ${EXE_LDFLAGS}
) )
target_link_libraries(zig zigcompiler "${LIBSTAGE2}") target_link_libraries(zig "${ZIG1_OBJECT}" zigstage1)
if(MSVC) if(MSVC)
target_link_libraries(zig ntdll.lib) target_link_libraries(zig ntdll.lib)
elseif(MINGW) elseif(MINGW)
target_link_libraries(zig ntdll) target_link_libraries(zig ntdll)
endif() endif()
add_dependencies(zig zig_build_libstage2) add_dependencies(zig zig_build_zig1)
install(TARGETS zig DESTINATION bin) install(TARGETS zig DESTINATION bin)


@ -22,6 +22,8 @@ Note that you can
### Stage 1: Build Zig from C++ Source Code ### Stage 1: Build Zig from C++ Source Code
This step must be repeated when you make changes to any of the C++ source code.
#### Dependencies #### Dependencies
##### POSIX ##### POSIX
@ -69,6 +71,41 @@ make install
See https://github.com/ziglang/zig/wiki/Building-Zig-on-Windows See https://github.com/ziglang/zig/wiki/Building-Zig-on-Windows
### Stage 2: Build Self-Hosted Zig from Zig Source Code
Now we use the stage1 binary:
```
zig build --prefix $(pwd)/stage2 -Denable-llvm
```
This produces `stage2/bin/zig` which can be used for testing and development.
Once it is feature complete, it will be used to build stage 3 - the final compiler
binary.
### Stage 3: Rebuild Self-Hosted Zig Using the Self-Hosted Compiler
*Note: the Stage 2 compiler is not yet able to build Stage 3, so this step is not yet supported.*
Once the self-hosted compiler can build itself, this will be the actual
compiler binary that we will install to the system. Until then, users should
use stage 1.
#### Debug / Development Build
```
stage2/bin/zig build
```
This produces `zig-cache/bin/zig`.
#### Release / Install Build
```
stage2/bin/zig build install -Drelease
```
## License ## License
The ultimate goal of the Zig project is to serve users. As a first-order The ultimate goal of the Zig project is to serve users. As a first-order

build.zig (210 changed lines)

@ -9,6 +9,7 @@ const ArrayList = std.ArrayList;
const io = std.io; const io = std.io;
const fs = std.fs; const fs = std.fs;
const InstallDirectoryOptions = std.build.InstallDirectoryOptions; const InstallDirectoryOptions = std.build.InstallDirectoryOptions;
const assert = std.debug.assert;
const zig_version = std.builtin.Version{ .major = 0, .minor = 6, .patch = 0 }; const zig_version = std.builtin.Version{ .major = 0, .minor = 6, .patch = 0 };
@ -37,7 +38,7 @@ pub fn build(b: *Builder) !void {
const test_step = b.step("test", "Run all the tests"); const test_step = b.step("test", "Run all the tests");
var test_stage2 = b.addTest("src-self-hosted/test.zig"); var test_stage2 = b.addTest("src/test.zig");
test_stage2.setBuildMode(mode); test_stage2.setBuildMode(mode);
test_stage2.addPackagePath("stage2_tests", "test/stage2/test.zig"); test_stage2.addPackagePath("stage2_tests", "test/stage2/test.zig");
@ -55,70 +56,6 @@ pub fn build(b: *Builder) !void {
const enable_llvm = b.option(bool, "enable-llvm", "Build self-hosted compiler with LLVM backend enabled") orelse false; const enable_llvm = b.option(bool, "enable-llvm", "Build self-hosted compiler with LLVM backend enabled") orelse false;
const config_h_path_option = b.option([]const u8, "config_h", "Path to the generated config.h"); const config_h_path_option = b.option([]const u8, "config_h", "Path to the generated config.h");
if (!only_install_lib_files) {
var exe = b.addExecutable("zig", "src-self-hosted/main.zig");
exe.setBuildMode(mode);
exe.setTarget(target);
test_step.dependOn(&exe.step);
b.default_step.dependOn(&exe.step);
if (enable_llvm) {
const config_h_text = if (config_h_path_option) |config_h_path|
try std.fs.cwd().readFileAlloc(b.allocator, toNativePathSep(b, config_h_path), max_config_h_bytes)
else
try findAndReadConfigH(b);
var ctx = parseConfigH(b, config_h_text);
ctx.llvm = try findLLVM(b, ctx.llvm_config_exe);
try configureStage2(b, exe, ctx);
}
if (!only_install_lib_files) {
exe.install();
}
const tracy = b.option([]const u8, "tracy", "Enable Tracy integration. Supply path to Tracy source");
const link_libc = b.option(bool, "force-link-libc", "Force self-hosted compiler to link libc") orelse false;
if (link_libc) {
exe.linkLibC();
test_stage2.linkLibC();
}
const log_scopes = b.option([]const []const u8, "log", "Which log scopes to enable") orelse &[0][]const u8{};
const zir_dumps = b.option([]const []const u8, "dump-zir", "Which functions to dump ZIR for before codegen") orelse &[0][]const u8{};
const opt_version_string = b.option([]const u8, "version-string", "Override Zig version string. Default is to find out with git.");
const version = if (opt_version_string) |version| version else v: {
var code: u8 = undefined;
const version_untrimmed = b.execAllowFail(&[_][]const u8{
"git", "-C", b.build_root, "name-rev", "HEAD",
"--tags", "--name-only", "--no-undefined", "--always",
}, &code, .Ignore) catch |err| {
std.debug.print(
\\Unable to determine zig version string: {}
\\Provide the zig version string explicitly using the `version-string` build option.
, .{err});
std.process.exit(1);
};
const trimmed = mem.trim(u8, version_untrimmed, " \n\r");
break :v b.fmt("{}.{}.{}+{}", .{ zig_version.major, zig_version.minor, zig_version.patch, trimmed });
};
exe.addBuildOption([]const u8, "version", version);
exe.addBuildOption([]const []const u8, "log_scopes", log_scopes);
exe.addBuildOption([]const []const u8, "zir_dumps", zir_dumps);
exe.addBuildOption(bool, "enable_tracy", tracy != null);
if (tracy) |tracy_path| {
const client_cpp = fs.path.join(
b.allocator,
&[_][]const u8{ tracy_path, "TracyClient.cpp" },
) catch unreachable;
exe.addIncludeDir(tracy_path);
exe.addCSourceFile(client_cpp, &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" });
exe.linkSystemLibraryName("c++");
exe.linkLibC();
}
}
b.installDirectory(InstallDirectoryOptions{ b.installDirectory(InstallDirectoryOptions{
.source_dir = "lib", .source_dir = "lib",
.install_dir = .Lib, .install_dir = .Lib,
@ -133,6 +70,95 @@ pub fn build(b: *Builder) !void {
}, },
}); });
if (only_install_lib_files)
return;
const tracy = b.option([]const u8, "tracy", "Enable Tracy integration. Supply path to Tracy source");
const link_libc = b.option(bool, "force-link-libc", "Force self-hosted compiler to link libc") orelse enable_llvm;
var exe = b.addExecutable("zig", "src/main.zig");
exe.install();
exe.setBuildMode(mode);
exe.setTarget(target);
test_step.dependOn(&exe.step);
b.default_step.dependOn(&exe.step);
exe.addBuildOption(bool, "have_llvm", enable_llvm);
if (enable_llvm) {
const config_h_text = if (config_h_path_option) |config_h_path|
try std.fs.cwd().readFileAlloc(b.allocator, toNativePathSep(b, config_h_path), max_config_h_bytes)
else
try findAndReadConfigH(b);
var ctx = parseConfigH(b, config_h_text);
ctx.llvm = try findLLVM(b, ctx.llvm_config_exe);
try configureStage2(b, exe, ctx, tracy != null);
}
if (link_libc) {
exe.linkLibC();
test_stage2.linkLibC();
}
const log_scopes = b.option([]const []const u8, "log", "Which log scopes to enable") orelse &[0][]const u8{};
const zir_dumps = b.option([]const []const u8, "dump-zir", "Which functions to dump ZIR for before codegen") orelse &[0][]const u8{};
const opt_version_string = b.option([]const u8, "version-string", "Override Zig version string. Default is to find out with git.");
const version = if (opt_version_string) |version| version else v: {
const version_string = b.fmt("{}.{}.{}", .{ zig_version.major, zig_version.minor, zig_version.patch });
var code: u8 = undefined;
const git_sha_untrimmed = b.execAllowFail(&[_][]const u8{
"git", "-C", b.build_root, "name-rev", "HEAD",
"--tags", "--name-only", "--no-undefined", "--always",
}, &code, .Ignore) catch {
break :v version_string;
};
const git_sha_trimmed = mem.trim(u8, git_sha_untrimmed, " \n\r");
// Detect dirty changes.
const diff_untrimmed = b.execAllowFail(&[_][]const u8{
"git", "-C", b.build_root, "diff", "HEAD",
}, &code, .Ignore) catch |err| {
std.debug.print("Error executing git diff: {}", .{err});
std.process.exit(1);
};
const trimmed_diff = mem.trim(u8, diff_untrimmed, " \n\r");
const dirty_suffix = if (trimmed_diff.len == 0) "" else s: {
const dirty_hash = std.hash.Wyhash.hash(0, trimmed_diff);
break :s b.fmt("dirty{x}", .{@truncate(u32, dirty_hash)});
};
// This will look like e.g. "0.6.0^0" for a tag commit.
if (mem.endsWith(u8, git_sha_trimmed, "^0")) {
const git_ver_string = git_sha_trimmed[0 .. git_sha_trimmed.len - 2];
if (!mem.eql(u8, git_ver_string, version_string)) {
std.debug.print("Expected git tag '{}', found '{}'", .{ version_string, git_ver_string });
std.process.exit(1);
}
break :v b.fmt("{}{}", .{ version_string, dirty_suffix });
} else {
break :v b.fmt("{}+{}{}", .{ version_string, git_sha_trimmed, dirty_suffix });
}
};
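As an aside on the version logic above: an untagged build gets a version like `0.6.0+<sha>`, plus an optional `dirty<hash>` suffix derived from a Wyhash of the uncommitted diff. A minimal sketch of the suffix formatting, with an illustrative diff string standing in for real `git diff` output:

```
const std = @import("std");

test "dirty suffix formatting" {
    // Illustrative stand-in for trimmed `git diff HEAD` output.
    const trimmed_diff = "diff --git a/build.zig b/build.zig";
    const dirty_hash = std.hash.Wyhash.hash(0, trimmed_diff);
    const dirty_suffix = try std.fmt.allocPrint(std.testing.allocator, "dirty{x}", .{
        @truncate(u32, dirty_hash),
    });
    defer std.testing.allocator.free(dirty_suffix);
    // The full version then looks like e.g. "0.6.0+<sha>dirty<hex>" for a modified tree.
    std.testing.expect(std.mem.startsWith(u8, dirty_suffix, "dirty"));
}
```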
exe.addBuildOption([]const u8, "version", version);
exe.addBuildOption([]const []const u8, "log_scopes", log_scopes);
exe.addBuildOption([]const []const u8, "zir_dumps", zir_dumps);
exe.addBuildOption(bool, "enable_tracy", tracy != null);
exe.addBuildOption(bool, "is_stage1", false);
if (tracy) |tracy_path| {
const client_cpp = fs.path.join(
b.allocator,
&[_][]const u8{ tracy_path, "TracyClient.cpp" },
) catch unreachable;
exe.addIncludeDir(tracy_path);
exe.addCSourceFile(client_cpp, &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" });
if (!enable_llvm) {
exe.linkSystemLibraryName("c++");
}
exe.linkLibC();
}
const test_filter = b.option([]const u8, "test-filter", "Skip tests that do not match filter"); const test_filter = b.option([]const u8, "test-filter", "Skip tests that do not match filter");
const is_wine_enabled = b.option(bool, "enable-wine", "Use Wine to run cross compiled Windows tests") orelse false; const is_wine_enabled = b.option(bool, "enable-wine", "Use Wine to run cross compiled Windows tests") orelse false;
@ -140,10 +166,13 @@ pub fn build(b: *Builder) !void {
const is_wasmtime_enabled = b.option(bool, "enable-wasmtime", "Use Wasmtime to enable and run WASI libstd tests") orelse false; const is_wasmtime_enabled = b.option(bool, "enable-wasmtime", "Use Wasmtime to enable and run WASI libstd tests") orelse false;
const glibc_multi_dir = b.option([]const u8, "enable-foreign-glibc", "Provide directory with glibc installations to run cross compiled tests that link glibc"); const glibc_multi_dir = b.option([]const u8, "enable-foreign-glibc", "Provide directory with glibc installations to run cross compiled tests that link glibc");
test_stage2.addBuildOption(bool, "is_stage1", false);
test_stage2.addBuildOption(bool, "have_llvm", enable_llvm);
test_stage2.addBuildOption(bool, "enable_qemu", is_qemu_enabled); test_stage2.addBuildOption(bool, "enable_qemu", is_qemu_enabled);
test_stage2.addBuildOption(bool, "enable_wine", is_wine_enabled); test_stage2.addBuildOption(bool, "enable_wine", is_wine_enabled);
test_stage2.addBuildOption(bool, "enable_wasmtime", is_wasmtime_enabled); test_stage2.addBuildOption(bool, "enable_wasmtime", is_wasmtime_enabled);
test_stage2.addBuildOption(?[]const u8, "glibc_multi_install_dir", glibc_multi_dir); test_stage2.addBuildOption(?[]const u8, "glibc_multi_install_dir", glibc_multi_dir);
test_stage2.addBuildOption([]const u8, "version", version);
const test_stage2_step = b.step("test-stage2", "Run the stage2 compiler tests"); const test_stage2_step = b.step("test-stage2", "Run the stage2 compiler tests");
test_stage2_step.dependOn(&test_stage2.step); test_stage2_step.dependOn(&test_stage2.step);
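Each `addBuildOption` call above (together with the `--pkg-begin build_options` flag in the CMake-driven zig1 build) ends up as a declaration in a generated `build_options` package that the compiler sources import. A hypothetical consumer sketch; it only compiles when built through build.zig, since the package is generated, and the `printVersion` helper is invented for illustration:

```
const std = @import("std");
const build_options = @import("build_options");

// Hypothetical helper, not part of this diff: reads options declared in build.zig.
pub fn printVersion() void {
    std.debug.print("zig {} (llvm: {}, stage1: {})\n", .{
        build_options.version, build_options.have_llvm, build_options.is_stage1,
    });
}
```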
@ -182,10 +211,7 @@ pub fn build(b: *Builder) !void {
test_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes)); test_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes));
test_step.dependOn(tests.addStandaloneTests(b, test_filter, modes)); test_step.dependOn(tests.addStandaloneTests(b, test_filter, modes));
test_step.dependOn(tests.addStackTraceTests(b, test_filter, modes)); test_step.dependOn(tests.addStackTraceTests(b, test_filter, modes));
const test_cli = tests.addCliTests(b, test_filter, modes); test_step.dependOn(tests.addCliTests(b, test_filter, modes));
const test_cli_step = b.step("test-cli", "Run zig cli tests");
test_cli_step.dependOn(test_cli);
test_step.dependOn(test_cli);
test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes)); test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes));
test_step.dependOn(tests.addRuntimeSafetyTests(b, test_filter, modes)); test_step.dependOn(tests.addRuntimeSafetyTests(b, test_filter, modes));
test_step.dependOn(tests.addTranslateCTests(b, test_filter)); test_step.dependOn(tests.addTranslateCTests(b, test_filter));
@ -241,7 +267,7 @@ fn fileExists(filename: []const u8) !bool {
fn addCppLib(b: *Builder, lib_exe_obj: anytype, cmake_binary_dir: []const u8, lib_name: []const u8) void { fn addCppLib(b: *Builder, lib_exe_obj: anytype, cmake_binary_dir: []const u8, lib_name: []const u8) void {
lib_exe_obj.addObjectFile(fs.path.join(b.allocator, &[_][]const u8{ lib_exe_obj.addObjectFile(fs.path.join(b.allocator, &[_][]const u8{
cmake_binary_dir, cmake_binary_dir,
"zig_cpp", "zigcpp",
b.fmt("{}{}{}", .{ lib_exe_obj.target.libPrefix(), lib_name, lib_exe_obj.target.staticLibSuffix() }), b.fmt("{}{}{}", .{ lib_exe_obj.target.libPrefix(), lib_name, lib_exe_obj.target.staticLibSuffix() }),
}) catch unreachable); }) catch unreachable);
} }
@ -320,21 +346,17 @@ fn findLLVM(b: *Builder, llvm_config_exe: []const u8) !LibraryDep {
return result; return result;
} }
fn configureStage2(b: *Builder, exe: anytype, ctx: Context) !void { fn configureStage2(b: *Builder, exe: anytype, ctx: Context, need_cpp_includes: bool) !void {
exe.addIncludeDir("src"); exe.addIncludeDir("src");
exe.addIncludeDir(ctx.cmake_binary_dir); exe.addIncludeDir(ctx.cmake_binary_dir);
addCppLib(b, exe, ctx.cmake_binary_dir, "zig_cpp"); addCppLib(b, exe, ctx.cmake_binary_dir, "zigcpp");
if (ctx.lld_include_dir.len != 0) { assert(ctx.lld_include_dir.len != 0);
exe.addIncludeDir(ctx.lld_include_dir); exe.addIncludeDir(ctx.lld_include_dir);
{
var it = mem.tokenize(ctx.lld_libraries, ";"); var it = mem.tokenize(ctx.lld_libraries, ";");
while (it.next()) |lib| { while (it.next()) |lib| {
exe.addObjectFile(lib); exe.addObjectFile(lib);
} }
} else {
addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_wasm");
addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_elf");
addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_coff");
addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_lib");
} }
{ {
var it = mem.tokenize(ctx.clang_libraries, ";"); var it = mem.tokenize(ctx.clang_libraries, ";");
@ -344,10 +366,20 @@ fn configureStage2(b: *Builder, exe: anytype, ctx: Context) !void {
} }
dependOnLib(b, exe, ctx.llvm); dependOnLib(b, exe, ctx.llvm);
// Boy, it sure would be nice to simply linkSystemLibrary("c++") and rely on zig's
// ability to provide libc++ right? Well thanks to C++ not having a stable ABI this
// will cause linker errors. It would work in the situation when `zig cc` is used to
// build LLVM, Clang, and LLD, however when depending on them as system libraries, system
// libc++ must be used.
const cross_compile = false; // TODO
if (cross_compile) {
// In this case we assume that zig cc was used to build the LLVM, Clang, LLD dependencies.
exe.linkSystemLibrary("c++");
} else {
if (exe.target.getOsTag() == .linux) { if (exe.target.getOsTag() == .linux) {
// First we try to static link against gcc libstdc++. If that doesn't work, // First we try to static link against gcc libstdc++. If that doesn't work,
// we fall back to -lc++ and cross our fingers. // we fall back to -lc++ and cross our fingers.
addCxxKnownPath(b, ctx, exe, "libstdc++.a", "") catch |err| switch (err) { addCxxKnownPath(b, ctx, exe, "libstdc++.a", "", need_cpp_includes) catch |err| switch (err) {
error.RequiredLibraryNotFound => { error.RequiredLibraryNotFound => {
exe.linkSystemLibrary("c++"); exe.linkSystemLibrary("c++");
}, },
@ -356,12 +388,12 @@ fn configureStage2(b: *Builder, exe: anytype, ctx: Context) !void {
exe.linkSystemLibrary("pthread"); exe.linkSystemLibrary("pthread");
} else if (exe.target.isFreeBSD()) { } else if (exe.target.isFreeBSD()) {
try addCxxKnownPath(b, ctx, exe, "libc++.a", null); try addCxxKnownPath(b, ctx, exe, "libc++.a", null, need_cpp_includes);
exe.linkSystemLibrary("pthread"); exe.linkSystemLibrary("pthread");
} else if (exe.target.isDarwin()) { } else if (exe.target.isDarwin()) {
if (addCxxKnownPath(b, ctx, exe, "libgcc_eh.a", "")) { if (addCxxKnownPath(b, ctx, exe, "libgcc_eh.a", "", need_cpp_includes)) {
// Compiler is GCC. // Compiler is GCC.
try addCxxKnownPath(b, ctx, exe, "libstdc++.a", null); try addCxxKnownPath(b, ctx, exe, "libstdc++.a", null, need_cpp_includes);
exe.linkSystemLibrary("pthread"); exe.linkSystemLibrary("pthread");
// TODO LLD cannot perform this link. // TODO LLD cannot perform this link.
// See https://github.com/ziglang/zig/issues/1535 // See https://github.com/ziglang/zig/issues/1535
@ -378,8 +410,7 @@ fn configureStage2(b: *Builder, exe: anytype, ctx: Context) !void {
if (ctx.dia_guids_lib.len != 0) { if (ctx.dia_guids_lib.len != 0) {
exe.addObjectFile(ctx.dia_guids_lib); exe.addObjectFile(ctx.dia_guids_lib);
} }
}
exe.linkSystemLibrary("c");
} }
fn addCxxKnownPath( fn addCxxKnownPath(
@ -388,6 +419,7 @@ fn addCxxKnownPath(
exe: anytype, exe: anytype,
objname: []const u8, objname: []const u8,
errtxt: ?[]const u8, errtxt: ?[]const u8,
need_cpp_includes: bool,
) !void { ) !void {
const path_padded = try b.exec(&[_][]const u8{ const path_padded = try b.exec(&[_][]const u8{
ctx.cxx_compiler, ctx.cxx_compiler,
@ -403,6 +435,16 @@ fn addCxxKnownPath(
return error.RequiredLibraryNotFound; return error.RequiredLibraryNotFound;
} }
exe.addObjectFile(path_unpadded); exe.addObjectFile(path_unpadded);
// TODO a way to integrate with system c++ include files here
// cc -E -Wp,-v -xc++ /dev/null
if (need_cpp_includes) {
// I used these temporarily for testing something but we obviously need a
// more general purpose solution here.
//exe.addIncludeDir("/nix/store/b3zsk4ihlpiimv3vff86bb5bxghgdzb9-gcc-9.2.0/lib/gcc/x86_64-unknown-linux-gnu/9.2.0/../../../../include/c++/9.2.0");
//exe.addIncludeDir("/nix/store/b3zsk4ihlpiimv3vff86bb5bxghgdzb9-gcc-9.2.0/lib/gcc/x86_64-unknown-linux-gnu/9.2.0/../../../../include/c++/9.2.0/x86_64-unknown-linux-gnu");
//exe.addIncludeDir("/nix/store/b3zsk4ihlpiimv3vff86bb5bxghgdzb9-gcc-9.2.0/lib/gcc/x86_64-unknown-linux-gnu/9.2.0/../../../../include/c++/9.2.0/backward");
}
} }
const Context = struct { const Context = struct {


@ -28,22 +28,6 @@ PATH=$PWD/$WASMTIME:$PATH
# This will affect the cmake command below. # This will affect the cmake command below.
git config core.abbrev 9 git config core.abbrev 9
# This patch is a workaround for
# https://bugs.llvm.org/show_bug.cgi?id=44870 / https://github.com/llvm/llvm-project/issues/191
# It only applies to the apt.llvm.org packages.
patch <<'END_PATCH'
--- CMakeLists.txt
+++ CMakeLists.txt
@@ -369,6 +369,7 @@ target_link_libraries(zig_cpp LINK_PUBLIC
${CLANG_LIBRARIES}
${LLD_LIBRARIES}
${LLVM_LIBRARIES}
+ "-Wl,/usr/lib/llvm-10/lib/LLVMPolly.so"
)
add_library(opt_c_util STATIC ${OPTIMIZED_C_SOURCES})
END_PATCH
export CC=gcc-7 export CC=gcc-7
export CXX=g++-7 export CXX=g++-7
mkdir build mkdir build


@ -7,6 +7,13 @@ pacman --noconfirm --needed -S git base-devel mingw-w64-x86_64-toolchain mingw-w
git config core.abbrev 9 git config core.abbrev 9
# Git is wrong for autocrlf being enabled by default on Windows.
# git is mangling files on Windows by default.
# This is the second bug I've tracked down to being caused by autocrlf.
git config core.autocrlf false
# Too late; the files are already mangled.
git checkout .
ZIGBUILDDIR="$(pwd)/build" ZIGBUILDDIR="$(pwd)/build"
PREFIX="$ZIGBUILDDIR/dist" PREFIX="$ZIGBUILDDIR/dist"
CMAKEFLAGS="-DCMAKE_COLOR_MAKEFILE=OFF -DCMAKE_INSTALL_PREFIX=$PREFIX -DZIG_STATIC=ON" CMAKEFLAGS="-DCMAKE_COLOR_MAKEFILE=OFF -DCMAKE_INSTALL_PREFIX=$PREFIX -DZIG_STATIC=ON"


@ -21,6 +21,11 @@ cd $ZIGDIR
# This will affect the cmake command below. # This will affect the cmake command below.
git config core.abbrev 9 git config core.abbrev 9
# SourceHut reports that it is a terminal that supports escape codes, but it
# is a filthy liar. Here we tell Zig to not try to send any terminal escape
# codes to show progress.
export TERM=dumb
mkdir build mkdir build
cd build cd build
cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=$PREFIX "-DCMAKE_INSTALL_PREFIX=$(pwd)/release" -DZIG_STATIC=ON cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=$PREFIX "-DCMAKE_INSTALL_PREFIX=$(pwd)/release" -DZIG_STATIC=ON


@ -4,7 +4,7 @@ const io = std.io;
const fs = std.fs; const fs = std.fs;
const process = std.process; const process = std.process;
const ChildProcess = std.ChildProcess; const ChildProcess = std.ChildProcess;
const warn = std.debug.warn; const print = std.debug.print;
const mem = std.mem; const mem = std.mem;
const testing = std.testing; const testing = std.testing;
@ -215,23 +215,23 @@ const Tokenizer = struct {
fn parseError(tokenizer: *Tokenizer, token: Token, comptime fmt: []const u8, args: anytype) anyerror { fn parseError(tokenizer: *Tokenizer, token: Token, comptime fmt: []const u8, args: anytype) anyerror {
const loc = tokenizer.getTokenLocation(token); const loc = tokenizer.getTokenLocation(token);
const args_prefix = .{ tokenizer.source_file_name, loc.line + 1, loc.column + 1 }; const args_prefix = .{ tokenizer.source_file_name, loc.line + 1, loc.column + 1 };
warn("{}:{}:{}: error: " ++ fmt ++ "\n", args_prefix ++ args); print("{}:{}:{}: error: " ++ fmt ++ "\n", args_prefix ++ args);
if (loc.line_start <= loc.line_end) { if (loc.line_start <= loc.line_end) {
warn("{}\n", .{tokenizer.buffer[loc.line_start..loc.line_end]}); print("{}\n", .{tokenizer.buffer[loc.line_start..loc.line_end]});
{ {
var i: usize = 0; var i: usize = 0;
while (i < loc.column) : (i += 1) { while (i < loc.column) : (i += 1) {
warn(" ", .{}); print(" ", .{});
} }
} }
{ {
const caret_count = token.end - token.start; const caret_count = token.end - token.start;
var i: usize = 0; var i: usize = 0;
while (i < caret_count) : (i += 1) { while (i < caret_count) : (i += 1) {
warn("~", .{}); print("~", .{});
} }
} }
warn("\n", .{}); print("\n", .{});
} }
return error.ParseError; return error.ParseError;
} }
@ -274,6 +274,7 @@ const Code = struct {
link_objects: []const []const u8, link_objects: []const []const u8,
target_str: ?[]const u8, target_str: ?[]const u8,
link_libc: bool, link_libc: bool,
disable_cache: bool,
const Id = union(enum) { const Id = union(enum) {
Test, Test,
@ -522,6 +523,7 @@ fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
defer link_objects.deinit(); defer link_objects.deinit();
var target_str: ?[]const u8 = null; var target_str: ?[]const u8 = null;
var link_libc = false; var link_libc = false;
var disable_cache = false;
const source_token = while (true) { const source_token = while (true) {
const content_tok = try eatToken(tokenizer, Token.Id.Content); const content_tok = try eatToken(tokenizer, Token.Id.Content);
@ -532,6 +534,8 @@ fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
mode = .ReleaseFast; mode = .ReleaseFast;
} else if (mem.eql(u8, end_tag_name, "code_release_safe")) { } else if (mem.eql(u8, end_tag_name, "code_release_safe")) {
mode = .ReleaseSafe; mode = .ReleaseSafe;
} else if (mem.eql(u8, end_tag_name, "code_disable_cache")) {
disable_cache = true;
} else if (mem.eql(u8, end_tag_name, "code_link_object")) { } else if (mem.eql(u8, end_tag_name, "code_link_object")) {
_ = try eatToken(tokenizer, Token.Id.Separator); _ = try eatToken(tokenizer, Token.Id.Separator);
const obj_tok = try eatToken(tokenizer, Token.Id.TagContent); const obj_tok = try eatToken(tokenizer, Token.Id.TagContent);
@ -572,6 +576,7 @@ fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
.link_objects = link_objects.toOwnedSlice(), .link_objects = link_objects.toOwnedSlice(),
.target_str = target_str, .target_str = target_str,
.link_libc = link_libc, .link_libc = link_libc,
.disable_cache = disable_cache,
}, },
}); });
tokenizer.code_node_count += 1; tokenizer.code_node_count += 1;
@ -1032,7 +1037,7 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
}, },
.Code => |code| { .Code => |code| {
code_progress_index += 1; code_progress_index += 1;
warn("docgen example code {}/{}...", .{ code_progress_index, tokenizer.code_node_count }); print("docgen example code {}/{}...", .{ code_progress_index, tokenizer.code_node_count });
const raw_source = tokenizer.buffer[code.source_token.start..code.source_token.end]; const raw_source = tokenizer.buffer[code.source_token.start..code.source_token.end];
const trimmed_raw_source = mem.trim(u8, raw_source, " \n"); const trimmed_raw_source = mem.trim(u8, raw_source, " \n");
@ -1055,30 +1060,17 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
var build_args = std.ArrayList([]const u8).init(allocator); var build_args = std.ArrayList([]const u8).init(allocator);
defer build_args.deinit(); defer build_args.deinit();
try build_args.appendSlice(&[_][]const u8{ try build_args.appendSlice(&[_][]const u8{
zig_exe, zig_exe, "build-exe",
"build-exe", "--name", code.name,
tmp_source_file_name, "--color", "on",
"--name", "--enable-cache", tmp_source_file_name,
code.name,
"--color",
"on",
"--cache",
"on",
}); });
try out.print("<pre><code class=\"shell\">$ zig build-exe {}.zig", .{code.name}); try out.print("<pre><code class=\"shell\">$ zig build-exe {}.zig", .{code.name});
switch (code.mode) { switch (code.mode) {
.Debug => {}, .Debug => {},
.ReleaseSafe => { else => {
try build_args.append("--release-safe"); try build_args.appendSlice(&[_][]const u8{ "-O", @tagName(code.mode) });
try out.print(" --release-safe", .{}); try out.print(" -O {s}", .{@tagName(code.mode)});
},
.ReleaseFast => {
try build_args.append("--release-fast");
try out.print(" --release-fast", .{});
},
.ReleaseSmall => {
try build_args.append("--release-small");
try out.print(" --release-small", .{});
}, },
} }
for (code.link_objects) |link_object| { for (code.link_objects) |link_object| {
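The rewritten switch above replaces the separate `--release-safe`, `--release-fast`, and `--release-small` flags with a single `-O <mode>` argument, relying on `@tagName` of `std.builtin.Mode` matching the CLI spelling. A small sketch of that assumption:

```
const std = @import("std");

test "-O flag spelling comes from @tagName" {
    const mode: std.builtin.Mode = .ReleaseSmall;
    // docgen now appends: "-O", @tagName(code.mode)
    std.testing.expect(std.mem.eql(u8, @tagName(mode), "ReleaseSmall"));
}
```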
@ -1087,9 +1079,8 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
allocator, allocator,
&[_][]const u8{ tmp_dir_name, name_with_ext }, &[_][]const u8{ tmp_dir_name, name_with_ext },
); );
try build_args.append("--object");
try build_args.append(full_path_object); try build_args.append(full_path_object);
try out.print(" --object {}", .{name_with_ext}); try out.print(" {s}", .{name_with_ext});
} }
if (code.link_libc) { if (code.link_libc) {
try build_args.append("-lc"); try build_args.append("-lc");
@ -1114,20 +1105,14 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
switch (result.term) { switch (result.term) {
.Exited => |exit_code| { .Exited => |exit_code| {
if (exit_code == 0) { if (exit_code == 0) {
warn("{}\nThe following command incorrectly succeeded:\n", .{result.stderr}); print("{}\nThe following command incorrectly succeeded:\n", .{result.stderr});
for (build_args.items) |arg| dumpArgs(build_args.items);
warn("{} ", .{arg})
else
warn("\n", .{});
return parseError(tokenizer, code.source_token, "example incorrectly compiled", .{}); return parseError(tokenizer, code.source_token, "example incorrectly compiled", .{});
} }
}, },
else => { else => {
warn("{}\nThe following command crashed:\n", .{result.stderr}); print("{}\nThe following command crashed:\n", .{result.stderr});
for (build_args.items) |arg| dumpArgs(build_args.items);
warn("{} ", .{arg})
else
warn("\n", .{});
return parseError(tokenizer, code.source_token, "example compile crashed", .{}); return parseError(tokenizer, code.source_token, "example compile crashed", .{});
}, },
} }
@ -1174,11 +1159,8 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
switch (result.term) { switch (result.term) {
.Exited => |exit_code| { .Exited => |exit_code| {
if (exit_code == 0) { if (exit_code == 0) {
warn("{}\nThe following command incorrectly succeeded:\n", .{result.stderr}); print("{}\nThe following command incorrectly succeeded:\n", .{result.stderr});
for (run_args) |arg| dumpArgs(run_args);
warn("{} ", .{arg})
else
warn("\n", .{});
return parseError(tokenizer, code.source_token, "example incorrectly compiled", .{}); return parseError(tokenizer, code.source_token, "example incorrectly compiled", .{});
} }
}, },
@ -1206,27 +1188,13 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
var test_args = std.ArrayList([]const u8).init(allocator); var test_args = std.ArrayList([]const u8).init(allocator);
defer test_args.deinit(); defer test_args.deinit();
try test_args.appendSlice(&[_][]const u8{ try test_args.appendSlice(&[_][]const u8{ zig_exe, "test", tmp_source_file_name });
zig_exe,
"test",
tmp_source_file_name,
"--cache",
"on",
});
try out.print("<pre><code class=\"shell\">$ zig test {}.zig", .{code.name}); try out.print("<pre><code class=\"shell\">$ zig test {}.zig", .{code.name});
switch (code.mode) { switch (code.mode) {
.Debug => {}, .Debug => {},
.ReleaseSafe => { else => {
try test_args.append("--release-safe"); try test_args.appendSlice(&[_][]const u8{ "-O", @tagName(code.mode) });
try out.print(" --release-safe", .{}); try out.print(" -O {s}", .{@tagName(code.mode)});
},
.ReleaseFast => {
try test_args.append("--release-fast");
try out.print(" --release-fast", .{});
},
.ReleaseSmall => {
try test_args.append("--release-small");
try out.print(" --release-small", .{});
}, },
} }
if (code.link_libc) { if (code.link_libc) {
@ -1252,23 +1220,13 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
"--color", "--color",
"on", "on",
tmp_source_file_name, tmp_source_file_name,
"--output-dir",
tmp_dir_name,
}); });
try out.print("<pre><code class=\"shell\">$ zig test {}.zig", .{code.name}); try out.print("<pre><code class=\"shell\">$ zig test {}.zig", .{code.name});
switch (code.mode) { switch (code.mode) {
.Debug => {}, .Debug => {},
.ReleaseSafe => { else => {
try test_args.append("--release-safe"); try test_args.appendSlice(&[_][]const u8{ "-O", @tagName(code.mode) });
try out.print(" --release-safe", .{}); try out.print(" -O {s}", .{@tagName(code.mode)});
},
.ReleaseFast => {
try test_args.append("--release-fast");
try out.print(" --release-fast", .{});
},
.ReleaseSmall => {
try test_args.append("--release-small");
try out.print(" --release-small", .{});
}, },
} }
const result = try ChildProcess.exec(.{ const result = try ChildProcess.exec(.{
@ -1280,25 +1238,19 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
switch (result.term) { switch (result.term) {
.Exited => |exit_code| { .Exited => |exit_code| {
if (exit_code == 0) { if (exit_code == 0) {
warn("{}\nThe following command incorrectly succeeded:\n", .{result.stderr}); print("{}\nThe following command incorrectly succeeded:\n", .{result.stderr});
for (test_args.items) |arg| dumpArgs(test_args.items);
warn("{} ", .{arg})
else
warn("\n", .{});
return parseError(tokenizer, code.source_token, "example incorrectly compiled", .{}); return parseError(tokenizer, code.source_token, "example incorrectly compiled", .{});
} }
}, },
else => { else => {
warn("{}\nThe following command crashed:\n", .{result.stderr}); print("{}\nThe following command crashed:\n", .{result.stderr});
for (test_args.items) |arg| dumpArgs(test_args.items);
warn("{} ", .{arg})
else
warn("\n", .{});
return parseError(tokenizer, code.source_token, "example compile crashed", .{}); return parseError(tokenizer, code.source_token, "example compile crashed", .{});
}, },
} }
if (mem.indexOf(u8, result.stderr, error_match) == null) { if (mem.indexOf(u8, result.stderr, error_match) == null) {
warn("{}\nExpected to find '{}' in stderr", .{ result.stderr, error_match }); print("{}\nExpected to find '{}' in stderr", .{ result.stderr, error_match });
return parseError(tokenizer, code.source_token, "example did not have expected compile error", .{}); return parseError(tokenizer, code.source_token, "example did not have expected compile error", .{});
} }
const escaped_stderr = try escapeHtml(allocator, result.stderr); const escaped_stderr = try escapeHtml(allocator, result.stderr);
@ -1314,23 +1266,21 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
zig_exe, zig_exe,
"test", "test",
tmp_source_file_name, tmp_source_file_name,
"--output-dir",
tmp_dir_name,
}); });
var mode_arg: []const u8 = ""; var mode_arg: []const u8 = "";
switch (code.mode) { switch (code.mode) {
.Debug => {}, .Debug => {},
.ReleaseSafe => { .ReleaseSafe => {
try test_args.append("--release-safe"); try test_args.append("-OReleaseSafe");
mode_arg = " --release-safe"; mode_arg = "-OReleaseSafe";
}, },
.ReleaseFast => { .ReleaseFast => {
try test_args.append("--release-fast"); try test_args.append("-OReleaseFast");
mode_arg = " --release-fast"; mode_arg = "-OReleaseFast";
}, },
.ReleaseSmall => { .ReleaseSmall => {
try test_args.append("--release-small"); try test_args.append("-OReleaseSmall");
mode_arg = " --release-small"; mode_arg = "-OReleaseSmall";
}, },
} }
@ -1343,25 +1293,19 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
switch (result.term) { switch (result.term) {
.Exited => |exit_code| { .Exited => |exit_code| {
if (exit_code == 0) { if (exit_code == 0) {
warn("{}\nThe following command incorrectly succeeded:\n", .{result.stderr}); print("{}\nThe following command incorrectly succeeded:\n", .{result.stderr});
for (test_args.items) |arg| dumpArgs(test_args.items);
warn("{} ", .{arg})
else
warn("\n", .{});
return parseError(tokenizer, code.source_token, "example test incorrectly succeeded", .{}); return parseError(tokenizer, code.source_token, "example test incorrectly succeeded", .{});
} }
}, },
else => { else => {
warn("{}\nThe following command crashed:\n", .{result.stderr}); print("{}\nThe following command crashed:\n", .{result.stderr});
for (test_args.items) |arg| dumpArgs(test_args.items);
warn("{} ", .{arg})
else
warn("\n", .{});
return parseError(tokenizer, code.source_token, "example compile crashed", .{}); return parseError(tokenizer, code.source_token, "example compile crashed", .{});
}, },
} }
if (mem.indexOf(u8, result.stderr, error_match) == null) { if (mem.indexOf(u8, result.stderr, error_match) == null) {
warn("{}\nExpected to find '{}' in stderr", .{ result.stderr, error_match }); print("{}\nExpected to find '{}' in stderr", .{ result.stderr, error_match });
return parseError(tokenizer, code.source_token, "example did not have expected runtime safety error message", .{}); return parseError(tokenizer, code.source_token, "example did not have expected runtime safety error message", .{});
} }
const escaped_stderr = try escapeHtml(allocator, result.stderr); const escaped_stderr = try escapeHtml(allocator, result.stderr);
@ -1395,32 +1339,20 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
"on", "on",
"--name", "--name",
code.name, code.name,
"--output-dir", try std.fmt.allocPrint(allocator, "-femit-bin={s}{c}{s}", .{
tmp_dir_name, tmp_dir_name, fs.path.sep, name_plus_obj_ext,
}),
}); });
if (!code.is_inline) { if (!code.is_inline) {
try out.print("<pre><code class=\"shell\">$ zig build-obj {}.zig", .{code.name}); try out.print("<pre><code class=\"shell\">$ zig build-obj {}.zig", .{code.name});
} }
switch (code.mode) { switch (code.mode) {
.Debug => {}, .Debug => {},
.ReleaseSafe => { else => {
try build_args.append("--release-safe"); try build_args.appendSlice(&[_][]const u8{ "-O", @tagName(code.mode) });
if (!code.is_inline) { if (!code.is_inline) {
try out.print(" --release-safe", .{}); try out.print(" -O {s}", .{@tagName(code.mode)});
}
},
.ReleaseFast => {
try build_args.append("--release-fast");
if (!code.is_inline) {
try out.print(" --release-fast", .{});
}
},
.ReleaseSmall => {
try build_args.append("--release-small");
if (!code.is_inline) {
try out.print(" --release-small", .{});
} }
}, },
} }
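With `--output-dir` gone, the output location is folded into a single `-femit-bin=` argument built with `allocPrint`, as in the hunk above. A standalone sketch with placeholder names (`docgen_tmp` and `foo.o` are illustrative, not values from the diff):

```
const std = @import("std");

test "-femit-bin argument construction" {
    const allocator = std.testing.allocator;
    const tmp_dir_name = "docgen_tmp"; // placeholder
    const name_plus_obj_ext = "foo.o"; // placeholder
    const arg = try std.fmt.allocPrint(allocator, "-femit-bin={s}{s}{s}", .{
        tmp_dir_name, std.fs.path.sep_str, name_plus_obj_ext,
    });
    defer allocator.free(arg);
    std.testing.expect(std.mem.startsWith(u8, arg, "-femit-bin=docgen_tmp"));
}
```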
@ -1440,25 +1372,19 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
switch (result.term) { switch (result.term) {
.Exited => |exit_code| { .Exited => |exit_code| {
if (exit_code == 0) { if (exit_code == 0) {
warn("{}\nThe following command incorrectly succeeded:\n", .{result.stderr}); print("{}\nThe following command incorrectly succeeded:\n", .{result.stderr});
for (build_args.items) |arg| dumpArgs(build_args.items);
warn("{} ", .{arg})
else
warn("\n", .{});
return parseError(tokenizer, code.source_token, "example build incorrectly succeeded", .{}); return parseError(tokenizer, code.source_token, "example build incorrectly succeeded", .{});
} }
}, },
else => { else => {
warn("{}\nThe following command crashed:\n", .{result.stderr}); print("{}\nThe following command crashed:\n", .{result.stderr});
for (build_args.items) |arg| dumpArgs(build_args.items);
warn("{} ", .{arg})
else
warn("\n", .{});
return parseError(tokenizer, code.source_token, "example compile crashed", .{}); return parseError(tokenizer, code.source_token, "example compile crashed", .{});
}, },
} }
if (mem.indexOf(u8, result.stderr, error_match) == null) { if (mem.indexOf(u8, result.stderr, error_match) == null) {
warn("{}\nExpected to find '{}' in stderr", .{ result.stderr, error_match }); print("{}\nExpected to find '{}' in stderr", .{ result.stderr, error_match });
return parseError(tokenizer, code.source_token, "example did not have expected compile error message", .{}); return parseError(tokenizer, code.source_token, "example did not have expected compile error message", .{});
} }
const escaped_stderr = try escapeHtml(allocator, result.stderr); const escaped_stderr = try escapeHtml(allocator, result.stderr);
@ -1472,6 +1398,12 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
} }
}, },
Code.Id.Lib => { Code.Id.Lib => {
const bin_basename = try std.zig.binNameAlloc(allocator, .{
.root_name = code.name,
.target = std.Target.current,
.output_mode = .Lib,
});
var test_args = std.ArrayList([]const u8).init(allocator); var test_args = std.ArrayList([]const u8).init(allocator);
defer test_args.deinit(); defer test_args.deinit();
@ -1479,23 +1411,16 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
zig_exe, zig_exe,
"build-lib", "build-lib",
tmp_source_file_name, tmp_source_file_name,
"--output-dir", try std.fmt.allocPrint(allocator, "-femit-bin={s}{s}{s}", .{
tmp_dir_name, tmp_dir_name, fs.path.sep_str, bin_basename,
}),
}); });
try out.print("<pre><code class=\"shell\">$ zig build-lib {}.zig", .{code.name}); try out.print("<pre><code class=\"shell\">$ zig build-lib {}.zig", .{code.name});
switch (code.mode) { switch (code.mode) {
.Debug => {}, .Debug => {},
.ReleaseSafe => { else => {
try test_args.append("--release-safe"); try test_args.appendSlice(&[_][]const u8{ "-O", @tagName(code.mode) });
try out.print(" --release-safe", .{}); try out.print(" -O {s}", .{@tagName(code.mode)});
},
.ReleaseFast => {
try test_args.append("--release-fast");
try out.print(" --release-fast", .{});
},
.ReleaseSmall => {
try test_args.append("--release-small");
try out.print(" --release-small", .{});
}, },
} }
if (code.target_str) |triple| { if (code.target_str) |triple| {
@ -1508,7 +1433,7 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
try out.print("\n{}{}</code></pre>\n", .{ escaped_stderr, escaped_stdout }); try out.print("\n{}{}</code></pre>\n", .{ escaped_stderr, escaped_stdout });
}, },
} }
warn("OK\n", .{}); print("OK\n", .{});
}, },
} }
} }
@ -1524,20 +1449,14 @@ fn exec(allocator: *mem.Allocator, env_map: *std.BufMap, args: []const []const u
switch (result.term) { switch (result.term) {
.Exited => |exit_code| { .Exited => |exit_code| {
if (exit_code != 0) { if (exit_code != 0) {
warn("{}\nThe following command exited with code {}:\n", .{ result.stderr, exit_code }); print("{}\nThe following command exited with code {}:\n", .{ result.stderr, exit_code });
for (args) |arg| dumpArgs(args);
warn("{} ", .{arg})
else
warn("\n", .{});
return error.ChildExitError; return error.ChildExitError;
} }
}, },
else => { else => {
warn("{}\nThe following command crashed:\n", .{result.stderr}); print("{}\nThe following command crashed:\n", .{result.stderr});
for (args) |arg| dumpArgs(args);
warn("{} ", .{arg})
else
warn("\n", .{});
return error.ChildCrashed; return error.ChildCrashed;
}, },
} }
@ -1545,9 +1464,13 @@ fn exec(allocator: *mem.Allocator, env_map: *std.BufMap, args: []const []const u
} }
fn getBuiltinCode(allocator: *mem.Allocator, env_map: *std.BufMap, zig_exe: []const u8) ![]const u8 { fn getBuiltinCode(allocator: *mem.Allocator, env_map: *std.BufMap, zig_exe: []const u8) ![]const u8 {
const result = try exec(allocator, env_map, &[_][]const u8{ const result = try exec(allocator, env_map, &[_][]const u8{ zig_exe, "build-obj", "--show-builtin" });
zig_exe,
"builtin",
});
return result.stdout; return result.stdout;
} }
fn dumpArgs(args: []const []const u8) void {
for (args) |arg|
print("{} ", .{arg})
else
print("\n", .{});
}


@ -1078,6 +1078,7 @@ const nan = std.math.nan(f128);
but you can switch to {#syntax#}Optimized{#endsyntax#} mode on a per-block basis:</p> but you can switch to {#syntax#}Optimized{#endsyntax#} mode on a per-block basis:</p>
{#code_begin|obj|foo#} {#code_begin|obj|foo#}
{#code_release_fast#} {#code_release_fast#}
{#code_disable_cache#}
const std = @import("std"); const std = @import("std");
const builtin = std.builtin; const builtin = std.builtin;
const big = @as(f64, 1 << 40); const big = @as(f64, 1 << 40);
@ -1840,7 +1841,7 @@ const Point = struct {
y: i32, y: i32,
}; };
test "compile-time array initalization" { test "compile-time array initialization" {
assert(fancy_array[4].x == 4); assert(fancy_array[4].x == 4);
assert(fancy_array[4].y == 8); assert(fancy_array[4].y == 8);
} }
@ -8467,30 +8468,23 @@ test "integer truncation" {
<li>{#syntax#}@TypeOf(null){#endsyntax#}</li> <li>{#syntax#}@TypeOf(null){#endsyntax#}</li>
<li>{#link|Arrays#}</li> <li>{#link|Arrays#}</li>
<li>{#link|Optionals#}</li> <li>{#link|Optionals#}</li>
<li>{#link|Error Set Type#}</li>
<li>{#link|Error Union Type#}</li> <li>{#link|Error Union Type#}</li>
<li>{#link|Vectors#}</li> <li>{#link|Vectors#}</li>
<li>{#link|Opaque Types#}</li> <li>{#link|Opaque Types#}</li>
<li>AnyFrame</li> <li>{#link|@Frame#}</li>
</ul> <li>{#syntax#}anyframe{#endsyntax#}</li>
<p> <li>{#link|struct#}</li>
For these types it is a <li>{#link|enum#}</li>
<a href="https://github.com/ziglang/zig/issues/2907">TODO in the compiler to implement</a>: <li>{#link|Enum Literals#}</li>
</p>
<ul>
<li>ErrorSet</li>
<li>Enum</li>
<li>FnFrame</li>
<li>EnumLiteral</li>
</ul>
<p>
For these types, {#syntax#}@Type{#endsyntax#} is not available.
<a href="https://github.com/ziglang/zig/issues/383">There is an open proposal to allow unions and structs</a>.
</p>
<ul>
<li>{#link|union#}</li> <li>{#link|union#}</li>
</ul>
<p>
For these types, {#syntax#}@Type{#endsyntax#} is not available:
</p>
<ul>
<li>{#link|Functions#}</li> <li>{#link|Functions#}</li>
<li>BoundFn</li> <li>BoundFn</li>
<li>{#link|struct#}</li>
</ul> </ul>
{#header_close#} {#header_close#}
{#header_open|@typeInfo#} {#header_open|@typeInfo#}
@ -9888,9 +9882,10 @@ The result is 3</code></pre>
const std = @import("std"); const std = @import("std");
pub fn main() !void { pub fn main() !void {
// TODO a better default allocator that isn't as wasteful! var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
const args = try std.process.argsAlloc(std.heap.page_allocator); const gpa = &general_purpose_allocator.allocator;
defer std.process.argsFree(std.heap.page_allocator, args); const args = try std.process.argsAlloc(gpa);
defer std.process.argsFree(gpa, args);
for (args) |arg, i| { for (args) |arg, i| {
std.debug.print("{}: {}\n", .{ i, arg }); std.debug.print("{}: {}\n", .{ i, arg });
@ -11392,8 +11387,9 @@ keyword &lt;- KEYWORD_align / KEYWORD_and / KEYWORD_anyframe / KEYWORD_anytype
<li>Incremental improvements.</li> <li>Incremental improvements.</li>
<li>Avoid local maximums.</li> <li>Avoid local maximums.</li>
<li>Reduce the amount one must remember.</li> <li>Reduce the amount one must remember.</li>
<li>Minimize energy spent on coding style.</li> <li>Focus on code rather than style.</li>
<li>Resource deallocation must succeed.</li> <li>Resource allocation may fail; resource deallocation must succeed.</li>
<li>Memory is a resource.</li>
<li>Together we serve the users.</li> <li>Together we serve the users.</li>
</ul> </ul>
{#header_close#} {#header_close#}

View File

@ -112,12 +112,10 @@ pub fn ArrayHashMap(
return self.unmanaged.clearAndFree(self.allocator); return self.unmanaged.clearAndFree(self.allocator);
} }
/// Deprecated. Use `items().len`.
pub fn count(self: Self) usize { pub fn count(self: Self) usize {
return self.items().len; return self.unmanaged.count();
} }
/// Deprecated. Iterate using `items`.
pub fn iterator(self: *const Self) Iterator { pub fn iterator(self: *const Self) Iterator {
return Iterator{ return Iterator{
.hm = self, .hm = self,
@ -332,6 +330,10 @@ pub fn ArrayHashMapUnmanaged(
} }
} }
pub fn count(self: Self) usize {
return self.entries.items.len;
}
/// If key exists this function cannot fail. /// If key exists this function cannot fail.
/// If there is an existing item with `key`, then the result /// If there is an existing item with `key`, then the result
/// `Entry` pointer points to it, and found_existing is true. /// `Entry` pointer points to it, and found_existing is true.

View File

@ -1,265 +0,0 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2020 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const builtin = @import("builtin");
const std = @import("std.zig");
const math = std.math;
const debug = std.debug;
const assert = std.debug.assert;
const testing = std.testing;
/// There is a trade-off in how quickly a bloom filter fills up;
/// the number of items it can hold before saturating is approximately:
/// n_items / K * ln(2)
/// and the rate of false positives after adding N items is:
/// (1-e^(-K*N/n_items))^K
/// where N is the number of items added
pub fn BloomFilter(
/// Size of bloom filter in cells, must be a power of two.
comptime n_items: usize,
/// Number of cells to set per item
comptime K: usize,
/// Cell type, should be:
/// - `bool` for a standard bloom filter
/// - an unsigned integer type for a counting bloom filter
comptime Cell: type,
/// endianness of the Cell
comptime endian: builtin.Endian,
/// Hash function to use
comptime hash: fn (out: []u8, Ki: usize, in: []const u8) void,
) type {
assert(n_items > 0);
assert(math.isPowerOfTwo(n_items));
assert(K > 0);
const cellEmpty = if (Cell == bool) false else @as(Cell, 0);
const cellMax = if (Cell == bool) true else math.maxInt(Cell);
const n_bytes = (n_items * comptime std.meta.bitCount(Cell)) / 8;
assert(n_bytes > 0);
const Io = std.packed_int_array.PackedIntIo(Cell, endian);
return struct {
const Self = @This();
pub const items = n_items;
pub const Index = math.IntFittingRange(0, n_items - 1);
data: [n_bytes]u8 = [_]u8{0} ** n_bytes,
pub fn reset(self: *Self) void {
std.mem.set(u8, self.data[0..], 0);
}
pub fn @"union"(x: Self, y: Self) Self {
var r = Self{ .data = undefined };
inline for (x.data) |v, i| {
r.data[i] = v | y.data[i];
}
return r;
}
pub fn intersection(x: Self, y: Self) Self {
var r = Self{ .data = undefined };
inline for (x.data) |v, i| {
r.data[i] = v & y.data[i];
}
return r;
}
pub fn getCell(self: Self, cell: Index) Cell {
return Io.get(&self.data, cell, 0);
}
pub fn incrementCell(self: *Self, cell: Index) void {
if (Cell == bool or Cell == u1) {
// skip the 'get' operation
Io.set(&self.data, cell, 0, cellMax);
} else {
const old = Io.get(&self.data, cell, 0);
if (old != cellMax) {
Io.set(&self.data, cell, 0, old + 1);
}
}
}
pub fn clearCell(self: *Self, cell: Index) void {
Io.set(&self.data, cell, 0, cellEmpty);
}
pub fn add(self: *Self, item: []const u8) void {
comptime var i = 0;
inline while (i < K) : (i += 1) {
var K_th_bit: packed struct {
x: Index,
} = undefined;
hash(std.mem.asBytes(&K_th_bit), i, item);
incrementCell(self, K_th_bit.x);
}
}
pub fn contains(self: Self, item: []const u8) bool {
comptime var i = 0;
inline while (i < K) : (i += 1) {
var K_th_bit: packed struct {
x: Index,
} = undefined;
hash(std.mem.asBytes(&K_th_bit), i, item);
if (getCell(self, K_th_bit.x) == cellEmpty)
return false;
}
return true;
}
pub fn resize(self: Self, comptime newsize: usize) BloomFilter(newsize, K, Cell, endian, hash) {
var r: BloomFilter(newsize, K, Cell, endian, hash) = undefined;
if (newsize < n_items) {
std.mem.copy(u8, r.data[0..], self.data[0..r.data.len]);
var copied: usize = r.data.len;
while (copied < self.data.len) : (copied += r.data.len) {
for (self.data[copied .. copied + r.data.len]) |s, i| {
r.data[i] |= s;
}
}
} else if (newsize == n_items) {
r = self;
} else if (newsize > n_items) {
var copied: usize = 0;
while (copied < r.data.len) : (copied += self.data.len) {
std.mem.copy(u8, r.data[copied .. copied + self.data.len], &self.data);
}
}
return r;
}
/// Returns number of non-zero cells
pub fn popCount(self: Self) Index {
var n: Index = 0;
if (Cell == bool or Cell == u1) {
for (self.data) |b, i| {
n += @popCount(u8, b);
}
} else {
var i: usize = 0;
while (i < n_items) : (i += 1) {
const cell = self.getCell(@intCast(Index, i));
n += if (if (Cell == bool) cell else cell > 0) @as(Index, 1) else @as(Index, 0);
}
}
return n;
}
pub fn estimateItems(self: Self) f64 {
const m = comptime @intToFloat(f64, n_items);
const k = comptime @intToFloat(f64, K);
const X = @intToFloat(f64, self.popCount());
return (comptime (-m / k)) * math.log1p(X * comptime (-1 / m));
}
};
}
fn hashFunc(out: []u8, Ki: usize, in: []const u8) void {
var st = std.crypto.hash.Gimli.init(.{});
st.update(std.mem.asBytes(&Ki));
st.update(in);
st.final(out);
}
test "std.BloomFilter" {
// https://github.com/ziglang/zig/issues/5127
if (std.Target.current.cpu.arch == .mips) return error.SkipZigTest;
inline for ([_]type{ bool, u1, u2, u3, u4 }) |Cell| {
const emptyCell = if (Cell == bool) false else @as(Cell, 0);
const BF = BloomFilter(128 * 8, 8, Cell, builtin.endian, hashFunc);
var bf = BF{};
var i: usize = undefined;
// confirm that it is initialised to the empty filter
i = 0;
while (i < BF.items) : (i += 1) {
testing.expectEqual(emptyCell, bf.getCell(@intCast(BF.Index, i)));
}
testing.expectEqual(@as(BF.Index, 0), bf.popCount());
testing.expectEqual(@as(f64, 0), bf.estimateItems());
// fill in a few items
bf.incrementCell(42);
bf.incrementCell(255);
bf.incrementCell(256);
bf.incrementCell(257);
// check that they were set
testing.expectEqual(true, bf.getCell(42) != emptyCell);
testing.expectEqual(true, bf.getCell(255) != emptyCell);
testing.expectEqual(true, bf.getCell(256) != emptyCell);
testing.expectEqual(true, bf.getCell(257) != emptyCell);
// clear just one of them; make sure the rest are still set
bf.clearCell(256);
testing.expectEqual(true, bf.getCell(42) != emptyCell);
testing.expectEqual(true, bf.getCell(255) != emptyCell);
testing.expectEqual(false, bf.getCell(256) != emptyCell);
testing.expectEqual(true, bf.getCell(257) != emptyCell);
// reset any of the ones we've set and confirm we're back to the empty filter
bf.clearCell(42);
bf.clearCell(255);
bf.clearCell(257);
i = 0;
while (i < BF.items) : (i += 1) {
testing.expectEqual(emptyCell, bf.getCell(@intCast(BF.Index, i)));
}
testing.expectEqual(@as(BF.Index, 0), bf.popCount());
testing.expectEqual(@as(f64, 0), bf.estimateItems());
// Let's add a string
bf.add("foo");
testing.expectEqual(true, bf.contains("foo"));
{
// try adding same string again. make sure popcount is the same
const old_popcount = bf.popCount();
testing.expect(old_popcount > 0);
bf.add("foo");
testing.expectEqual(true, bf.contains("foo"));
testing.expectEqual(old_popcount, bf.popCount());
}
// Get back to empty filter via .reset
bf.reset();
// Double check that .reset worked
i = 0;
while (i < BF.items) : (i += 1) {
testing.expectEqual(emptyCell, bf.getCell(@intCast(BF.Index, i)));
}
testing.expectEqual(@as(BF.Index, 0), bf.popCount());
testing.expectEqual(@as(f64, 0), bf.estimateItems());
comptime var teststrings = [_][]const u8{
"foo",
"bar",
"a longer string",
"some more",
"the quick brown fox",
"unique string",
};
inline for (teststrings) |str| {
bf.add(str);
}
inline for (teststrings) |str| {
testing.expectEqual(true, bf.contains(str));
}
{ // estimate should be close for low packing
const est = bf.estimateItems();
testing.expect(est > @intToFloat(f64, teststrings.len) - 1);
testing.expect(est < @intToFloat(f64, teststrings.len) + 1);
}
const larger_bf = bf.resize(4096);
inline for (teststrings) |str| {
testing.expectEqual(true, larger_bf.contains(str));
}
testing.expectEqual(@as(u12, bf.popCount()) * (4096 / 1024), larger_bf.popCount());
const smaller_bf = bf.resize(64);
inline for (teststrings) |str| {
testing.expectEqual(true, smaller_bf.contains(str));
}
testing.expect(bf.popCount() <= @as(u10, smaller_bf.popCount()) * (1024 / 64));
}
}
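As a rough sanity check of the false-positive formula in the doc comment above, a hedged numeric sketch (the 1024/8/64 parameters are arbitrary illustration values; `hashFunc` is the test helper defined above):

// Hypothetical instantiation: 1024 cells, 8 probes per item, plain bool cells.
const ExampleBF = BloomFilter(1024, 8, bool, builtin.endian, hashFunc);
// After adding N = 64 items, the expected false-positive rate is
// (1 - e^(-8*64/1024))^8 = (1 - e^(-0.5))^8 ≈ 5.7e-4,
// i.e. roughly one spurious `contains` hit per ~1750 negative lookups.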

View File

@ -1165,6 +1165,11 @@ pub const FileSource = union(enum) {
} }
}; };
const BuildOptionArtifactArg = struct {
name: []const u8,
artifact: *LibExeObjStep,
};
pub const LibExeObjStep = struct { pub const LibExeObjStep = struct {
step: Step, step: Step,
builder: *Builder, builder: *Builder,
@ -1210,6 +1215,7 @@ pub const LibExeObjStep = struct {
out_pdb_filename: []const u8, out_pdb_filename: []const u8,
packages: ArrayList(Pkg), packages: ArrayList(Pkg),
build_options_contents: std.ArrayList(u8), build_options_contents: std.ArrayList(u8),
build_options_artifact_args: std.ArrayList(BuildOptionArtifactArg),
system_linker_hack: bool = false, system_linker_hack: bool = false,
object_src: []const u8, object_src: []const u8,
@ -1355,6 +1361,7 @@ pub const LibExeObjStep = struct {
.framework_dirs = ArrayList([]const u8).init(builder.allocator), .framework_dirs = ArrayList([]const u8).init(builder.allocator),
.object_src = undefined, .object_src = undefined,
.build_options_contents = std.ArrayList(u8).init(builder.allocator), .build_options_contents = std.ArrayList(u8).init(builder.allocator),
.build_options_artifact_args = std.ArrayList(BuildOptionArtifactArg).init(builder.allocator),
.c_std = Builder.CStd.C99, .c_std = Builder.CStd.C99,
.override_lib_dir = null, .override_lib_dir = null,
.main_pkg_path = null, .main_pkg_path = null,
@ -1377,6 +1384,7 @@ pub const LibExeObjStep = struct {
} }
fn computeOutFileNames(self: *LibExeObjStep) void { fn computeOutFileNames(self: *LibExeObjStep) void {
// TODO make this call std.zig.binNameAlloc
switch (self.kind) { switch (self.kind) {
.Obj => { .Obj => {
self.out_filename = self.builder.fmt("{}{}", .{ self.name, self.target.oFileExt() }); self.out_filename = self.builder.fmt("{}{}", .{ self.name, self.target.oFileExt() });
@ -1692,8 +1700,6 @@ pub const LibExeObjStep = struct {
self.main_pkg_path = dir_path; self.main_pkg_path = dir_path;
} }
pub const setDisableGenH = @compileError("deprecated; set the emit_h field directly");
pub fn setLibCFile(self: *LibExeObjStep, libc_file: ?[]const u8) void { pub fn setLibCFile(self: *LibExeObjStep, libc_file: ?[]const u8) void {
self.libc_file = libc_file; self.libc_file = libc_file;
} }
@ -1812,6 +1818,13 @@ pub const LibExeObjStep = struct {
out.print("pub const {} = {};\n", .{ name, value }) catch unreachable; out.print("pub const {} = {};\n", .{ name, value }) catch unreachable;
} }
/// The value is the path in the cache dir.
/// Adds a dependency automatically.
pub fn addBuildOptionArtifact(self: *LibExeObjStep, name: []const u8, artifact: *LibExeObjStep) void {
self.build_options_artifact_args.append(.{ .name = name, .artifact = artifact }) catch unreachable;
self.step.dependOn(&artifact.step);
}
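A minimal build.zig sketch of how addBuildOptionArtifact might be consumed; the `tool`/`app` step names and source paths are hypothetical, and the example assumes the usual `build_options` package import:

const Builder = @import("std").build.Builder;

pub fn build(b: *Builder) void {
    const tool = b.addExecutable("tool", "tools/gen.zig");
    const app = b.addExecutable("app", "src/main.zig");
    // The generated build_options package gains `pub const tool_path: []const u8`
    // holding the cached output path of `tool`; the step dependency is added for us.
    app.addBuildOptionArtifact("tool_path", tool);
    app.install();
}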
pub fn addSystemIncludeDir(self: *LibExeObjStep, path: []const u8) void { pub fn addSystemIncludeDir(self: *LibExeObjStep, path: []const u8) void {
self.include_dirs.append(IncludeDir{ .RawPathSystem = self.builder.dupe(path) }) catch unreachable; self.include_dirs.append(IncludeDir{ .RawPathSystem = self.builder.dupe(path) }) catch unreachable;
} }
@ -1947,10 +1960,10 @@ pub const LibExeObjStep = struct {
if (self.root_src) |root_src| try zig_args.append(root_src.getPath(builder)); if (self.root_src) |root_src| try zig_args.append(root_src.getPath(builder));
var prev_has_extra_flags = false;
for (self.link_objects.span()) |link_object| { for (self.link_objects.span()) |link_object| {
switch (link_object) { switch (link_object) {
.StaticPath => |static_path| { .StaticPath => |static_path| {
try zig_args.append("--object");
try zig_args.append(builder.pathFromRoot(static_path)); try zig_args.append(builder.pathFromRoot(static_path));
}, },
@ -1958,12 +1971,10 @@ pub const LibExeObjStep = struct {
.Exe => unreachable, .Exe => unreachable,
.Test => unreachable, .Test => unreachable,
.Obj => { .Obj => {
try zig_args.append("--object");
try zig_args.append(other.getOutputPath()); try zig_args.append(other.getOutputPath());
}, },
.Lib => { .Lib => {
if (!other.is_dynamic or self.target.isWindows()) { if (!other.is_dynamic or self.target.isWindows()) {
try zig_args.append("--object");
try zig_args.append(other.getOutputLibPath()); try zig_args.append(other.getOutputLibPath());
} else { } else {
const full_path_lib = other.getOutputPath(); const full_path_lib = other.getOutputPath();
@ -1982,20 +1993,41 @@ pub const LibExeObjStep = struct {
try zig_args.append(name); try zig_args.append(name);
}, },
.AssemblyFile => |asm_file| { .AssemblyFile => |asm_file| {
try zig_args.append("--c-source"); if (prev_has_extra_flags) {
try zig_args.append("-extra-cflags");
try zig_args.append("--");
prev_has_extra_flags = false;
}
try zig_args.append(asm_file.getPath(builder)); try zig_args.append(asm_file.getPath(builder));
}, },
.CSourceFile => |c_source_file| { .CSourceFile => |c_source_file| {
try zig_args.append("--c-source"); if (c_source_file.args.len == 0) {
if (prev_has_extra_flags) {
try zig_args.append("-cflags");
try zig_args.append("--");
prev_has_extra_flags = false;
}
} else {
try zig_args.append("-cflags");
for (c_source_file.args) |arg| { for (c_source_file.args) |arg| {
try zig_args.append(arg); try zig_args.append(arg);
} }
try zig_args.append("--");
}
try zig_args.append(c_source_file.source.getPath(builder)); try zig_args.append(c_source_file.source.getPath(builder));
}, },
} }
} }
if (self.build_options_contents.items.len > 0) { if (self.build_options_contents.items.len > 0 or self.build_options_artifact_args.items.len > 0) {
// Render build artifact options at the last minute, now that the path is known.
for (self.build_options_artifact_args.items) |item| {
const out = self.build_options_contents.writer();
out.print("pub const {}: []const u8 = ", .{item.name}) catch unreachable;
std.zig.renderStringLiteral(item.artifact.getOutputPath(), out) catch unreachable;
out.writeAll(";\n") catch unreachable;
}
const build_options_file = try fs.path.join( const build_options_file = try fs.path.join(
builder.allocator, builder.allocator,
&[_][]const u8{ builder.cache_root, builder.fmt("{}_build_options.zig", .{self.name}) }, &[_][]const u8{ builder.cache_root, builder.fmt("{}_build_options.zig", .{self.name}) },
@ -2056,10 +2088,8 @@ pub const LibExeObjStep = struct {
} }
switch (self.build_mode) { switch (self.build_mode) {
.Debug => {}, .Debug => {}, // Skip since it's the default.
.ReleaseSafe => zig_args.append("--release-safe") catch unreachable, else => zig_args.append(builder.fmt("-O{s}", .{@tagName(self.build_mode)})) catch unreachable,
.ReleaseFast => zig_args.append("--release-fast") catch unreachable,
.ReleaseSmall => zig_args.append("--release-small") catch unreachable,
} }
try zig_args.append("--cache-dir"); try zig_args.append("--cache-dir");
@ -2070,14 +2100,8 @@ pub const LibExeObjStep = struct {
if (self.kind == Kind.Lib and self.is_dynamic) { if (self.kind == Kind.Lib and self.is_dynamic) {
if (self.version) |version| { if (self.version) |version| {
zig_args.append("--ver-major") catch unreachable; zig_args.append("--version") catch unreachable;
zig_args.append(builder.fmt("{}", .{version.major})) catch unreachable; zig_args.append(builder.fmt("{}", .{version})) catch unreachable;
zig_args.append("--ver-minor") catch unreachable;
zig_args.append(builder.fmt("{}", .{version.minor})) catch unreachable;
zig_args.append("--ver-patch") catch unreachable;
zig_args.append(builder.fmt("{}", .{version.patch})) catch unreachable;
} }
} }
if (self.is_dynamic) { if (self.is_dynamic) {
@ -2294,8 +2318,7 @@ pub const LibExeObjStep = struct {
if (self.kind == Kind.Test) { if (self.kind == Kind.Test) {
try builder.spawnChild(zig_args.span()); try builder.spawnChild(zig_args.span());
} else { } else {
try zig_args.append("--cache"); try zig_args.append("--enable-cache");
try zig_args.append("on");
const output_dir_nl = try builder.execFromStep(zig_args.span(), &self.step); const output_dir_nl = try builder.execFromStep(zig_args.span(), &self.step);
const build_output_dir = mem.trimRight(u8, output_dir_nl, "\r\n"); const build_output_dir = mem.trimRight(u8, output_dir_nl, "\r\n");

View File

@ -72,8 +72,7 @@ pub const TranslateCStep = struct {
try argv_list.append("translate-c"); try argv_list.append("translate-c");
try argv_list.append("-lc"); try argv_list.append("-lc");
try argv_list.append("--cache"); try argv_list.append("--enable-cache");
try argv_list.append("on");
if (!self.target.isNative()) { if (!self.target.isNative()) {
try argv_list.append("-target"); try argv_list.append("-target");

View File

@ -1,726 +0,0 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2020 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std.zig");
const crypto = std.crypto;
const Hasher = crypto.auth.siphash.SipHash128(1, 3); // provides enough collision resistance for the CacheHash use cases, while being one of our fastest options right now
const fs = std.fs;
const base64 = std.base64;
const ArrayList = std.ArrayList;
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const fmt = std.fmt;
const Allocator = std.mem.Allocator;
const base64_encoder = fs.base64_encoder;
const base64_decoder = fs.base64_decoder;
/// This is 128 bits; even with 2^54 cache entries, the probability of a collision would be under 10^-6
const BIN_DIGEST_LEN = 16;
const BASE64_DIGEST_LEN = base64.Base64Encoder.calcSize(BIN_DIGEST_LEN);
const MANIFEST_FILE_SIZE_MAX = 50 * 1024 * 1024;
pub const File = struct {
path: ?[]const u8,
max_file_size: ?usize,
stat: fs.File.Stat,
bin_digest: [BIN_DIGEST_LEN]u8,
contents: ?[]const u8,
pub fn deinit(self: *File, allocator: *Allocator) void {
if (self.path) |owned_slice| {
allocator.free(owned_slice);
self.path = null;
}
if (self.contents) |contents| {
allocator.free(contents);
self.contents = null;
}
self.* = undefined;
}
};
/// CacheHash manages project-local `zig-cache` directories.
/// This is not a general-purpose cache.
/// It was designed to be fast and simple, not to withstand attacks using specially-crafted input.
pub const CacheHash = struct {
allocator: *Allocator,
hasher_init: Hasher, // initial state, that can be copied
hasher: Hasher, // current state for incremental hashing
manifest_dir: fs.Dir,
manifest_file: ?fs.File,
manifest_dirty: bool,
files: ArrayList(File),
b64_digest: [BASE64_DIGEST_LEN]u8,
/// Be sure to call release after successful initialization.
pub fn init(allocator: *Allocator, dir: fs.Dir, manifest_dir_path: []const u8) !CacheHash {
const hasher_init = Hasher.init(&[_]u8{0} ** Hasher.minimum_key_length);
return CacheHash{
.allocator = allocator,
.hasher_init = hasher_init,
.hasher = hasher_init,
.manifest_dir = try dir.makeOpenPath(manifest_dir_path, .{}),
.manifest_file = null,
.manifest_dirty = false,
.files = ArrayList(File).init(allocator),
.b64_digest = undefined,
};
}
/// Record a slice of bytes as a dependency of the process being cached
pub fn addSlice(self: *CacheHash, val: []const u8) void {
assert(self.manifest_file == null);
self.hasher.update(val);
self.hasher.update(&[_]u8{0});
}
/// Convert the input value into bytes and record it as a dependency of the
/// process being cached
pub fn add(self: *CacheHash, val: anytype) void {
assert(self.manifest_file == null);
const valPtr = switch (@typeInfo(@TypeOf(val))) {
.Int => &val,
.Pointer => val,
else => &val,
};
self.addSlice(mem.asBytes(valPtr));
}
/// Add a file as a dependency of the process being cached. When `CacheHash.hit` is
/// called, the file's contents will be checked to ensure that it matches
/// the contents from previous times.
///
/// Max file size will be used to determine the amount of space the file contents
/// are allowed to take up in memory. If max_file_size is null, then the contents
/// will not be loaded into memory.
///
/// Returns the index of the entry in the `CacheHash.files` ArrayList. You can use it
/// to access the contents of the file after calling `CacheHash.hit()` like so:
///
/// ```
/// var file_contents = cache_hash.files.items[file_index].contents.?;
/// ```
pub fn addFile(self: *CacheHash, file_path: []const u8, max_file_size: ?usize) !usize {
assert(self.manifest_file == null);
try self.files.ensureCapacity(self.files.items.len + 1);
const resolved_path = try fs.path.resolve(self.allocator, &[_][]const u8{file_path});
const idx = self.files.items.len;
self.files.addOneAssumeCapacity().* = .{
.path = resolved_path,
.contents = null,
.max_file_size = max_file_size,
.stat = undefined,
.bin_digest = undefined,
};
self.addSlice(resolved_path);
return idx;
}
/// Check the cache to see if the input exists in it. If it exists, a base64 encoding
/// of its hash will be returned; otherwise, null will be returned.
///
/// This function will also acquire an exclusive lock to the manifest file. This means
/// that a process holding a CacheHash will block any other process attempting to
/// acquire the lock.
///
/// The lock on the manifest file is released when `CacheHash.release` is called.
pub fn hit(self: *CacheHash) !?[BASE64_DIGEST_LEN]u8 {
assert(self.manifest_file == null);
var bin_digest: [BIN_DIGEST_LEN]u8 = undefined;
self.hasher.final(&bin_digest);
base64_encoder.encode(self.b64_digest[0..], &bin_digest);
self.hasher = self.hasher_init;
self.hasher.update(&bin_digest);
const manifest_file_path = try fmt.allocPrint(self.allocator, "{}.txt", .{self.b64_digest});
defer self.allocator.free(manifest_file_path);
if (self.files.items.len != 0) {
self.manifest_file = try self.manifest_dir.createFile(manifest_file_path, .{
.read = true,
.truncate = false,
.lock = .Exclusive,
});
} else {
// If there are no file inputs, we check if the manifest file exists instead of
// comparing the hashes on the files used for the cached item
self.manifest_file = self.manifest_dir.openFile(manifest_file_path, .{
.read = true,
.write = true,
.lock = .Exclusive,
}) catch |err| switch (err) {
error.FileNotFound => {
self.manifest_dirty = true;
self.manifest_file = try self.manifest_dir.createFile(manifest_file_path, .{
.read = true,
.truncate = false,
.lock = .Exclusive,
});
return null;
},
else => |e| return e,
};
}
const file_contents = try self.manifest_file.?.inStream().readAllAlloc(self.allocator, MANIFEST_FILE_SIZE_MAX);
defer self.allocator.free(file_contents);
const input_file_count = self.files.items.len;
var any_file_changed = false;
var line_iter = mem.tokenize(file_contents, "\n");
var idx: usize = 0;
while (line_iter.next()) |line| {
defer idx += 1;
const cache_hash_file = if (idx < input_file_count) &self.files.items[idx] else blk: {
const new = try self.files.addOne();
new.* = .{
.path = null,
.contents = null,
.max_file_size = null,
.stat = undefined,
.bin_digest = undefined,
};
break :blk new;
};
var iter = mem.tokenize(line, " ");
const size = iter.next() orelse return error.InvalidFormat;
const inode = iter.next() orelse return error.InvalidFormat;
const mtime_nsec_str = iter.next() orelse return error.InvalidFormat;
const digest_str = iter.next() orelse return error.InvalidFormat;
const file_path = iter.rest();
cache_hash_file.stat.size = fmt.parseInt(u64, size, 10) catch return error.InvalidFormat;
cache_hash_file.stat.inode = fmt.parseInt(fs.File.INode, inode, 10) catch return error.InvalidFormat;
cache_hash_file.stat.mtime = fmt.parseInt(i64, mtime_nsec_str, 10) catch return error.InvalidFormat;
base64_decoder.decode(&cache_hash_file.bin_digest, digest_str) catch return error.InvalidFormat;
if (file_path.len == 0) {
return error.InvalidFormat;
}
if (cache_hash_file.path) |p| {
if (!mem.eql(u8, file_path, p)) {
return error.InvalidFormat;
}
}
if (cache_hash_file.path == null) {
cache_hash_file.path = try self.allocator.dupe(u8, file_path);
}
const this_file = fs.cwd().openFile(cache_hash_file.path.?, .{ .read = true }) catch {
return error.CacheUnavailable;
};
defer this_file.close();
const actual_stat = try this_file.stat();
const size_match = actual_stat.size == cache_hash_file.stat.size;
const mtime_match = actual_stat.mtime == cache_hash_file.stat.mtime;
const inode_match = actual_stat.inode == cache_hash_file.stat.inode;
if (!size_match or !mtime_match or !inode_match) {
self.manifest_dirty = true;
cache_hash_file.stat = actual_stat;
if (isProblematicTimestamp(cache_hash_file.stat.mtime)) {
cache_hash_file.stat.mtime = 0;
cache_hash_file.stat.inode = 0;
}
var actual_digest: [BIN_DIGEST_LEN]u8 = undefined;
try hashFile(this_file, &actual_digest, self.hasher_init);
if (!mem.eql(u8, &cache_hash_file.bin_digest, &actual_digest)) {
cache_hash_file.bin_digest = actual_digest;
// keep going until we have the input file digests
any_file_changed = true;
}
}
if (!any_file_changed) {
self.hasher.update(&cache_hash_file.bin_digest);
}
}
if (any_file_changed) {
// cache miss
// keep the manifest file open
// reset the hash
self.hasher = self.hasher_init;
self.hasher.update(&bin_digest);
// Remove files not in the initial hash
for (self.files.items[input_file_count..]) |*file| {
file.deinit(self.allocator);
}
self.files.shrink(input_file_count);
for (self.files.items) |file| {
self.hasher.update(&file.bin_digest);
}
return null;
}
if (idx < input_file_count) {
self.manifest_dirty = true;
while (idx < input_file_count) : (idx += 1) {
const ch_file = &self.files.items[idx];
try self.populateFileHash(ch_file);
}
return null;
}
return self.final();
}
fn populateFileHash(self: *CacheHash, ch_file: *File) !void {
const file = try fs.cwd().openFile(ch_file.path.?, .{});
defer file.close();
ch_file.stat = try file.stat();
if (isProblematicTimestamp(ch_file.stat.mtime)) {
ch_file.stat.mtime = 0;
ch_file.stat.inode = 0;
}
if (ch_file.max_file_size) |max_file_size| {
if (ch_file.stat.size > max_file_size) {
return error.FileTooBig;
}
const contents = try self.allocator.alloc(u8, @intCast(usize, ch_file.stat.size));
errdefer self.allocator.free(contents);
// Hash while reading from disk, to keep the contents in the cpu cache while
// doing hashing.
var hasher = self.hasher_init;
var off: usize = 0;
while (true) {
// give me everything you've got, captain
const bytes_read = try file.read(contents[off..]);
if (bytes_read == 0) break;
hasher.update(contents[off..][0..bytes_read]);
off += bytes_read;
}
hasher.final(&ch_file.bin_digest);
ch_file.contents = contents;
} else {
try hashFile(file, &ch_file.bin_digest, self.hasher_init);
}
self.hasher.update(&ch_file.bin_digest);
}
/// Add a file as a dependency of the process being cached, after the initial hash has been
/// calculated. This is useful for processes that don't know all the files that
/// are depended on ahead of time. For example, a source file that can import other files
/// will need to be recompiled if the imported file is changed.
pub fn addFilePostFetch(self: *CacheHash, file_path: []const u8, max_file_size: usize) ![]u8 {
assert(self.manifest_file != null);
const resolved_path = try fs.path.resolve(self.allocator, &[_][]const u8{file_path});
errdefer self.allocator.free(resolved_path);
const new_ch_file = try self.files.addOne();
new_ch_file.* = .{
.path = resolved_path,
.max_file_size = max_file_size,
.stat = undefined,
.bin_digest = undefined,
.contents = null,
};
errdefer self.files.shrink(self.files.items.len - 1);
try self.populateFileHash(new_ch_file);
return new_ch_file.contents.?;
}
/// Add a file as a dependency of the process being cached, after the initial hash has been
/// calculated. This is useful for processes that don't know all the files that
/// are depended on ahead of time. For example, a source file that can import other files
/// will need to be recompiled if the imported file is changed.
pub fn addFilePost(self: *CacheHash, file_path: []const u8) !void {
assert(self.manifest_file != null);
const resolved_path = try fs.path.resolve(self.allocator, &[_][]const u8{file_path});
errdefer self.allocator.free(resolved_path);
const new_ch_file = try self.files.addOne();
new_ch_file.* = .{
.path = resolved_path,
.max_file_size = null,
.stat = undefined,
.bin_digest = undefined,
.contents = null,
};
errdefer self.files.shrink(self.files.items.len - 1);
try self.populateFileHash(new_ch_file);
}
/// Returns a base64 encoded hash of the inputs.
pub fn final(self: *CacheHash) [BASE64_DIGEST_LEN]u8 {
assert(self.manifest_file != null);
// We don't close the manifest file yet, because we want to
// keep it locked until the API user is done using it.
// We also don't write out the manifest yet, because until
// cache_release is called we still might be working on creating
// the artifacts to cache.
var bin_digest: [BIN_DIGEST_LEN]u8 = undefined;
self.hasher.final(&bin_digest);
var out_digest: [BASE64_DIGEST_LEN]u8 = undefined;
base64_encoder.encode(&out_digest, &bin_digest);
return out_digest;
}
pub fn writeManifest(self: *CacheHash) !void {
assert(self.manifest_file != null);
var encoded_digest: [BASE64_DIGEST_LEN]u8 = undefined;
var contents = ArrayList(u8).init(self.allocator);
var outStream = contents.outStream();
defer contents.deinit();
for (self.files.items) |file| {
base64_encoder.encode(encoded_digest[0..], &file.bin_digest);
try outStream.print("{} {} {} {} {}\n", .{ file.stat.size, file.stat.inode, file.stat.mtime, encoded_digest[0..], file.path });
}
try self.manifest_file.?.pwriteAll(contents.items, 0);
self.manifest_dirty = false;
}
/// Releases the manifest file and frees any memory the CacheHash was using.
/// `CacheHash.hit` must be called first.
///
/// Will also attempt to write to the manifest file if the manifest is dirty.
/// Writing to the manifest file can fail, but this function ignores those errors.
/// To detect failures from writing the manifest, one may explicitly call
/// `writeManifest` before `release`.
pub fn release(self: *CacheHash) void {
if (self.manifest_file) |file| {
if (self.manifest_dirty) {
// To handle these errors, API users should call
// writeManifest before release().
self.writeManifest() catch {};
}
file.close();
}
for (self.files.items) |*file| {
file.deinit(self.allocator);
}
self.files.deinit();
self.manifest_dir.close();
}
};
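Putting the API above together, a hedged sketch of the intended call sequence (error handling abbreviated; `gpa`, the manifest directory name, and the inputs are placeholders):

var ch = try CacheHash.init(gpa, fs.cwd(), "zig-cache/h");
defer ch.release(); // flushes a dirty manifest and drops the exclusive lock

ch.add(@as(u32, 1)); // e.g. a format version or flags that affect the output
_ = try ch.addFile("input.zig", null); // file inputs, hashed by content

if (try ch.hit()) |digest| {
    // Cache hit: reuse the artifact previously stored under `digest`.
} else {
    // Cache miss: produce the artifact, then record its digest.
    const digest = ch.final();
    try ch.writeManifest(); // optional; release() also attempts this, ignoring errors
}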
fn hashFile(file: fs.File, bin_digest: []u8, hasher_init: anytype) !void {
var buf: [1024]u8 = undefined;
var hasher = hasher_init;
while (true) {
const bytes_read = try file.read(&buf);
if (bytes_read == 0) break;
hasher.update(buf[0..bytes_read]);
}
hasher.final(bin_digest);
}
/// If the wall clock time, rounded to the same precision as the
/// mtime, is equal to the mtime, then we cannot rely on this mtime
/// yet. We will instead save an mtime value that indicates the hash
/// must be unconditionally computed.
/// This function recognizes the precision of mtime by looking at trailing
/// zero bits of the seconds and nanoseconds.
fn isProblematicTimestamp(fs_clock: i128) bool {
const wall_clock = std.time.nanoTimestamp();
// We have to break the nanoseconds into seconds and remainder nanoseconds
// to detect precision of seconds, because looking at the zero bits in base
// 2 would not detect precision of the seconds value.
const fs_sec = @intCast(i64, @divFloor(fs_clock, std.time.ns_per_s));
const fs_nsec = @intCast(i64, @mod(fs_clock, std.time.ns_per_s));
var wall_sec = @intCast(i64, @divFloor(wall_clock, std.time.ns_per_s));
var wall_nsec = @intCast(i64, @mod(wall_clock, std.time.ns_per_s));
// First, force the wall clock to zero in every trailing-zero bit position of fs_clock.
if (fs_nsec == 0) {
wall_nsec = 0;
if (fs_sec == 0) {
wall_sec = 0;
} else {
wall_sec &= @as(i64, -1) << @intCast(u6, @ctz(i64, fs_sec));
}
} else {
wall_nsec &= @as(i64, -1) << @intCast(u6, @ctz(i64, fs_nsec));
}
return wall_nsec == fs_nsec and wall_sec == fs_sec;
}
test "cache file and then recall it" {
if (std.Target.current.os.tag == .wasi) {
// https://github.com/ziglang/zig/issues/5437
return error.SkipZigTest;
}
const cwd = fs.cwd();
const temp_file = "test.txt";
const temp_manifest_dir = "temp_manifest_dir";
const ts = std.time.nanoTimestamp();
try cwd.writeFile(temp_file, "Hello, world!\n");
while (isProblematicTimestamp(ts)) {
std.time.sleep(1);
}
var digest1: [BASE64_DIGEST_LEN]u8 = undefined;
var digest2: [BASE64_DIGEST_LEN]u8 = undefined;
{
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
ch.add(true);
ch.add(@as(u16, 1234));
ch.add("1234");
_ = try ch.addFile(temp_file, null);
// There should be nothing in the cache
testing.expectEqual(@as(?[BASE64_DIGEST_LEN]u8, null), try ch.hit());
digest1 = ch.final();
}
{
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
ch.add(true);
ch.add(@as(u16, 1234));
ch.add("1234");
_ = try ch.addFile(temp_file, null);
// Cache hit! We just "built" the same file
digest2 = (try ch.hit()).?;
}
testing.expectEqual(digest1, digest2);
try cwd.deleteTree(temp_manifest_dir);
try cwd.deleteFile(temp_file);
}
test "give problematic timestamp" {
var fs_clock = std.time.nanoTimestamp();
// to make it problematic, we make it only accurate to the second
fs_clock = @divTrunc(fs_clock, std.time.ns_per_s);
fs_clock *= std.time.ns_per_s;
testing.expect(isProblematicTimestamp(fs_clock));
}
test "give nonproblematic timestamp" {
testing.expect(!isProblematicTimestamp(std.time.nanoTimestamp() - std.time.ns_per_s));
}
test "check that changing a file makes cache fail" {
if (std.Target.current.os.tag == .wasi) {
// https://github.com/ziglang/zig/issues/5437
return error.SkipZigTest;
}
const cwd = fs.cwd();
const temp_file = "cache_hash_change_file_test.txt";
const temp_manifest_dir = "cache_hash_change_file_manifest_dir";
const original_temp_file_contents = "Hello, world!\n";
const updated_temp_file_contents = "Hello, world; but updated!\n";
try cwd.deleteTree(temp_manifest_dir);
try cwd.deleteTree(temp_file);
const ts = std.time.nanoTimestamp();
try cwd.writeFile(temp_file, original_temp_file_contents);
while (isProblematicTimestamp(ts)) {
std.time.sleep(1);
}
var digest1: [BASE64_DIGEST_LEN]u8 = undefined;
var digest2: [BASE64_DIGEST_LEN]u8 = undefined;
{
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
ch.add("1234");
const temp_file_idx = try ch.addFile(temp_file, 100);
// There should be nothing in the cache
testing.expectEqual(@as(?[BASE64_DIGEST_LEN]u8, null), try ch.hit());
testing.expect(mem.eql(u8, original_temp_file_contents, ch.files.items[temp_file_idx].contents.?));
digest1 = ch.final();
}
try cwd.writeFile(temp_file, updated_temp_file_contents);
{
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
ch.add("1234");
const temp_file_idx = try ch.addFile(temp_file, 100);
// A file that we depend on has been updated, so the cache should not contain an entry for it
testing.expectEqual(@as(?[BASE64_DIGEST_LEN]u8, null), try ch.hit());
// The cache system does not keep the contents of re-hashed input files.
testing.expect(ch.files.items[temp_file_idx].contents == null);
digest2 = ch.final();
}
testing.expect(!mem.eql(u8, digest1[0..], digest2[0..]));
try cwd.deleteTree(temp_manifest_dir);
try cwd.deleteTree(temp_file);
}
test "no file inputs" {
if (std.Target.current.os.tag == .wasi) {
// https://github.com/ziglang/zig/issues/5437
return error.SkipZigTest;
}
const cwd = fs.cwd();
const temp_manifest_dir = "no_file_inputs_manifest_dir";
defer cwd.deleteTree(temp_manifest_dir) catch unreachable;
var digest1: [BASE64_DIGEST_LEN]u8 = undefined;
var digest2: [BASE64_DIGEST_LEN]u8 = undefined;
{
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
ch.add("1234");
// There should be nothing in the cache
testing.expectEqual(@as(?[BASE64_DIGEST_LEN]u8, null), try ch.hit());
digest1 = ch.final();
}
{
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
ch.add("1234");
digest2 = (try ch.hit()).?;
}
testing.expectEqual(digest1, digest2);
}
test "CacheHashes with files added after initial hash work" {
if (std.Target.current.os.tag == .wasi) {
// https://github.com/ziglang/zig/issues/5437
return error.SkipZigTest;
}
const cwd = fs.cwd();
const temp_file1 = "cache_hash_post_file_test1.txt";
const temp_file2 = "cache_hash_post_file_test2.txt";
const temp_manifest_dir = "cache_hash_post_file_manifest_dir";
const ts1 = std.time.nanoTimestamp();
try cwd.writeFile(temp_file1, "Hello, world!\n");
try cwd.writeFile(temp_file2, "Hello world the second!\n");
while (isProblematicTimestamp(ts1)) {
std.time.sleep(1);
}
var digest1: [BASE64_DIGEST_LEN]u8 = undefined;
var digest2: [BASE64_DIGEST_LEN]u8 = undefined;
var digest3: [BASE64_DIGEST_LEN]u8 = undefined;
{
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
ch.add("1234");
_ = try ch.addFile(temp_file1, null);
// There should be nothing in the cache
testing.expectEqual(@as(?[BASE64_DIGEST_LEN]u8, null), try ch.hit());
_ = try ch.addFilePost(temp_file2);
digest1 = ch.final();
}
{
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
ch.add("1234");
_ = try ch.addFile(temp_file1, null);
digest2 = (try ch.hit()).?;
}
testing.expect(mem.eql(u8, &digest1, &digest2));
// Modify the file added after initial hash
const ts2 = std.time.nanoTimestamp();
try cwd.writeFile(temp_file2, "Hello world the second, updated\n");
while (isProblematicTimestamp(ts2)) {
std.time.sleep(1);
}
{
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
ch.add("1234");
_ = try ch.addFile(temp_file1, null);
// A file that we depend on has been updated, so the cache should not contain an entry for it
testing.expectEqual(@as(?[BASE64_DIGEST_LEN]u8, null), try ch.hit());
_ = try ch.addFilePost(temp_file2);
digest3 = ch.final();
}
testing.expect(!mem.eql(u8, &digest1, &digest3));
try cwd.deleteTree(temp_manifest_dir);
try cwd.deleteFile(temp_file1);
try cwd.deleteFile(temp_file2);
}

View File

@ -213,7 +213,7 @@ pub const ChildProcess = struct {
const stdout_in = child.stdout.?.inStream(); const stdout_in = child.stdout.?.inStream();
const stderr_in = child.stderr.?.inStream(); const stderr_in = child.stderr.?.inStream();
// TODO need to poll to read these streams to prevent a deadlock (or rely on evented I/O). // TODO https://github.com/ziglang/zig/issues/6343
const stdout = try stdout_in.readAllAlloc(args.allocator, args.max_output_bytes); const stdout = try stdout_in.readAllAlloc(args.allocator, args.max_output_bytes);
errdefer args.allocator.free(stdout); errdefer args.allocator.free(stdout);
const stderr = try stderr_in.readAllAlloc(args.allocator, args.max_output_bytes); const stderr = try stderr_in.readAllAlloc(args.allocator, args.max_output_bytes);
@ -816,6 +816,13 @@ fn destroyPipe(pipe: [2]os.fd_t) void {
// Then the child exits. // Then the child exits.
fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn { fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
writeIntFd(fd, @as(ErrInt, @errorToInt(err))) catch {}; writeIntFd(fd, @as(ErrInt, @errorToInt(err))) catch {};
// If we're linking libc, some naughty applications may have registered atexit handlers
// which we really do not want to run in the fork child. I caught LLVM doing this and
// it caused a deadlock instead of doing an exit syscall. In the words of Avril Lavigne,
// "Why'd you have to go and make things so complicated?"
if (std.Target.current.os.tag == .linux) {
std.os.linux.exit(1); // By-pass libc regardless of whether it is linked.
}
os.exit(1); os.exit(1);
} }

View File

@ -28,6 +28,8 @@ pub const aead = struct {
pub const Gimli = @import("crypto/gimli.zig").Aead; pub const Gimli = @import("crypto/gimli.zig").Aead;
pub const ChaCha20Poly1305 = chacha20.Chacha20Poly1305; pub const ChaCha20Poly1305 = chacha20.Chacha20Poly1305;
pub const XChaCha20Poly1305 = chacha20.XChacha20Poly1305; pub const XChaCha20Poly1305 = chacha20.XChacha20Poly1305;
pub const AEGIS128L = @import("crypto/aegis.zig").AEGIS128L;
pub const AEGIS256 = @import("crypto/aegis.zig").AEGIS256;
}; };
/// MAC functions requiring single-use secret keys. /// MAC functions requiring single-use secret keys.
@ -35,12 +37,23 @@ pub const onetimeauth = struct {
pub const Poly1305 = @import("crypto/poly1305.zig").Poly1305; pub const Poly1305 = @import("crypto/poly1305.zig").Poly1305;
}; };
/// A Key Derivation Function (KDF) is intended to turn a weak, human generated password into a /// A password hashing function derives a uniform key from low-entropy input material such as passwords.
/// strong key, suitable for cryptographic uses. It does this by salting and stretching the /// It is intentionally slow or expensive.
/// password. Salting injects non-secret random data, so that identical passwords will be converted ///
/// into unique keys. Stretching applies a deliberately slow hashing function to frustrate /// With the standard definition of a key derivation function, if a key space is small, an exhaustive search may be practical.
/// brute-force guessing. /// Password hashing functions make exhaustive searches way slower or way more expensive, even when implemented on GPUs and ASICs, by using different, optionally combined strategies:
pub const kdf = struct { ///
/// - Requiring a lot of computation cycles to complete
/// - Requiring a lot of memory to complete
/// - Requiring multiple CPU cores to complete
/// - Requiring cache-local data to complete in reasonable time
/// - Requiring large static tables
/// - Avoiding precomputations and time/memory tradeoffs
/// - Requiring multi-party computations
/// - Combining the input material with random per-entry data (salts), application-specific contexts and keys
///
/// Password hashing functions must be used whenever sensitive data has to be directly derived from a password.
pub const pwhash = struct {
pub const pbkdf2 = @import("crypto/pbkdf2.zig").pbkdf2; pub const pbkdf2 = @import("crypto/pbkdf2.zig").pbkdf2;
}; };
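To illustrate the "lots of computation cycles" strategy described above, a toy sketch only; it is not a real password hash, the iteration count is an arbitrary placeholder, and real code should use a `pwhash` function such as `pbkdf2`:

const std = @import("std");

/// Toy key stretching: iterate a hash many times so that every password guess
/// costs the attacker proportionally more work. Illustration only.
fn toyStretch(out: []u8, password: []const u8, salt: []const u8, rounds: u32) void {
    var st = std.crypto.hash.Gimli.init(.{});
    st.update(salt);
    st.update(password);
    st.final(out);
    var i: u32 = 1;
    while (i < rounds) : (i += 1) {
        var round_st = std.crypto.hash.Gimli.init(.{});
        round_st.update(out);
        round_st.final(out);
    }
}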
@ -48,6 +61,13 @@ pub const kdf = struct {
pub const core = struct { pub const core = struct {
pub const aes = @import("crypto/aes.zig"); pub const aes = @import("crypto/aes.zig");
pub const Gimli = @import("crypto/gimli.zig").State; pub const Gimli = @import("crypto/gimli.zig").State;
/// Modes are generic compositions to construct encryption/decryption functions from block ciphers and permutations.
///
/// These modes are designed to be building blocks for higher-level constructions, and should generally not be used directly by applications, as they may not provide the expected properties and security guarantees.
///
/// Most applications may want to use AEADs instead.
pub const modes = @import("crypto/modes.zig");
}; };
/// Elliptic-curve arithmetic. /// Elliptic-curve arithmetic.
@ -100,6 +120,7 @@ test "crypto" {
_ = @import("crypto/gimli.zig"); _ = @import("crypto/gimli.zig");
_ = @import("crypto/hmac.zig"); _ = @import("crypto/hmac.zig");
_ = @import("crypto/md5.zig"); _ = @import("crypto/md5.zig");
_ = @import("crypto/modes.zig");
_ = @import("crypto/pbkdf2.zig"); _ = @import("crypto/pbkdf2.zig");
_ = @import("crypto/poly1305.zig"); _ = @import("crypto/poly1305.zig");
_ = @import("crypto/sha1.zig"); _ = @import("crypto/sha1.zig");

447
lib/std/crypto/aegis.zig Normal file
View File

@ -0,0 +1,447 @@
const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;
const AESBlock = std.crypto.core.aes.Block;
const State128L = struct {
blocks: [8]AESBlock,
fn init(key: [16]u8, nonce: [16]u8) State128L {
const c1 = AESBlock.fromBytes(&[16]u8{ 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1, 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd });
const c2 = AESBlock.fromBytes(&[16]u8{ 0x0, 0x1, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d, 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62 });
const key_block = AESBlock.fromBytes(&key);
const nonce_block = AESBlock.fromBytes(&nonce);
const blocks = [8]AESBlock{
key_block.xorBlocks(nonce_block),
c1,
c2,
c1,
key_block.xorBlocks(nonce_block),
key_block.xorBlocks(c2),
key_block.xorBlocks(c1),
key_block.xorBlocks(c2),
};
var state = State128L{ .blocks = blocks };
var i: usize = 0;
while (i < 10) : (i += 1) {
state.update(nonce_block, key_block);
}
return state;
}
inline fn update(state: *State128L, d1: AESBlock, d2: AESBlock) void {
const blocks = &state.blocks;
const tmp = blocks[7];
comptime var i: usize = 7;
inline while (i > 0) : (i -= 1) {
blocks[i] = blocks[i - 1].encrypt(blocks[i]);
}
blocks[0] = tmp.encrypt(blocks[0]);
blocks[0] = blocks[0].xorBlocks(d1);
blocks[4] = blocks[4].xorBlocks(d2);
}
fn enc(state: *State128L, dst: *[32]u8, src: *const [32]u8) void {
const blocks = &state.blocks;
const msg0 = AESBlock.fromBytes(src[0..16]);
const msg1 = AESBlock.fromBytes(src[16..32]);
var tmp0 = msg0.xorBlocks(blocks[6]).xorBlocks(blocks[1]);
var tmp1 = msg1.xorBlocks(blocks[2]).xorBlocks(blocks[5]);
tmp0 = tmp0.xorBlocks(blocks[2].andBlocks(blocks[3]));
tmp1 = tmp1.xorBlocks(blocks[6].andBlocks(blocks[7]));
dst[0..16].* = tmp0.toBytes();
dst[16..32].* = tmp1.toBytes();
state.update(msg0, msg1);
}
fn dec(state: *State128L, dst: *[32]u8, src: *const [32]u8) void {
const blocks = &state.blocks;
var msg0 = AESBlock.fromBytes(src[0..16]).xorBlocks(blocks[6]).xorBlocks(blocks[1]);
var msg1 = AESBlock.fromBytes(src[16..32]).xorBlocks(blocks[2]).xorBlocks(blocks[5]);
msg0 = msg0.xorBlocks(blocks[2].andBlocks(blocks[3]));
msg1 = msg1.xorBlocks(blocks[6].andBlocks(blocks[7]));
dst[0..16].* = msg0.toBytes();
dst[16..32].* = msg1.toBytes();
state.update(msg0, msg1);
}
fn mac(state: *State128L, adlen: usize, mlen: usize) [16]u8 {
const blocks = &state.blocks;
var sizes: [16]u8 = undefined;
mem.writeIntLittle(u64, sizes[0..8], adlen * 8);
mem.writeIntLittle(u64, sizes[8..16], mlen * 8);
const tmp = AESBlock.fromBytes(&sizes).xorBlocks(blocks[2]);
var i: usize = 0;
while (i < 7) : (i += 1) {
state.update(tmp, tmp);
}
return blocks[0].xorBlocks(blocks[1]).xorBlocks(blocks[2]).xorBlocks(blocks[3]).xorBlocks(blocks[4]).
xorBlocks(blocks[5]).xorBlocks(blocks[6]).toBytes();
}
};
/// AEGIS is a very fast authenticated encryption system built on top of the core AES function.
///
/// The 128L variant of AEGIS has a 128 bit key, a 128 bit nonce, and processes 256 bit message blocks.
/// It was designed to fully exploit the parallelism and built-in AES support of recent Intel and ARM CPUs.
///
/// https://competitions.cr.yp.to/round3/aegisv11.pdf
pub const AEGIS128L = struct {
pub const tag_length = 16;
pub const nonce_length = 16;
pub const key_length = 16;
/// c: ciphertext: output buffer should be of size m.len
/// tag: authentication tag: output MAC
/// m: message
/// ad: Associated Data
/// npub: public nonce
/// k: private key
pub fn encrypt(c: []u8, tag: *[tag_length]u8, m: []const u8, ad: []const u8, npub: [nonce_length]u8, key: [key_length]u8) void {
assert(c.len == m.len);
var state = State128L.init(key, npub);
var src: [32]u8 align(16) = undefined;
var dst: [32]u8 align(16) = undefined;
var i: usize = 0;
while (i + 32 <= ad.len) : (i += 32) {
state.enc(&dst, ad[i..][0..32]);
}
if (ad.len % 32 != 0) {
mem.set(u8, src[0..], 0);
mem.copy(u8, src[0 .. ad.len % 32], ad[i .. i + ad.len % 32]);
state.enc(&dst, &src);
}
i = 0;
while (i + 32 <= m.len) : (i += 32) {
state.enc(c[i..][0..32], m[i..][0..32]);
}
if (m.len % 32 != 0) {
mem.set(u8, src[0..], 0);
mem.copy(u8, src[0 .. m.len % 32], m[i .. i + m.len % 32]);
state.enc(&dst, &src);
mem.copy(u8, c[i .. i + m.len % 32], dst[0 .. m.len % 32]);
}
tag.* = state.mac(ad.len, m.len);
}
/// m: message: output buffer should be of size c.len
/// c: ciphertext
/// tag: authentication tag
/// ad: Associated Data
/// npub: public nonce
/// k: private key
pub fn decrypt(m: []u8, c: []const u8, tag: [tag_length]u8, ad: []const u8, npub: [nonce_length]u8, key: [key_length]u8) !void {
assert(c.len == m.len);
var state = State128L.init(key, npub);
var src: [32]u8 align(16) = undefined;
var dst: [32]u8 align(16) = undefined;
var i: usize = 0;
while (i + 32 <= ad.len) : (i += 32) {
state.enc(&dst, ad[i..][0..32]);
}
if (ad.len % 32 != 0) {
mem.set(u8, src[0..], 0);
mem.copy(u8, src[0 .. ad.len % 32], ad[i .. i + ad.len % 32]);
state.enc(&dst, &src);
}
i = 0;
while (i + 32 <= m.len) : (i += 32) {
state.dec(m[i..][0..32], c[i..][0..32]);
}
if (m.len % 32 != 0) {
mem.set(u8, src[0..], 0);
mem.copy(u8, src[0 .. m.len % 32], c[i .. i + m.len % 32]);
state.dec(&dst, &src);
mem.copy(u8, m[i .. i + m.len % 32], dst[0 .. m.len % 32]);
mem.set(u8, dst[0 .. m.len % 32], 0);
const blocks = &state.blocks;
blocks[0] = blocks[0].xorBlocks(AESBlock.fromBytes(dst[0..16]));
blocks[4] = blocks[4].xorBlocks(AESBlock.fromBytes(dst[16..32]));
}
const computed_tag = state.mac(ad.len, m.len);
var acc: u8 = 0;
for (computed_tag) |_, j| {
acc |= (computed_tag[j] ^ tag[j]);
}
if (acc != 0) {
mem.set(u8, m, 0xaa);
return error.AuthenticationFailed;
}
}
};
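A hedged round-trip sketch of the AEGIS128L API above (the key and nonce bytes are placeholders; a real caller must use a secret key and a nonce that never repeats for that key — the test vectors further down exercise the same calls with known answers):

test "AEGIS128L usage sketch" {
    const key = [_]u8{0x42} ** AEGIS128L.key_length;
    const nonce = [_]u8{0x24} ** AEGIS128L.nonce_length;
    const msg = "example plaintext";
    const ad = "associated data";

    var ciphertext: [msg.len]u8 = undefined;
    var tag: [AEGIS128L.tag_length]u8 = undefined;
    AEGIS128L.encrypt(&ciphertext, &tag, msg, ad, nonce, key);

    var recovered: [msg.len]u8 = undefined;
    // Fails with error.AuthenticationFailed if the ciphertext, tag, or ad was tampered with.
    try AEGIS128L.decrypt(&recovered, &ciphertext, tag, ad, nonce, key);
    std.testing.expectEqualSlices(u8, msg, &recovered);
}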
const State256 = struct {
blocks: [6]AESBlock,
fn init(key: [32]u8, nonce: [32]u8) State256 {
const c1 = AESBlock.fromBytes(&[16]u8{ 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1, 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd });
const c2 = AESBlock.fromBytes(&[16]u8{ 0x0, 0x1, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d, 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62 });
const key_block1 = AESBlock.fromBytes(key[0..16]);
const key_block2 = AESBlock.fromBytes(key[16..32]);
const nonce_block1 = AESBlock.fromBytes(nonce[0..16]);
const nonce_block2 = AESBlock.fromBytes(nonce[16..32]);
const kxn1 = key_block1.xorBlocks(nonce_block1);
const kxn2 = key_block2.xorBlocks(nonce_block2);
const blocks = [6]AESBlock{
kxn1,
kxn2,
c1,
c2,
key_block1.xorBlocks(c2),
key_block2.xorBlocks(c1),
};
var state = State256{ .blocks = blocks };
var i: usize = 0;
while (i < 4) : (i += 1) {
state.update(key_block1);
state.update(key_block2);
state.update(kxn1);
state.update(kxn2);
}
return state;
}
inline fn update(state: *State256, d: AESBlock) void {
const blocks = &state.blocks;
const tmp = blocks[5].encrypt(blocks[0]);
comptime var i: usize = 5;
inline while (i > 0) : (i -= 1) {
blocks[i] = blocks[i - 1].encrypt(blocks[i]);
}
blocks[0] = tmp.xorBlocks(d);
}
fn enc(state: *State256, dst: *[16]u8, src: *const [16]u8) void {
const blocks = &state.blocks;
const msg = AESBlock.fromBytes(src);
var tmp = msg.xorBlocks(blocks[5]).xorBlocks(blocks[4]).xorBlocks(blocks[1]);
tmp = tmp.xorBlocks(blocks[2].andBlocks(blocks[3]));
dst.* = tmp.toBytes();
state.update(msg);
}
fn dec(state: *State256, dst: *[16]u8, src: *const [16]u8) void {
const blocks = &state.blocks;
var msg = AESBlock.fromBytes(src).xorBlocks(blocks[5]).xorBlocks(blocks[4]).xorBlocks(blocks[1]);
msg = msg.xorBlocks(blocks[2].andBlocks(blocks[3]));
dst.* = msg.toBytes();
state.update(msg);
}
fn mac(state: *State256, adlen: usize, mlen: usize) [16]u8 {
const blocks = &state.blocks;
var sizes: [16]u8 = undefined;
mem.writeIntLittle(u64, sizes[0..8], adlen * 8);
mem.writeIntLittle(u64, sizes[8..16], mlen * 8);
const tmp = AESBlock.fromBytes(&sizes).xorBlocks(blocks[3]);
var i: usize = 0;
while (i < 7) : (i += 1) {
state.update(tmp);
}
return blocks[0].xorBlocks(blocks[1]).xorBlocks(blocks[2]).xorBlocks(blocks[3]).xorBlocks(blocks[4]).
xorBlocks(blocks[5]).toBytes();
}
};
/// AEGIS is a very fast authenticated encryption system built on top of the core AES function.
///
/// The 256 bit variant of AEGIS has a 256 bit key, a 256 bit nonce, and processes 128 bit message blocks.
///
/// https://competitions.cr.yp.to/round3/aegisv11.pdf
pub const AEGIS256 = struct {
pub const tag_length = 16;
pub const nonce_length = 32;
pub const key_length = 32;
/// c: ciphertext: output buffer should be of size m.len
/// tag: authentication tag: output MAC
/// m: message
/// ad: Associated Data
/// npub: public nonce
/// k: private key
pub fn encrypt(c: []u8, tag: *[tag_length]u8, m: []const u8, ad: []const u8, npub: [nonce_length]u8, key: [key_length]u8) void {
assert(c.len == m.len);
var state = State256.init(key, npub);
var src: [16]u8 align(16) = undefined;
var dst: [16]u8 align(16) = undefined;
var i: usize = 0;
while (i + 16 <= ad.len) : (i += 16) {
state.enc(&dst, ad[i..][0..16]);
}
if (ad.len % 16 != 0) {
mem.set(u8, src[0..], 0);
mem.copy(u8, src[0 .. ad.len % 16], ad[i .. i + ad.len % 16]);
state.enc(&dst, &src);
}
i = 0;
while (i + 16 <= m.len) : (i += 16) {
state.enc(c[i..][0..16], m[i..][0..16]);
}
if (m.len % 16 != 0) {
mem.set(u8, src[0..], 0);
mem.copy(u8, src[0 .. m.len % 16], m[i .. i + m.len % 16]);
state.enc(&dst, &src);
mem.copy(u8, c[i .. i + m.len % 16], dst[0 .. m.len % 16]);
}
tag.* = state.mac(ad.len, m.len);
}
/// m: message: output buffer should be of size c.len
/// c: ciphertext
/// tag: authentication tag
/// ad: Associated Data
/// npub: public nonce
/// k: private key
pub fn decrypt(m: []u8, c: []const u8, tag: [tag_length]u8, ad: []const u8, npub: [nonce_length]u8, key: [key_length]u8) !void {
assert(c.len == m.len);
var state = State256.init(key, npub);
var src: [16]u8 align(16) = undefined;
var dst: [16]u8 align(16) = undefined;
var i: usize = 0;
while (i + 16 <= ad.len) : (i += 16) {
state.enc(&dst, ad[i..][0..16]);
}
if (ad.len % 16 != 0) {
mem.set(u8, src[0..], 0);
mem.copy(u8, src[0 .. ad.len % 16], ad[i .. i + ad.len % 16]);
state.enc(&dst, &src);
}
i = 0;
while (i + 16 <= m.len) : (i += 16) {
state.dec(m[i..][0..16], c[i..][0..16]);
}
if (m.len % 16 != 0) {
mem.set(u8, src[0..], 0);
mem.copy(u8, src[0 .. m.len % 16], c[i .. i + m.len % 16]);
state.dec(&dst, &src);
mem.copy(u8, m[i .. i + m.len % 16], dst[0 .. m.len % 16]);
mem.set(u8, dst[0 .. m.len % 16], 0);
const blocks = &state.blocks;
blocks[0] = blocks[0].xorBlocks(AESBlock.fromBytes(&dst));
}
const computed_tag = state.mac(ad.len, m.len);
var acc: u8 = 0;
for (computed_tag) |_, j| {
acc |= (computed_tag[j] ^ tag[j]);
}
if (acc != 0) {
mem.set(u8, m, 0xaa);
return error.AuthenticationFailed;
}
}
};
const htest = @import("test.zig");
const testing = std.testing;
test "AEGIS128L test vector 1" {
const key: [AEGIS128L.key_length]u8 = [_]u8{ 0x10, 0x01 } ++ [_]u8{0x00} ** 14;
const nonce: [AEGIS128L.nonce_length]u8 = [_]u8{ 0x10, 0x00, 0x02 } ++ [_]u8{0x00} ** 13;
const ad = [8]u8{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
const m = [32]u8{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f };
var c: [m.len]u8 = undefined;
var m2: [m.len]u8 = undefined;
var tag: [AEGIS128L.tag_length]u8 = undefined;
AEGIS128L.encrypt(&c, &tag, &m, &ad, nonce, key);
try AEGIS128L.decrypt(&m2, &c, tag, &ad, nonce, key);
testing.expectEqualSlices(u8, &m, &m2);
htest.assertEqual("79d94593d8c2119d7e8fd9b8fc77845c5c077a05b2528b6ac54b563aed8efe84", &c);
htest.assertEqual("cc6f3372f6aa1bb82388d695c3962d9a", &tag);
c[0] +%= 1;
testing.expectError(error.AuthenticationFailed, AEGIS128L.decrypt(&m2, &c, tag, &ad, nonce, key));
c[0] -%= 1;
tag[0] +%= 1;
testing.expectError(error.AuthenticationFailed, AEGIS128L.decrypt(&m2, &c, tag, &ad, nonce, key));
}
test "AEGIS128L test vector 2" {
const key: [AEGIS128L.key_length]u8 = [_]u8{0x00} ** 16;
const nonce: [AEGIS128L.nonce_length]u8 = [_]u8{0x00} ** 16;
const ad = [_]u8{};
const m = [_]u8{0x00} ** 16;
var c: [m.len]u8 = undefined;
var m2: [m.len]u8 = undefined;
var tag: [AEGIS128L.tag_length]u8 = undefined;
AEGIS128L.encrypt(&c, &tag, &m, &ad, nonce, key);
try AEGIS128L.decrypt(&m2, &c, tag, &ad, nonce, key);
testing.expectEqualSlices(u8, &m, &m2);
htest.assertEqual("41de9000a7b5e40e2d68bb64d99ebb19", &c);
htest.assertEqual("f4d997cc9b94227ada4fe4165422b1c8", &tag);
}
test "AEGIS128L test vector 3" {
const key: [AEGIS128L.key_length]u8 = [_]u8{0x00} ** 16;
const nonce: [AEGIS128L.nonce_length]u8 = [_]u8{0x00} ** 16;
const ad = [_]u8{};
const m = [_]u8{};
var c: [m.len]u8 = undefined;
var m2: [m.len]u8 = undefined;
var tag: [AEGIS128L.tag_length]u8 = undefined;
AEGIS128L.encrypt(&c, &tag, &m, &ad, nonce, key);
try AEGIS128L.decrypt(&m2, &c, tag, &ad, nonce, key);
testing.expectEqualSlices(u8, &m, &m2);
htest.assertEqual("83cc600dc4e3e7e62d4055826174f149", &tag);
}
test "AEGIS256 test vector 1" {
const key: [AEGIS256.key_length]u8 = [_]u8{ 0x10, 0x01 } ++ [_]u8{0x00} ** 30;
const nonce: [AEGIS256.nonce_length]u8 = [_]u8{ 0x10, 0x00, 0x02 } ++ [_]u8{0x00} ** 29;
const ad = [8]u8{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
const m = [32]u8{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f };
var c: [m.len]u8 = undefined;
var m2: [m.len]u8 = undefined;
var tag: [AEGIS256.tag_length]u8 = undefined;
AEGIS256.encrypt(&c, &tag, &m, &ad, nonce, key);
try AEGIS256.decrypt(&m2, &c, tag, &ad, nonce, key);
testing.expectEqualSlices(u8, &m, &m2);
htest.assertEqual("f373079ed84b2709faee373584585d60accd191db310ef5d8b11833df9dec711", &c);
htest.assertEqual("8d86f91ee606e9ff26a01b64ccbdd91d", &tag);
c[0] +%= 1;
testing.expectError(error.AuthenticationFailed, AEGIS256.decrypt(&m2, &c, tag, &ad, nonce, key));
c[0] -%= 1;
tag[0] +%= 1;
testing.expectError(error.AuthenticationFailed, AEGIS256.decrypt(&m2, &c, tag, &ad, nonce, key));
}
test "AEGIS256 test vector 2" {
const key: [AEGIS256.key_length]u8 = [_]u8{0x00} ** 32;
const nonce: [AEGIS256.nonce_length]u8 = [_]u8{0x00} ** 32;
const ad = [_]u8{};
const m = [_]u8{0x00} ** 16;
var c: [m.len]u8 = undefined;
var m2: [m.len]u8 = undefined;
var tag: [AEGIS256.tag_length]u8 = undefined;
AEGIS256.encrypt(&c, &tag, &m, &ad, nonce, key);
try AEGIS256.decrypt(&m2, &c, tag, &ad, nonce, key);
testing.expectEqualSlices(u8, &m, &m2);
htest.assertEqual("b98f03a947807713d75a4fff9fc277a6", &c);
htest.assertEqual("478f3b50dc478ef7d5cf2d0f7cc13180", &tag);
}
test "AEGIS256 test vector 3" {
const key: [AEGIS256.key_length]u8 = [_]u8{0x00} ** 32;
const nonce: [AEGIS256.nonce_length]u8 = [_]u8{0x00} ** 32;
const ad = [_]u8{};
const m = [_]u8{};
var c: [m.len]u8 = undefined;
var m2: [m.len]u8 = undefined;
var tag: [AEGIS256.tag_length]u8 = undefined;
AEGIS256.encrypt(&c, &tag, &m, &ad, nonce, key);
try AEGIS256.decrypt(&m2, &c, tag, &ad, nonce, key);
testing.expectEqualSlices(u8, &m, &m2);
htest.assertEqual("f7a0878f68bd083e8065354071fc27c3", &tag);
}

View File

@ -3,229 +3,25 @@
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
// Based on Go stdlib implementation
const std = @import("../std.zig");
const mem = std.mem;
const testing = std.testing;
const builtin = std.builtin;
const has_aesni = comptime std.Target.x86.featureSetHas(std.Target.current.cpu.features, .aes);
const has_avx = comptime std.Target.x86.featureSetHas(std.Target.current.cpu.features, .avx);
const impl = if (std.Target.current.cpu.arch == .x86_64 and has_aesni and has_avx) @import("aes/aesni.zig") else @import("aes/soft.zig");
pub const Block = impl.Block;
pub const AESEncryptCtx = impl.AESEncryptCtx;
pub const AESDecryptCtx = impl.AESDecryptCtx;
pub const AES128 = impl.AES128;
pub const AES256 = impl.AES256;
// Apply sbox0 to each byte in w.
fn subw(w: u32) u32 {
return @as(u32, sbox0[w >> 24]) << 24 | @as(u32, sbox0[w >> 16 & 0xff]) << 16 | @as(u32, sbox0[w >> 8 & 0xff]) << 8 | @as(u32, sbox0[w & 0xff]);
}
fn rotw(w: u32) u32 {
return w << 8 | w >> 24;
}
// Encrypt one block from src into dst, using the expanded key xk.
fn encryptBlock(xk: []const u32, dst: []u8, src: []const u8) void {
var s0 = mem.readIntBig(u32, src[0..4]);
var s1 = mem.readIntBig(u32, src[4..8]);
var s2 = mem.readIntBig(u32, src[8..12]);
var s3 = mem.readIntBig(u32, src[12..16]);
// First round just XORs input with key.
s0 ^= xk[0];
s1 ^= xk[1];
s2 ^= xk[2];
s3 ^= xk[3];
// Middle rounds shuffle using tables.
// Number of rounds is set by length of expanded key.
var nr = xk.len / 4 - 2; // - 2: one above, one more below
var k: usize = 4;
var t0: u32 = undefined;
var t1: u32 = undefined;
var t2: u32 = undefined;
var t3: u32 = undefined;
var r: usize = 0;
while (r < nr) : (r += 1) {
t0 = xk[k + 0] ^ te0[@truncate(u8, s0 >> 24)] ^ te1[@truncate(u8, s1 >> 16)] ^ te2[@truncate(u8, s2 >> 8)] ^ te3[@truncate(u8, s3)];
t1 = xk[k + 1] ^ te0[@truncate(u8, s1 >> 24)] ^ te1[@truncate(u8, s2 >> 16)] ^ te2[@truncate(u8, s3 >> 8)] ^ te3[@truncate(u8, s0)];
t2 = xk[k + 2] ^ te0[@truncate(u8, s2 >> 24)] ^ te1[@truncate(u8, s3 >> 16)] ^ te2[@truncate(u8, s0 >> 8)] ^ te3[@truncate(u8, s1)];
t3 = xk[k + 3] ^ te0[@truncate(u8, s3 >> 24)] ^ te1[@truncate(u8, s0 >> 16)] ^ te2[@truncate(u8, s1 >> 8)] ^ te3[@truncate(u8, s2)];
k += 4;
s0 = t0;
s1 = t1;
s2 = t2;
s3 = t3;
}
// Last round uses s-box directly and XORs to produce output.
s0 = @as(u32, sbox0[t0 >> 24]) << 24 | @as(u32, sbox0[t1 >> 16 & 0xff]) << 16 | @as(u32, sbox0[t2 >> 8 & 0xff]) << 8 | @as(u32, sbox0[t3 & 0xff]);
s1 = @as(u32, sbox0[t1 >> 24]) << 24 | @as(u32, sbox0[t2 >> 16 & 0xff]) << 16 | @as(u32, sbox0[t3 >> 8 & 0xff]) << 8 | @as(u32, sbox0[t0 & 0xff]);
s2 = @as(u32, sbox0[t2 >> 24]) << 24 | @as(u32, sbox0[t3 >> 16 & 0xff]) << 16 | @as(u32, sbox0[t0 >> 8 & 0xff]) << 8 | @as(u32, sbox0[t1 & 0xff]);
s3 = @as(u32, sbox0[t3 >> 24]) << 24 | @as(u32, sbox0[t0 >> 16 & 0xff]) << 16 | @as(u32, sbox0[t1 >> 8 & 0xff]) << 8 | @as(u32, sbox0[t2 & 0xff]);
s0 ^= xk[k + 0];
s1 ^= xk[k + 1];
s2 ^= xk[k + 2];
s3 ^= xk[k + 3];
mem.writeIntBig(u32, dst[0..4], s0);
mem.writeIntBig(u32, dst[4..8], s1);
mem.writeIntBig(u32, dst[8..12], s2);
mem.writeIntBig(u32, dst[12..16], s3);
}
// Decrypt one block from src into dst, using the expanded key xk.
pub fn decryptBlock(xk: []const u32, dst: []u8, src: []const u8) void {
var s0 = mem.readIntBig(u32, src[0..4]);
var s1 = mem.readIntBig(u32, src[4..8]);
var s2 = mem.readIntBig(u32, src[8..12]);
var s3 = mem.readIntBig(u32, src[12..16]);
// First round just XORs input with key.
s0 ^= xk[0];
s1 ^= xk[1];
s2 ^= xk[2];
s3 ^= xk[3];
// Middle rounds shuffle using tables.
// Number of rounds is set by length of expanded key.
var nr = xk.len / 4 - 2; // - 2: one above, one more below
var k: usize = 4;
var t0: u32 = undefined;
var t1: u32 = undefined;
var t2: u32 = undefined;
var t3: u32 = undefined;
var r: usize = 0;
while (r < nr) : (r += 1) {
t0 = xk[k + 0] ^ td0[@truncate(u8, s0 >> 24)] ^ td1[@truncate(u8, s3 >> 16)] ^ td2[@truncate(u8, s2 >> 8)] ^ td3[@truncate(u8, s1)];
t1 = xk[k + 1] ^ td0[@truncate(u8, s1 >> 24)] ^ td1[@truncate(u8, s0 >> 16)] ^ td2[@truncate(u8, s3 >> 8)] ^ td3[@truncate(u8, s2)];
t2 = xk[k + 2] ^ td0[@truncate(u8, s2 >> 24)] ^ td1[@truncate(u8, s1 >> 16)] ^ td2[@truncate(u8, s0 >> 8)] ^ td3[@truncate(u8, s3)];
t3 = xk[k + 3] ^ td0[@truncate(u8, s3 >> 24)] ^ td1[@truncate(u8, s2 >> 16)] ^ td2[@truncate(u8, s1 >> 8)] ^ td3[@truncate(u8, s0)];
k += 4;
s0 = t0;
s1 = t1;
s2 = t2;
s3 = t3;
}
// Last round uses s-box directly and XORs to produce output.
s0 = @as(u32, sbox1[t0 >> 24]) << 24 | @as(u32, sbox1[t3 >> 16 & 0xff]) << 16 | @as(u32, sbox1[t2 >> 8 & 0xff]) << 8 | @as(u32, sbox1[t1 & 0xff]);
s1 = @as(u32, sbox1[t1 >> 24]) << 24 | @as(u32, sbox1[t0 >> 16 & 0xff]) << 16 | @as(u32, sbox1[t3 >> 8 & 0xff]) << 8 | @as(u32, sbox1[t2 & 0xff]);
s2 = @as(u32, sbox1[t2 >> 24]) << 24 | @as(u32, sbox1[t1 >> 16 & 0xff]) << 16 | @as(u32, sbox1[t0 >> 8 & 0xff]) << 8 | @as(u32, sbox1[t3 & 0xff]);
s3 = @as(u32, sbox1[t3 >> 24]) << 24 | @as(u32, sbox1[t2 >> 16 & 0xff]) << 16 | @as(u32, sbox1[t1 >> 8 & 0xff]) << 8 | @as(u32, sbox1[t0 & 0xff]);
s0 ^= xk[k + 0];
s1 ^= xk[k + 1];
s2 ^= xk[k + 2];
s3 ^= xk[k + 3];
mem.writeIntBig(u32, dst[0..4], s0);
mem.writeIntBig(u32, dst[4..8], s1);
mem.writeIntBig(u32, dst[8..12], s2);
mem.writeIntBig(u32, dst[12..16], s3);
}
fn xorBytes(dst: []u8, a: []const u8, b: []const u8) usize {
var n = std.math.min(dst.len, std.math.min(a.len, b.len));
for (dst[0..n]) |_, i| {
dst[i] = a[i] ^ b[i];
}
return n;
}
pub const AES128 = AES(128);
pub const AES256 = AES(256);
fn AES(comptime keysize: usize) type {
return struct {
const Self = @This();
pub const Encrypt = AESEncrypt(keysize);
pub const Decrypt = AESDecrypt(keysize);
const nn = (keysize / 8) + 28;
enc: Encrypt,
dec: Decrypt,
pub fn init(key: [keysize / 8]u8) Self {
var ctx: Self = undefined;
ctx.enc = Encrypt.init(key);
ctx.dec = ctx.enc.toDecrypt();
return ctx;
}
pub fn encrypt(ctx: Self, dst: []u8, src: []const u8) void {
ctx.enc.encrypt(dst, src);
}
pub fn decrypt(ctx: Self, dst: []u8, src: []const u8) void {
ctx.dec.decrypt(dst, src);
}
pub fn ctr(ctx: Self, dst: []u8, src: []const u8, iv: [16]u8) void {
ctx.enc.ctr(dst, src, iv);
}
};
}
fn AESEncrypt(comptime keysize: usize) type {
return struct {
const Self = @This();
const Decrypt = AESDecrypt(keysize);
const nn = (keysize / 8) + 28;
enc: [nn]u32,
pub fn init(key: [keysize / 8]u8) Self {
var ctx: Self = undefined;
expandKeyEncrypt(&key, ctx.enc[0..]);
return ctx;
}
pub fn toDecrypt(ctx: Self) Decrypt {
var dec: Decrypt = undefined;
expandKeyDecrypt(ctx.enc[0..], dec.dec[0..]);
return dec;
}
pub fn encrypt(ctx: Self, dst: []u8, src: []const u8) void {
encryptBlock(ctx.enc[0..], dst, src);
}
pub fn ctr(ctx: Self, dst: []u8, src: []const u8, iv: [16]u8) void {
std.debug.assert(dst.len >= src.len);
var keystream: [16]u8 = undefined;
var ctrbuf = iv;
var n: usize = 0;
while (n < src.len) {
ctx.encrypt(keystream[0..], ctrbuf[0..]);
var ctr_i = std.mem.readIntBig(u128, ctrbuf[0..]);
std.mem.writeIntBig(u128, ctrbuf[0..], ctr_i +% 1);
n += xorBytes(dst[n..], src[n..], &keystream);
}
}
};
}
fn AESDecrypt(comptime keysize: usize) type {
return struct {
const Self = @This();
const nn = (keysize / 8) + 28;
dec: [nn]u32,
pub fn init(key: [keysize / 8]u8) Self {
var ctx: Self = undefined;
var enc: [nn]u32 = undefined;
expandKeyEncrypt(key[0..], enc[0..]);
expandKeyDecrypt(enc[0..], ctx.dec[0..]);
return ctx;
}
pub fn decrypt(ctx: Self, dst: []u8, src: []const u8) void {
decryptBlock(ctx.dec[0..], dst, src);
}
};
}
test "ctr" { test "ctr" {
// NIST SP 800-38A pp 55-58 // NIST SP 800-38A pp 55-58
{ const ctr = @import("modes.zig").ctr;
const key = [_]u8{ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c }; const key = [_]u8{ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c };
const iv = [_]u8{ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff }; const iv = [_]u8{ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff };
const in = [_]u8{ const in = [_]u8{
@ -242,11 +38,10 @@ test "ctr" {
}; };
var out: [exp_out.len]u8 = undefined; var out: [exp_out.len]u8 = undefined;
var aes = AES128.init(key); var ctx = AES128.initEnc(key);
aes.ctr(out[0..], in[0..], iv); ctr(AESEncryptCtx(AES128), ctx, out[0..], in[0..], iv, builtin.Endian.Big);
testing.expectEqualSlices(u8, exp_out[0..], out[0..]); testing.expectEqualSlices(u8, exp_out[0..], out[0..]);
} }
}
test "encrypt" { test "encrypt" {
// Appendix B // Appendix B
@ -256,8 +51,8 @@ test "encrypt" {
const exp_out = [_]u8{ 0x39, 0x25, 0x84, 0x1d, 0x02, 0xdc, 0x09, 0xfb, 0xdc, 0x11, 0x85, 0x97, 0x19, 0x6a, 0x0b, 0x32 }; const exp_out = [_]u8{ 0x39, 0x25, 0x84, 0x1d, 0x02, 0xdc, 0x09, 0xfb, 0xdc, 0x11, 0x85, 0x97, 0x19, 0x6a, 0x0b, 0x32 };
var out: [exp_out.len]u8 = undefined; var out: [exp_out.len]u8 = undefined;
var aes = AES128.init(key); var ctx = AES128.initEnc(key);
aes.encrypt(out[0..], in[0..]); ctx.encrypt(out[0..], in[0..]);
testing.expectEqualSlices(u8, exp_out[0..], out[0..]); testing.expectEqualSlices(u8, exp_out[0..], out[0..]);
} }
@ -271,8 +66,8 @@ test "encrypt" {
const exp_out = [_]u8{ 0x8e, 0xa2, 0xb7, 0xca, 0x51, 0x67, 0x45, 0xbf, 0xea, 0xfc, 0x49, 0x90, 0x4b, 0x49, 0x60, 0x89 }; const exp_out = [_]u8{ 0x8e, 0xa2, 0xb7, 0xca, 0x51, 0x67, 0x45, 0xbf, 0xea, 0xfc, 0x49, 0x90, 0x4b, 0x49, 0x60, 0x89 };
var out: [exp_out.len]u8 = undefined; var out: [exp_out.len]u8 = undefined;
var aes = AES256.init(key); var ctx = AES256.initEnc(key);
aes.encrypt(out[0..], in[0..]); ctx.encrypt(out[0..], in[0..]);
testing.expectEqualSlices(u8, exp_out[0..], out[0..]); testing.expectEqualSlices(u8, exp_out[0..], out[0..]);
} }
} }
@ -285,8 +80,8 @@ test "decrypt" {
const exp_out = [_]u8{ 0x32, 0x43, 0xf6, 0xa8, 0x88, 0x5a, 0x30, 0x8d, 0x31, 0x31, 0x98, 0xa2, 0xe0, 0x37, 0x07, 0x34 }; const exp_out = [_]u8{ 0x32, 0x43, 0xf6, 0xa8, 0x88, 0x5a, 0x30, 0x8d, 0x31, 0x31, 0x98, 0xa2, 0xe0, 0x37, 0x07, 0x34 };
var out: [exp_out.len]u8 = undefined; var out: [exp_out.len]u8 = undefined;
var aes = AES128.init(key); var ctx = AES128.initDec(key);
aes.decrypt(out[0..], in[0..]); ctx.decrypt(out[0..], in[0..]);
testing.expectEqualSlices(u8, exp_out[0..], out[0..]); testing.expectEqualSlices(u8, exp_out[0..], out[0..]);
} }
@ -300,413 +95,52 @@ test "decrypt" {
const exp_out = [_]u8{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff }; const exp_out = [_]u8{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
var out: [exp_out.len]u8 = undefined; var out: [exp_out.len]u8 = undefined;
var aes = AES256.init(key); var ctx = AES256.initDec(key);
aes.decrypt(out[0..], in[0..]); ctx.decrypt(out[0..], in[0..]);
testing.expectEqualSlices(u8, exp_out[0..], out[0..]); testing.expectEqualSlices(u8, exp_out[0..], out[0..]);
} }
} }
// Key expansion algorithm. See FIPS-197, Figure 11.
fn expandKeyEncrypt(key: []const u8, enc: []u32) void {
var i: usize = 0;
var nk = key.len / 4;
while (i < nk) : (i += 1) {
enc[i] = mem.readIntBig(u32, key[4 * i ..][0..4]);
}
while (i < enc.len) : (i += 1) {
var t = enc[i - 1];
if (i % nk == 0) {
t = subw(rotw(t)) ^ (@as(u32, powx[i / nk - 1]) << 24);
} else if (nk > 6 and i % nk == 4) {
t = subw(t);
}
enc[i] = enc[i - nk] ^ t;
}
}
fn expandKeyDecrypt(enc: []const u32, dec: []u32) void {
var i: usize = 0;
var n = enc.len;
while (i < n) : (i += 4) {
var ei = n - i - 4;
var j: usize = 0;
while (j < 4) : (j += 1) {
var x = enc[ei + j];
if (i > 0 and i + 4 < n) {
x = td0[sbox0[x >> 24]] ^ td1[sbox0[x >> 16 & 0xff]] ^ td2[sbox0[x >> 8 & 0xff]] ^ td3[sbox0[x & 0xff]];
}
dec[i + j] = x;
}
}
}
test "expand key" {
const key = [_]u8{ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c }; const key = [_]u8{ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c };
const exp_enc = [_]u32{ const exp_enc = [_]*const [32:0]u8{
0x2b7e1516, 0x28aed2a6, 0xabf71588, 0x09cf4f3c, "2b7e151628aed2a6abf7158809cf4f3c", "a0fafe1788542cb123a339392a6c7605", "f2c295f27a96b9435935807a7359f67f", "3d80477d4716fe3e1e237e446d7a883b", "ef44a541a8525b7fb671253bdb0bad00", "d4d1c6f87c839d87caf2b8bc11f915bc", "6d88a37a110b3efddbf98641ca0093fd", "4e54f70e5f5fc9f384a64fb24ea6dc4f", "ead27321b58dbad2312bf5607f8d292f", "ac7766f319fadc2128d12941575c006e", "d014f9a8c9ee2589e13f0cc8b6630ca6",
0xa0fafe17, 0x88542cb1, 0x23a33939, 0x2a6c7605,
0xf2c295f2, 0x7a96b943, 0x5935807a, 0x7359f67f,
0x3d80477d, 0x4716fe3e, 0x1e237e44, 0x6d7a883b,
0xef44a541, 0xa8525b7f, 0xb671253b, 0xdb0bad00,
0xd4d1c6f8, 0x7c839d87, 0xcaf2b8bc, 0x11f915bc,
0x6d88a37a, 0x110b3efd, 0xdbf98641, 0xca0093fd,
0x4e54f70e, 0x5f5fc9f3, 0x84a64fb2, 0x4ea6dc4f,
0xead27321, 0xb58dbad2, 0x312bf560, 0x7f8d292f,
0xac7766f3, 0x19fadc21, 0x28d12941, 0x575c006e,
0xd014f9a8, 0xc9ee2589, 0xe13f0cc8, 0xb6630ca6,
}; };
const exp_dec = [_]u32{ const exp_dec = [_]*const [32:0]u8{
0xd014f9a8, 0xc9ee2589, 0xe13f0cc8, 0xb6630ca6, "2b7e151628aed2a6abf7158809cf4f3c", "a0fafe1788542cb123a339392a6c7605", "f2c295f27a96b9435935807a7359f67f", "3d80477d4716fe3e1e237e446d7a883b", "ef44a541a8525b7fb671253bdb0bad00", "d4d1c6f87c839d87caf2b8bc11f915bc", "6d88a37a110b3efddbf98641ca0093fd", "4e54f70e5f5fc9f384a64fb24ea6dc4f", "ead27321b58dbad2312bf5607f8d292f", "ac7766f319fadc2128d12941575c006e", "d014f9a8c9ee2589e13f0cc8b6630ca6",
0xc7b5a63, 0x1319eafe, 0xb0398890, 0x664cfbb4,
0xdf7d925a, 0x1f62b09d, 0xa320626e, 0xd6757324,
0x12c07647, 0xc01f22c7, 0xbc42d2f3, 0x7555114a,
0x6efcd876, 0xd2df5480, 0x7c5df034, 0xc917c3b9,
0x6ea30afc, 0xbc238cf6, 0xae82a4b4, 0xb54a338d,
0x90884413, 0xd280860a, 0x12a12842, 0x1bc89739,
0x7c1f13f7, 0x4208c219, 0xc021ae48, 0x969bf7b,
0xcc7505eb, 0x3e17d1ee, 0x82296c51, 0xc9481133,
0x2b3708a7, 0xf262d405, 0xbc3ebdbf, 0x4b617d62,
0x2b7e1516, 0x28aed2a6, 0xabf71588, 0x9cf4f3c,
}; };
var enc: [exp_enc.len]u32 = undefined; const enc = AES128.initEnc(key);
var dec: [exp_dec.len]u32 = undefined; const dec = AES128.initDec(key);
expandKeyEncrypt(key[0..], enc[0..]); var exp: [16]u8 = undefined;
expandKeyDecrypt(enc[0..], dec[0..]);
testing.expectEqualSlices(u32, exp_enc[0..], enc[0..]); for (enc.key_schedule.round_keys) |round_key, i| {
testing.expectEqualSlices(u32, exp_dec[0..], dec[0..]); try std.fmt.hexToBytes(&exp, exp_enc[i]);
testing.expectEqualSlices(u8, &exp, &round_key.toBytes());
}
for (enc.key_schedule.round_keys) |round_key, i| {
try std.fmt.hexToBytes(&exp, exp_dec[i]);
testing.expectEqualSlices(u8, &exp, &round_key.toBytes());
}
} }
// constants test "expand 256-bit key" {
const key = [_]u8{ 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe, 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81, 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7, 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4 };
const exp_enc = [_]*const [32:0]u8{
"603deb1015ca71be2b73aef0857d7781", "1f352c073b6108d72d9810a30914dff4", "9ba354118e6925afa51a8b5f2067fcde", "a8b09c1a93d194cdbe49846eb75d5b9a", "d59aecb85bf3c917fee94248de8ebe96", "b5a9328a2678a647983122292f6c79b3", "812c81addadf48ba24360af2fab8b464", "98c5bfc9bebd198e268c3ba709e04214", "68007bacb2df331696e939e46c518d80", "c814e20476a9fb8a5025c02d59c58239", "de1369676ccc5a71fa2563959674ee15", "5886ca5d2e2f31d77e0af1fa27cf73c3", "749c47ab18501ddae2757e4f7401905a", "cafaaae3e4d59b349adf6acebd10190d", "fe4890d1e6188d0b046df344706c631e",
};
const exp_dec = [_]*const [32:0]u8{
"fe4890d1e6188d0b046df344706c631e", "ada23f4963e23b2455427c8a5c709104", "57c96cf6074f07c0706abb07137f9241", "b668b621ce40046d36a047ae0932ed8e", "34ad1e4450866b367725bcc763152946", "32526c367828b24cf8e043c33f92aa20", "c440b289642b757227a3d7f114309581", "d669a7334a7ade7a80c8f18fc772e9e3", "25ba3c22a06bc7fb4388a28333934270", "54fb808b9c137949cab22ff547ba186c", "6c3d632985d1fbd9e3e36578701be0f3", "4a7459f9c8e8f9c256a156bc8d083799", "42107758e9ec98f066329ea193f8858b", "8ec6bff6829ca03b9e49af7edba96125", "603deb1015ca71be2b73aef0857d7781",
};
const enc = AES256.initEnc(key);
const dec = AES256.initDec(key);
var exp: [16]u8 = undefined;
const poly = 1 << 8 | 1 << 4 | 1 << 3 | 1 << 1 | 1 << 0; for (enc.key_schedule.round_keys) |round_key, i| {
try std.fmt.hexToBytes(&exp, exp_enc[i]);
const powx = [16]u8{ testing.expectEqualSlices(u8, &exp, &round_key.toBytes());
0x01, }
0x02, for (dec.key_schedule.round_keys) |round_key, i| {
0x04, try std.fmt.hexToBytes(&exp, exp_dec[i]);
0x08, testing.expectEqualSlices(u8, &exp, &round_key.toBytes());
0x10, }
0x20, }
0x40,
0x80,
0x1b,
0x36,
0x6c,
0xd8,
0xab,
0x4d,
0x9a,
0x2f,
};
const sbox0 = [256]u8{
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
};
const sbox1 = [256]u8{
0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d,
};
const te0 = [256]u32{
0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d, 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554,
0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d, 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a,
0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87, 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea, 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b,
0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a, 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f,
0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108, 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e, 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5,
0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d, 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f,
0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e, 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce, 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497,
0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c, 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed,
0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b, 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16, 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81, 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3,
0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a, 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163, 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d,
0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f, 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739,
0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47, 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f, 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883,
0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c, 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76,
0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e, 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6, 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b,
0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7, 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0,
0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25, 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72, 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651,
0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21, 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa, 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0, 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9,
0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133, 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7,
0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920, 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17, 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8,
0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11, 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a,
};
const te1 = [256]u32{
0xa5c66363, 0x84f87c7c, 0x99ee7777, 0x8df67b7b, 0x0dfff2f2, 0xbdd66b6b, 0xb1de6f6f, 0x5491c5c5,
0x50603030, 0x03020101, 0xa9ce6767, 0x7d562b2b, 0x19e7fefe, 0x62b5d7d7, 0xe64dabab, 0x9aec7676,
0x458fcaca, 0x9d1f8282, 0x4089c9c9, 0x87fa7d7d, 0x15effafa, 0xebb25959, 0xc98e4747, 0x0bfbf0f0,
0xec41adad, 0x67b3d4d4, 0xfd5fa2a2, 0xea45afaf, 0xbf239c9c, 0xf753a4a4, 0x96e47272, 0x5b9bc0c0,
0xc275b7b7, 0x1ce1fdfd, 0xae3d9393, 0x6a4c2626, 0x5a6c3636, 0x417e3f3f, 0x02f5f7f7, 0x4f83cccc,
0x5c683434, 0xf451a5a5, 0x34d1e5e5, 0x08f9f1f1, 0x93e27171, 0x73abd8d8, 0x53623131, 0x3f2a1515,
0x0c080404, 0x5295c7c7, 0x65462323, 0x5e9dc3c3, 0x28301818, 0xa1379696, 0x0f0a0505, 0xb52f9a9a,
0x090e0707, 0x36241212, 0x9b1b8080, 0x3ddfe2e2, 0x26cdebeb, 0x694e2727, 0xcd7fb2b2, 0x9fea7575,
0x1b120909, 0x9e1d8383, 0x74582c2c, 0x2e341a1a, 0x2d361b1b, 0xb2dc6e6e, 0xeeb45a5a, 0xfb5ba0a0,
0xf6a45252, 0x4d763b3b, 0x61b7d6d6, 0xce7db3b3, 0x7b522929, 0x3edde3e3, 0x715e2f2f, 0x97138484,
0xf5a65353, 0x68b9d1d1, 0x00000000, 0x2cc1eded, 0x60402020, 0x1fe3fcfc, 0xc879b1b1, 0xedb65b5b,
0xbed46a6a, 0x468dcbcb, 0xd967bebe, 0x4b723939, 0xde944a4a, 0xd4984c4c, 0xe8b05858, 0x4a85cfcf,
0x6bbbd0d0, 0x2ac5efef, 0xe54faaaa, 0x16edfbfb, 0xc5864343, 0xd79a4d4d, 0x55663333, 0x94118585,
0xcf8a4545, 0x10e9f9f9, 0x06040202, 0x81fe7f7f, 0xf0a05050, 0x44783c3c, 0xba259f9f, 0xe34ba8a8,
0xf3a25151, 0xfe5da3a3, 0xc0804040, 0x8a058f8f, 0xad3f9292, 0xbc219d9d, 0x48703838, 0x04f1f5f5,
0xdf63bcbc, 0xc177b6b6, 0x75afdada, 0x63422121, 0x30201010, 0x1ae5ffff, 0x0efdf3f3, 0x6dbfd2d2,
0x4c81cdcd, 0x14180c0c, 0x35261313, 0x2fc3ecec, 0xe1be5f5f, 0xa2359797, 0xcc884444, 0x392e1717,
0x5793c4c4, 0xf255a7a7, 0x82fc7e7e, 0x477a3d3d, 0xacc86464, 0xe7ba5d5d, 0x2b321919, 0x95e67373,
0xa0c06060, 0x98198181, 0xd19e4f4f, 0x7fa3dcdc, 0x66442222, 0x7e542a2a, 0xab3b9090, 0x830b8888,
0xca8c4646, 0x29c7eeee, 0xd36bb8b8, 0x3c281414, 0x79a7dede, 0xe2bc5e5e, 0x1d160b0b, 0x76addbdb,
0x3bdbe0e0, 0x56643232, 0x4e743a3a, 0x1e140a0a, 0xdb924949, 0x0a0c0606, 0x6c482424, 0xe4b85c5c,
0x5d9fc2c2, 0x6ebdd3d3, 0xef43acac, 0xa6c46262, 0xa8399191, 0xa4319595, 0x37d3e4e4, 0x8bf27979,
0x32d5e7e7, 0x438bc8c8, 0x596e3737, 0xb7da6d6d, 0x8c018d8d, 0x64b1d5d5, 0xd29c4e4e, 0xe049a9a9,
0xb4d86c6c, 0xfaac5656, 0x07f3f4f4, 0x25cfeaea, 0xafca6565, 0x8ef47a7a, 0xe947aeae, 0x18100808,
0xd56fbaba, 0x88f07878, 0x6f4a2525, 0x725c2e2e, 0x24381c1c, 0xf157a6a6, 0xc773b4b4, 0x5197c6c6,
0x23cbe8e8, 0x7ca1dddd, 0x9ce87474, 0x213e1f1f, 0xdd964b4b, 0xdc61bdbd, 0x860d8b8b, 0x850f8a8a,
0x90e07070, 0x427c3e3e, 0xc471b5b5, 0xaacc6666, 0xd8904848, 0x05060303, 0x01f7f6f6, 0x121c0e0e,
0xa3c26161, 0x5f6a3535, 0xf9ae5757, 0xd069b9b9, 0x91178686, 0x5899c1c1, 0x273a1d1d, 0xb9279e9e,
0x38d9e1e1, 0x13ebf8f8, 0xb32b9898, 0x33221111, 0xbbd26969, 0x70a9d9d9, 0x89078e8e, 0xa7339494,
0xb62d9b9b, 0x223c1e1e, 0x92158787, 0x20c9e9e9, 0x4987cece, 0xffaa5555, 0x78502828, 0x7aa5dfdf,
0x8f038c8c, 0xf859a1a1, 0x80098989, 0x171a0d0d, 0xda65bfbf, 0x31d7e6e6, 0xc6844242, 0xb8d06868,
0xc3824141, 0xb0299999, 0x775a2d2d, 0x111e0f0f, 0xcb7bb0b0, 0xfca85454, 0xd66dbbbb, 0x3a2c1616,
};
const te2 = [256]u32{
0x63a5c663, 0x7c84f87c, 0x7799ee77, 0x7b8df67b, 0xf20dfff2, 0x6bbdd66b, 0x6fb1de6f, 0xc55491c5,
0x30506030, 0x01030201, 0x67a9ce67, 0x2b7d562b, 0xfe19e7fe, 0xd762b5d7, 0xabe64dab, 0x769aec76,
0xca458fca, 0x829d1f82, 0xc94089c9, 0x7d87fa7d, 0xfa15effa, 0x59ebb259, 0x47c98e47, 0xf00bfbf0,
0xadec41ad, 0xd467b3d4, 0xa2fd5fa2, 0xafea45af, 0x9cbf239c, 0xa4f753a4, 0x7296e472, 0xc05b9bc0,
0xb7c275b7, 0xfd1ce1fd, 0x93ae3d93, 0x266a4c26, 0x365a6c36, 0x3f417e3f, 0xf702f5f7, 0xcc4f83cc,
0x345c6834, 0xa5f451a5, 0xe534d1e5, 0xf108f9f1, 0x7193e271, 0xd873abd8, 0x31536231, 0x153f2a15,
0x040c0804, 0xc75295c7, 0x23654623, 0xc35e9dc3, 0x18283018, 0x96a13796, 0x050f0a05, 0x9ab52f9a,
0x07090e07, 0x12362412, 0x809b1b80, 0xe23ddfe2, 0xeb26cdeb, 0x27694e27, 0xb2cd7fb2, 0x759fea75,
0x091b1209, 0x839e1d83, 0x2c74582c, 0x1a2e341a, 0x1b2d361b, 0x6eb2dc6e, 0x5aeeb45a, 0xa0fb5ba0,
0x52f6a452, 0x3b4d763b, 0xd661b7d6, 0xb3ce7db3, 0x297b5229, 0xe33edde3, 0x2f715e2f, 0x84971384,
0x53f5a653, 0xd168b9d1, 0x00000000, 0xed2cc1ed, 0x20604020, 0xfc1fe3fc, 0xb1c879b1, 0x5bedb65b,
0x6abed46a, 0xcb468dcb, 0xbed967be, 0x394b7239, 0x4ade944a, 0x4cd4984c, 0x58e8b058, 0xcf4a85cf,
0xd06bbbd0, 0xef2ac5ef, 0xaae54faa, 0xfb16edfb, 0x43c58643, 0x4dd79a4d, 0x33556633, 0x85941185,
0x45cf8a45, 0xf910e9f9, 0x02060402, 0x7f81fe7f, 0x50f0a050, 0x3c44783c, 0x9fba259f, 0xa8e34ba8,
0x51f3a251, 0xa3fe5da3, 0x40c08040, 0x8f8a058f, 0x92ad3f92, 0x9dbc219d, 0x38487038, 0xf504f1f5,
0xbcdf63bc, 0xb6c177b6, 0xda75afda, 0x21634221, 0x10302010, 0xff1ae5ff, 0xf30efdf3, 0xd26dbfd2,
0xcd4c81cd, 0x0c14180c, 0x13352613, 0xec2fc3ec, 0x5fe1be5f, 0x97a23597, 0x44cc8844, 0x17392e17,
0xc45793c4, 0xa7f255a7, 0x7e82fc7e, 0x3d477a3d, 0x64acc864, 0x5de7ba5d, 0x192b3219, 0x7395e673,
0x60a0c060, 0x81981981, 0x4fd19e4f, 0xdc7fa3dc, 0x22664422, 0x2a7e542a, 0x90ab3b90, 0x88830b88,
0x46ca8c46, 0xee29c7ee, 0xb8d36bb8, 0x143c2814, 0xde79a7de, 0x5ee2bc5e, 0x0b1d160b, 0xdb76addb,
0xe03bdbe0, 0x32566432, 0x3a4e743a, 0x0a1e140a, 0x49db9249, 0x060a0c06, 0x246c4824, 0x5ce4b85c,
0xc25d9fc2, 0xd36ebdd3, 0xacef43ac, 0x62a6c462, 0x91a83991, 0x95a43195, 0xe437d3e4, 0x798bf279,
0xe732d5e7, 0xc8438bc8, 0x37596e37, 0x6db7da6d, 0x8d8c018d, 0xd564b1d5, 0x4ed29c4e, 0xa9e049a9,
0x6cb4d86c, 0x56faac56, 0xf407f3f4, 0xea25cfea, 0x65afca65, 0x7a8ef47a, 0xaee947ae, 0x08181008,
0xbad56fba, 0x7888f078, 0x256f4a25, 0x2e725c2e, 0x1c24381c, 0xa6f157a6, 0xb4c773b4, 0xc65197c6,
0xe823cbe8, 0xdd7ca1dd, 0x749ce874, 0x1f213e1f, 0x4bdd964b, 0xbddc61bd, 0x8b860d8b, 0x8a850f8a,
0x7090e070, 0x3e427c3e, 0xb5c471b5, 0x66aacc66, 0x48d89048, 0x03050603, 0xf601f7f6, 0x0e121c0e,
0x61a3c261, 0x355f6a35, 0x57f9ae57, 0xb9d069b9, 0x86911786, 0xc15899c1, 0x1d273a1d, 0x9eb9279e,
0xe138d9e1, 0xf813ebf8, 0x98b32b98, 0x11332211, 0x69bbd269, 0xd970a9d9, 0x8e89078e, 0x94a73394,
0x9bb62d9b, 0x1e223c1e, 0x87921587, 0xe920c9e9, 0xce4987ce, 0x55ffaa55, 0x28785028, 0xdf7aa5df,
0x8c8f038c, 0xa1f859a1, 0x89800989, 0x0d171a0d, 0xbfda65bf, 0xe631d7e6, 0x42c68442, 0x68b8d068,
0x41c38241, 0x99b02999, 0x2d775a2d, 0x0f111e0f, 0xb0cb7bb0, 0x54fca854, 0xbbd66dbb, 0x163a2c16,
};
const te3 = [256]u32{
0x6363a5c6, 0x7c7c84f8, 0x777799ee, 0x7b7b8df6, 0xf2f20dff, 0x6b6bbdd6, 0x6f6fb1de, 0xc5c55491,
0x30305060, 0x01010302, 0x6767a9ce, 0x2b2b7d56, 0xfefe19e7, 0xd7d762b5, 0xababe64d, 0x76769aec,
0xcaca458f, 0x82829d1f, 0xc9c94089, 0x7d7d87fa, 0xfafa15ef, 0x5959ebb2, 0x4747c98e, 0xf0f00bfb,
0xadadec41, 0xd4d467b3, 0xa2a2fd5f, 0xafafea45, 0x9c9cbf23, 0xa4a4f753, 0x727296e4, 0xc0c05b9b,
0xb7b7c275, 0xfdfd1ce1, 0x9393ae3d, 0x26266a4c, 0x36365a6c, 0x3f3f417e, 0xf7f702f5, 0xcccc4f83,
0x34345c68, 0xa5a5f451, 0xe5e534d1, 0xf1f108f9, 0x717193e2, 0xd8d873ab, 0x31315362, 0x15153f2a,
0x04040c08, 0xc7c75295, 0x23236546, 0xc3c35e9d, 0x18182830, 0x9696a137, 0x05050f0a, 0x9a9ab52f,
0x0707090e, 0x12123624, 0x80809b1b, 0xe2e23ddf, 0xebeb26cd, 0x2727694e, 0xb2b2cd7f, 0x75759fea,
0x09091b12, 0x83839e1d, 0x2c2c7458, 0x1a1a2e34, 0x1b1b2d36, 0x6e6eb2dc, 0x5a5aeeb4, 0xa0a0fb5b,
0x5252f6a4, 0x3b3b4d76, 0xd6d661b7, 0xb3b3ce7d, 0x29297b52, 0xe3e33edd, 0x2f2f715e, 0x84849713,
0x5353f5a6, 0xd1d168b9, 0x00000000, 0xeded2cc1, 0x20206040, 0xfcfc1fe3, 0xb1b1c879, 0x5b5bedb6,
0x6a6abed4, 0xcbcb468d, 0xbebed967, 0x39394b72, 0x4a4ade94, 0x4c4cd498, 0x5858e8b0, 0xcfcf4a85,
0xd0d06bbb, 0xefef2ac5, 0xaaaae54f, 0xfbfb16ed, 0x4343c586, 0x4d4dd79a, 0x33335566, 0x85859411,
0x4545cf8a, 0xf9f910e9, 0x02020604, 0x7f7f81fe, 0x5050f0a0, 0x3c3c4478, 0x9f9fba25, 0xa8a8e34b,
0x5151f3a2, 0xa3a3fe5d, 0x4040c080, 0x8f8f8a05, 0x9292ad3f, 0x9d9dbc21, 0x38384870, 0xf5f504f1,
0xbcbcdf63, 0xb6b6c177, 0xdada75af, 0x21216342, 0x10103020, 0xffff1ae5, 0xf3f30efd, 0xd2d26dbf,
0xcdcd4c81, 0x0c0c1418, 0x13133526, 0xecec2fc3, 0x5f5fe1be, 0x9797a235, 0x4444cc88, 0x1717392e,
0xc4c45793, 0xa7a7f255, 0x7e7e82fc, 0x3d3d477a, 0x6464acc8, 0x5d5de7ba, 0x19192b32, 0x737395e6,
0x6060a0c0, 0x81819819, 0x4f4fd19e, 0xdcdc7fa3, 0x22226644, 0x2a2a7e54, 0x9090ab3b, 0x8888830b,
0x4646ca8c, 0xeeee29c7, 0xb8b8d36b, 0x14143c28, 0xdede79a7, 0x5e5ee2bc, 0x0b0b1d16, 0xdbdb76ad,
0xe0e03bdb, 0x32325664, 0x3a3a4e74, 0x0a0a1e14, 0x4949db92, 0x06060a0c, 0x24246c48, 0x5c5ce4b8,
0xc2c25d9f, 0xd3d36ebd, 0xacacef43, 0x6262a6c4, 0x9191a839, 0x9595a431, 0xe4e437d3, 0x79798bf2,
0xe7e732d5, 0xc8c8438b, 0x3737596e, 0x6d6db7da, 0x8d8d8c01, 0xd5d564b1, 0x4e4ed29c, 0xa9a9e049,
0x6c6cb4d8, 0x5656faac, 0xf4f407f3, 0xeaea25cf, 0x6565afca, 0x7a7a8ef4, 0xaeaee947, 0x08081810,
0xbabad56f, 0x787888f0, 0x25256f4a, 0x2e2e725c, 0x1c1c2438, 0xa6a6f157, 0xb4b4c773, 0xc6c65197,
0xe8e823cb, 0xdddd7ca1, 0x74749ce8, 0x1f1f213e, 0x4b4bdd96, 0xbdbddc61, 0x8b8b860d, 0x8a8a850f,
0x707090e0, 0x3e3e427c, 0xb5b5c471, 0x6666aacc, 0x4848d890, 0x03030506, 0xf6f601f7, 0x0e0e121c,
0x6161a3c2, 0x35355f6a, 0x5757f9ae, 0xb9b9d069, 0x86869117, 0xc1c15899, 0x1d1d273a, 0x9e9eb927,
0xe1e138d9, 0xf8f813eb, 0x9898b32b, 0x11113322, 0x6969bbd2, 0xd9d970a9, 0x8e8e8907, 0x9494a733,
0x9b9bb62d, 0x1e1e223c, 0x87879215, 0xe9e920c9, 0xcece4987, 0x5555ffaa, 0x28287850, 0xdfdf7aa5,
0x8c8c8f03, 0xa1a1f859, 0x89898009, 0x0d0d171a, 0xbfbfda65, 0xe6e631d7, 0x4242c684, 0x6868b8d0,
0x4141c382, 0x9999b029, 0x2d2d775a, 0x0f0f111e, 0xb0b0cb7b, 0x5454fca8, 0xbbbbd66d, 0x16163a2c,
};
const td0 = [256]u32{
0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96, 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393,
0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25, 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f,
0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1, 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6,
0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da, 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844,
0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd, 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4,
0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45, 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94,
0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7, 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a,
0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5, 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c,
0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1, 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a,
0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75, 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051,
0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46, 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff,
0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77, 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb,
0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000, 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927, 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a,
0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e, 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16,
0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d, 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8,
0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd, 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34,
0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163, 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120,
0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d, 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0,
0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422, 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef,
0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36, 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4,
0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662, 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5,
0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3, 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b,
0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8, 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6,
0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6, 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0,
0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815, 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df, 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f,
0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e, 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713,
0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89, 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c,
0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf, 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86,
0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f, 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541,
0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190, 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742,
};
const td1 = [256]u32{
0x5051f4a7, 0x537e4165, 0xc31a17a4, 0x963a275e, 0xcb3bab6b, 0xf11f9d45, 0xabacfa58, 0x934be303,
0x552030fa, 0xf6ad766d, 0x9188cc76, 0x25f5024c, 0xfc4fe5d7, 0xd7c52acb, 0x80263544, 0x8fb562a3,
0x49deb15a, 0x6725ba1b, 0x9845ea0e, 0xe15dfec0, 0x02c32f75, 0x12814cf0, 0xa38d4697, 0xc66bd3f9,
0xe7038f5f, 0x9515929c, 0xebbf6d7a, 0xda955259, 0x2dd4be83, 0xd3587421, 0x2949e069, 0x448ec9c8,
0x6a75c289, 0x78f48e79, 0x6b99583e, 0xdd27b971, 0xb6bee14f, 0x17f088ad, 0x66c920ac, 0xb47dce3a,
0x1863df4a, 0x82e51a31, 0x60975133, 0x4562537f, 0xe0b16477, 0x84bb6bae, 0x1cfe81a0, 0x94f9082b,
0x58704868, 0x198f45fd, 0x8794de6c, 0xb7527bf8, 0x23ab73d3, 0xe2724b02, 0x57e31f8f, 0x2a6655ab,
0x07b2eb28, 0x032fb5c2, 0x9a86c57b, 0xa5d33708, 0xf2302887, 0xb223bfa5, 0xba02036a, 0x5ced1682,
0x2b8acf1c, 0x92a779b4, 0xf0f307f2, 0xa14e69e2, 0xcd65daf4, 0xd50605be, 0x1fd13462, 0x8ac4a6fe,
0x9d342e53, 0xa0a2f355, 0x32058ae1, 0x75a4f6eb, 0x390b83ec, 0xaa4060ef, 0x065e719f, 0x51bd6e10,
0xf93e218a, 0x3d96dd06, 0xaedd3e05, 0x464de6bd, 0xb591548d, 0x0571c45d, 0x6f0406d4, 0xff605015,
0x241998fb, 0x97d6bde9, 0xcc894043, 0x7767d99e, 0xbdb0e842, 0x8807898b, 0x38e7195b, 0xdb79c8ee,
0x47a17c0a, 0xe97c420f, 0xc9f8841e, 0x00000000, 0x83098086, 0x48322bed, 0xac1e1170, 0x4e6c5a72,
0xfbfd0eff, 0x560f8538, 0x1e3daed5, 0x27362d39, 0x640a0fd9, 0x21685ca6, 0xd19b5b54, 0x3a24362e,
0xb10c0a67, 0x0f9357e7, 0xd2b4ee96, 0x9e1b9b91, 0x4f80c0c5, 0xa261dc20, 0x695a774b, 0x161c121a,
0x0ae293ba, 0xe5c0a02a, 0x433c22e0, 0x1d121b17, 0x0b0e090d, 0xadf28bc7, 0xb92db6a8, 0xc8141ea9,
0x8557f119, 0x4caf7507, 0xbbee99dd, 0xfda37f60, 0x9ff70126, 0xbc5c72f5, 0xc544663b, 0x345bfb7e,
0x768b4329, 0xdccb23c6, 0x68b6edfc, 0x63b8e4f1, 0xcad731dc, 0x10426385, 0x40139722, 0x2084c611,
0x7d854a24, 0xf8d2bb3d, 0x11aef932, 0x6dc729a1, 0x4b1d9e2f, 0xf3dcb230, 0xec0d8652, 0xd077c1e3,
0x6c2bb316, 0x99a970b9, 0xfa119448, 0x2247e964, 0xc4a8fc8c, 0x1aa0f03f, 0xd8567d2c, 0xef223390,
0xc787494e, 0xc1d938d1, 0xfe8ccaa2, 0x3698d40b, 0xcfa6f581, 0x28a57ade, 0x26dab78e, 0xa43fadbf,
0xe42c3a9d, 0x0d507892, 0x9b6a5fcc, 0x62547e46, 0xc2f68d13, 0xe890d8b8, 0x5e2e39f7, 0xf582c3af,
0xbe9f5d80, 0x7c69d093, 0xa96fd52d, 0xb3cf2512, 0x3bc8ac99, 0xa710187d, 0x6ee89c63, 0x7bdb3bbb,
0x09cd2678, 0xf46e5918, 0x01ec9ab7, 0xa8834f9a, 0x65e6956e, 0x7eaaffe6, 0x0821bccf, 0xe6ef15e8,
0xd9bae79b, 0xce4a6f36, 0xd4ea9f09, 0xd629b07c, 0xaf31a4b2, 0x312a3f23, 0x30c6a594, 0xc035a266,
0x37744ebc, 0xa6fc82ca, 0xb0e090d0, 0x1533a7d8, 0x4af10498, 0xf741ecda, 0x0e7fcd50, 0x2f1791f6,
0x8d764dd6, 0x4d43efb0, 0x54ccaa4d, 0xdfe49604, 0xe39ed1b5, 0x1b4c6a88, 0xb8c12c1f, 0x7f466551,
0x049d5eea, 0x5d018c35, 0x73fa8774, 0x2efb0b41, 0x5ab3671d, 0x5292dbd2, 0x33e91056, 0x136dd647,
0x8c9ad761, 0x7a37a10c, 0x8e59f814, 0x89eb133c, 0xeecea927, 0x35b761c9, 0xede11ce5, 0x3c7a47b1,
0x599cd2df, 0x3f55f273, 0x791814ce, 0xbf73c737, 0xea53f7cd, 0x5b5ffdaa, 0x14df3d6f, 0x867844db,
0x81caaff3, 0x3eb968c4, 0x2c382434, 0x5fc2a340, 0x72161dc3, 0x0cbce225, 0x8b283c49, 0x41ff0d95,
0x7139a801, 0xde080cb3, 0x9cd8b4e4, 0x906456c1, 0x617bcb84, 0x70d532b6, 0x74486c5c, 0x42d0b857,
};
const td2 = [256]u32{
0xa75051f4, 0x65537e41, 0xa4c31a17, 0x5e963a27, 0x6bcb3bab, 0x45f11f9d, 0x58abacfa, 0x03934be3,
0xfa552030, 0x6df6ad76, 0x769188cc, 0x4c25f502, 0xd7fc4fe5, 0xcbd7c52a, 0x44802635, 0xa38fb562,
0x5a49deb1, 0x1b6725ba, 0x0e9845ea, 0xc0e15dfe, 0x7502c32f, 0xf012814c, 0x97a38d46, 0xf9c66bd3,
0x5fe7038f, 0x9c951592, 0x7aebbf6d, 0x59da9552, 0x832dd4be, 0x21d35874, 0x692949e0, 0xc8448ec9,
0x896a75c2, 0x7978f48e, 0x3e6b9958, 0x71dd27b9, 0x4fb6bee1, 0xad17f088, 0xac66c920, 0x3ab47dce,
0x4a1863df, 0x3182e51a, 0x33609751, 0x7f456253, 0x77e0b164, 0xae84bb6b, 0xa01cfe81, 0x2b94f908,
0x68587048, 0xfd198f45, 0x6c8794de, 0xf8b7527b, 0xd323ab73, 0x02e2724b, 0x8f57e31f, 0xab2a6655,
0x2807b2eb, 0xc2032fb5, 0x7b9a86c5, 0x08a5d337, 0x87f23028, 0xa5b223bf, 0x6aba0203, 0x825ced16,
0x1c2b8acf, 0xb492a779, 0xf2f0f307, 0xe2a14e69, 0xf4cd65da, 0xbed50605, 0x621fd134, 0xfe8ac4a6,
0x539d342e, 0x55a0a2f3, 0xe132058a, 0xeb75a4f6, 0xec390b83, 0xefaa4060, 0x9f065e71, 0x1051bd6e,
0x8af93e21, 0x063d96dd, 0x05aedd3e, 0xbd464de6, 0x8db59154, 0x5d0571c4, 0xd46f0406, 0x15ff6050,
0xfb241998, 0xe997d6bd, 0x43cc8940, 0x9e7767d9, 0x42bdb0e8, 0x8b880789, 0x5b38e719, 0xeedb79c8,
0x0a47a17c, 0x0fe97c42, 0x1ec9f884, 0x00000000, 0x86830980, 0xed48322b, 0x70ac1e11, 0x724e6c5a,
0xfffbfd0e, 0x38560f85, 0xd51e3dae, 0x3927362d, 0xd9640a0f, 0xa621685c, 0x54d19b5b, 0x2e3a2436,
0x67b10c0a, 0xe70f9357, 0x96d2b4ee, 0x919e1b9b, 0xc54f80c0, 0x20a261dc, 0x4b695a77, 0x1a161c12,
0xba0ae293, 0x2ae5c0a0, 0xe0433c22, 0x171d121b, 0x0d0b0e09, 0xc7adf28b, 0xa8b92db6, 0xa9c8141e,
0x198557f1, 0x074caf75, 0xddbbee99, 0x60fda37f, 0x269ff701, 0xf5bc5c72, 0x3bc54466, 0x7e345bfb,
0x29768b43, 0xc6dccb23, 0xfc68b6ed, 0xf163b8e4, 0xdccad731, 0x85104263, 0x22401397, 0x112084c6,
0x247d854a, 0x3df8d2bb, 0x3211aef9, 0xa16dc729, 0x2f4b1d9e, 0x30f3dcb2, 0x52ec0d86, 0xe3d077c1,
0x166c2bb3, 0xb999a970, 0x48fa1194, 0x642247e9, 0x8cc4a8fc, 0x3f1aa0f0, 0x2cd8567d, 0x90ef2233,
0x4ec78749, 0xd1c1d938, 0xa2fe8cca, 0x0b3698d4, 0x81cfa6f5, 0xde28a57a, 0x8e26dab7, 0xbfa43fad,
0x9de42c3a, 0x920d5078, 0xcc9b6a5f, 0x4662547e, 0x13c2f68d, 0xb8e890d8, 0xf75e2e39, 0xaff582c3,
0x80be9f5d, 0x937c69d0, 0x2da96fd5, 0x12b3cf25, 0x993bc8ac, 0x7da71018, 0x636ee89c, 0xbb7bdb3b,
0x7809cd26, 0x18f46e59, 0xb701ec9a, 0x9aa8834f, 0x6e65e695, 0xe67eaaff, 0xcf0821bc, 0xe8e6ef15,
0x9bd9bae7, 0x36ce4a6f, 0x09d4ea9f, 0x7cd629b0, 0xb2af31a4, 0x23312a3f, 0x9430c6a5, 0x66c035a2,
0xbc37744e, 0xcaa6fc82, 0xd0b0e090, 0xd81533a7, 0x984af104, 0xdaf741ec, 0x500e7fcd, 0xf62f1791,
0xd68d764d, 0xb04d43ef, 0x4d54ccaa, 0x04dfe496, 0xb5e39ed1, 0x881b4c6a, 0x1fb8c12c, 0x517f4665,
0xea049d5e, 0x355d018c, 0x7473fa87, 0x412efb0b, 0x1d5ab367, 0xd25292db, 0x5633e910, 0x47136dd6,
0x618c9ad7, 0x0c7a37a1, 0x148e59f8, 0x3c89eb13, 0x27eecea9, 0xc935b761, 0xe5ede11c, 0xb13c7a47,
0xdf599cd2, 0x733f55f2, 0xce791814, 0x37bf73c7, 0xcdea53f7, 0xaa5b5ffd, 0x6f14df3d, 0xdb867844,
0xf381caaf, 0xc43eb968, 0x342c3824, 0x405fc2a3, 0xc372161d, 0x250cbce2, 0x498b283c, 0x9541ff0d,
0x017139a8, 0xb3de080c, 0xe49cd8b4, 0xc1906456, 0x84617bcb, 0xb670d532, 0x5c74486c, 0x5742d0b8,
};
const td3 = [256]u32{
0xf4a75051, 0x4165537e, 0x17a4c31a, 0x275e963a, 0xab6bcb3b, 0x9d45f11f, 0xfa58abac, 0xe303934b,
0x30fa5520, 0x766df6ad, 0xcc769188, 0x024c25f5, 0xe5d7fc4f, 0x2acbd7c5, 0x35448026, 0x62a38fb5,
0xb15a49de, 0xba1b6725, 0xea0e9845, 0xfec0e15d, 0x2f7502c3, 0x4cf01281, 0x4697a38d, 0xd3f9c66b,
0x8f5fe703, 0x929c9515, 0x6d7aebbf, 0x5259da95, 0xbe832dd4, 0x7421d358, 0xe0692949, 0xc9c8448e,
0xc2896a75, 0x8e7978f4, 0x583e6b99, 0xb971dd27, 0xe14fb6be, 0x88ad17f0, 0x20ac66c9, 0xce3ab47d,
0xdf4a1863, 0x1a3182e5, 0x51336097, 0x537f4562, 0x6477e0b1, 0x6bae84bb, 0x81a01cfe, 0x082b94f9,
0x48685870, 0x45fd198f, 0xde6c8794, 0x7bf8b752, 0x73d323ab, 0x4b02e272, 0x1f8f57e3, 0x55ab2a66,
0xeb2807b2, 0xb5c2032f, 0xc57b9a86, 0x3708a5d3, 0x2887f230, 0xbfa5b223, 0x036aba02, 0x16825ced,
0xcf1c2b8a, 0x79b492a7, 0x07f2f0f3, 0x69e2a14e, 0xdaf4cd65, 0x05bed506, 0x34621fd1, 0xa6fe8ac4,
0x2e539d34, 0xf355a0a2, 0x8ae13205, 0xf6eb75a4, 0x83ec390b, 0x60efaa40, 0x719f065e, 0x6e1051bd,
0x218af93e, 0xdd063d96, 0x3e05aedd, 0xe6bd464d, 0x548db591, 0xc45d0571, 0x06d46f04, 0x5015ff60,
0x98fb2419, 0xbde997d6, 0x4043cc89, 0xd99e7767, 0xe842bdb0, 0x898b8807, 0x195b38e7, 0xc8eedb79,
0x7c0a47a1, 0x420fe97c, 0x841ec9f8, 0x00000000, 0x80868309, 0x2bed4832, 0x1170ac1e, 0x5a724e6c,
0x0efffbfd, 0x8538560f, 0xaed51e3d, 0x2d392736, 0x0fd9640a, 0x5ca62168, 0x5b54d19b, 0x362e3a24,
0x0a67b10c, 0x57e70f93, 0xee96d2b4, 0x9b919e1b, 0xc0c54f80, 0xdc20a261, 0x774b695a, 0x121a161c,
0x93ba0ae2, 0xa02ae5c0, 0x22e0433c, 0x1b171d12, 0x090d0b0e, 0x8bc7adf2, 0xb6a8b92d, 0x1ea9c814,
0xf1198557, 0x75074caf, 0x99ddbbee, 0x7f60fda3, 0x01269ff7, 0x72f5bc5c, 0x663bc544, 0xfb7e345b,
0x4329768b, 0x23c6dccb, 0xedfc68b6, 0xe4f163b8, 0x31dccad7, 0x63851042, 0x97224013, 0xc6112084,
0x4a247d85, 0xbb3df8d2, 0xf93211ae, 0x29a16dc7, 0x9e2f4b1d, 0xb230f3dc, 0x8652ec0d, 0xc1e3d077,
0xb3166c2b, 0x70b999a9, 0x9448fa11, 0xe9642247, 0xfc8cc4a8, 0xf03f1aa0, 0x7d2cd856, 0x3390ef22,
0x494ec787, 0x38d1c1d9, 0xcaa2fe8c, 0xd40b3698, 0xf581cfa6, 0x7ade28a5, 0xb78e26da, 0xadbfa43f,
0x3a9de42c, 0x78920d50, 0x5fcc9b6a, 0x7e466254, 0x8d13c2f6, 0xd8b8e890, 0x39f75e2e, 0xc3aff582,
0x5d80be9f, 0xd0937c69, 0xd52da96f, 0x2512b3cf, 0xac993bc8, 0x187da710, 0x9c636ee8, 0x3bbb7bdb,
0x267809cd, 0x5918f46e, 0x9ab701ec, 0x4f9aa883, 0x956e65e6, 0xffe67eaa, 0xbccf0821, 0x15e8e6ef,
0xe79bd9ba, 0x6f36ce4a, 0x9f09d4ea, 0xb07cd629, 0xa4b2af31, 0x3f23312a, 0xa59430c6, 0xa266c035,
0x4ebc3774, 0x82caa6fc, 0x90d0b0e0, 0xa7d81533, 0x04984af1, 0xecdaf741, 0xcd500e7f, 0x91f62f17,
0x4dd68d76, 0xefb04d43, 0xaa4d54cc, 0x9604dfe4, 0xd1b5e39e, 0x6a881b4c, 0x2c1fb8c1, 0x65517f46,
0x5eea049d, 0x8c355d01, 0x877473fa, 0x0b412efb, 0x671d5ab3, 0xdbd25292, 0x105633e9, 0xd647136d,
0xd7618c9a, 0xa10c7a37, 0xf8148e59, 0x133c89eb, 0xa927eece, 0x61c935b7, 0x1ce5ede1, 0x47b13c7a,
0xd2df599c, 0xf2733f55, 0x14ce7918, 0xc737bf73, 0xf7cdea53, 0xfdaa5b5f, 0x3d6f14df, 0x44db8678,
0xaff381ca, 0x68c43eb9, 0x24342c38, 0xa3405fc2, 0x1dc37216, 0xe2250cbc, 0x3c498b28, 0x0d9541ff,
0xa8017139, 0x0cb3de08, 0xb4e49cd8, 0x56c19064, 0xcb84617b, 0x32b670d5, 0x6c5c7448, 0xb85742d0,
};

View File

@ -0,0 +1,430 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2020 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
// Based on Go stdlib implementation
const std = @import("../../std.zig");
const mem = std.mem;
const debug = std.debug;
const Vector = std.meta.Vector;
const BlockVec = Vector(2, u64);
/// A single AES block.
pub const Block = struct {
pub const block_size: usize = 16;
/// Internal representation of a block.
repr: BlockVec,
/// Convert a byte sequence into an internal representation.
pub inline fn fromBytes(bytes: *const [16]u8) Block {
const repr = mem.bytesToValue(BlockVec, bytes);
return Block{ .repr = repr };
}
/// Convert the internal representation of a block into a byte sequence.
pub inline fn toBytes(block: Block) [16]u8 {
return mem.toBytes(block.repr);
}
/// XOR the block with a byte sequence.
pub inline fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 {
const x = block.repr ^ fromBytes(bytes).repr;
return mem.toBytes(x);
}
/// Encrypt a block with a round key.
pub inline fn encrypt(block: Block, round_key: Block) Block {
return Block{
.repr = asm (
\\ vaesenc %[rk], %[in], %[out]
: [out] "=x" (-> BlockVec)
: [in] "x" (block.repr),
[rk] "x" (round_key.repr)
),
};
}
/// Encrypt a block with the last round key.
pub inline fn encryptLast(block: Block, round_key: Block) Block {
return Block{
.repr = asm (
\\ vaesenclast %[rk], %[in], %[out]
: [out] "=x" (-> BlockVec)
: [in] "x" (block.repr),
[rk] "x" (round_key.repr)
),
};
}
/// Decrypt a block with a round key.
pub inline fn decrypt(block: Block, inv_round_key: Block) Block {
return Block{
.repr = asm (
\\ vaesdec %[rk], %[in], %[out]
: [out] "=x" (-> BlockVec)
: [in] "x" (block.repr),
[rk] "x" (inv_round_key.repr)
),
};
}
/// Decrypt a block with the last round key.
pub inline fn decryptLast(block: Block, inv_round_key: Block) Block {
return Block{
.repr = asm (
\\ vaesdeclast %[rk], %[in], %[out]
: [out] "=x" (-> BlockVec)
: [in] "x" (block.repr),
[rk] "x" (inv_round_key.repr)
),
};
}
/// Apply the bitwise XOR operation to the content of two blocks.
pub inline fn xorBlocks(block1: Block, block2: Block) Block {
return Block{ .repr = block1.repr ^ block2.repr };
}
/// Apply the bitwise AND operation to the content of two blocks.
pub inline fn andBlocks(block1: Block, block2: Block) Block {
return Block{ .repr = block1.repr & block2.repr };
}
/// Apply the bitwise OR operation to the content of two blocks.
pub inline fn orBlocks(block1: Block, block2: Block) Block {
return Block{ .repr = block1.repr | block2.repr };
}
/// Perform operations on multiple blocks in parallel.
pub const parallel = struct {
/// The recommended number of AES encryptions/decryptions to perform in parallel for the chosen implementation.
pub const optimal_parallel_blocks = 8;
/// Encrypt multiple blocks in parallel, each with its own round key.
pub inline fn encryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
out[i] = blocks[i].encrypt(round_keys[i]);
}
return out;
}
/// Decrypt multiple blocks in parallel, each with its own round key.
pub inline fn decryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
out[i] = blocks[i].decrypt(round_keys[i]);
}
return out;
}
/// Encrypt multiple blocks in parallel with the same round key.
pub inline fn encryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
out[i] = blocks[i].encrypt(round_key);
}
return out;
}
/// Decrypt multiple blocks in parallel with the same round key.
pub inline fn decryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
out[i] = blocks[i].decrypt(round_key);
}
return out;
}
/// Encrypt multiple blocks in parallel with the same last round key.
pub inline fn encryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
out[i] = blocks[i].encryptLast(round_key);
}
return out;
}
/// Decrypt multiple blocks in parallel with the same last round key.
pub inline fn decryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
out[i] = blocks[i].decryptLast(round_key);
}
return out;
}
};
};
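// A minimal sketch of the Block helpers above: XOR is its own inverse, so masking a block
// with xorBlocks and applying the same mask again restores the original bytes.
// The byte values are arbitrary placeholders.
test "Block.xorBlocks round trip (sketch)" {
    const a_bytes = [_]u8{0x11} ** 16;
    const mask_bytes = [_]u8{0x22} ** 16;
    const a = Block.fromBytes(&a_bytes);
    const mask = Block.fromBytes(&mask_bytes);
    const masked = a.xorBlocks(mask);
    const unmasked = masked.xorBlocks(mask);
    const want = a.toBytes();
    const got = unmasked.toBytes();
    std.testing.expectEqualSlices(u8, &want, &got);
}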
fn KeySchedule(comptime AES: type) type {
std.debug.assert(AES.rounds == 10 or AES.rounds == 14);
const rounds = AES.rounds;
return struct {
const Self = @This();
round_keys: [rounds + 1]Block,
fn drc(comptime second: bool, comptime rc: u8, t: BlockVec, tx: BlockVec) BlockVec {
var s: BlockVec = undefined;
var ts: BlockVec = undefined;
return asm (
\\ vaeskeygenassist %[rc], %[t], %[s]
\\ vpslldq $4, %[tx], %[ts]
\\ vpxor %[ts], %[tx], %[r]
\\ vpslldq $8, %[r], %[ts]
\\ vpxor %[ts], %[r], %[r]
\\ vpshufd %[mask], %[s], %[ts]
\\ vpxor %[ts], %[r], %[r]
: [r] "=&x" (-> BlockVec),
[s] "=&x" (s),
[ts] "=&x" (ts)
: [rc] "n" (rc),
[t] "x" (t),
[tx] "x" (tx),
[mask] "n" (@as(u8, if (second) 0xaa else 0xff))
);
}
fn expand128(t1: *Block) Self {
var round_keys: [11]Block = undefined;
const rcs = [_]u8{ 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
inline for (rcs) |rc, round| {
round_keys[round] = t1.*;
t1.repr = drc(false, rc, t1.repr, t1.repr);
}
round_keys[rcs.len] = t1.*;
return Self{ .round_keys = round_keys };
}
fn expand256(t1: *Block, t2: *Block) Self {
var round_keys: [15]Block = undefined;
const rcs = [_]u8{ 1, 2, 4, 8, 16, 32 };
round_keys[0] = t1.*;
inline for (rcs) |rc, round| {
round_keys[round * 2 + 1] = t2.*;
t1.repr = drc(false, rc, t2.repr, t1.repr);
round_keys[round * 2 + 2] = t1.*;
t2.repr = drc(true, rc, t1.repr, t2.repr);
}
round_keys[rcs.len * 2 + 1] = t2.*;
t1.repr = drc(false, 64, t2.repr, t1.repr);
round_keys[rcs.len * 2 + 2] = t1.*;
return Self{ .round_keys = round_keys };
}
/// Invert the key schedule.
pub fn invert(key_schedule: Self) Self {
const round_keys = &key_schedule.round_keys;
var inv_round_keys: [rounds + 1]Block = undefined;
inv_round_keys[0] = round_keys[rounds];
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
inv_round_keys[i] = Block{
.repr = asm (
\\ vaesimc %[rk], %[inv_rk]
: [inv_rk] "=x" (-> BlockVec)
: [rk] "x" (round_keys[rounds - i].repr)
),
};
}
inv_round_keys[rounds] = round_keys[0];
return Self{ .round_keys = inv_round_keys };
}
};
}
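// A minimal sketch of what invert() above produces: the inverse schedule reverses the
// round-key order, so its first entry equals the last forward round key. This relies on
// the AES128 declaration further down in this file and, like the rest of the file, on AES-NI.
test "KeySchedule.invert reverses the round-key order (sketch)" {
    const key = [_]u8{0x00} ** 16;
    const enc = AES128.initEnc(key);
    const inv = enc.key_schedule.invert();
    const last_fwd = enc.key_schedule.round_keys[AES128.rounds].toBytes();
    const first_inv = inv.round_keys[0].toBytes();
    std.testing.expectEqualSlices(u8, &last_fwd, &first_inv);
}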
/// A context to perform encryption using the standard AES key schedule.
pub fn AESEncryptCtx(comptime AES: type) type {
std.debug.assert(AES.key_bits == 128 or AES.key_bits == 256);
const rounds = AES.rounds;
return struct {
const Self = @This();
pub const block = AES.block;
pub const block_size = block.block_size;
key_schedule: KeySchedule(AES),
/// Create a new encryption context with the given key.
pub fn init(key: [AES.key_bits / 8]u8) Self {
var t1 = Block.fromBytes(key[0..16]);
const key_schedule = if (AES.key_bits == 128) ks: {
break :ks KeySchedule(AES).expand128(&t1);
} else ks: {
var t2 = Block.fromBytes(key[16..32]);
break :ks KeySchedule(AES).expand256(&t1, &t2);
};
return Self{
.key_schedule = key_schedule,
};
}
/// Encrypt a single block.
pub fn encrypt(ctx: Self, dst: *[16]u8, src: *const [16]u8) void {
const round_keys = ctx.key_schedule.round_keys;
var t = Block.fromBytes(src).xorBlocks(round_keys[0]);
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
t = t.encrypt(round_keys[i]);
}
t = t.encryptLast(round_keys[rounds]);
dst.* = t.toBytes();
}
/// Encrypt+XOR a single block.
pub fn xor(ctx: Self, dst: *[16]u8, src: *const [16]u8, counter: [16]u8) void {
const round_keys = ctx.key_schedule.round_keys;
var t = Block.fromBytes(&counter).xorBlocks(round_keys[0]);
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
t = t.encrypt(round_keys[i]);
}
t = t.encryptLast(round_keys[rounds]);
dst.* = t.xorBytes(src);
}
/// Encrypt multiple blocks, possibly leveraging parallelization.
pub fn encryptWide(ctx: Self, comptime count: usize, dst: *[16 * count]u8, src: *const [16 * count]u8) void {
const round_keys = ctx.key_schedule.round_keys;
var ts: [count]Block = undefined;
comptime var j = 0;
inline while (j < count) : (j += 1) {
ts[j] = Block.fromBytes(src[j * 16 .. j * 16 + 16][0..16]).xorBlocks(round_keys[0]);
}
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
ts = Block.parallel.encryptWide(count, ts, round_keys[i]);
}
ts = Block.parallel.encryptLastWide(count, ts, round_keys[rounds]);
j = 0;
inline while (j < count) : (j += 1) {
dst[16 * j .. 16 * j + 16].* = ts[j].toBytes();
}
}
/// Encrypt+XOR multiple blocks, possibly leveraging parallelization.
pub fn xorWide(ctx: Self, comptime count: usize, dst: *[16 * count]u8, src: *const [16 * count]u8, counters: [16 * count]u8) void {
const round_keys = ctx.key_schedule.round_keys;
var ts: [count]Block = undefined;
comptime var j = 0;
inline while (j < count) : (j += 1) {
ts[j] = Block.fromBytes(counters[j * 16 .. j * 16 + 16][0..16]).xorBlocks(round_keys[0]);
}
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
ts = Block.parallel.encryptWide(count, ts, round_keys[i]);
}
ts = Block.parallel.encryptLastWide(count, ts, round_keys[rounds]);
j = 0;
inline while (j < count) : (j += 1) {
dst[16 * j .. 16 * j + 16].* = ts[j].xorBytes(src[16 * j .. 16 * j + 16]);
}
}
};
}
/// A context to perform decryption using the standard AES key schedule.
pub fn AESDecryptCtx(comptime AES: type) type {
std.debug.assert(AES.key_bits == 128 or AES.key_bits == 256);
const rounds = AES.rounds;
return struct {
const Self = @This();
pub const block = AES.block;
pub const block_size = block.block_size;
key_schedule: KeySchedule(AES),
/// Create a decryption context from an existing encryption context.
pub fn initFromEnc(ctx: AESEncryptCtx(AES)) Self {
return Self{
.key_schedule = ctx.key_schedule.invert(),
};
}
/// Create a new decryption context with the given key.
pub fn init(key: [AES.key_bits / 8]u8) Self {
const enc_ctx = AESEncryptCtx(AES).init(key);
return initFromEnc(enc_ctx);
}
/// Decrypt a single block.
pub fn decrypt(ctx: Self, dst: *[16]u8, src: *const [16]u8) void {
const inv_round_keys = ctx.key_schedule.round_keys;
var t = Block.fromBytes(src).xorBlocks(inv_round_keys[0]);
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
t = t.decrypt(inv_round_keys[i]);
}
t = t.decryptLast(inv_round_keys[rounds]);
dst.* = t.toBytes();
}
/// Decrypt multiple blocks, possibly leveraging parallelization.
pub fn decryptWide(ctx: Self, comptime count: usize, dst: *[16 * count]u8, src: *const [16 * count]u8) void {
const inv_round_keys = ctx.key_schedule.round_keys;
var ts: [count]Block = undefined;
comptime var j = 0;
inline while (j < count) : (j += 1) {
ts[j] = Block.fromBytes(src[j * 16 .. j * 16 + 16][0..16]).xorBlocks(inv_round_keys[0]);
}
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
ts = Block.parallel.decryptWide(count, ts, inv_round_keys[i]);
}
ts = Block.parallel.decryptLastWide(count, ts, inv_round_keys[rounds]);
j = 0;
inline while (j < count) : (j += 1) {
dst[16 * j .. 16 * j + 16].* = ts[j].toBytes();
}
}
};
}
/// AES-128 with the standard key schedule.
pub const AES128 = struct {
pub const key_bits: usize = 128;
pub const rounds = ((key_bits - 64) / 32 + 8);
pub const block = Block;
/// Create a new context for encryption.
pub fn initEnc(key: [key_bits / 8]u8) AESEncryptCtx(AES128) {
return AESEncryptCtx(AES128).init(key);
}
/// Create a new context for decryption.
pub fn initDec(key: [key_bits / 8]u8) AESDecryptCtx(AES128) {
return AESDecryptCtx(AES128).init(key);
}
};
/// AES-256 with the standard key schedule.
pub const AES256 = struct {
pub const key_bits: usize = 256;
pub const rounds = ((key_bits - 64) / 32 + 8);
pub const block = Block;
/// Create a new context for encryption.
pub fn initEnc(key: [key_bits / 8]u8) AESEncryptCtx(AES256) {
return AESEncryptCtx(AES256).init(key);
}
/// Create a new context for decryption.
pub fn initDec(key: [key_bits / 8]u8) AESDecryptCtx(AES256) {
return AESDecryptCtx(AES256).init(key);
}
};
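// --- Illustrative usage sketch (not part of the original change) ---
// A minimal round-trip check for the contexts defined above, written as a Zig
// test that would sit at the bottom of this file. It assumes a CPU with
// AES-NI, since it exercises the hardware path; the key and plaintext values
// are arbitrary.
test "AES-128 single block round trip (sketch)" {
    const key = [_]u8{0x2b} ** 16;
    const plaintext = [_]u8{0x32} ** 16;
    var ciphertext: [16]u8 = undefined;
    var recovered: [16]u8 = undefined;

    const enc = AES128.initEnc(key);
    enc.encrypt(&ciphertext, &plaintext);

    // The decryption context inverts the key schedule (see invert() above).
    const dec = AES128.initDec(key);
    dec.decrypt(&recovered, &ciphertext);

    std.debug.assert(std.mem.eql(u8, &plaintext, &recovered));
}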
755
lib/std/crypto/aes/soft.zig Normal file
View File
@ -0,0 +1,755 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2020 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
// Based on Go stdlib implementation
const std = @import("../../std.zig");
const mem = std.mem;
const BlockVec = [4]u32;
/// A single AES block.
pub const Block = struct {
pub const block_size: usize = 16;
/// Internal representation of a block.
repr: BlockVec align(16),
/// Convert a byte sequence into an internal representation.
pub inline fn fromBytes(bytes: *const [16]u8) Block {
const s0 = mem.readIntBig(u32, bytes[0..4]);
const s1 = mem.readIntBig(u32, bytes[4..8]);
const s2 = mem.readIntBig(u32, bytes[8..12]);
const s3 = mem.readIntBig(u32, bytes[12..16]);
return Block{ .repr = BlockVec{ s0, s1, s2, s3 } };
}
/// Convert the internal representation of a block into a byte sequence.
pub inline fn toBytes(block: Block) [16]u8 {
var bytes: [16]u8 = undefined;
mem.writeIntBig(u32, bytes[0..4], block.repr[0]);
mem.writeIntBig(u32, bytes[4..8], block.repr[1]);
mem.writeIntBig(u32, bytes[8..12], block.repr[2]);
mem.writeIntBig(u32, bytes[12..16], block.repr[3]);
return bytes;
}
/// XOR the block with a byte sequence.
pub inline fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 {
const block_bytes = block.toBytes();
var x: [16]u8 = undefined;
comptime var i: usize = 0;
inline while (i < 16) : (i += 1) {
x[i] = block_bytes[i] ^ bytes[i];
}
return x;
}
/// Encrypt a block with a round key.
pub inline fn encrypt(block: Block, round_key: Block) Block {
const src = &block.repr;
const s0 = block.repr[0];
const s1 = block.repr[1];
const s2 = block.repr[2];
const s3 = block.repr[3];
const t0 = round_key.repr[0] ^ te0[@truncate(u8, s0 >> 24)] ^ te1[@truncate(u8, s1 >> 16)] ^ te2[@truncate(u8, s2 >> 8)] ^ te3[@truncate(u8, s3)];
const t1 = round_key.repr[1] ^ te0[@truncate(u8, s1 >> 24)] ^ te1[@truncate(u8, s2 >> 16)] ^ te2[@truncate(u8, s3 >> 8)] ^ te3[@truncate(u8, s0)];
const t2 = round_key.repr[2] ^ te0[@truncate(u8, s2 >> 24)] ^ te1[@truncate(u8, s3 >> 16)] ^ te2[@truncate(u8, s0 >> 8)] ^ te3[@truncate(u8, s1)];
const t3 = round_key.repr[3] ^ te0[@truncate(u8, s3 >> 24)] ^ te1[@truncate(u8, s0 >> 16)] ^ te2[@truncate(u8, s1 >> 8)] ^ te3[@truncate(u8, s2)];
return Block{ .repr = BlockVec{ t0, t1, t2, t3 } };
}
/// Encrypt a block with the last round key.
pub inline fn encryptLast(block: Block, round_key: Block) Block {
const src = &block.repr;
const t0 = block.repr[0];
const t1 = block.repr[1];
const t2 = block.repr[2];
const t3 = block.repr[3];
// Last round uses s-box directly and XORs to produce output.
var s0 = @as(u32, sbox0[t0 >> 24]) << 24 | @as(u32, sbox0[t1 >> 16 & 0xff]) << 16 | @as(u32, sbox0[t2 >> 8 & 0xff]) << 8 | @as(u32, sbox0[t3 & 0xff]);
var s1 = @as(u32, sbox0[t1 >> 24]) << 24 | @as(u32, sbox0[t2 >> 16 & 0xff]) << 16 | @as(u32, sbox0[t3 >> 8 & 0xff]) << 8 | @as(u32, sbox0[t0 & 0xff]);
var s2 = @as(u32, sbox0[t2 >> 24]) << 24 | @as(u32, sbox0[t3 >> 16 & 0xff]) << 16 | @as(u32, sbox0[t0 >> 8 & 0xff]) << 8 | @as(u32, sbox0[t1 & 0xff]);
var s3 = @as(u32, sbox0[t3 >> 24]) << 24 | @as(u32, sbox0[t0 >> 16 & 0xff]) << 16 | @as(u32, sbox0[t1 >> 8 & 0xff]) << 8 | @as(u32, sbox0[t2 & 0xff]);
s0 ^= round_key.repr[0];
s1 ^= round_key.repr[1];
s2 ^= round_key.repr[2];
s3 ^= round_key.repr[3];
return Block{ .repr = BlockVec{ s0, s1, s2, s3 } };
}
/// Decrypt a block with a round key.
pub inline fn decrypt(block: Block, round_key: Block) Block {
const src = &block.repr;
const s0 = block.repr[0];
const s1 = block.repr[1];
const s2 = block.repr[2];
const s3 = block.repr[3];
const t0 = round_key.repr[0] ^ td0[@truncate(u8, s0 >> 24)] ^ td1[@truncate(u8, s3 >> 16)] ^ td2[@truncate(u8, s2 >> 8)] ^ td3[@truncate(u8, s1)];
const t1 = round_key.repr[1] ^ td0[@truncate(u8, s1 >> 24)] ^ td1[@truncate(u8, s0 >> 16)] ^ td2[@truncate(u8, s3 >> 8)] ^ td3[@truncate(u8, s2)];
const t2 = round_key.repr[2] ^ td0[@truncate(u8, s2 >> 24)] ^ td1[@truncate(u8, s1 >> 16)] ^ td2[@truncate(u8, s0 >> 8)] ^ td3[@truncate(u8, s3)];
const t3 = round_key.repr[3] ^ td0[@truncate(u8, s3 >> 24)] ^ td1[@truncate(u8, s2 >> 16)] ^ td2[@truncate(u8, s1 >> 8)] ^ td3[@truncate(u8, s0)];
return Block{ .repr = BlockVec{ t0, t1, t2, t3 } };
}
/// Decrypt a block with the last round key.
pub inline fn decryptLast(block: Block, round_key: Block) Block {
const src = &block.repr;
const t0 = block.repr[0];
const t1 = block.repr[1];
const t2 = block.repr[2];
const t3 = block.repr[3];
// Last round uses s-box directly and XORs to produce output.
var s0 = @as(u32, sbox1[t0 >> 24]) << 24 | @as(u32, sbox1[t3 >> 16 & 0xff]) << 16 | @as(u32, sbox1[t2 >> 8 & 0xff]) << 8 | @as(u32, sbox1[t1 & 0xff]);
var s1 = @as(u32, sbox1[t1 >> 24]) << 24 | @as(u32, sbox1[t0 >> 16 & 0xff]) << 16 | @as(u32, sbox1[t3 >> 8 & 0xff]) << 8 | @as(u32, sbox1[t2 & 0xff]);
var s2 = @as(u32, sbox1[t2 >> 24]) << 24 | @as(u32, sbox1[t1 >> 16 & 0xff]) << 16 | @as(u32, sbox1[t0 >> 8 & 0xff]) << 8 | @as(u32, sbox1[t3 & 0xff]);
var s3 = @as(u32, sbox1[t3 >> 24]) << 24 | @as(u32, sbox1[t2 >> 16 & 0xff]) << 16 | @as(u32, sbox1[t1 >> 8 & 0xff]) << 8 | @as(u32, sbox1[t0 & 0xff]);
s0 ^= round_key.repr[0];
s1 ^= round_key.repr[1];
s2 ^= round_key.repr[2];
s3 ^= round_key.repr[3];
return Block{ .repr = BlockVec{ s0, s1, s2, s3 } };
}
/// Apply the bitwise XOR operation to the content of two blocks.
pub inline fn xorBlocks(block1: Block, block2: Block) Block {
var x: BlockVec = undefined;
comptime var i = 0;
inline while (i < 4) : (i += 1) {
x[i] = block1.repr[i] ^ block2.repr[i];
}
return Block{ .repr = x };
}
/// Apply the bitwise AND operation to the content of two blocks.
pub inline fn andBlocks(block1: Block, block2: Block) Block {
var x: BlockVec = undefined;
comptime var i = 0;
inline while (i < 4) : (i += 1) {
x[i] = block1.repr[i] & block2.repr[i];
}
return Block{ .repr = x };
}
/// Apply the bitwise OR operation to the content of two blocks.
pub inline fn orBlocks(block1: Block, block2: Block) Block {
var x: BlockVec = undefined;
comptime var i = 0;
inline while (i < 4) : (i += 1) {
x[i] = block1.repr[i] | block2.repr[i];
}
return Block{ .repr = x };
}
/// Perform operations on multiple blocks in parallel.
pub const parallel = struct {
/// The recommended number of AES encryption/decryption operations to perform in parallel for the chosen implementation.
pub const optimal_parallel_blocks = 1;
/// Encrypt multiple blocks in parallel, each with its own round key.
pub fn encryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
var i: usize = 0;
var out: [count]Block = undefined;
while (i < count) : (i += 1) {
out[i] = blocks[i].encrypt(round_keys[i]);
}
return out;
}
/// Decrypt multiple blocks in parallel, each with its own round key.
pub fn decryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
var i: usize = 0;
var out: [count]Block = undefined;
while (i < count) : (i += 1) {
out[i] = blocks[i].decrypt(round_keys[i]);
}
return out;
}
/// Encrypt multiple blocks in parallel with the same round key.
pub fn encryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
var i: usize = 0;
var out: [count]Block = undefined;
while (i < count) : (i += 1) {
out[i] = blocks[i].encrypt(round_key);
}
return out;
}
/// Decrypt multiple blocks in parallel with the same round key.
pub fn decryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
var i: usize = 0;
var out: [count]Block = undefined;
while (i < count) : (i += 1) {
out[i] = blocks[i].decrypt(round_key);
}
return out;
}
/// Encrypt multiple blocks in parallel with the same last round key.
pub fn encryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
var i: usize = 0;
var out: [count]Block = undefined;
while (i < count) : (i += 1) {
out[i] = blocks[i].encryptLast(round_key);
}
return out;
}
/// Decrypt multiple blocks in parallel with the same last round key.
pub fn decryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
var i: usize = 0;
var out: [count]Block = undefined;
while (i < count) : (i += 1) {
out[i] = blocks[i].decryptLast(round_key);
}
return out;
}
};
};
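// Illustrative sketch (not part of the original file): how the Block API above
// is driven round by round. The round key here is a placeholder value; real
// schedules come from KeySchedule below.
test "Block API sketch" {
    const zero = [_]u8{0} ** 16;
    const rk = Block.fromBytes(&zero);
    var state = Block.fromBytes(&zero).xorBlocks(rk); // initial AddRoundKey
    state = state.encrypt(rk); // one full table-based round
    state = state.encryptLast(rk); // final round, no MixColumns
    _ = state.toBytes();
}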
fn KeySchedule(comptime AES: type) type {
std.debug.assert(AES.rounds == 10 or AES.rounds == 14);
const key_size = AES.key_bits / 8;
const rounds = AES.rounds;
return struct {
const Self = @This();
const words_in_key = key_size / 4;
round_keys: [rounds + 1]Block,
// Key expansion algorithm. See FIPS-197, Figure 11.
fn expandKey(key: [key_size]u8) Self {
const subw = struct {
// Apply sbox0 to each byte in w.
fn func(w: u32) u32 {
return @as(u32, sbox0[w >> 24]) << 24 | @as(u32, sbox0[w >> 16 & 0xff]) << 16 | @as(u32, sbox0[w >> 8 & 0xff]) << 8 | @as(u32, sbox0[w & 0xff]);
}
}.func;
var round_keys: [rounds + 1]Block = undefined;
comptime var i: usize = 0;
inline while (i < words_in_key) : (i += 1) {
round_keys[i / 4].repr[i % 4] = mem.readIntBig(u32, key[4 * i ..][0..4]);
}
inline while (i < round_keys.len * 4) : (i += 1) {
var t = round_keys[(i - 1) / 4].repr[(i - 1) % 4];
if (i % words_in_key == 0) {
t = subw(std.math.rotl(u32, t, 8)) ^ (@as(u32, powx[i / words_in_key - 1]) << 24);
} else if (words_in_key > 6 and i % words_in_key == 4) {
t = subw(t);
}
round_keys[i / 4].repr[i % 4] = round_keys[(i - words_in_key) / 4].repr[(i - words_in_key) % 4] ^ t;
}
return Self{ .round_keys = round_keys };
}
/// Invert the key schedule.
pub fn invert(key_schedule: Self) Self {
const round_keys = &key_schedule.round_keys;
var inv_round_keys: [rounds + 1]Block = undefined;
const total_words = 4 * round_keys.len;
var i: usize = 0;
while (i < total_words) : (i += 4) {
const ei = total_words - i - 4;
comptime var j: usize = 0;
inline while (j < 4) : (j += 1) {
var x = round_keys[(ei + j) / 4].repr[(ei + j) % 4];
if (i > 0 and i + 4 < total_words) {
x = td0[sbox0[x >> 24]] ^ td1[sbox0[x >> 16 & 0xff]] ^ td2[sbox0[x >> 8 & 0xff]] ^ td3[sbox0[x & 0xff]];
}
inv_round_keys[(i + j) / 4].repr[(i + j) % 4] = x;
}
}
return Self{ .round_keys = inv_round_keys };
}
};
}
/// A context to perform encryption using the standard AES key schedule.
pub fn AESEncryptCtx(comptime AES: type) type {
std.debug.assert(AES.key_bits == 128 or AES.key_bits == 256);
const rounds = AES.rounds;
return struct {
const Self = @This();
pub const block = AES.block;
pub const block_size = block.block_size;
key_schedule: KeySchedule(AES),
/// Create a new encryption context with the given key.
pub fn init(key: [AES.key_bits / 8]u8) Self {
const key_schedule = KeySchedule(AES).expandKey(key);
return Self{
.key_schedule = key_schedule,
};
}
/// Encrypt a single block.
pub fn encrypt(ctx: Self, dst: *[16]u8, src: *const [16]u8) void {
const round_keys = ctx.key_schedule.round_keys;
var t = Block.fromBytes(src).xorBlocks(round_keys[0]);
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
t = t.encrypt(round_keys[i]);
}
t = t.encryptLast(round_keys[rounds]);
dst.* = t.toBytes();
}
/// Encrypt+XOR a single block.
pub fn xor(ctx: Self, dst: *[16]u8, src: *const [16]u8, counter: [16]u8) void {
const round_keys = ctx.key_schedule.round_keys;
var t = Block.fromBytes(&counter).xorBlocks(round_keys[0]);
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
t = t.encrypt(round_keys[i]);
}
t = t.encryptLast(round_keys[rounds]);
dst.* = t.xorBytes(src);
}
/// Encrypt multiple blocks, possibly leveraging parallelization.
pub fn encryptWide(ctx: Self, comptime count: usize, dst: *[16 * count]u8, src: *const [16 * count]u8) void {
var i: usize = 0;
while (i < count) : (i += 1) {
ctx.encrypt(dst[16 * i .. 16 * i + 16][0..16], src[16 * i .. 16 * i + 16][0..16]);
}
}
/// Encrypt+XOR multiple blocks, possibly leveraging parallelization.
pub fn xorWide(ctx: Self, comptime count: usize, dst: *[16 * count]u8, src: *const [16 * count]u8, counters: [16 * count]u8) void {
var i: usize = 0;
while (i < count) : (i += 1) {
ctx.xor(dst[16 * i .. 16 * i + 16][0..16], src[16 * i .. 16 * i + 16][0..16], counters[16 * i .. 16 * i + 16][0..16].*);
}
}
};
}
/// A context to perform decryption using the standard AES key schedule.
pub fn AESDecryptCtx(comptime AES: type) type {
std.debug.assert(AES.key_bits == 128 or AES.key_bits == 256);
const rounds = AES.rounds;
return struct {
const Self = @This();
pub const block = AES.block;
pub const block_size = block.block_size;
key_schedule: KeySchedule(AES),
/// Create a decryption context from an existing encryption context.
pub fn initFromEnc(ctx: AESEncryptCtx(AES)) Self {
return Self{
.key_schedule = ctx.key_schedule.invert(),
};
}
/// Create a new decryption context with the given key.
pub fn init(key: [AES.key_bits / 8]u8) Self {
const enc_ctx = AESEncryptCtx(AES).init(key);
return initFromEnc(enc_ctx);
}
/// Decrypt a single block.
pub fn decrypt(ctx: Self, dst: *[16]u8, src: *const [16]u8) void {
const inv_round_keys = ctx.key_schedule.round_keys;
var t = Block.fromBytes(src).xorBlocks(inv_round_keys[0]);
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
t = t.decrypt(inv_round_keys[i]);
}
t = t.decryptLast(inv_round_keys[rounds]);
dst.* = t.toBytes();
}
/// Decrypt multiple blocks, possibly leveraging parallelization.
pub fn decryptWide(ctx: Self, comptime count: usize, dst: *[16 * count]u8, src: *const [16 * count]u8) void {
var i: usize = 0;
while (i < count) : (i += 1) {
ctx.decrypt(dst[16 * i .. 16 * i + 16][0..16], src[16 * i .. 16 * i + 16][0..16]);
}
}
};
}
/// AES-128 with the standard key schedule.
pub const AES128 = struct {
pub const key_bits: usize = 128;
pub const rounds = ((key_bits - 64) / 32 + 8);
pub const block = Block;
/// Create a new context for encryption.
pub fn initEnc(key: [key_bits / 8]u8) AESEncryptCtx(AES128) {
return AESEncryptCtx(AES128).init(key);
}
/// Create a new context for decryption.
pub fn initDec(key: [key_bits / 8]u8) AESDecryptCtx(AES128) {
return AESDecryptCtx(AES128).init(key);
}
};
/// AES-256 with the standard key schedule.
pub const AES256 = struct {
pub const key_bits: usize = 256;
pub const rounds = ((key_bits - 64) / 32 + 8);
pub const block = Block;
/// Create a new context for encryption.
pub fn initEnc(key: [key_bits / 8]u8) AESEncryptCtx(AES256) {
return AESEncryptCtx(AES256).init(key);
}
/// Create a new context for decryption.
pub fn initDec(key: [key_bits / 8]u8) AESDecryptCtx(AES256) {
return AESDecryptCtx(AES256).init(key);
}
};
// constants
const powx = [16]u8{
0x01,
0x02,
0x04,
0x08,
0x10,
0x20,
0x40,
0x80,
0x1b,
0x36,
0x6c,
0xd8,
0xab,
0x4d,
0x9a,
0x2f,
};
const sbox0 align(64) = [256]u8{
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
};
const sbox1 align(64) = [256]u8{
0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d,
};
const te0 align(64) = [256]u32{
0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d, 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554,
0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d, 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a,
0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87, 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea, 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b,
0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a, 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f,
0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108, 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e, 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5,
0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d, 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f,
0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e, 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce, 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497,
0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c, 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed,
0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b, 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16, 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81, 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3,
0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a, 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163, 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d,
0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f, 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739,
0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47, 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f, 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883,
0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c, 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76,
0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e, 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6, 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b,
0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7, 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0,
0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25, 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72, 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651,
0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21, 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa, 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0, 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9,
0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133, 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7,
0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920, 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17, 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8,
0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11, 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a,
};
const te1 align(64) = [256]u32{
0xa5c66363, 0x84f87c7c, 0x99ee7777, 0x8df67b7b, 0x0dfff2f2, 0xbdd66b6b, 0xb1de6f6f, 0x5491c5c5,
0x50603030, 0x03020101, 0xa9ce6767, 0x7d562b2b, 0x19e7fefe, 0x62b5d7d7, 0xe64dabab, 0x9aec7676,
0x458fcaca, 0x9d1f8282, 0x4089c9c9, 0x87fa7d7d, 0x15effafa, 0xebb25959, 0xc98e4747, 0x0bfbf0f0,
0xec41adad, 0x67b3d4d4, 0xfd5fa2a2, 0xea45afaf, 0xbf239c9c, 0xf753a4a4, 0x96e47272, 0x5b9bc0c0,
0xc275b7b7, 0x1ce1fdfd, 0xae3d9393, 0x6a4c2626, 0x5a6c3636, 0x417e3f3f, 0x02f5f7f7, 0x4f83cccc,
0x5c683434, 0xf451a5a5, 0x34d1e5e5, 0x08f9f1f1, 0x93e27171, 0x73abd8d8, 0x53623131, 0x3f2a1515,
0x0c080404, 0x5295c7c7, 0x65462323, 0x5e9dc3c3, 0x28301818, 0xa1379696, 0x0f0a0505, 0xb52f9a9a,
0x090e0707, 0x36241212, 0x9b1b8080, 0x3ddfe2e2, 0x26cdebeb, 0x694e2727, 0xcd7fb2b2, 0x9fea7575,
0x1b120909, 0x9e1d8383, 0x74582c2c, 0x2e341a1a, 0x2d361b1b, 0xb2dc6e6e, 0xeeb45a5a, 0xfb5ba0a0,
0xf6a45252, 0x4d763b3b, 0x61b7d6d6, 0xce7db3b3, 0x7b522929, 0x3edde3e3, 0x715e2f2f, 0x97138484,
0xf5a65353, 0x68b9d1d1, 0x00000000, 0x2cc1eded, 0x60402020, 0x1fe3fcfc, 0xc879b1b1, 0xedb65b5b,
0xbed46a6a, 0x468dcbcb, 0xd967bebe, 0x4b723939, 0xde944a4a, 0xd4984c4c, 0xe8b05858, 0x4a85cfcf,
0x6bbbd0d0, 0x2ac5efef, 0xe54faaaa, 0x16edfbfb, 0xc5864343, 0xd79a4d4d, 0x55663333, 0x94118585,
0xcf8a4545, 0x10e9f9f9, 0x06040202, 0x81fe7f7f, 0xf0a05050, 0x44783c3c, 0xba259f9f, 0xe34ba8a8,
0xf3a25151, 0xfe5da3a3, 0xc0804040, 0x8a058f8f, 0xad3f9292, 0xbc219d9d, 0x48703838, 0x04f1f5f5,
0xdf63bcbc, 0xc177b6b6, 0x75afdada, 0x63422121, 0x30201010, 0x1ae5ffff, 0x0efdf3f3, 0x6dbfd2d2,
0x4c81cdcd, 0x14180c0c, 0x35261313, 0x2fc3ecec, 0xe1be5f5f, 0xa2359797, 0xcc884444, 0x392e1717,
0x5793c4c4, 0xf255a7a7, 0x82fc7e7e, 0x477a3d3d, 0xacc86464, 0xe7ba5d5d, 0x2b321919, 0x95e67373,
0xa0c06060, 0x98198181, 0xd19e4f4f, 0x7fa3dcdc, 0x66442222, 0x7e542a2a, 0xab3b9090, 0x830b8888,
0xca8c4646, 0x29c7eeee, 0xd36bb8b8, 0x3c281414, 0x79a7dede, 0xe2bc5e5e, 0x1d160b0b, 0x76addbdb,
0x3bdbe0e0, 0x56643232, 0x4e743a3a, 0x1e140a0a, 0xdb924949, 0x0a0c0606, 0x6c482424, 0xe4b85c5c,
0x5d9fc2c2, 0x6ebdd3d3, 0xef43acac, 0xa6c46262, 0xa8399191, 0xa4319595, 0x37d3e4e4, 0x8bf27979,
0x32d5e7e7, 0x438bc8c8, 0x596e3737, 0xb7da6d6d, 0x8c018d8d, 0x64b1d5d5, 0xd29c4e4e, 0xe049a9a9,
0xb4d86c6c, 0xfaac5656, 0x07f3f4f4, 0x25cfeaea, 0xafca6565, 0x8ef47a7a, 0xe947aeae, 0x18100808,
0xd56fbaba, 0x88f07878, 0x6f4a2525, 0x725c2e2e, 0x24381c1c, 0xf157a6a6, 0xc773b4b4, 0x5197c6c6,
0x23cbe8e8, 0x7ca1dddd, 0x9ce87474, 0x213e1f1f, 0xdd964b4b, 0xdc61bdbd, 0x860d8b8b, 0x850f8a8a,
0x90e07070, 0x427c3e3e, 0xc471b5b5, 0xaacc6666, 0xd8904848, 0x05060303, 0x01f7f6f6, 0x121c0e0e,
0xa3c26161, 0x5f6a3535, 0xf9ae5757, 0xd069b9b9, 0x91178686, 0x5899c1c1, 0x273a1d1d, 0xb9279e9e,
0x38d9e1e1, 0x13ebf8f8, 0xb32b9898, 0x33221111, 0xbbd26969, 0x70a9d9d9, 0x89078e8e, 0xa7339494,
0xb62d9b9b, 0x223c1e1e, 0x92158787, 0x20c9e9e9, 0x4987cece, 0xffaa5555, 0x78502828, 0x7aa5dfdf,
0x8f038c8c, 0xf859a1a1, 0x80098989, 0x171a0d0d, 0xda65bfbf, 0x31d7e6e6, 0xc6844242, 0xb8d06868,
0xc3824141, 0xb0299999, 0x775a2d2d, 0x111e0f0f, 0xcb7bb0b0, 0xfca85454, 0xd66dbbbb, 0x3a2c1616,
};
const te2 align(64) = [256]u32{
0x63a5c663, 0x7c84f87c, 0x7799ee77, 0x7b8df67b, 0xf20dfff2, 0x6bbdd66b, 0x6fb1de6f, 0xc55491c5,
0x30506030, 0x01030201, 0x67a9ce67, 0x2b7d562b, 0xfe19e7fe, 0xd762b5d7, 0xabe64dab, 0x769aec76,
0xca458fca, 0x829d1f82, 0xc94089c9, 0x7d87fa7d, 0xfa15effa, 0x59ebb259, 0x47c98e47, 0xf00bfbf0,
0xadec41ad, 0xd467b3d4, 0xa2fd5fa2, 0xafea45af, 0x9cbf239c, 0xa4f753a4, 0x7296e472, 0xc05b9bc0,
0xb7c275b7, 0xfd1ce1fd, 0x93ae3d93, 0x266a4c26, 0x365a6c36, 0x3f417e3f, 0xf702f5f7, 0xcc4f83cc,
0x345c6834, 0xa5f451a5, 0xe534d1e5, 0xf108f9f1, 0x7193e271, 0xd873abd8, 0x31536231, 0x153f2a15,
0x040c0804, 0xc75295c7, 0x23654623, 0xc35e9dc3, 0x18283018, 0x96a13796, 0x050f0a05, 0x9ab52f9a,
0x07090e07, 0x12362412, 0x809b1b80, 0xe23ddfe2, 0xeb26cdeb, 0x27694e27, 0xb2cd7fb2, 0x759fea75,
0x091b1209, 0x839e1d83, 0x2c74582c, 0x1a2e341a, 0x1b2d361b, 0x6eb2dc6e, 0x5aeeb45a, 0xa0fb5ba0,
0x52f6a452, 0x3b4d763b, 0xd661b7d6, 0xb3ce7db3, 0x297b5229, 0xe33edde3, 0x2f715e2f, 0x84971384,
0x53f5a653, 0xd168b9d1, 0x00000000, 0xed2cc1ed, 0x20604020, 0xfc1fe3fc, 0xb1c879b1, 0x5bedb65b,
0x6abed46a, 0xcb468dcb, 0xbed967be, 0x394b7239, 0x4ade944a, 0x4cd4984c, 0x58e8b058, 0xcf4a85cf,
0xd06bbbd0, 0xef2ac5ef, 0xaae54faa, 0xfb16edfb, 0x43c58643, 0x4dd79a4d, 0x33556633, 0x85941185,
0x45cf8a45, 0xf910e9f9, 0x02060402, 0x7f81fe7f, 0x50f0a050, 0x3c44783c, 0x9fba259f, 0xa8e34ba8,
0x51f3a251, 0xa3fe5da3, 0x40c08040, 0x8f8a058f, 0x92ad3f92, 0x9dbc219d, 0x38487038, 0xf504f1f5,
0xbcdf63bc, 0xb6c177b6, 0xda75afda, 0x21634221, 0x10302010, 0xff1ae5ff, 0xf30efdf3, 0xd26dbfd2,
0xcd4c81cd, 0x0c14180c, 0x13352613, 0xec2fc3ec, 0x5fe1be5f, 0x97a23597, 0x44cc8844, 0x17392e17,
0xc45793c4, 0xa7f255a7, 0x7e82fc7e, 0x3d477a3d, 0x64acc864, 0x5de7ba5d, 0x192b3219, 0x7395e673,
0x60a0c060, 0x81981981, 0x4fd19e4f, 0xdc7fa3dc, 0x22664422, 0x2a7e542a, 0x90ab3b90, 0x88830b88,
0x46ca8c46, 0xee29c7ee, 0xb8d36bb8, 0x143c2814, 0xde79a7de, 0x5ee2bc5e, 0x0b1d160b, 0xdb76addb,
0xe03bdbe0, 0x32566432, 0x3a4e743a, 0x0a1e140a, 0x49db9249, 0x060a0c06, 0x246c4824, 0x5ce4b85c,
0xc25d9fc2, 0xd36ebdd3, 0xacef43ac, 0x62a6c462, 0x91a83991, 0x95a43195, 0xe437d3e4, 0x798bf279,
0xe732d5e7, 0xc8438bc8, 0x37596e37, 0x6db7da6d, 0x8d8c018d, 0xd564b1d5, 0x4ed29c4e, 0xa9e049a9,
0x6cb4d86c, 0x56faac56, 0xf407f3f4, 0xea25cfea, 0x65afca65, 0x7a8ef47a, 0xaee947ae, 0x08181008,
0xbad56fba, 0x7888f078, 0x256f4a25, 0x2e725c2e, 0x1c24381c, 0xa6f157a6, 0xb4c773b4, 0xc65197c6,
0xe823cbe8, 0xdd7ca1dd, 0x749ce874, 0x1f213e1f, 0x4bdd964b, 0xbddc61bd, 0x8b860d8b, 0x8a850f8a,
0x7090e070, 0x3e427c3e, 0xb5c471b5, 0x66aacc66, 0x48d89048, 0x03050603, 0xf601f7f6, 0x0e121c0e,
0x61a3c261, 0x355f6a35, 0x57f9ae57, 0xb9d069b9, 0x86911786, 0xc15899c1, 0x1d273a1d, 0x9eb9279e,
0xe138d9e1, 0xf813ebf8, 0x98b32b98, 0x11332211, 0x69bbd269, 0xd970a9d9, 0x8e89078e, 0x94a73394,
0x9bb62d9b, 0x1e223c1e, 0x87921587, 0xe920c9e9, 0xce4987ce, 0x55ffaa55, 0x28785028, 0xdf7aa5df,
0x8c8f038c, 0xa1f859a1, 0x89800989, 0x0d171a0d, 0xbfda65bf, 0xe631d7e6, 0x42c68442, 0x68b8d068,
0x41c38241, 0x99b02999, 0x2d775a2d, 0x0f111e0f, 0xb0cb7bb0, 0x54fca854, 0xbbd66dbb, 0x163a2c16,
};
const te3 align(64) = [256]u32{
0x6363a5c6, 0x7c7c84f8, 0x777799ee, 0x7b7b8df6, 0xf2f20dff, 0x6b6bbdd6, 0x6f6fb1de, 0xc5c55491,
0x30305060, 0x01010302, 0x6767a9ce, 0x2b2b7d56, 0xfefe19e7, 0xd7d762b5, 0xababe64d, 0x76769aec,
0xcaca458f, 0x82829d1f, 0xc9c94089, 0x7d7d87fa, 0xfafa15ef, 0x5959ebb2, 0x4747c98e, 0xf0f00bfb,
0xadadec41, 0xd4d467b3, 0xa2a2fd5f, 0xafafea45, 0x9c9cbf23, 0xa4a4f753, 0x727296e4, 0xc0c05b9b,
0xb7b7c275, 0xfdfd1ce1, 0x9393ae3d, 0x26266a4c, 0x36365a6c, 0x3f3f417e, 0xf7f702f5, 0xcccc4f83,
0x34345c68, 0xa5a5f451, 0xe5e534d1, 0xf1f108f9, 0x717193e2, 0xd8d873ab, 0x31315362, 0x15153f2a,
0x04040c08, 0xc7c75295, 0x23236546, 0xc3c35e9d, 0x18182830, 0x9696a137, 0x05050f0a, 0x9a9ab52f,
0x0707090e, 0x12123624, 0x80809b1b, 0xe2e23ddf, 0xebeb26cd, 0x2727694e, 0xb2b2cd7f, 0x75759fea,
0x09091b12, 0x83839e1d, 0x2c2c7458, 0x1a1a2e34, 0x1b1b2d36, 0x6e6eb2dc, 0x5a5aeeb4, 0xa0a0fb5b,
0x5252f6a4, 0x3b3b4d76, 0xd6d661b7, 0xb3b3ce7d, 0x29297b52, 0xe3e33edd, 0x2f2f715e, 0x84849713,
0x5353f5a6, 0xd1d168b9, 0x00000000, 0xeded2cc1, 0x20206040, 0xfcfc1fe3, 0xb1b1c879, 0x5b5bedb6,
0x6a6abed4, 0xcbcb468d, 0xbebed967, 0x39394b72, 0x4a4ade94, 0x4c4cd498, 0x5858e8b0, 0xcfcf4a85,
0xd0d06bbb, 0xefef2ac5, 0xaaaae54f, 0xfbfb16ed, 0x4343c586, 0x4d4dd79a, 0x33335566, 0x85859411,
0x4545cf8a, 0xf9f910e9, 0x02020604, 0x7f7f81fe, 0x5050f0a0, 0x3c3c4478, 0x9f9fba25, 0xa8a8e34b,
0x5151f3a2, 0xa3a3fe5d, 0x4040c080, 0x8f8f8a05, 0x9292ad3f, 0x9d9dbc21, 0x38384870, 0xf5f504f1,
0xbcbcdf63, 0xb6b6c177, 0xdada75af, 0x21216342, 0x10103020, 0xffff1ae5, 0xf3f30efd, 0xd2d26dbf,
0xcdcd4c81, 0x0c0c1418, 0x13133526, 0xecec2fc3, 0x5f5fe1be, 0x9797a235, 0x4444cc88, 0x1717392e,
0xc4c45793, 0xa7a7f255, 0x7e7e82fc, 0x3d3d477a, 0x6464acc8, 0x5d5de7ba, 0x19192b32, 0x737395e6,
0x6060a0c0, 0x81819819, 0x4f4fd19e, 0xdcdc7fa3, 0x22226644, 0x2a2a7e54, 0x9090ab3b, 0x8888830b,
0x4646ca8c, 0xeeee29c7, 0xb8b8d36b, 0x14143c28, 0xdede79a7, 0x5e5ee2bc, 0x0b0b1d16, 0xdbdb76ad,
0xe0e03bdb, 0x32325664, 0x3a3a4e74, 0x0a0a1e14, 0x4949db92, 0x06060a0c, 0x24246c48, 0x5c5ce4b8,
0xc2c25d9f, 0xd3d36ebd, 0xacacef43, 0x6262a6c4, 0x9191a839, 0x9595a431, 0xe4e437d3, 0x79798bf2,
0xe7e732d5, 0xc8c8438b, 0x3737596e, 0x6d6db7da, 0x8d8d8c01, 0xd5d564b1, 0x4e4ed29c, 0xa9a9e049,
0x6c6cb4d8, 0x5656faac, 0xf4f407f3, 0xeaea25cf, 0x6565afca, 0x7a7a8ef4, 0xaeaee947, 0x08081810,
0xbabad56f, 0x787888f0, 0x25256f4a, 0x2e2e725c, 0x1c1c2438, 0xa6a6f157, 0xb4b4c773, 0xc6c65197,
0xe8e823cb, 0xdddd7ca1, 0x74749ce8, 0x1f1f213e, 0x4b4bdd96, 0xbdbddc61, 0x8b8b860d, 0x8a8a850f,
0x707090e0, 0x3e3e427c, 0xb5b5c471, 0x6666aacc, 0x4848d890, 0x03030506, 0xf6f601f7, 0x0e0e121c,
0x6161a3c2, 0x35355f6a, 0x5757f9ae, 0xb9b9d069, 0x86869117, 0xc1c15899, 0x1d1d273a, 0x9e9eb927,
0xe1e138d9, 0xf8f813eb, 0x9898b32b, 0x11113322, 0x6969bbd2, 0xd9d970a9, 0x8e8e8907, 0x9494a733,
0x9b9bb62d, 0x1e1e223c, 0x87879215, 0xe9e920c9, 0xcece4987, 0x5555ffaa, 0x28287850, 0xdfdf7aa5,
0x8c8c8f03, 0xa1a1f859, 0x89898009, 0x0d0d171a, 0xbfbfda65, 0xe6e631d7, 0x4242c684, 0x6868b8d0,
0x4141c382, 0x9999b029, 0x2d2d775a, 0x0f0f111e, 0xb0b0cb7b, 0x5454fca8, 0xbbbbd66d, 0x16163a2c,
};
const td0 align(64) = [256]u32{
0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96, 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393,
0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25, 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f,
0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1, 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6,
0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da, 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844,
0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd, 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4,
0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45, 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94,
0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7, 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a,
0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5, 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c,
0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1, 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a,
0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75, 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051,
0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46, 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff,
0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77, 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb,
0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000, 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927, 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a,
0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e, 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16,
0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d, 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8,
0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd, 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34,
0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163, 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120,
0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d, 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0,
0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422, 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef,
0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36, 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4,
0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662, 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5,
0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3, 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b,
0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8, 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6,
0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6, 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0,
0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815, 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df, 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f,
0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e, 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713,
0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89, 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c,
0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf, 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86,
0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f, 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541,
0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190, 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742,
};
const td1 align(64) = [256]u32{
0x5051f4a7, 0x537e4165, 0xc31a17a4, 0x963a275e, 0xcb3bab6b, 0xf11f9d45, 0xabacfa58, 0x934be303,
0x552030fa, 0xf6ad766d, 0x9188cc76, 0x25f5024c, 0xfc4fe5d7, 0xd7c52acb, 0x80263544, 0x8fb562a3,
0x49deb15a, 0x6725ba1b, 0x9845ea0e, 0xe15dfec0, 0x02c32f75, 0x12814cf0, 0xa38d4697, 0xc66bd3f9,
0xe7038f5f, 0x9515929c, 0xebbf6d7a, 0xda955259, 0x2dd4be83, 0xd3587421, 0x2949e069, 0x448ec9c8,
0x6a75c289, 0x78f48e79, 0x6b99583e, 0xdd27b971, 0xb6bee14f, 0x17f088ad, 0x66c920ac, 0xb47dce3a,
0x1863df4a, 0x82e51a31, 0x60975133, 0x4562537f, 0xe0b16477, 0x84bb6bae, 0x1cfe81a0, 0x94f9082b,
0x58704868, 0x198f45fd, 0x8794de6c, 0xb7527bf8, 0x23ab73d3, 0xe2724b02, 0x57e31f8f, 0x2a6655ab,
0x07b2eb28, 0x032fb5c2, 0x9a86c57b, 0xa5d33708, 0xf2302887, 0xb223bfa5, 0xba02036a, 0x5ced1682,
0x2b8acf1c, 0x92a779b4, 0xf0f307f2, 0xa14e69e2, 0xcd65daf4, 0xd50605be, 0x1fd13462, 0x8ac4a6fe,
0x9d342e53, 0xa0a2f355, 0x32058ae1, 0x75a4f6eb, 0x390b83ec, 0xaa4060ef, 0x065e719f, 0x51bd6e10,
0xf93e218a, 0x3d96dd06, 0xaedd3e05, 0x464de6bd, 0xb591548d, 0x0571c45d, 0x6f0406d4, 0xff605015,
0x241998fb, 0x97d6bde9, 0xcc894043, 0x7767d99e, 0xbdb0e842, 0x8807898b, 0x38e7195b, 0xdb79c8ee,
0x47a17c0a, 0xe97c420f, 0xc9f8841e, 0x00000000, 0x83098086, 0x48322bed, 0xac1e1170, 0x4e6c5a72,
0xfbfd0eff, 0x560f8538, 0x1e3daed5, 0x27362d39, 0x640a0fd9, 0x21685ca6, 0xd19b5b54, 0x3a24362e,
0xb10c0a67, 0x0f9357e7, 0xd2b4ee96, 0x9e1b9b91, 0x4f80c0c5, 0xa261dc20, 0x695a774b, 0x161c121a,
0x0ae293ba, 0xe5c0a02a, 0x433c22e0, 0x1d121b17, 0x0b0e090d, 0xadf28bc7, 0xb92db6a8, 0xc8141ea9,
0x8557f119, 0x4caf7507, 0xbbee99dd, 0xfda37f60, 0x9ff70126, 0xbc5c72f5, 0xc544663b, 0x345bfb7e,
0x768b4329, 0xdccb23c6, 0x68b6edfc, 0x63b8e4f1, 0xcad731dc, 0x10426385, 0x40139722, 0x2084c611,
0x7d854a24, 0xf8d2bb3d, 0x11aef932, 0x6dc729a1, 0x4b1d9e2f, 0xf3dcb230, 0xec0d8652, 0xd077c1e3,
0x6c2bb316, 0x99a970b9, 0xfa119448, 0x2247e964, 0xc4a8fc8c, 0x1aa0f03f, 0xd8567d2c, 0xef223390,
0xc787494e, 0xc1d938d1, 0xfe8ccaa2, 0x3698d40b, 0xcfa6f581, 0x28a57ade, 0x26dab78e, 0xa43fadbf,
0xe42c3a9d, 0x0d507892, 0x9b6a5fcc, 0x62547e46, 0xc2f68d13, 0xe890d8b8, 0x5e2e39f7, 0xf582c3af,
0xbe9f5d80, 0x7c69d093, 0xa96fd52d, 0xb3cf2512, 0x3bc8ac99, 0xa710187d, 0x6ee89c63, 0x7bdb3bbb,
0x09cd2678, 0xf46e5918, 0x01ec9ab7, 0xa8834f9a, 0x65e6956e, 0x7eaaffe6, 0x0821bccf, 0xe6ef15e8,
0xd9bae79b, 0xce4a6f36, 0xd4ea9f09, 0xd629b07c, 0xaf31a4b2, 0x312a3f23, 0x30c6a594, 0xc035a266,
0x37744ebc, 0xa6fc82ca, 0xb0e090d0, 0x1533a7d8, 0x4af10498, 0xf741ecda, 0x0e7fcd50, 0x2f1791f6,
0x8d764dd6, 0x4d43efb0, 0x54ccaa4d, 0xdfe49604, 0xe39ed1b5, 0x1b4c6a88, 0xb8c12c1f, 0x7f466551,
0x049d5eea, 0x5d018c35, 0x73fa8774, 0x2efb0b41, 0x5ab3671d, 0x5292dbd2, 0x33e91056, 0x136dd647,
0x8c9ad761, 0x7a37a10c, 0x8e59f814, 0x89eb133c, 0xeecea927, 0x35b761c9, 0xede11ce5, 0x3c7a47b1,
0x599cd2df, 0x3f55f273, 0x791814ce, 0xbf73c737, 0xea53f7cd, 0x5b5ffdaa, 0x14df3d6f, 0x867844db,
0x81caaff3, 0x3eb968c4, 0x2c382434, 0x5fc2a340, 0x72161dc3, 0x0cbce225, 0x8b283c49, 0x41ff0d95,
0x7139a801, 0xde080cb3, 0x9cd8b4e4, 0x906456c1, 0x617bcb84, 0x70d532b6, 0x74486c5c, 0x42d0b857,
};
const td2 align(64) = [256]u32{
0xa75051f4, 0x65537e41, 0xa4c31a17, 0x5e963a27, 0x6bcb3bab, 0x45f11f9d, 0x58abacfa, 0x03934be3,
0xfa552030, 0x6df6ad76, 0x769188cc, 0x4c25f502, 0xd7fc4fe5, 0xcbd7c52a, 0x44802635, 0xa38fb562,
0x5a49deb1, 0x1b6725ba, 0x0e9845ea, 0xc0e15dfe, 0x7502c32f, 0xf012814c, 0x97a38d46, 0xf9c66bd3,
0x5fe7038f, 0x9c951592, 0x7aebbf6d, 0x59da9552, 0x832dd4be, 0x21d35874, 0x692949e0, 0xc8448ec9,
0x896a75c2, 0x7978f48e, 0x3e6b9958, 0x71dd27b9, 0x4fb6bee1, 0xad17f088, 0xac66c920, 0x3ab47dce,
0x4a1863df, 0x3182e51a, 0x33609751, 0x7f456253, 0x77e0b164, 0xae84bb6b, 0xa01cfe81, 0x2b94f908,
0x68587048, 0xfd198f45, 0x6c8794de, 0xf8b7527b, 0xd323ab73, 0x02e2724b, 0x8f57e31f, 0xab2a6655,
0x2807b2eb, 0xc2032fb5, 0x7b9a86c5, 0x08a5d337, 0x87f23028, 0xa5b223bf, 0x6aba0203, 0x825ced16,
0x1c2b8acf, 0xb492a779, 0xf2f0f307, 0xe2a14e69, 0xf4cd65da, 0xbed50605, 0x621fd134, 0xfe8ac4a6,
0x539d342e, 0x55a0a2f3, 0xe132058a, 0xeb75a4f6, 0xec390b83, 0xefaa4060, 0x9f065e71, 0x1051bd6e,
0x8af93e21, 0x063d96dd, 0x05aedd3e, 0xbd464de6, 0x8db59154, 0x5d0571c4, 0xd46f0406, 0x15ff6050,
0xfb241998, 0xe997d6bd, 0x43cc8940, 0x9e7767d9, 0x42bdb0e8, 0x8b880789, 0x5b38e719, 0xeedb79c8,
0x0a47a17c, 0x0fe97c42, 0x1ec9f884, 0x00000000, 0x86830980, 0xed48322b, 0x70ac1e11, 0x724e6c5a,
0xfffbfd0e, 0x38560f85, 0xd51e3dae, 0x3927362d, 0xd9640a0f, 0xa621685c, 0x54d19b5b, 0x2e3a2436,
0x67b10c0a, 0xe70f9357, 0x96d2b4ee, 0x919e1b9b, 0xc54f80c0, 0x20a261dc, 0x4b695a77, 0x1a161c12,
0xba0ae293, 0x2ae5c0a0, 0xe0433c22, 0x171d121b, 0x0d0b0e09, 0xc7adf28b, 0xa8b92db6, 0xa9c8141e,
0x198557f1, 0x074caf75, 0xddbbee99, 0x60fda37f, 0x269ff701, 0xf5bc5c72, 0x3bc54466, 0x7e345bfb,
0x29768b43, 0xc6dccb23, 0xfc68b6ed, 0xf163b8e4, 0xdccad731, 0x85104263, 0x22401397, 0x112084c6,
0x247d854a, 0x3df8d2bb, 0x3211aef9, 0xa16dc729, 0x2f4b1d9e, 0x30f3dcb2, 0x52ec0d86, 0xe3d077c1,
0x166c2bb3, 0xb999a970, 0x48fa1194, 0x642247e9, 0x8cc4a8fc, 0x3f1aa0f0, 0x2cd8567d, 0x90ef2233,
0x4ec78749, 0xd1c1d938, 0xa2fe8cca, 0x0b3698d4, 0x81cfa6f5, 0xde28a57a, 0x8e26dab7, 0xbfa43fad,
0x9de42c3a, 0x920d5078, 0xcc9b6a5f, 0x4662547e, 0x13c2f68d, 0xb8e890d8, 0xf75e2e39, 0xaff582c3,
0x80be9f5d, 0x937c69d0, 0x2da96fd5, 0x12b3cf25, 0x993bc8ac, 0x7da71018, 0x636ee89c, 0xbb7bdb3b,
0x7809cd26, 0x18f46e59, 0xb701ec9a, 0x9aa8834f, 0x6e65e695, 0xe67eaaff, 0xcf0821bc, 0xe8e6ef15,
0x9bd9bae7, 0x36ce4a6f, 0x09d4ea9f, 0x7cd629b0, 0xb2af31a4, 0x23312a3f, 0x9430c6a5, 0x66c035a2,
0xbc37744e, 0xcaa6fc82, 0xd0b0e090, 0xd81533a7, 0x984af104, 0xdaf741ec, 0x500e7fcd, 0xf62f1791,
0xd68d764d, 0xb04d43ef, 0x4d54ccaa, 0x04dfe496, 0xb5e39ed1, 0x881b4c6a, 0x1fb8c12c, 0x517f4665,
0xea049d5e, 0x355d018c, 0x7473fa87, 0x412efb0b, 0x1d5ab367, 0xd25292db, 0x5633e910, 0x47136dd6,
0x618c9ad7, 0x0c7a37a1, 0x148e59f8, 0x3c89eb13, 0x27eecea9, 0xc935b761, 0xe5ede11c, 0xb13c7a47,
0xdf599cd2, 0x733f55f2, 0xce791814, 0x37bf73c7, 0xcdea53f7, 0xaa5b5ffd, 0x6f14df3d, 0xdb867844,
0xf381caaf, 0xc43eb968, 0x342c3824, 0x405fc2a3, 0xc372161d, 0x250cbce2, 0x498b283c, 0x9541ff0d,
0x017139a8, 0xb3de080c, 0xe49cd8b4, 0xc1906456, 0x84617bcb, 0xb670d532, 0x5c74486c, 0x5742d0b8,
};
const td3 align(64) = [256]u32{
0xf4a75051, 0x4165537e, 0x17a4c31a, 0x275e963a, 0xab6bcb3b, 0x9d45f11f, 0xfa58abac, 0xe303934b,
0x30fa5520, 0x766df6ad, 0xcc769188, 0x024c25f5, 0xe5d7fc4f, 0x2acbd7c5, 0x35448026, 0x62a38fb5,
0xb15a49de, 0xba1b6725, 0xea0e9845, 0xfec0e15d, 0x2f7502c3, 0x4cf01281, 0x4697a38d, 0xd3f9c66b,
0x8f5fe703, 0x929c9515, 0x6d7aebbf, 0x5259da95, 0xbe832dd4, 0x7421d358, 0xe0692949, 0xc9c8448e,
0xc2896a75, 0x8e7978f4, 0x583e6b99, 0xb971dd27, 0xe14fb6be, 0x88ad17f0, 0x20ac66c9, 0xce3ab47d,
0xdf4a1863, 0x1a3182e5, 0x51336097, 0x537f4562, 0x6477e0b1, 0x6bae84bb, 0x81a01cfe, 0x082b94f9,
0x48685870, 0x45fd198f, 0xde6c8794, 0x7bf8b752, 0x73d323ab, 0x4b02e272, 0x1f8f57e3, 0x55ab2a66,
0xeb2807b2, 0xb5c2032f, 0xc57b9a86, 0x3708a5d3, 0x2887f230, 0xbfa5b223, 0x036aba02, 0x16825ced,
0xcf1c2b8a, 0x79b492a7, 0x07f2f0f3, 0x69e2a14e, 0xdaf4cd65, 0x05bed506, 0x34621fd1, 0xa6fe8ac4,
0x2e539d34, 0xf355a0a2, 0x8ae13205, 0xf6eb75a4, 0x83ec390b, 0x60efaa40, 0x719f065e, 0x6e1051bd,
0x218af93e, 0xdd063d96, 0x3e05aedd, 0xe6bd464d, 0x548db591, 0xc45d0571, 0x06d46f04, 0x5015ff60,
0x98fb2419, 0xbde997d6, 0x4043cc89, 0xd99e7767, 0xe842bdb0, 0x898b8807, 0x195b38e7, 0xc8eedb79,
0x7c0a47a1, 0x420fe97c, 0x841ec9f8, 0x00000000, 0x80868309, 0x2bed4832, 0x1170ac1e, 0x5a724e6c,
0x0efffbfd, 0x8538560f, 0xaed51e3d, 0x2d392736, 0x0fd9640a, 0x5ca62168, 0x5b54d19b, 0x362e3a24,
0x0a67b10c, 0x57e70f93, 0xee96d2b4, 0x9b919e1b, 0xc0c54f80, 0xdc20a261, 0x774b695a, 0x121a161c,
0x93ba0ae2, 0xa02ae5c0, 0x22e0433c, 0x1b171d12, 0x090d0b0e, 0x8bc7adf2, 0xb6a8b92d, 0x1ea9c814,
0xf1198557, 0x75074caf, 0x99ddbbee, 0x7f60fda3, 0x01269ff7, 0x72f5bc5c, 0x663bc544, 0xfb7e345b,
0x4329768b, 0x23c6dccb, 0xedfc68b6, 0xe4f163b8, 0x31dccad7, 0x63851042, 0x97224013, 0xc6112084,
0x4a247d85, 0xbb3df8d2, 0xf93211ae, 0x29a16dc7, 0x9e2f4b1d, 0xb230f3dc, 0x8652ec0d, 0xc1e3d077,
0xb3166c2b, 0x70b999a9, 0x9448fa11, 0xe9642247, 0xfc8cc4a8, 0xf03f1aa0, 0x7d2cd856, 0x3390ef22,
0x494ec787, 0x38d1c1d9, 0xcaa2fe8c, 0xd40b3698, 0xf581cfa6, 0x7ade28a5, 0xb78e26da, 0xadbfa43f,
0x3a9de42c, 0x78920d50, 0x5fcc9b6a, 0x7e466254, 0x8d13c2f6, 0xd8b8e890, 0x39f75e2e, 0xc3aff582,
0x5d80be9f, 0xd0937c69, 0xd52da96f, 0x2512b3cf, 0xac993bc8, 0x187da710, 0x9c636ee8, 0x3bbb7bdb,
0x267809cd, 0x5918f46e, 0x9ab701ec, 0x4f9aa883, 0x956e65e6, 0xffe67eaa, 0xbccf0821, 0x15e8e6ef,
0xe79bd9ba, 0x6f36ce4a, 0x9f09d4ea, 0xb07cd629, 0xa4b2af31, 0x3f23312a, 0xa59430c6, 0xa266c035,
0x4ebc3774, 0x82caa6fc, 0x90d0b0e0, 0xa7d81533, 0x04984af1, 0xecdaf741, 0xcd500e7f, 0x91f62f17,
0x4dd68d76, 0xefb04d43, 0xaa4d54cc, 0x9604dfe4, 0xd1b5e39e, 0x6a881b4c, 0x2c1fb8c1, 0x65517f46,
0x5eea049d, 0x8c355d01, 0x877473fa, 0x0b412efb, 0x671d5ab3, 0xdbd25292, 0x105633e9, 0xd647136d,
0xd7618c9a, 0xa10c7a37, 0xf8148e59, 0x133c89eb, 0xa927eece, 0x61c935b7, 0x1ce5ede1, 0x47b13c7a,
0xd2df599c, 0xf2733f55, 0x14ce7918, 0xc737bf73, 0xf7cdea53, 0xfdaa5b5f, 0x3d6f14df, 0x44db8678,
0xaff381ca, 0x68c43eb9, 0x24342c38, 0xa3405fc2, 0x1dc37216, 0xe2250cbc, 0x3c498b28, 0x0d9541ff,
0xa8017139, 0x0cb3de08, 0xb4e49cd8, 0x56c19064, 0xcb84617b, 0x32b670d5, 0x6c5c7448, 0xb85742d0,
};
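// --- Illustrative usage sketch (not part of the original file) ---
// Round trip through the *Wide entry points of the software contexts above.
// The 8-block width and the key value are arbitrary choices; since this is the
// portable implementation, the sketch has no CPU feature requirements.
test "AES-256 wide round trip (sketch)" {
    const key = [_]u8{0x42} ** 32;
    const enc = AES256.initEnc(key);
    const dec = AES256.initDec(key);

    const plain = [_]u8{0} ** (8 * 16);
    var cipher: [8 * 16]u8 = undefined;
    var recovered: [8 * 16]u8 = undefined;

    enc.encryptWide(8, &cipher, &plain);
    dec.decryptWide(8, &recovered, &cipher);
    std.debug.assert(std.mem.eql(u8, &plain, &recovered));
}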
View File
@ -149,6 +149,8 @@ const aeads = [_]Crypto{
Crypto{ .ty = crypto.aead.ChaCha20Poly1305, .name = "chacha20Poly1305" }, Crypto{ .ty = crypto.aead.ChaCha20Poly1305, .name = "chacha20Poly1305" },
Crypto{ .ty = crypto.aead.XChaCha20Poly1305, .name = "xchacha20Poly1305" }, Crypto{ .ty = crypto.aead.XChaCha20Poly1305, .name = "xchacha20Poly1305" },
Crypto{ .ty = crypto.aead.Gimli, .name = "gimli-aead" }, Crypto{ .ty = crypto.aead.Gimli, .name = "gimli-aead" },
Crypto{ .ty = crypto.aead.AEGIS128L, .name = "aegis-128l" },
Crypto{ .ty = crypto.aead.AEGIS256, .name = "aegis-256" },
}; };
pub fn benchmarkAead(comptime Aead: anytype, comptime bytes: comptime_int) !u64 { pub fn benchmarkAead(comptime Aead: anytype, comptime bytes: comptime_int) !u64 {
@ -168,7 +170,7 @@ pub fn benchmarkAead(comptime Aead: anytype, comptime bytes: comptime_int) !u64
const start = timer.lap(); const start = timer.lap();
while (offset < bytes) : (offset += in.len) { while (offset < bytes) : (offset += in.len) {
Aead.encrypt(in[0..], tag[0..], in[0..], &[_]u8{}, nonce, key); Aead.encrypt(in[0..], tag[0..], in[0..], &[_]u8{}, nonce, key);
Aead.decrypt(in[0..], in[0..], tag, &[_]u8{}, nonce, key) catch unreachable; try Aead.decrypt(in[0..], in[0..], tag, &[_]u8{}, nonce, key);
} }
mem.doNotOptimizeAway(&in); mem.doNotOptimizeAway(&in);
const end = timer.read(); const end = timer.read();
@ -179,6 +181,64 @@ pub fn benchmarkAead(comptime Aead: anytype, comptime bytes: comptime_int) !u64
return throughput; return throughput;
} }
const aes = [_]Crypto{
Crypto{ .ty = crypto.core.aes.AES128, .name = "aes128-single" },
Crypto{ .ty = crypto.core.aes.AES256, .name = "aes256-single" },
};
pub fn benchmarkAES(comptime AES: anytype, comptime count: comptime_int) !u64 {
var key: [AES.key_bits / 8]u8 = undefined;
prng.random.bytes(key[0..]);
const ctx = AES.initEnc(key);
var in = [_]u8{0} ** 16;
var timer = try Timer.start();
const start = timer.lap();
{
var i: usize = 0;
while (i < count) : (i += 1) {
ctx.encrypt(&in, &in);
}
}
mem.doNotOptimizeAway(&in);
const end = timer.read();
const elapsed_s = @intToFloat(f64, end - start) / time.ns_per_s;
const throughput = @floatToInt(u64, count / elapsed_s);
return throughput;
}
const aes8 = [_]Crypto{
Crypto{ .ty = crypto.core.aes.AES128, .name = "aes128-8" },
Crypto{ .ty = crypto.core.aes.AES256, .name = "aes256-8" },
};
pub fn benchmarkAES8(comptime AES: anytype, comptime count: comptime_int) !u64 {
var key: [AES.key_bits / 8]u8 = undefined;
prng.random.bytes(key[0..]);
const ctx = AES.initEnc(key);
var in = [_]u8{0} ** (8 * 16);
var timer = try Timer.start();
const start = timer.lap();
{
var i: usize = 0;
while (i < count) : (i += 1) {
ctx.encryptWide(8, &in, &in);
}
}
mem.doNotOptimizeAway(&in);
const end = timer.read();
const elapsed_s = @intToFloat(f64, end - start) / time.ns_per_s;
const throughput = @floatToInt(u64, 8 * count / elapsed_s);
return throughput;
}
fn usage() void { fn usage() void {
std.debug.warn( std.debug.warn(
\\throughput_test [options] \\throughput_test [options]
@ -238,35 +298,49 @@ pub fn main() !void {
inline for (hashes) |H| { inline for (hashes) |H| {
if (filter == null or std.mem.indexOf(u8, H.name, filter.?) != null) { if (filter == null or std.mem.indexOf(u8, H.name, filter.?) != null) {
const throughput = try benchmarkHash(H.ty, mode(128 * MiB)); const throughput = try benchmarkHash(H.ty, mode(128 * MiB));
try stdout.print("{:>17}: {:7} MiB/s\n", .{ H.name, throughput / (1 * MiB) }); try stdout.print("{:>17}: {:10} MiB/s\n", .{ H.name, throughput / (1 * MiB) });
} }
} }
inline for (macs) |M| { inline for (macs) |M| {
if (filter == null or std.mem.indexOf(u8, M.name, filter.?) != null) { if (filter == null or std.mem.indexOf(u8, M.name, filter.?) != null) {
const throughput = try benchmarkMac(M.ty, mode(128 * MiB)); const throughput = try benchmarkMac(M.ty, mode(128 * MiB));
try stdout.print("{:>17}: {:7} MiB/s\n", .{ M.name, throughput / (1 * MiB) }); try stdout.print("{:>17}: {:10} MiB/s\n", .{ M.name, throughput / (1 * MiB) });
} }
} }
inline for (exchanges) |E| { inline for (exchanges) |E| {
if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) { if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) {
const throughput = try benchmarkKeyExchange(E.ty, mode(1000)); const throughput = try benchmarkKeyExchange(E.ty, mode(1000));
try stdout.print("{:>17}: {:7} exchanges/s\n", .{ E.name, throughput }); try stdout.print("{:>17}: {:10} exchanges/s\n", .{ E.name, throughput });
} }
} }
inline for (signatures) |E| { inline for (signatures) |E| {
if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) { if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) {
const throughput = try benchmarkSignature(E.ty, mode(1000)); const throughput = try benchmarkSignature(E.ty, mode(1000));
try stdout.print("{:>17}: {:7} signatures/s\n", .{ E.name, throughput }); try stdout.print("{:>17}: {:10} signatures/s\n", .{ E.name, throughput });
} }
} }
inline for (aeads) |E| { inline for (aeads) |E| {
if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) { if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) {
const throughput = try benchmarkAead(E.ty, mode(128 * MiB)); const throughput = try benchmarkAead(E.ty, mode(128 * MiB));
try stdout.print("{:>17}: {:7} MiB/s\n", .{ E.name, throughput / (1 * MiB) }); try stdout.print("{:>17}: {:10} MiB/s\n", .{ E.name, throughput / (1 * MiB) });
}
}
inline for (aes) |E| {
if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) {
const throughput = try benchmarkAES(E.ty, mode(100000000));
try stdout.print("{:>17}: {:10} ops/s\n", .{ E.name, throughput });
}
}
inline for (aes8) |E| {
if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) {
const throughput = try benchmarkAES8(E.ty, mode(10000000));
try stdout.print("{:>17}: {:10} ops/s\n", .{ E.name, throughput });
} }
} }
} }
View File
@ -38,7 +38,35 @@ pub const State = struct {
return mem.sliceAsBytes(self.data[0..]); return mem.sliceAsBytes(self.data[0..]);
} }
pub fn permute(self: *Self) void { fn permute_unrolled(self: *Self) void {
const state = &self.data;
comptime var round = @as(u32, 24);
inline while (round > 0) : (round -= 1) {
var column = @as(usize, 0);
while (column < 4) : (column += 1) {
const x = math.rotl(u32, state[column], 24);
const y = math.rotl(u32, state[4 + column], 9);
const z = state[8 + column];
state[8 + column] = ((x ^ (z << 1)) ^ ((y & z) << 2));
state[4 + column] = ((y ^ x) ^ ((x | z) << 1));
state[column] = ((z ^ y) ^ ((x & y) << 3));
}
switch (round & 3) {
0 => {
mem.swap(u32, &state[0], &state[1]);
mem.swap(u32, &state[2], &state[3]);
state[0] ^= round | 0x9e377900;
},
2 => {
mem.swap(u32, &state[0], &state[2]);
mem.swap(u32, &state[1], &state[3]);
},
else => {},
}
}
}
fn permute_small(self: *Self) void {
const state = &self.data; const state = &self.data;
var round = @as(u32, 24); var round = @as(u32, 24);
while (round > 0) : (round -= 1) { while (round > 0) : (round -= 1) {
@ -66,6 +94,8 @@ pub const State = struct {
} }
} }
pub const permute = if (std.builtin.mode == .ReleaseSmall) permute_small else permute_unrolled;
pub fn squeeze(self: *Self, out: []u8) void { pub fn squeeze(self: *Self, out: []u8) void {
var i = @as(usize, 0); var i = @as(usize, 0);
while (i + RATE <= out.len) : (i += RATE) { while (i + RATE <= out.len) : (i += RATE) {
@ -249,15 +279,15 @@ pub const Aead = struct {
in = in[State.RATE..]; in = in[State.RATE..];
out = out[State.RATE..]; out = out[State.RATE..];
}) { }) {
for (buf[0..State.RATE]) |*p, i| { for (in[0..State.RATE]) |v, i| {
p.* ^= in[i]; buf[i] ^= v;
out[i] = p.*;
} }
mem.copy(u8, out[0..State.RATE], buf[0..State.RATE]);
state.permute(); state.permute();
} }
for (buf[0..in.len]) |*p, i| { for (in[0..]) |v, i| {
p.* ^= in[i]; buf[i] ^= v;
out[i] = p.*; out[i] = buf[i];
} }
// XOR 1 into the next byte of the state // XOR 1 into the next byte of the state
@ -291,15 +321,17 @@ pub const Aead = struct {
in = in[State.RATE..]; in = in[State.RATE..];
out = out[State.RATE..]; out = out[State.RATE..];
}) { }) {
for (buf[0..State.RATE]) |*p, i| { const d = in[0..State.RATE].*;
out[i] = p.* ^ in[i]; for (d) |v, i| {
p.* = in[i]; out[i] = buf[i] ^ v;
} }
mem.copy(u8, buf[0..State.RATE], d[0..State.RATE]);
state.permute(); state.permute();
} }
for (buf[0..in.len]) |*p, i| { for (buf[0..in.len]) |*p, i| {
out[i] = p.* ^ in[i]; const d = in[i];
p.* = in[i]; out[i] = p.* ^ d;
p.* = d;
} }
// XOR 1 into the next byte of the state // XOR 1 into the next byte of the state
51
lib/std/crypto/modes.zig Normal file
View File
@ -0,0 +1,51 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2020 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
// Based on Go stdlib implementation
const std = @import("../std.zig");
const builtin = std.builtin;
const mem = std.mem;
const debug = std.debug;
/// Counter mode.
///
/// This mode creates a key stream by encrypting an incrementing counter using a block cipher, and XORing that key stream with the source material.
///
/// Important: the counter mode doesn't provide authenticated encryption: the ciphertext can be trivially modified without this being detected.
/// As a result, applications should generally never use it directly, but only in a construction that includes a MAC.
pub fn ctr(comptime BlockCipher: anytype, block_cipher: BlockCipher, dst: []u8, src: []const u8, iv: [BlockCipher.block_size]u8, comptime endian: builtin.Endian) void {
debug.assert(dst.len >= src.len);
const block_size = BlockCipher.block_size;
var counter: [BlockCipher.block_size]u8 = undefined;
var counterInt = mem.readInt(u128, &iv, endian);
var i: usize = 0;
const parallel_count = BlockCipher.block.parallel.optimal_parallel_blocks;
const wide_block_size = parallel_count * 16;
if (src.len >= wide_block_size) {
var counters: [parallel_count * 16]u8 = undefined;
while (i + wide_block_size <= src.len) : (i += wide_block_size) {
comptime var j = 0;
inline while (j < parallel_count) : (j += 1) {
mem.writeInt(u128, counters[j * 16 .. j * 16 + 16], counterInt, endian);
counterInt +%= 1;
}
block_cipher.xorWide(parallel_count, dst[i .. i + wide_block_size][0..wide_block_size], src[i .. i + wide_block_size][0..wide_block_size], counters);
}
}
while (i + block_size <= src.len) : (i += block_size) {
mem.writeInt(u128, &counter, counterInt, endian);
counterInt +%= 1;
block_cipher.xor(dst[i .. i + block_size][0..block_size], src[i .. i + block_size][0..block_size], counter);
}
if (i < src.len) {
mem.writeInt(u128, &counter, counterInt, endian);
var pad = [_]u8{0} ** block_size;
mem.copy(u8, &pad, src[i..]);
block_cipher.xor(&pad, &pad, counter);
mem.copy(u8, dst[i..], pad[0 .. src.len - i]);
}
}
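To make the contract of the generic `ctr` routine above concrete, here is a hedged sketch of driving it with a stand-in cipher. `ToyCipher`, its XOR-with-key "encryption", and the `@import("modes.zig")` path are illustrative assumptions; only the declarations the generic code actually reaches for (`block_size`, `block.parallel.optimal_parallel_blocks`, `xor`, `xorWide`) are taken from the function above.

const std = @import("std");
const mem = std.mem;
const ctr = @import("modes.zig").ctr; // assumption: this sketch sits next to lib/std/crypto/modes.zig

// Hypothetical block cipher context: "encrypts" a counter block by XORing it
// with a fixed key. Real callers would pass e.g. an AES encryption context;
// this only exists so the sketch is self-contained.
const ToyCipher = struct {
    pub const block_size = 16;
    pub const block = struct {
        pub const parallel = struct {
            pub const optimal_parallel_blocks = 2;
        };
    };

    key: [16]u8,

    pub fn xor(self: ToyCipher, dst: *[16]u8, src: *const [16]u8, counter: [16]u8) void {
        for (dst) |*byte, i| byte.* = src[i] ^ counter[i] ^ self.key[i];
    }

    pub fn xorWide(self: ToyCipher, comptime count: usize, dst: *[16 * count]u8, src: *const [16 * count]u8, counters: [16 * count]u8) void {
        var j: usize = 0;
        while (j < count) : (j += 1) {
            var counter: [16]u8 = undefined;
            mem.copy(u8, &counter, counters[16 * j .. 16 * j + 16]);
            self.xor(dst[16 * j .. 16 * j + 16][0..16], src[16 * j .. 16 * j + 16][0..16], counter);
        }
    }
};

test "ctr round trip with a toy cipher" {
    const cipher = ToyCipher{ .key = [_]u8{0x55} ** 16 };
    const iv = [_]u8{0} ** 16;
    const src = "the quick brown fox jumps over the lazy dog"; // 43 bytes: exercises the wide and tail paths
    var encrypted: [src.len]u8 = undefined;
    var decrypted: [src.len]u8 = undefined;

    ctr(ToyCipher, cipher, &encrypted, src, iv, .Little);
    // Applying the same keystream twice restores the plaintext.
    ctr(ToyCipher, cipher, &decrypted, &encrypted, iv, .Little);
    std.testing.expectEqualSlices(u8, src, &decrypted);
}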
@@ -95,7 +95,7 @@ test "std.event.Future" {
    // TODO provide a way to run tests in evented I/O mode
    if (!std.io.is_async) return error.SkipZigTest;

-   const handle = async testFuture();
+   testFuture();
}

fn testFuture() void {
@@ -16,107 +16,111 @@ const Loop = std.event.Loop;
/// Allows only one actor to hold the lock. /// Allows only one actor to hold the lock.
/// TODO: make this API also work in blocking I/O mode. /// TODO: make this API also work in blocking I/O mode.
pub const Lock = struct {
-   shared: bool,
-   queue: Queue,
-   queue_empty: bool,
+   mutex: std.Mutex = std.Mutex{},
+   head: usize = UNLOCKED,

-   const Queue = std.atomic.Queue(anyframe);
+   const UNLOCKED = 0;
+   const LOCKED = 1;

    const global_event_loop = Loop.instance orelse
        @compileError("std.event.Lock currently only works with event-based I/O");
const Waiter = struct {
// forced Waiter alignment to ensure it doesn't clash with LOCKED
next: ?*Waiter align(2),
tail: *Waiter,
node: Loop.NextTickNode,
};
pub fn initLocked() Lock {
return Lock{ .head = LOCKED };
}
pub fn acquire(self: *Lock) Held {
const held = self.mutex.acquire();
// self.head transitions from multiple stages depending on the value:
// UNLOCKED -> LOCKED:
// acquire Lock ownership when theres no waiters
// LOCKED -> <Waiter head ptr>:
// Lock is already owned, enqueue first Waiter
// <head ptr> -> <head ptr>:
// Lock is owned with pending waiters. Push our waiter to the queue.
if (self.head == UNLOCKED) {
self.head = LOCKED;
held.release();
return Held{ .lock = self };
}
var waiter: Waiter = undefined;
waiter.next = null;
waiter.tail = &waiter;
const head = switch (self.head) {
UNLOCKED => unreachable,
LOCKED => null,
else => @intToPtr(*Waiter, self.head),
};
if (head) |h| {
h.tail.next = &waiter;
h.tail = &waiter;
} else {
self.head = @ptrToInt(&waiter);
}
suspend {
waiter.node = Loop.NextTickNode{
.prev = undefined,
.next = undefined,
.data = @frame(),
};
held.release();
}
return Held{ .lock = self };
}
    pub const Held = struct {
        lock: *Lock,

        pub fn release(self: Held) void {
// Resume the next item from the queue. const waiter = blk: {
if (self.lock.queue.get()) |node| { const held = self.lock.mutex.acquire();
global_event_loop.onNextTick(node); defer held.release();
return;
}
// We need to release the lock. // self.head goes through the reverse transition from acquire():
@atomicStore(bool, &self.lock.queue_empty, true, .SeqCst); // <head ptr> -> <new head ptr>:
@atomicStore(bool, &self.lock.shared, false, .SeqCst); // pop a waiter from the queue to give Lock ownership when theres still others pending
// <head ptr> -> LOCKED:
// pop the laster waiter from the queue, while also giving it lock ownership when awaken
// LOCKED -> UNLOCKED:
// last lock owner releases lock while no one else is waiting for it
// There might be a queue item. If we know the queue is empty, we can be done, switch (self.lock.head) {
// because the other actor will try to obtain the lock. UNLOCKED => {
// But if there's a queue item, we are the actor which must loop and attempt unreachable; // Lock unlocked while unlocking
// to grab the lock again. },
if (@atomicLoad(bool, &self.lock.queue_empty, .SeqCst)) { LOCKED => {
return; self.lock.head = UNLOCKED;
} break :blk null;
},
while (true) { else => {
if (@atomicRmw(bool, &self.lock.shared, .Xchg, true, .SeqCst)) { const waiter = @intToPtr(*Waiter, self.lock.head);
// We did not obtain the lock. Great, the queue is someone else's problem. self.lock.head = if (waiter.next == null) LOCKED else @ptrToInt(waiter.next);
return; if (waiter.next) |next|
} next.tail = waiter.tail;
break :blk waiter;
// Resume the next item from the queue. },
if (self.lock.queue.get()) |node| {
global_event_loop.onNextTick(node);
return;
}
// Release the lock again.
@atomicStore(bool, &self.lock.queue_empty, true, .SeqCst);
@atomicStore(bool, &self.lock.shared, false, .SeqCst);
// Find out if we can be done.
if (@atomicLoad(bool, &self.lock.queue_empty, .SeqCst)) {
return;
}
}
} }
}; };
pub fn init() Lock { if (waiter) |w| {
return Lock{ global_event_loop.onNextTick(&w.node);
.shared = false, }
.queue = Queue.init(), }
.queue_empty = true,
}; };
}
pub fn initLocked() Lock {
return Lock{
.shared = true,
.queue = Queue.init(),
.queue_empty = true,
};
}
/// Must be called when not locked. Not thread safe.
/// All calls to acquire() and release() must complete before calling deinit().
pub fn deinit(self: *Lock) void {
assert(!self.shared);
while (self.queue.get()) |node| resume node.data;
}
pub fn acquire(self: *Lock) callconv(.Async) Held {
var my_tick_node = Loop.NextTickNode.init(@frame());
errdefer _ = self.queue.remove(&my_tick_node); // TODO test canceling an acquire
suspend {
self.queue.put(&my_tick_node);
// At this point, we are in the queue, so we might have already been resumed.
// We set this bit so that later we can rely on the fact, that if queue_empty == true, some actor
// will attempt to grab the lock.
@atomicStore(bool, &self.queue_empty, false, .SeqCst);
if (!@atomicRmw(bool, &self.shared, .Xchg, true, .SeqCst)) {
if (self.queue.get()) |node| {
// Whether this node is us or someone else, we tail resume it.
resume node.data;
}
}
}
return Held{ .lock = self };
}
};
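As a quick orientation for the rewrite above, this is how the reworked Lock is meant to be used, mirroring `lockRunner` in the test below; it assumes evented I/O is enabled, since std.event.Lock still compile-errors otherwise.

const std = @import("std");
const Lock = std.event.Lock;

var guarded_counter: usize = 0;

// The struct now default-initializes (no init()/deinit() pair), and acquire()
// returns a Held token whose release() is deferred, exactly as the test does.
fn bump(lock: *Lock) void {
    const held = lock.acquire();
    defer held.release();
    guarded_counter += 1;
}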
test "std.event.Lock" { test "std.event.Lock" {
@ -128,41 +132,16 @@ test "std.event.Lock" {
// TODO https://github.com/ziglang/zig/issues/3251 // TODO https://github.com/ziglang/zig/issues/3251
if (builtin.os.tag == .freebsd) return error.SkipZigTest; if (builtin.os.tag == .freebsd) return error.SkipZigTest;
// TODO this file has bit-rotted. repair it var lock = Lock{};
if (true) return error.SkipZigTest; testLock(&lock);
var lock = Lock.init();
defer lock.deinit();
_ = async testLock(&lock);
const expected_result = [1]i32{3 * @intCast(i32, shared_test_data.len)} ** shared_test_data.len; const expected_result = [1]i32{3 * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
testing.expectEqualSlices(i32, &expected_result, &shared_test_data); testing.expectEqualSlices(i32, &expected_result, &shared_test_data);
} }
-fn testLock(lock: *Lock) callconv(.Async) void {
+fn testLock(lock: *Lock) void {
    var handle1 = async lockRunner(lock);
var tick_node1 = Loop.NextTickNode{
.prev = undefined,
.next = undefined,
.data = &handle1,
};
Loop.instance.?.onNextTick(&tick_node1);
var handle2 = async lockRunner(lock); var handle2 = async lockRunner(lock);
var tick_node2 = Loop.NextTickNode{
.prev = undefined,
.next = undefined,
.data = &handle2,
};
Loop.instance.?.onNextTick(&tick_node2);
var handle3 = async lockRunner(lock); var handle3 = async lockRunner(lock);
var tick_node3 = Loop.NextTickNode{
.prev = undefined,
.next = undefined,
.data = &handle3,
};
Loop.instance.?.onNextTick(&tick_node3);
await handle1; await handle1;
await handle2; await handle2;
@@ -171,13 +150,13 @@ fn testLock(lock: *Lock) callconv(.Async) void {
var shared_test_data = [1]i32{0} ** 10;
var shared_test_index: usize = 0;

-fn lockRunner(lock: *Lock) callconv(.Async) void {
-   suspend; // resumed by onNextTick
+fn lockRunner(lock: *Lock) void {
+   Lock.global_event_loop.yield();

    var i: usize = 0;
    while (i < shared_test_data.len) : (i += 1) {
-       var lock_frame = async lock.acquire();
-       const handle = await lock_frame;
+       const handle = lock.acquire();
        defer handle.release();

        shared_test_index = 0;
@@ -112,8 +112,9 @@ pub const Loop = struct {
    /// have the correct pointer value.
    /// https://github.com/ziglang/zig/issues/2761 and https://github.com/ziglang/zig/issues/2765
    pub fn init(self: *Loop) !void {
-       if (builtin.single_threaded
-           or (@hasDecl(root, "event_loop_mode") and root.event_loop_mode == .single_threaded)) {
+       if (builtin.single_threaded or
+           (@hasDecl(root, "event_loop_mode") and root.event_loop_mode == .single_threaded))
+       {
            return self.initSingleThreaded();
        } else {
            return self.initMultiThreaded();
@@ -687,9 +688,14 @@
        switch (builtin.os.tag) {
            .linux => {
+               // writing to the eventfd will only wake up one thread, thus multiple writes
+               // are needed to wakeup all the threads
+               var i: usize = 0;
+               while (i < self.extra_threads.len + 1) : (i += 1) {
                    // writing 8 bytes to an eventfd cannot fail
                    const amt = os.write(self.os_data.final_eventfd, &wakeup_bytes) catch unreachable;
                    assert(amt == wakeup_bytes.len);
+               }
                return;
            },
            .macosx, .freebsd, .netbsd, .dragonfly => {
@@ -715,6 +721,50 @@
        }
    }

    /// ------- I/O APIs -------
pub fn accept(
self: *Loop,
/// This argument is a socket that has been created with `socket`, bound to a local address
/// with `bind`, and is listening for connections after a `listen`.
sockfd: os.fd_t,
/// This argument is a pointer to a sockaddr structure. This structure is filled in with the
/// address of the peer socket, as known to the communications layer. The exact format of the
/// address returned addr is determined by the socket's address family (see `socket` and the
/// respective protocol man pages).
addr: *os.sockaddr,
/// This argument is a value-result argument: the caller must initialize it to contain the
/// size (in bytes) of the structure pointed to by addr; on return it will contain the actual size
/// of the peer address.
///
/// The returned address is truncated if the buffer provided is too small; in this case, `addr_size`
/// will return a value greater than was supplied to the call.
addr_size: *os.socklen_t,
/// The following values can be bitwise ORed in flags to obtain different behavior:
/// * `SOCK_CLOEXEC` - Set the close-on-exec (`FD_CLOEXEC`) flag on the new file descriptor. See the
/// description of the `O_CLOEXEC` flag in `open` for reasons why this may be useful.
flags: u32,
) os.AcceptError!os.fd_t {
while (true) {
return os.accept(sockfd, addr, addr_size, flags | os.SOCK_NONBLOCK) catch |err| switch (err) {
error.WouldBlock => {
self.waitUntilFdReadable(sockfd);
continue;
},
else => return err,
};
}
}
pub fn connect(self: *Loop, sockfd: os.socket_t, sock_addr: *const os.sockaddr, len: os.socklen_t) os.ConnectError!void {
os.connect(sockfd, sock_addr, len) catch |err| switch (err) {
error.WouldBlock => {
self.waitUntilFdWritable(sockfd);
return os.getsockoptError(sockfd);
},
else => return err,
};
}
/// Performs an async `os.open` using a separate thread. /// Performs an async `os.open` using a separate thread.
pub fn openZ(self: *Loop, file_path: [*:0]const u8, flags: u32, mode: os.mode_t) os.OpenError!os.fd_t { pub fn openZ(self: *Loop, file_path: [*:0]const u8, flags: u32, mode: os.mode_t) os.OpenError!os.fd_t {
var req_node = Request.Node{ var req_node = Request.Node{
@ -773,7 +823,8 @@ pub const Loop = struct {
    /// Performs an async `os.read` using a separate thread.
    /// `fd` must block and not return EAGAIN.
-   pub fn read(self: *Loop, fd: os.fd_t, buf: []u8) os.ReadError!usize {
+   pub fn read(self: *Loop, fd: os.fd_t, buf: []u8, simulate_evented: bool) os.ReadError!usize {
if (simulate_evented) {
var req_node = Request.Node{ var req_node = Request.Node{
.data = .{ .data = .{
.msg = .{ .msg = .{
@ -790,11 +841,23 @@ pub const Loop = struct {
self.posixFsRequest(&req_node); self.posixFsRequest(&req_node);
} }
return req_node.data.msg.read.result; return req_node.data.msg.read.result;
} else {
while (true) {
return os.read(fd, buf) catch |err| switch (err) {
error.WouldBlock => {
self.waitUntilFdReadable(fd);
continue;
},
else => return err,
};
}
}
} }
    /// Performs an async `os.readv` using a separate thread.
    /// `fd` must block and not return EAGAIN.
-   pub fn readv(self: *Loop, fd: os.fd_t, iov: []const os.iovec) os.ReadError!usize {
+   pub fn readv(self: *Loop, fd: os.fd_t, iov: []const os.iovec, simulate_evented: bool) os.ReadError!usize {
if (simulate_evented) {
var req_node = Request.Node{ var req_node = Request.Node{
.data = .{ .data = .{
.msg = .{ .msg = .{
@ -811,11 +874,23 @@ pub const Loop = struct {
self.posixFsRequest(&req_node); self.posixFsRequest(&req_node);
} }
return req_node.data.msg.readv.result; return req_node.data.msg.readv.result;
} else {
while (true) {
return os.readv(fd, iov) catch |err| switch (err) {
error.WouldBlock => {
self.waitUntilFdReadable(fd);
continue;
},
else => return err,
};
}
}
} }
    /// Performs an async `os.pread` using a separate thread.
    /// `fd` must block and not return EAGAIN.
-   pub fn pread(self: *Loop, fd: os.fd_t, buf: []u8, offset: u64) os.PReadError!usize {
+   pub fn pread(self: *Loop, fd: os.fd_t, buf: []u8, offset: u64, simulate_evented: bool) os.PReadError!usize {
if (simulate_evented) {
var req_node = Request.Node{ var req_node = Request.Node{
.data = .{ .data = .{
.msg = .{ .msg = .{
@ -833,11 +908,23 @@ pub const Loop = struct {
self.posixFsRequest(&req_node); self.posixFsRequest(&req_node);
} }
return req_node.data.msg.pread.result; return req_node.data.msg.pread.result;
} else {
while (true) {
return os.pread(fd, buf, offset) catch |err| switch (err) {
error.WouldBlock => {
self.waitUntilFdReadable(fd);
continue;
},
else => return err,
};
}
}
} }
    /// Performs an async `os.preadv` using a separate thread.
    /// `fd` must block and not return EAGAIN.
-   pub fn preadv(self: *Loop, fd: os.fd_t, iov: []const os.iovec, offset: u64) os.ReadError!usize {
+   pub fn preadv(self: *Loop, fd: os.fd_t, iov: []const os.iovec, offset: u64, simulate_evented: bool) os.ReadError!usize {
if (simulate_evented) {
var req_node = Request.Node{ var req_node = Request.Node{
.data = .{ .data = .{
.msg = .{ .msg = .{
@ -855,11 +942,23 @@ pub const Loop = struct {
self.posixFsRequest(&req_node); self.posixFsRequest(&req_node);
} }
return req_node.data.msg.preadv.result; return req_node.data.msg.preadv.result;
} else {
while (true) {
return os.preadv(fd, iov, offset) catch |err| switch (err) {
error.WouldBlock => {
self.waitUntilFdReadable(fd);
continue;
},
else => return err,
};
}
}
} }
    /// Performs an async `os.write` using a separate thread.
    /// `fd` must block and not return EAGAIN.
-   pub fn write(self: *Loop, fd: os.fd_t, bytes: []const u8) os.WriteError!usize {
+   pub fn write(self: *Loop, fd: os.fd_t, bytes: []const u8, simulate_evented: bool) os.WriteError!usize {
if (simulate_evented) {
var req_node = Request.Node{ var req_node = Request.Node{
.data = .{ .data = .{
.msg = .{ .msg = .{
@ -876,11 +975,23 @@ pub const Loop = struct {
self.posixFsRequest(&req_node); self.posixFsRequest(&req_node);
} }
return req_node.data.msg.write.result; return req_node.data.msg.write.result;
} else {
while (true) {
return os.write(fd, bytes) catch |err| switch (err) {
error.WouldBlock => {
self.waitUntilFdWritable(fd);
continue;
},
else => return err,
};
}
}
} }
    /// Performs an async `os.writev` using a separate thread.
    /// `fd` must block and not return EAGAIN.
-   pub fn writev(self: *Loop, fd: os.fd_t, iov: []const os.iovec_const) os.WriteError!usize {
+   pub fn writev(self: *Loop, fd: os.fd_t, iov: []const os.iovec_const, simulate_evented: bool) os.WriteError!usize {
if (simulate_evented) {
var req_node = Request.Node{ var req_node = Request.Node{
.data = .{ .data = .{
.msg = .{ .msg = .{
@ -897,11 +1008,57 @@ pub const Loop = struct {
self.posixFsRequest(&req_node); self.posixFsRequest(&req_node);
} }
return req_node.data.msg.writev.result; return req_node.data.msg.writev.result;
} else {
while (true) {
return os.writev(fd, iov) catch |err| switch (err) {
error.WouldBlock => {
self.waitUntilFdWritable(fd);
continue;
},
else => return err,
};
}
}
}
/// Performs an async `os.pwrite` using a separate thread.
/// `fd` must block and not return EAGAIN.
pub fn pwrite(self: *Loop, fd: os.fd_t, bytes: []const u8, offset: u64, simulate_evented: bool) os.PerformsWriteError!usize {
if (simulate_evented) {
var req_node = Request.Node{
.data = .{
.msg = .{
.pwrite = .{
.fd = fd,
.bytes = bytes,
.offset = offset,
.result = undefined,
},
},
.finish = .{ .TickNode = .{ .data = @frame() } },
},
};
suspend {
self.posixFsRequest(&req_node);
}
return req_node.data.msg.pwrite.result;
} else {
while (true) {
return os.pwrite(fd, bytes, offset) catch |err| switch (err) {
error.WouldBlock => {
self.waitUntilFdWritable(fd);
continue;
},
else => return err,
};
}
}
} }
    /// Performs an async `os.pwritev` using a separate thread.
    /// `fd` must block and not return EAGAIN.
-   pub fn pwritev(self: *Loop, fd: os.fd_t, iov: []const os.iovec_const, offset: u64) os.WriteError!usize {
+   pub fn pwritev(self: *Loop, fd: os.fd_t, iov: []const os.iovec_const, offset: u64, simulate_evented: bool) os.PWriteError!usize {
if (simulate_evented) {
var req_node = Request.Node{ var req_node = Request.Node{
.data = .{ .data = .{
.msg = .{ .msg = .{
@ -919,6 +1076,56 @@ pub const Loop = struct {
self.posixFsRequest(&req_node); self.posixFsRequest(&req_node);
} }
return req_node.data.msg.pwritev.result; return req_node.data.msg.pwritev.result;
} else {
while (true) {
return os.pwritev(fd, iov, offset) catch |err| switch (err) {
error.WouldBlock => {
self.waitUntilFdWritable(fd);
continue;
},
else => return err,
};
}
}
}
pub fn sendto(
self: *Loop,
/// The file descriptor of the sending socket.
sockfd: os.fd_t,
/// Message to send.
buf: []const u8,
flags: u32,
dest_addr: ?*const os.sockaddr,
addrlen: os.socklen_t,
) os.SendError!usize {
while (true) {
return os.sendto(sockfd, buf, flags, dest_addr, addrlen) catch |err| switch (err) {
error.WouldBlock => {
self.waitUntilFdWritable(sockfd);
continue;
},
else => return err,
};
}
}
pub fn recvfrom(
sockfd: os.fd_t,
buf: []u8,
flags: u32,
src_addr: ?*os.sockaddr,
addrlen: ?*os.socklen_t,
) os.RecvFromError!usize {
while (true) {
return os.recvfrom(sockfd, buf, flags, src_addr, addrlen) catch |err| switch (err) {
error.WouldBlock => {
self.waitUntilFdReadable(sockfd);
continue;
},
else => return err,
};
}
} }
/// Performs an async `os.faccessatZ` using a separate thread. /// Performs an async `os.faccessatZ` using a separate thread.
@ -1073,6 +1280,9 @@ pub const Loop = struct {
.writev => |*msg| { .writev => |*msg| {
msg.result = os.writev(msg.fd, msg.iov); msg.result = os.writev(msg.fd, msg.iov);
}, },
.pwrite => |*msg| {
msg.result = os.pwrite(msg.fd, msg.bytes, msg.offset);
},
.pwritev => |*msg| { .pwritev => |*msg| {
msg.result = os.pwritev(msg.fd, msg.iov, msg.offset); msg.result = os.pwritev(msg.fd, msg.iov, msg.offset);
}, },
@ -1142,6 +1352,7 @@ pub const Loop = struct {
readv: ReadV, readv: ReadV,
write: Write, write: Write,
writev: WriteV, writev: WriteV,
pwrite: PWrite,
pwritev: PWriteV, pwritev: PWriteV,
pread: PRead, pread: PRead,
preadv: PReadV, preadv: PReadV,
@ -1185,6 +1396,15 @@ pub const Loop = struct {
pub const Error = os.WriteError; pub const Error = os.WriteError;
}; };
pub const PWrite = struct {
fd: os.fd_t,
bytes: []const u8,
offset: usize,
result: Error!usize,
pub const Error = os.PWriteError;
};
pub const PWriteV = struct { pub const PWriteV = struct {
fd: os.fd_t, fd: os.fd_t,
iov: []const os.iovec_const, iov: []const os.iovec_const,
@@ -186,7 +186,9 @@ pub fn LinearFifo(
        } else {
            var head = self.head + count;
            if (powers_of_two) {
-               head &= self.buf.len - 1;
+               // Note it is safe to do a wrapping subtract as
+               // bitwise & with all 1s is a noop
+               head &= self.buf.len -% 1;
            } else {
                head %= self.buf.len;
            }
@ -376,6 +378,14 @@ pub fn LinearFifo(
}; };
} }
test "LinearFifo(u8, .Dynamic) discard(0) from empty buffer should not error on overflow" {
var fifo = LinearFifo(u8, .Dynamic).init(testing.allocator);
defer fifo.deinit();
// If overflow is not explicitly allowed this will crash in debug / safe mode
fifo.discard(0);
}
test "LinearFifo(u8, .Dynamic)" { test "LinearFifo(u8, .Dynamic)" {
var fifo = LinearFifo(u8, .Dynamic).init(testing.allocator); var fifo = LinearFifo(u8, .Dynamic).init(testing.allocator);
defer fifo.deinit(); defer fifo.deinit();
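The wrapping subtract in the hunk above is the whole fix; a minimal illustration of why `-%` matters for the power-of-two masking trick (names here are illustrative, not from the fifo code):

const std = @import("std");

test "power-of-two wraparound mask tolerates a zero-length buffer" {
    var buf_len: usize = 0; // e.g. a .Dynamic fifo before any capacity is reserved
    var head: usize = 5;
    // With a power-of-two capacity, `head & (len - 1)` wraps the index, but a
    // plain subtraction on len == 0 trips the integer-overflow safety check in
    // Debug/ReleaseSafe. `-%` wraps to all ones instead, and AND-ing with all
    // ones leaves head unchanged.
    head &= buf_len -% 1;
    std.testing.expect(head == 5);
}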
@@ -414,10 +414,12 @@ pub const File = struct {
    pub fn read(self: File, buffer: []u8) ReadError!usize {
        if (is_windows) {
            return windows.ReadFile(self.handle, buffer, null, self.intended_io_mode);
-       } else if (self.capable_io_mode != self.intended_io_mode) {
-           return std.event.Loop.instance.?.read(self.handle, buffer);
-       } else {
+       }
+
+       if (self.intended_io_mode == .blocking) {
            return os.read(self.handle, buffer);
+       } else {
+           return std.event.Loop.instance.?.read(self.handle, buffer, self.capable_io_mode != self.intended_io_mode);
        }
    }
@ -436,10 +438,12 @@ pub const File = struct {
pub fn pread(self: File, buffer: []u8, offset: u64) PReadError!usize { pub fn pread(self: File, buffer: []u8, offset: u64) PReadError!usize {
if (is_windows) { if (is_windows) {
return windows.ReadFile(self.handle, buffer, offset, self.intended_io_mode); return windows.ReadFile(self.handle, buffer, offset, self.intended_io_mode);
} else if (self.capable_io_mode != self.intended_io_mode) { }
return std.event.Loop.instance.?.pread(self.handle, buffer, offset);
} else { if (self.intended_io_mode == .blocking) {
return os.pread(self.handle, buffer, offset); return os.pread(self.handle, buffer, offset);
} else {
return std.event.Loop.instance.?.pread(self.handle, buffer, offset, self.capable_io_mode != self.intended_io_mode);
} }
} }
@ -461,10 +465,12 @@ pub const File = struct {
if (iovecs.len == 0) return @as(usize, 0); if (iovecs.len == 0) return @as(usize, 0);
const first = iovecs[0]; const first = iovecs[0];
return windows.ReadFile(self.handle, first.iov_base[0..first.iov_len], null, self.intended_io_mode); return windows.ReadFile(self.handle, first.iov_base[0..first.iov_len], null, self.intended_io_mode);
} else if (self.capable_io_mode != self.intended_io_mode) { }
return std.event.Loop.instance.?.readv(self.handle, iovecs);
} else { if (self.intended_io_mode == .blocking) {
return os.readv(self.handle, iovecs); return os.readv(self.handle, iovecs);
} else {
return std.event.Loop.instance.?.readv(self.handle, iovecs, self.capable_io_mode != self.intended_io_mode);
} }
} }
@ -500,10 +506,12 @@ pub const File = struct {
if (iovecs.len == 0) return @as(usize, 0); if (iovecs.len == 0) return @as(usize, 0);
const first = iovecs[0]; const first = iovecs[0];
return windows.ReadFile(self.handle, first.iov_base[0..first.iov_len], offset, self.intended_io_mode); return windows.ReadFile(self.handle, first.iov_base[0..first.iov_len], offset, self.intended_io_mode);
} else if (self.capable_io_mode != self.intended_io_mode) { }
return std.event.Loop.instance.?.preadv(self.handle, iovecs, offset);
} else { if (self.intended_io_mode == .blocking) {
return os.preadv(self.handle, iovecs, offset); return os.preadv(self.handle, iovecs, offset);
} else {
return std.event.Loop.instance.?.preadv(self.handle, iovecs, offset, self.capable_io_mode != self.intended_io_mode);
} }
} }
@ -539,10 +547,12 @@ pub const File = struct {
pub fn write(self: File, bytes: []const u8) WriteError!usize { pub fn write(self: File, bytes: []const u8) WriteError!usize {
if (is_windows) { if (is_windows) {
return windows.WriteFile(self.handle, bytes, null, self.intended_io_mode); return windows.WriteFile(self.handle, bytes, null, self.intended_io_mode);
} else if (self.capable_io_mode != self.intended_io_mode) { }
return std.event.Loop.instance.?.write(self.handle, bytes);
} else { if (self.intended_io_mode == .blocking) {
return os.write(self.handle, bytes); return os.write(self.handle, bytes);
} else {
return std.event.Loop.instance.?.write(self.handle, bytes, self.capable_io_mode != self.intended_io_mode);
} }
} }
@ -556,10 +566,12 @@ pub const File = struct {
pub fn pwrite(self: File, bytes: []const u8, offset: u64) PWriteError!usize { pub fn pwrite(self: File, bytes: []const u8, offset: u64) PWriteError!usize {
if (is_windows) { if (is_windows) {
return windows.WriteFile(self.handle, bytes, offset, self.intended_io_mode); return windows.WriteFile(self.handle, bytes, offset, self.intended_io_mode);
} else if (self.capable_io_mode != self.intended_io_mode) { }
return std.event.Loop.instance.?.pwrite(self.handle, bytes, offset);
} else { if (self.intended_io_mode == .blocking) {
return os.pwrite(self.handle, bytes, offset); return os.pwrite(self.handle, bytes, offset);
} else {
return std.event.Loop.instance.?.pwrite(self.handle, bytes, offset, self.capable_io_mode != self.intended_io_mode);
} }
} }
@ -576,10 +588,12 @@ pub const File = struct {
if (iovecs.len == 0) return @as(usize, 0); if (iovecs.len == 0) return @as(usize, 0);
const first = iovecs[0]; const first = iovecs[0];
return windows.WriteFile(self.handle, first.iov_base[0..first.iov_len], null, self.intended_io_mode); return windows.WriteFile(self.handle, first.iov_base[0..first.iov_len], null, self.intended_io_mode);
} else if (self.capable_io_mode != self.intended_io_mode) { }
return std.event.Loop.instance.?.writev(self.handle, iovecs);
} else { if (self.intended_io_mode == .blocking) {
return os.writev(self.handle, iovecs); return os.writev(self.handle, iovecs);
} else {
return std.event.Loop.instance.?.writev(self.handle, iovecs, self.capable_io_mode != self.intended_io_mode);
} }
} }
@ -607,10 +621,12 @@ pub const File = struct {
if (iovecs.len == 0) return @as(usize, 0); if (iovecs.len == 0) return @as(usize, 0);
const first = iovecs[0]; const first = iovecs[0];
return windows.WriteFile(self.handle, first.iov_base[0..first.iov_len], offset, self.intended_io_mode); return windows.WriteFile(self.handle, first.iov_base[0..first.iov_len], offset, self.intended_io_mode);
} else if (self.capable_io_mode != self.intended_io_mode) { }
return std.event.Loop.instance.?.pwritev(self.handle, iovecs, offset);
} else { if (self.intended_io_mode == .blocking) {
return os.pwritev(self.handle, iovecs, offset); return os.pwritev(self.handle, iovecs, offset);
} else {
return std.event.Loop.instance.?.pwritev(self.handle, iovecs, offset, self.capable_io_mode != self.intended_io_mode);
} }
} }
@@ -274,6 +274,32 @@ test "file operations on directories" {
dir.close(); dir.close();
} }
test "deleteDir" {
var tmp_dir = tmpDir(.{});
defer tmp_dir.cleanup();
// deleting a non-existent directory
testing.expectError(error.FileNotFound, tmp_dir.dir.deleteDir("test_dir"));
var dir = try tmp_dir.dir.makeOpenPath("test_dir", .{});
var file = try dir.createFile("test_file", .{});
file.close();
dir.close();
// deleting a non-empty directory
// TODO: Re-enable this check on Windows, see https://github.com/ziglang/zig/issues/5537
if (builtin.os.tag != .windows) {
testing.expectError(error.DirNotEmpty, tmp_dir.dir.deleteDir("test_dir"));
}
dir = try tmp_dir.dir.openDir("test_dir", .{});
try dir.deleteFile("test_file");
dir.close();
// deleting an empty directory
try tmp_dir.dir.deleteDir("test_dir");
}
test "Dir.rename files" { test "Dir.rename files" {
var tmp_dir = tmpDir(.{}); var tmp_dir = tmpDir(.{});
defer tmp_dir.cleanup(); defer tmp_dir.cleanup();
@@ -919,6 +919,13 @@ pub fn testAllocator(base_allocator: *mem.Allocator) !void {
const zero_bit_ptr = try allocator.create(u0); const zero_bit_ptr = try allocator.create(u0);
zero_bit_ptr.* = 0; zero_bit_ptr.* = 0;
allocator.destroy(zero_bit_ptr); allocator.destroy(zero_bit_ptr);
const oversize = try allocator.allocAdvanced(u32, null, 5, .at_least);
testing.expect(oversize.len >= 5);
for (oversize) |*item| {
item.* = 0xDEADBEEF;
}
allocator.free(oversize);
} }
pub fn testAllocatorAligned(base_allocator: *mem.Allocator, comptime alignment: u29) !void { pub fn testAllocatorAligned(base_allocator: *mem.Allocator, comptime alignment: u29) !void {
@@ -75,14 +75,23 @@ pub const ArenaAllocator = struct {
            const adjusted_addr = mem.alignForward(addr, ptr_align);
            const adjusted_index = self.state.end_index + (adjusted_addr - addr);
            const new_end_index = adjusted_index + n;
-           if (new_end_index > cur_buf.len) {
-               cur_node = try self.createNode(cur_buf.len, n + ptr_align);
-               continue;
-           }
-           const result = cur_buf[adjusted_index..new_end_index];
-           self.state.end_index = new_end_index;
-           return result;
+
+           if (new_end_index <= cur_buf.len) {
+               const result = cur_buf[adjusted_index..new_end_index];
+               self.state.end_index = new_end_index;
+               return result;
+           }
+
+           const bigger_buf_size = @sizeOf(BufNode) + new_end_index;
+           // Try to grow the buffer in-place
+           cur_node.data = self.child_allocator.resize(cur_node.data, bigger_buf_size) catch |err| switch (err) {
+               error.OutOfMemory => {
+                   // Allocate a new node if that's not possible
+                   cur_node = try self.createNode(cur_buf.len, n + ptr_align);
+                   continue;
+               },
+           };
        }
    }

    fn resize(allocator: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
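The change above only affects how the arena grows internally (resize the current node in place, fall back to a new node); the public allocation pattern is unchanged. A minimal sketch using the era's `&arena.allocator` field access:

const std = @import("std");

test "arena usage is unchanged by the in-place growth strategy" {
    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();
    const allocator = &arena.allocator;

    // Many small allocations: the arena first tries to resize its current
    // buffer node in place and only falls back to a fresh node when the child
    // allocator cannot grow it. Callers see no difference.
    var i: usize = 0;
    while (i < 100) : (i += 1) {
        _ = try allocator.alloc(u8, 128);
    }
}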
@@ -1,10 +0,0 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2020 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
test "std.http" {
_ = @import("http/headers.zig");
}
pub const Headers = @import("http/headers.zig").Headers;
@@ -1,597 +0,0 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2020 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
// HTTP Header data structure/type
// Based on lua-http's http.header module
//
// Design criteria:
// - the same header field is allowed more than once
// - must be able to fetch separate occurrences (important for some headers e.g. Set-Cookie)
// - optionally available as comma separated list
// - http2 adds flag to headers that they should never be indexed
// - header order should be recoverable
//
// Headers are implemented as an array of entries.
// An index of field name => array indices is kept.
const std = @import("../std.zig");
const debug = std.debug;
const assert = debug.assert;
const testing = std.testing;
const mem = std.mem;
const Allocator = mem.Allocator;
fn never_index_default(name: []const u8) bool {
if (mem.eql(u8, "authorization", name)) return true;
if (mem.eql(u8, "proxy-authorization", name)) return true;
if (mem.eql(u8, "cookie", name)) return true;
if (mem.eql(u8, "set-cookie", name)) return true;
return false;
}
const HeaderEntry = struct {
name: []const u8,
value: []u8,
never_index: bool,
const Self = @This();
fn init(allocator: *Allocator, name: []const u8, value: []const u8, never_index: ?bool) !Self {
return Self{
.name = name, // takes reference
.value = try allocator.dupe(u8, value),
.never_index = never_index orelse never_index_default(name),
};
}
fn deinit(self: Self, allocator: *Allocator) void {
allocator.free(self.value);
}
pub fn modify(self: *Self, allocator: *Allocator, value: []const u8, never_index: ?bool) !void {
const old_len = self.value.len;
if (value.len > old_len) {
self.value = try allocator.realloc(self.value, value.len);
} else if (value.len < old_len) {
self.value = allocator.shrink(self.value, value.len);
}
mem.copy(u8, self.value, value);
self.never_index = never_index orelse never_index_default(self.name);
}
fn compare(context: void, a: HeaderEntry, b: HeaderEntry) bool {
if (a.name.ptr != b.name.ptr and a.name.len != b.name.len) {
// Things beginning with a colon *must* be before others
const a_is_colon = a.name[0] == ':';
const b_is_colon = b.name[0] == ':';
if (a_is_colon and !b_is_colon) {
return true;
} else if (!a_is_colon and b_is_colon) {
return false;
}
// Sort lexicographically on header name
return mem.order(u8, a.name, b.name) == .lt;
}
// Sort lexicographically on header value
if (!mem.eql(u8, a.value, b.value)) {
return mem.order(u8, a.value, b.value) == .lt;
}
// Doesn't matter here; need to pick something for sort consistency
return a.never_index;
}
};
test "HeaderEntry" {
var e = try HeaderEntry.init(testing.allocator, "foo", "bar", null);
defer e.deinit(testing.allocator);
testing.expectEqualSlices(u8, "foo", e.name);
testing.expectEqualSlices(u8, "bar", e.value);
testing.expectEqual(false, e.never_index);
try e.modify(testing.allocator, "longer value", null);
testing.expectEqualSlices(u8, "longer value", e.value);
// shorter value
try e.modify(testing.allocator, "x", null);
testing.expectEqualSlices(u8, "x", e.value);
}
const HeaderList = std.ArrayListUnmanaged(HeaderEntry);
const HeaderIndexList = std.ArrayListUnmanaged(usize);
const HeaderIndex = std.StringHashMapUnmanaged(HeaderIndexList);
pub const Headers = struct {
// the owned header field name is stored in the index as part of the key
allocator: *Allocator,
data: HeaderList,
index: HeaderIndex,
const Self = @This();
pub fn init(allocator: *Allocator) Self {
return Self{
.allocator = allocator,
.data = HeaderList{},
.index = HeaderIndex{},
};
}
pub fn deinit(self: *Self) void {
{
var it = self.index.iterator();
while (it.next()) |entry| {
entry.value.deinit(self.allocator);
self.allocator.free(entry.key);
}
self.index.deinit(self.allocator);
}
{
for (self.data.items) |entry| {
entry.deinit(self.allocator);
}
self.data.deinit(self.allocator);
}
self.* = undefined;
}
pub fn clone(self: Self, allocator: *Allocator) !Self {
var other = Headers.init(allocator);
errdefer other.deinit();
try other.data.ensureCapacity(allocator, self.data.items.len);
try other.index.initCapacity(allocator, self.index.entries.len);
for (self.data.items) |entry| {
try other.append(entry.name, entry.value, entry.never_index);
}
return other;
}
pub fn toSlice(self: Self) []const HeaderEntry {
return self.data.items;
}
pub fn append(self: *Self, name: []const u8, value: []const u8, never_index: ?bool) !void {
const n = self.data.items.len + 1;
try self.data.ensureCapacity(self.allocator, n);
var entry: HeaderEntry = undefined;
if (self.index.getEntry(name)) |kv| {
entry = try HeaderEntry.init(self.allocator, kv.key, value, never_index);
errdefer entry.deinit(self.allocator);
const dex = &kv.value;
try dex.append(self.allocator, n - 1);
} else {
const name_dup = try self.allocator.dupe(u8, name);
errdefer self.allocator.free(name_dup);
entry = try HeaderEntry.init(self.allocator, name_dup, value, never_index);
errdefer entry.deinit(self.allocator);
var dex = HeaderIndexList{};
try dex.append(self.allocator, n - 1);
errdefer dex.deinit(self.allocator);
_ = try self.index.put(self.allocator, name_dup, dex);
}
self.data.appendAssumeCapacity(entry);
}
/// If the header already exists, replace the current value, otherwise append it to the list of headers.
/// If the header has multiple entries then returns an error.
pub fn upsert(self: *Self, name: []const u8, value: []const u8, never_index: ?bool) !void {
if (self.index.get(name)) |kv| {
const dex = kv.value;
if (dex.len != 1)
return error.CannotUpsertMultiValuedField;
var e = &self.data.at(dex.at(0));
try e.modify(value, never_index);
} else {
try self.append(name, value, never_index);
}
}
/// Returns boolean indicating if the field is present.
pub fn contains(self: Self, name: []const u8) bool {
return self.index.contains(name);
}
/// Returns boolean indicating if something was deleted.
pub fn delete(self: *Self, name: []const u8) bool {
if (self.index.remove(name)) |*kv| {
const dex = &kv.value;
// iterate backwards
var i = dex.items.len;
while (i > 0) {
i -= 1;
const data_index = dex.items[i];
const removed = self.data.orderedRemove(data_index);
assert(mem.eql(u8, removed.name, name));
removed.deinit(self.allocator);
}
dex.deinit(self.allocator);
self.allocator.free(kv.key);
self.rebuildIndex();
return true;
} else {
return false;
}
}
/// Removes the element at the specified index.
/// Moves items down to fill the empty space.
/// TODO this implementation can be replaced by adding
/// orderedRemove to the new hash table implementation as an
/// alternative to swapRemove.
pub fn orderedRemove(self: *Self, i: usize) void {
const removed = self.data.orderedRemove(i);
const kv = self.index.getEntry(removed.name).?;
const dex = &kv.value;
if (dex.items.len == 1) {
// was last item; delete the index
dex.deinit(self.allocator);
removed.deinit(self.allocator);
const key = kv.key;
_ = self.index.remove(key); // invalidates `kv` and `dex`
self.allocator.free(key);
} else {
dex.shrink(self.allocator, dex.items.len - 1);
removed.deinit(self.allocator);
}
// if it was the last item; no need to rebuild index
if (i != self.data.items.len) {
self.rebuildIndex();
}
}
/// Removes the element at the specified index.
/// The empty slot is filled from the end of the list.
/// TODO this implementation can be replaced by simply using the
/// new hash table which does swap removal.
pub fn swapRemove(self: *Self, i: usize) void {
const removed = self.data.swapRemove(i);
const kv = self.index.getEntry(removed.name).?;
const dex = &kv.value;
if (dex.items.len == 1) {
// was last item; delete the index
dex.deinit(self.allocator);
removed.deinit(self.allocator);
const key = kv.key;
_ = self.index.remove(key); // invalidates `kv` and `dex`
self.allocator.free(key);
} else {
dex.shrink(self.allocator, dex.items.len - 1);
removed.deinit(self.allocator);
}
// if it was the last item; no need to rebuild index
if (i != self.data.items.len) {
self.rebuildIndex();
}
}
/// Access the header at the specified index.
pub fn at(self: Self, i: usize) HeaderEntry {
return self.data.items[i];
}
/// Returns a list of indices containing headers with the given name.
/// The returned list should not be modified by the caller.
pub fn getIndices(self: Self, name: []const u8) ?HeaderIndexList {
return self.index.get(name);
}
/// Returns a slice containing each header with the given name.
pub fn get(self: Self, allocator: *Allocator, name: []const u8) !?[]const HeaderEntry {
const dex = self.getIndices(name) orelse return null;
const buf = try allocator.alloc(HeaderEntry, dex.items.len);
var n: usize = 0;
for (dex.items) |idx| {
buf[n] = self.data.items[idx];
n += 1;
}
return buf;
}
/// Returns all headers with the given name as a comma separated string.
///
/// Useful for HTTP headers that follow RFC-7230 section 3.2.2:
/// A recipient MAY combine multiple header fields with the same field
/// name into one "field-name: field-value" pair, without changing the
/// semantics of the message, by appending each subsequent field value to
/// the combined field value in order, separated by a comma. The order
/// in which header fields with the same field name are received is
/// therefore significant to the interpretation of the combined field
/// value
pub fn getCommaSeparated(self: Self, allocator: *Allocator, name: []const u8) !?[]u8 {
const dex = self.getIndices(name) orelse return null;
// adapted from mem.join
const total_len = blk: {
var sum: usize = dex.items.len - 1; // space for separator(s)
for (dex.items) |idx|
sum += self.data.items[idx].value.len;
break :blk sum;
};
const buf = try allocator.alloc(u8, total_len);
errdefer allocator.free(buf);
const first_value = self.data.items[dex.items[0]].value;
mem.copy(u8, buf, first_value);
var buf_index: usize = first_value.len;
for (dex.items[1..]) |idx| {
const value = self.data.items[idx].value;
buf[buf_index] = ',';
buf_index += 1;
mem.copy(u8, buf[buf_index..], value);
buf_index += value.len;
}
// No need for shrink since buf is exactly the correct size.
return buf;
}
fn rebuildIndex(self: *Self) void {
// clear out the indexes
var it = self.index.iterator();
while (it.next()) |entry| {
entry.value.shrinkRetainingCapacity(0);
}
// fill up indexes again; we know capacity is fine from before
for (self.data.items) |entry, i| {
self.index.getEntry(entry.name).?.value.appendAssumeCapacity(i);
}
}
pub fn sort(self: *Self) void {
std.sort.sort(HeaderEntry, self.data.items, {}, HeaderEntry.compare);
self.rebuildIndex();
}
pub fn format(
self: Self,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
out_stream: anytype,
) !void {
for (self.toSlice()) |entry| {
try out_stream.writeAll(entry.name);
try out_stream.writeAll(": ");
try out_stream.writeAll(entry.value);
try out_stream.writeAll("\n");
}
}
};
test "Headers.iterator" {
var h = Headers.init(testing.allocator);
defer h.deinit();
try h.append("foo", "bar", null);
try h.append("cookie", "somevalue", null);
var count: i32 = 0;
for (h.toSlice()) |e| {
if (count == 0) {
testing.expectEqualSlices(u8, "foo", e.name);
testing.expectEqualSlices(u8, "bar", e.value);
testing.expectEqual(false, e.never_index);
} else if (count == 1) {
testing.expectEqualSlices(u8, "cookie", e.name);
testing.expectEqualSlices(u8, "somevalue", e.value);
testing.expectEqual(true, e.never_index);
}
count += 1;
}
testing.expectEqual(@as(i32, 2), count);
}
test "Headers.contains" {
var h = Headers.init(testing.allocator);
defer h.deinit();
try h.append("foo", "bar", null);
try h.append("cookie", "somevalue", null);
testing.expectEqual(true, h.contains("foo"));
testing.expectEqual(false, h.contains("flooble"));
}
test "Headers.delete" {
var h = Headers.init(testing.allocator);
defer h.deinit();
try h.append("foo", "bar", null);
try h.append("baz", "qux", null);
try h.append("cookie", "somevalue", null);
testing.expectEqual(false, h.delete("not-present"));
testing.expectEqual(@as(usize, 3), h.toSlice().len);
testing.expectEqual(true, h.delete("foo"));
testing.expectEqual(@as(usize, 2), h.toSlice().len);
{
const e = h.at(0);
testing.expectEqualSlices(u8, "baz", e.name);
testing.expectEqualSlices(u8, "qux", e.value);
testing.expectEqual(false, e.never_index);
}
{
const e = h.at(1);
testing.expectEqualSlices(u8, "cookie", e.name);
testing.expectEqualSlices(u8, "somevalue", e.value);
testing.expectEqual(true, e.never_index);
}
testing.expectEqual(false, h.delete("foo"));
}
test "Headers.orderedRemove" {
var h = Headers.init(testing.allocator);
defer h.deinit();
try h.append("foo", "bar", null);
try h.append("baz", "qux", null);
try h.append("cookie", "somevalue", null);
h.orderedRemove(0);
testing.expectEqual(@as(usize, 2), h.toSlice().len);
{
const e = h.at(0);
testing.expectEqualSlices(u8, "baz", e.name);
testing.expectEqualSlices(u8, "qux", e.value);
testing.expectEqual(false, e.never_index);
}
{
const e = h.at(1);
testing.expectEqualSlices(u8, "cookie", e.name);
testing.expectEqualSlices(u8, "somevalue", e.value);
testing.expectEqual(true, e.never_index);
}
}
test "Headers.swapRemove" {
var h = Headers.init(testing.allocator);
defer h.deinit();
try h.append("foo", "bar", null);
try h.append("baz", "qux", null);
try h.append("cookie", "somevalue", null);
h.swapRemove(0);
testing.expectEqual(@as(usize, 2), h.toSlice().len);
{
const e = h.at(0);
testing.expectEqualSlices(u8, "cookie", e.name);
testing.expectEqualSlices(u8, "somevalue", e.value);
testing.expectEqual(true, e.never_index);
}
{
const e = h.at(1);
testing.expectEqualSlices(u8, "baz", e.name);
testing.expectEqualSlices(u8, "qux", e.value);
testing.expectEqual(false, e.never_index);
}
}
test "Headers.at" {
var h = Headers.init(testing.allocator);
defer h.deinit();
try h.append("foo", "bar", null);
try h.append("cookie", "somevalue", null);
{
const e = h.at(0);
testing.expectEqualSlices(u8, "foo", e.name);
testing.expectEqualSlices(u8, "bar", e.value);
testing.expectEqual(false, e.never_index);
}
{
const e = h.at(1);
testing.expectEqualSlices(u8, "cookie", e.name);
testing.expectEqualSlices(u8, "somevalue", e.value);
testing.expectEqual(true, e.never_index);
}
}
test "Headers.getIndices" {
var h = Headers.init(testing.allocator);
defer h.deinit();
try h.append("foo", "bar", null);
try h.append("set-cookie", "x=1", null);
try h.append("set-cookie", "y=2", null);
testing.expect(null == h.getIndices("not-present"));
testing.expectEqualSlices(usize, &[_]usize{0}, h.getIndices("foo").?.items);
testing.expectEqualSlices(usize, &[_]usize{ 1, 2 }, h.getIndices("set-cookie").?.items);
}
test "Headers.get" {
var h = Headers.init(testing.allocator);
defer h.deinit();
try h.append("foo", "bar", null);
try h.append("set-cookie", "x=1", null);
try h.append("set-cookie", "y=2", null);
{
const v = try h.get(testing.allocator, "not-present");
testing.expect(null == v);
}
{
const v = (try h.get(testing.allocator, "foo")).?;
defer testing.allocator.free(v);
const e = v[0];
testing.expectEqualSlices(u8, "foo", e.name);
testing.expectEqualSlices(u8, "bar", e.value);
testing.expectEqual(false, e.never_index);
}
{
const v = (try h.get(testing.allocator, "set-cookie")).?;
defer testing.allocator.free(v);
{
const e = v[0];
testing.expectEqualSlices(u8, "set-cookie", e.name);
testing.expectEqualSlices(u8, "x=1", e.value);
testing.expectEqual(true, e.never_index);
}
{
const e = v[1];
testing.expectEqualSlices(u8, "set-cookie", e.name);
testing.expectEqualSlices(u8, "y=2", e.value);
testing.expectEqual(true, e.never_index);
}
}
}
test "Headers.getCommaSeparated" {
var h = Headers.init(testing.allocator);
defer h.deinit();
try h.append("foo", "bar", null);
try h.append("set-cookie", "x=1", null);
try h.append("set-cookie", "y=2", null);
{
const v = try h.getCommaSeparated(testing.allocator, "not-present");
testing.expect(null == v);
}
{
const v = (try h.getCommaSeparated(testing.allocator, "foo")).?;
defer testing.allocator.free(v);
testing.expectEqualSlices(u8, "bar", v);
}
{
const v = (try h.getCommaSeparated(testing.allocator, "set-cookie")).?;
defer testing.allocator.free(v);
testing.expectEqualSlices(u8, "x=1,y=2", v);
}
}
test "Headers.sort" {
var h = Headers.init(testing.allocator);
defer h.deinit();
try h.append("foo", "bar", null);
try h.append("cookie", "somevalue", null);
h.sort();
{
const e = h.at(0);
testing.expectEqualSlices(u8, "cookie", e.name);
testing.expectEqualSlices(u8, "somevalue", e.value);
testing.expectEqual(true, e.never_index);
}
{
const e = h.at(1);
testing.expectEqualSlices(u8, "foo", e.name);
testing.expectEqualSlices(u8, "bar", e.value);
testing.expectEqual(false, e.never_index);
}
}
test "Headers.format" {
var h = Headers.init(testing.allocator);
defer h.deinit();
try h.append("foo", "bar", null);
try h.append("cookie", "somevalue", null);
var buf: [100]u8 = undefined;
testing.expectEqualSlices(u8,
\\foo: bar
\\cookie: somevalue
\\
, try std.fmt.bufPrint(buf[0..], "{}", .{h}));
}
@@ -101,14 +101,12 @@ pub const Level = enum {
    debug,
};

-/// The default log level is based on build mode. Note that in ReleaseSmall
-/// builds the default level is emerg but no messages will be stored/logged
-/// by the default logger to save space.
+/// The default log level is based on build mode.
pub const default_level: Level = switch (builtin.mode) {
    .Debug => .debug,
    .ReleaseSafe => .notice,
    .ReleaseFast => .err,
-   .ReleaseSmall => .emerg,
+   .ReleaseSmall => .err,
};

/// The current log level. This is set to root.log_level if present, otherwise
@@ -131,11 +129,22 @@ fn log(
        // On freestanding one must provide a log function; we do not have
        // any I/O configured.
        return;
-   } else if (builtin.mode != .ReleaseSmall) {
+   } else {
+       const level_txt = switch (message_level) {
+           .emerg => "emergency",
+           .alert => "alert",
+           .crit => "critical",
+           .err => "error",
+           .warn => "warning",
+           .notice => "notice",
+           .info => "info",
+           .debug => "debug",
+       };
+       const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
+       const stderr = std.io.getStdErr().writer();
        const held = std.debug.getStderrMutex().acquire();
        defer held.release();
-       const stderr = std.io.getStdErr().writer();
-       nosuspend stderr.print(format ++ "\n", args) catch return;
+       nosuspend stderr.print(level_txt ++ prefix2 ++ format ++ "\n", args) catch return;
    }
}
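With the hunk above, the default logger now prefixes each message with its level and, for non-default scopes, the scope name. A hedged sketch of what that looks like from user code, plus the usual root-level override hook (treat the exact hook signature as an assumption of this sketch):

const std = @import("std");

// Grounded in the new format string above:
//
//   std.log.err("disk full", .{});              // prints "error: disk full"
//   std.log.scoped(.net).warn("retrying", .{}); // prints "warning(net): retrying"
//
// An application can still take over formatting entirely by declaring `log`
// in its root source file:
pub fn log(
    comptime level: std.log.Level,
    comptime scope: @TypeOf(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    const stderr = std.io.getStdErr().writer();
    nosuspend stderr.print("[" ++ @tagName(level) ++ "] (" ++ @tagName(scope) ++ ") " ++ format ++ "\n", args) catch return;
}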
@@ -231,8 +231,6 @@ fn AllocWithOptionsPayload(
/// call `free` when done.
///
/// For allocating a single item, see `create`.
-///
-/// Deprecated; use `allocWithOptions`.
pub fn allocSentinel(
    self: *Allocator,
    comptime Elem: type,
@@ -826,3 +826,112 @@ test "sizeof" {
testing.expect(sizeof(E.One) == @sizeOf(c_int)); testing.expect(sizeof(E.One) == @sizeOf(c_int));
testing.expect(sizeof(S) == 4); testing.expect(sizeof(S) == 4);
} }
/// For a given function type, returns a tuple type which fields will
/// correspond to the argument types.
///
/// Examples:
/// - `ArgsTuple(fn() void)` `tuple { }`
/// - `ArgsTuple(fn(a: u32) u32)` `tuple { u32 }`
/// - `ArgsTuple(fn(a: u32, b: f16) noreturn)` `tuple { u32, f16 }`
pub fn ArgsTuple(comptime Function: type) type {
const info = @typeInfo(Function);
if (info != .Fn)
@compileError("ArgsTuple expects a function type");
const function_info = info.Fn;
if (function_info.is_generic)
@compileError("Cannot create ArgsTuple for generic function");
if (function_info.is_var_args)
@compileError("Cannot create ArgsTuple for variadic function");
var argument_field_list: [function_info.args.len]std.builtin.TypeInfo.StructField = undefined;
inline for (function_info.args) |arg, i| {
@setEvalBranchQuota(10_000);
var num_buf: [128]u8 = undefined;
argument_field_list[i] = std.builtin.TypeInfo.StructField{
.name = std.fmt.bufPrint(&num_buf, "{d}", .{i}) catch unreachable,
.field_type = arg.arg_type.?,
.default_value = @as(?(arg.arg_type.?), null),
.is_comptime = false,
};
}
return @Type(std.builtin.TypeInfo{
.Struct = std.builtin.TypeInfo.Struct{
.is_tuple = true,
.layout = .Auto,
.decls = &[_]std.builtin.TypeInfo.Declaration{},
.fields = &argument_field_list,
},
});
}
/// For a given anonymous list of types, returns a new tuple type
/// with those types as fields.
///
/// Examples:
/// - `Tuple(&[_]type {})` `tuple { }`
/// - `Tuple(&[_]type {f32})` `tuple { f32 }`
/// - `Tuple(&[_]type {f32,u32})` `tuple { f32, u32 }`
pub fn Tuple(comptime types: []const type) type {
var tuple_fields: [types.len]std.builtin.TypeInfo.StructField = undefined;
inline for (types) |T, i| {
@setEvalBranchQuota(10_000);
var num_buf: [128]u8 = undefined;
tuple_fields[i] = std.builtin.TypeInfo.StructField{
.name = std.fmt.bufPrint(&num_buf, "{d}", .{i}) catch unreachable,
.field_type = T,
.default_value = @as(?T, null),
.is_comptime = false,
};
}
return @Type(std.builtin.TypeInfo{
.Struct = std.builtin.TypeInfo.Struct{
.is_tuple = true,
.layout = .Auto,
.decls = &[_]std.builtin.TypeInfo.Declaration{},
.fields = &tuple_fields,
},
});
}
const TupleTester = struct {
fn assertTypeEqual(comptime Expected: type, comptime Actual: type) void {
if (Expected != Actual)
@compileError("Expected type " ++ @typeName(Expected) ++ ", but got type " ++ @typeName(Actual));
}
fn assertTuple(comptime expected: anytype, comptime Actual: type) void {
const info = @typeInfo(Actual);
if (info != .Struct)
@compileError("Expected struct type");
if (!info.Struct.is_tuple)
@compileError("Struct type must be a tuple type");
const fields_list = std.meta.fields(Actual);
if (expected.len != fields_list.len)
@compileError("Argument count mismatch");
inline for (fields_list) |fld, i| {
if (expected[i] != fld.field_type) {
@compileError("Field " ++ fld.name ++ " expected to be type " ++ @typeName(expected[i]) ++ ", but was type " ++ @typeName(fld.field_type));
}
}
}
};
test "ArgsTuple" {
TupleTester.assertTuple(.{}, ArgsTuple(fn () void));
TupleTester.assertTuple(.{u32}, ArgsTuple(fn (a: u32) []const u8));
TupleTester.assertTuple(.{ u32, f16 }, ArgsTuple(fn (a: u32, b: f16) noreturn));
TupleTester.assertTuple(.{ u32, f16, []const u8 }, ArgsTuple(fn (a: u32, b: f16, c: []const u8) noreturn));
}
test "Tuple" {
TupleTester.assertTuple(.{}, Tuple(&[_]type{}));
TupleTester.assertTuple(.{u32}, Tuple(&[_]type{u32}));
TupleTester.assertTuple(.{ u32, f16 }, Tuple(&[_]type{ u32, f16 }));
TupleTester.assertTuple(.{ u32, f16, []const u8 }, Tuple(&[_]type{ u32, f16, []const u8 }));
}
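A small usage sketch for the two new helpers: the typical consumer builds an argument tuple for a function type and forwards it with `@call`. The `std.meta.ArgsTuple` path is an assumption about where these helpers end up being exposed.

const std = @import("std");

fn add(a: u32, b: u32) u32 {
    return a + b;
}

test "forward arguments through an ArgsTuple" {
    // Build the tuple type from the function type, fill its fields, then
    // forward it with @call.
    var args: std.meta.ArgsTuple(@TypeOf(add)) = undefined;
    args.@"0" = 7;
    args.@"1" = 35;
    std.testing.expect(@call(.{}, add, args) == 42);
}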
@@ -614,11 +614,11 @@ pub fn connectUnixSocket(path: []const u8) !fs.File {
    var addr = try std.net.Address.initUnix(path);
-   try os.connect(
-       sockfd,
-       &addr.any,
-       addr.getOsSockLen(),
-   );
+   if (std.io.is_async) {
+       try loop.connect(sockfd, &addr.any, addr.getOsSockLen());
+   } else {
+       try os.connect(sockfd, &addr.any, addr.getOsSockLen());
+   }

    return fs.File{
        .handle = sockfd,
@ -677,7 +677,13 @@ pub fn tcpConnectToAddress(address: Address) !fs.File {
        (if (builtin.os.tag == .windows) 0 else os.SOCK_CLOEXEC);
    const sockfd = try os.socket(address.any.family, sock_flags, os.IPPROTO_TCP);
    errdefer os.close(sockfd);
+   if (std.io.is_async) {
+       const loop = std.event.Loop.instance orelse return error.WouldBlock;
+       try loop.connect(sockfd, &address.any, address.getOsSockLen());
+   } else {
        try os.connect(sockfd, &address.any, address.getOsSockLen());
+   }

    return fs.File{ .handle = sockfd };
}
@ -1429,10 +1435,14 @@ fn resMSendRc(
if (answers[i].len == 0) { if (answers[i].len == 0) {
var j: usize = 0; var j: usize = 0;
while (j < ns.len) : (j += 1) { while (j < ns.len) : (j += 1) {
if (std.io.is_async) {
_ = std.event.Loop.instance.?.sendto(fd, queries[i], os.MSG_NOSIGNAL, &ns[j].any, sl) catch undefined;
} else {
_ = os.sendto(fd, queries[i], os.MSG_NOSIGNAL, &ns[j].any, sl) catch undefined; _ = os.sendto(fd, queries[i], os.MSG_NOSIGNAL, &ns[j].any, sl) catch undefined;
} }
} }
} }
}
t1 = t2; t1 = t2;
servfail_retry = 2 * queries.len; servfail_retry = 2 * queries.len;
} }
@@ -1444,7 +1454,10 @@
        while (true) {
            var sl_copy = sl;
-           const rlen = os.recvfrom(fd, answer_bufs[next], 0, &sa.any, &sl_copy) catch break;
+           const rlen = if (std.io.is_async)
+               std.event.Loop.instance.?.recvfrom(fd, answer_bufs[next], 0, &sa.any, &sl_copy) catch break
+           else
+               os.recvfrom(fd, answer_bufs[next], 0, &sa.any, &sl_copy) catch break;

            // Ignore non-identifiable packets
            if (rlen < 4) continue;
@ -1470,7 +1483,11 @@ fn resMSendRc(
0, 3 => {}, 0, 3 => {},
2 => if (servfail_retry != 0) { 2 => if (servfail_retry != 0) {
servfail_retry -= 1; servfail_retry -= 1;
if (std.io.is_async) {
_ = std.event.Loop.instance.?.sendto(fd, queries[i], os.MSG_NOSIGNAL, &ns[j].any, sl) catch undefined;
} else {
_ = os.sendto(fd, queries[i], os.MSG_NOSIGNAL, &ns[j].any, sl) catch undefined; _ = os.sendto(fd, queries[i], os.MSG_NOSIGNAL, &ns[j].any, sl) catch undefined;
}
}, },
else => continue, else => continue,
} }
@@ -1661,18 +1678,23 @@ pub const StreamServer = struct {
     /// If this function succeeds, the returned `Connection` is a caller-managed resource.
     pub fn accept(self: *StreamServer) AcceptError!Connection {
-        const nonblock = if (std.io.is_async) os.SOCK_NONBLOCK else 0;
-        const accept_flags = nonblock | os.SOCK_CLOEXEC;
         var accepted_addr: Address = undefined;
         var adr_len: os.socklen_t = @sizeOf(Address);
-        if (os.accept(self.sockfd.?, &accepted_addr.any, &adr_len, accept_flags)) |fd| {
+        const accept_result = blk: {
+            if (std.io.is_async) {
+                const loop = std.event.Loop.instance orelse return error.UnexpectedError;
+                break :blk loop.accept(self.sockfd.?, &accepted_addr.any, &adr_len, os.SOCK_CLOEXEC);
+            } else {
+                break :blk os.accept(self.sockfd.?, &accepted_addr.any, &adr_len, os.SOCK_CLOEXEC);
+            }
+        };
+        if (accept_result) |fd| {
             return Connection{
                 .file = fs.File{ .handle = fd },
                 .address = accepted_addr,
             };
         } else |err| switch (err) {
-            // We only give SOCK_NONBLOCK when I/O mode is async, in which case this error
-            // is handled by os.accept4.
             error.WouldBlock => unreachable,
             else => |e| return e,
         }
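For context (not part of the diff): a rough sketch of the caller side this accept path serves when an application opts into evented I/O via the root io_mode declaration; the port and handler are placeholders.

    const std = @import("std");

    // Root declaration that selects evented I/O (makes std.io.is_async true).
    pub const io_mode = .evented;

    pub fn main() !void {
        var server = std.net.StreamServer.init(.{});
        defer server.deinit();
        try server.listen(try std.net.Address.parseIp("127.0.0.1", 8080));

        while (true) {
            // accept() dispatches to the event loop in async mode, os.accept otherwise.
            const conn = try server.accept();
            defer conn.file.close();
            // ... handle the connection ...
        }
    }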

View File

@@ -314,8 +314,8 @@ pub const ReadError = error{
 /// Returns the number of bytes that were read, which can be less than
 /// buf.len. If 0 bytes were read, that means EOF.
-/// If the application has a global event loop enabled, EAGAIN is handled
-/// via the event loop. Otherwise EAGAIN results in error.WouldBlock.
+/// If `fd` is opened in non blocking mode, the function will return error.WouldBlock
+/// when EAGAIN is received.
 ///
 /// Linux has a limit on how many bytes may be transferred in one `read` call, which is `0x7ffff000`
 /// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as
@@ -366,12 +366,7 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
             EINTR => continue,
             EINVAL => unreachable,
             EFAULT => unreachable,
-            EAGAIN => if (std.event.Loop.instance) |loop| {
-                loop.waitUntilFdReadable(fd);
-                continue;
-            } else {
-                return error.WouldBlock;
-            },
+            EAGAIN => return error.WouldBlock,
             EBADF => return error.NotOpenForReading, // Can be a race condition.
             EIO => return error.InputOutput,
             EISDIR => return error.IsDir,
@@ -387,8 +382,8 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
 /// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
 ///
-/// For POSIX systems, if the application has a global event loop enabled, EAGAIN is handled
-/// via the event loop. Otherwise EAGAIN results in `error.WouldBlock`.
+/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
+/// return error.WouldBlock when EAGAIN is received.
 /// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
 /// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
 ///
@@ -428,12 +423,7 @@ pub fn readv(fd: fd_t, iov: []const iovec) ReadError!usize {
             EINTR => continue,
             EINVAL => unreachable,
             EFAULT => unreachable,
-            EAGAIN => if (std.event.Loop.instance) |loop| {
-                loop.waitUntilFdReadable(fd);
-                continue;
-            } else {
-                return error.WouldBlock;
-            },
+            EAGAIN => return error.WouldBlock,
             EBADF => return error.NotOpenForReading, // can be a race condition
             EIO => return error.InputOutput,
             EISDIR => return error.IsDir,
@@ -450,8 +440,8 @@ pub const PReadError = ReadError || error{Unseekable};
 ///
 /// Retries when interrupted by a signal.
 ///
-/// For POSIX systems, if the application has a global event loop enabled, EAGAIN is handled
-/// via the event loop. Otherwise EAGAIN results in `error.WouldBlock`.
+/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
+/// return error.WouldBlock when EAGAIN is received.
 /// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
 /// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
 pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize {
@@ -492,12 +482,7 @@ pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize {
             EINTR => continue,
             EINVAL => unreachable,
             EFAULT => unreachable,
-            EAGAIN => if (std.event.Loop.instance) |loop| {
-                loop.waitUntilFdReadable(fd);
-                continue;
-            } else {
-                return error.WouldBlock;
-            },
+            EAGAIN => return error.WouldBlock,
             EBADF => return error.NotOpenForReading, // Can be a race condition.
             EIO => return error.InputOutput,
             EISDIR => return error.IsDir,
@@ -586,8 +571,8 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
 ///
 /// Retries when interrupted by a signal.
 ///
-/// For POSIX systems, if the application has a global event loop enabled, EAGAIN is handled
-/// via the event loop. Otherwise EAGAIN results in `error.WouldBlock`.
+/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
+/// return error.WouldBlock when EAGAIN is received.
 /// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
 /// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
 ///
@@ -637,12 +622,7 @@ pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize {
             EINTR => continue,
             EINVAL => unreachable,
             EFAULT => unreachable,
-            EAGAIN => if (std.event.Loop.instance) |loop| {
-                loop.waitUntilFdReadable(fd);
-                continue;
-            } else {
-                return error.WouldBlock;
-            },
+            EAGAIN => return error.WouldBlock,
             EBADF => return error.NotOpenForReading, // can be a race condition
             EIO => return error.InputOutput,
             EISDIR => return error.IsDir,
@@ -687,8 +667,8 @@ pub const WriteError = error{
 /// another write() call to transfer the remaining bytes. The subsequent call will either
 /// transfer further bytes or may result in an error (e.g., if the disk is now full).
 ///
-/// For POSIX systems, if the application has a global event loop enabled, EAGAIN is handled
-/// via the event loop. Otherwise EAGAIN results in `error.WouldBlock`.
+/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
+/// return error.WouldBlock when EAGAIN is received.
 /// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
 /// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
 ///
@@ -741,12 +721,7 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
             EINTR => continue,
             EINVAL => unreachable,
             EFAULT => unreachable,
-            EAGAIN => if (std.event.Loop.instance) |loop| {
-                loop.waitUntilFdWritable(fd);
-                continue;
-            } else {
-                return error.WouldBlock;
-            },
+            EAGAIN => return error.WouldBlock,
             EBADF => return error.NotOpenForWriting, // can be a race condition.
             EDESTADDRREQ => unreachable, // `connect` was never called.
             EDQUOT => return error.DiskQuota,
@@ -772,8 +747,8 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
 /// another write() call to transfer the remaining bytes. The subsequent call will either
 /// transfer further bytes or may result in an error (e.g., if the disk is now full).
 ///
-/// For POSIX systems, if the application has a global event loop enabled, EAGAIN is handled
-/// via the event loop. Otherwise EAGAIN results in `error.WouldBlock`.
+/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
+/// return error.WouldBlock when EAGAIN is received.
 /// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
 /// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
 ///
@@ -814,12 +789,7 @@ pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize {
             EINTR => continue,
             EINVAL => unreachable,
             EFAULT => unreachable,
-            EAGAIN => if (std.event.Loop.instance) |loop| {
-                loop.waitUntilFdWritable(fd);
-                continue;
-            } else {
-                return error.WouldBlock;
-            },
+            EAGAIN => return error.WouldBlock,
             EBADF => return error.NotOpenForWriting, // Can be a race condition.
             EDESTADDRREQ => unreachable, // `connect` was never called.
             EDQUOT => return error.DiskQuota,
@@ -847,8 +817,8 @@ pub const PWriteError = WriteError || error{Unseekable};
 /// another write() call to transfer the remaining bytes. The subsequent call will either
 /// transfer further bytes or may result in an error (e.g., if the disk is now full).
 ///
-/// For POSIX systems, if the application has a global event loop enabled, EAGAIN is handled
-/// via the event loop. Otherwise EAGAIN results in `error.WouldBlock`.
+/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
+/// return error.WouldBlock when EAGAIN is received.
 /// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
 /// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
 ///
@@ -905,12 +875,7 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
             EINTR => continue,
             EINVAL => unreachable,
             EFAULT => unreachable,
-            EAGAIN => if (std.event.Loop.instance) |loop| {
-                loop.waitUntilFdWritable(fd);
-                continue;
-            } else {
-                return error.WouldBlock;
-            },
+            EAGAIN => return error.WouldBlock,
             EBADF => return error.NotOpenForWriting, // Can be a race condition.
             EDESTADDRREQ => unreachable, // `connect` was never called.
             EDQUOT => return error.DiskQuota,
@@ -939,8 +904,8 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
 /// another write() call to transfer the remaining bytes. The subsequent call will either
 /// transfer further bytes or may result in an error (e.g., if the disk is now full).
 ///
-/// If the application has a global event loop enabled, EAGAIN is handled
-/// via the event loop. Otherwise EAGAIN results in `error.WouldBlock`.
+/// If `fd` is opened in non blocking mode, the function will
+/// return error.WouldBlock when EAGAIN is received.
 ///
 /// The following systems do not have this syscall, and will return partial writes if more than one
 /// vector is provided:
@@ -993,12 +958,7 @@ pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usiz
             EINTR => continue,
             EINVAL => unreachable,
             EFAULT => unreachable,
-            EAGAIN => if (std.event.Loop.instance) |loop| {
-                loop.waitUntilFdWritable(fd);
-                continue;
-            } else {
-                return error.WouldBlock;
-            },
+            EAGAIN => return error.WouldBlock,
             EBADF => return error.NotOpenForWriting, // Can be a race condition.
             EDESTADDRREQ => unreachable, // `connect` was never called.
             EDQUOT => return error.DiskQuota,
@@ -2846,8 +2806,8 @@ pub const AcceptError = error{
 } || UnexpectedError;
 /// Accept a connection on a socket.
-/// If the application has a global event loop enabled, EAGAIN is handled
-/// via the event loop. Otherwise EAGAIN results in error.WouldBlock.
+/// If `sockfd` is opened in non blocking mode, the function will
+/// return error.WouldBlock when EAGAIN is received.
 pub fn accept(
     /// This argument is a socket that has been created with `socket`, bound to a local address
     /// with `bind`, and is listening for connections after a `listen`.
@@ -2890,12 +2850,7 @@ pub fn accept(
                 return fd;
             },
             EINTR => continue,
-            EAGAIN => if (std.event.Loop.instance) |loop| {
-                loop.waitUntilFdReadable(sockfd);
-                continue;
-            } else {
-                return error.WouldBlock;
-            },
+            EAGAIN => return error.WouldBlock,
             EBADF => unreachable, // always a race condition
             ECONNABORTED => return error.ConnectionAborted,
             EFAULT => unreachable,
@@ -3081,6 +3036,8 @@ pub const ConnectError = error{
 } || UnexpectedError;
 /// Initiate a connection on a socket.
+/// If `sockfd` is opened in non blocking mode, the function will
+/// return error.WouldBlock when EAGAIN or EINPROGRESS is received.
 pub fn connect(sockfd: socket_t, sock_addr: *const sockaddr, len: socklen_t) ConnectError!void {
     if (builtin.os.tag == .windows) {
         const rc = windows.ws2_32.connect(sockfd, sock_addr, len);
@@ -3113,11 +3070,7 @@ pub fn connect(sockfd: socket_t, sock_addr: *const sockaddr, len: socklen_t) Con
         EADDRINUSE => return error.AddressInUse,
         EADDRNOTAVAIL => return error.AddressNotAvailable,
         EAFNOSUPPORT => return error.AddressFamilyNotSupported,
-        EAGAIN, EINPROGRESS => {
-            const loop = std.event.Loop.instance orelse return error.WouldBlock;
-            loop.waitUntilFdWritable(sockfd);
-            return getsockoptError(sockfd);
-        },
+        EAGAIN, EINPROGRESS => return error.WouldBlock,
         EALREADY => unreachable, // The socket is nonblocking and a previous connection attempt has not yet been completed.
         EBADF => unreachable, // sockfd is not a valid open file descriptor.
         ECONNREFUSED => return error.ConnectionRefused,
@@ -4620,14 +4573,8 @@ pub fn sendto(
         const rc = system.sendto(sockfd, buf.ptr, buf.len, flags, dest_addr, addrlen);
         switch (errno(rc)) {
             0 => return @intCast(usize, rc),
             EACCES => return error.AccessDenied,
-            EAGAIN => if (std.event.Loop.instance) |loop| {
-                loop.waitUntilFdWritable(sockfd);
-                continue;
-            } else {
-                return error.WouldBlock;
-            },
+            EAGAIN => return error.WouldBlock,
             EALREADY => return error.FastOpenAlreadyInProgress,
             EBADF => unreachable, // always a race condition
             ECONNRESET => return error.ConnectionResetByPeer,
@@ -5106,6 +5053,8 @@ pub const RecvFromError = error{
     SystemResources,
 } || UnexpectedError;
+/// If `sockfd` is opened in non blocking mode, the function will
+/// return error.WouldBlock when EAGAIN is received.
 pub fn recvfrom(
     sockfd: fd_t,
     buf: []u8,
@@ -5123,12 +5072,7 @@ pub fn recvfrom(
             ENOTCONN => unreachable,
             ENOTSOCK => unreachable,
             EINTR => continue,
-            EAGAIN => if (std.event.Loop.instance) |loop| {
-                loop.waitUntilFdReadable(sockfd);
-                continue;
-            } else {
-                return error.WouldBlock;
-            },
+            EAGAIN => return error.WouldBlock,
             ENOMEM => return error.SystemResources,
             ECONNREFUSED => return error.ConnectionRefused,
             else => |err| return unexpectedErrno(err),
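Not part of the commit, but for orientation: with EAGAIN no longer swallowed inside these wrappers, a caller that still wants event-loop suspension has to do it explicitly. A hedged sketch, assuming an initialized std.event.Loop.instance:

    const std = @import("std");

    fn readRetrying(fd: std.os.fd_t, buf: []u8) std.os.ReadError!usize {
        while (true) {
            const n = std.os.read(fd, buf) catch |err| switch (err) {
                error.WouldBlock => {
                    // Same wait the old os.read performed internally before this change.
                    std.event.Loop.instance.?.waitUntilFdReadable(fd);
                    continue;
                },
                else => |e| return e,
            };
            return n;
        }
    }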

View File

@@ -35,7 +35,7 @@ pub const SystemTable = extern struct {
     runtime_services: *RuntimeServices,
     boot_services: ?*BootServices,
     number_of_table_entries: usize,
-    configuration_table: *ConfigurationTable,
+    configuration_table: [*]ConfigurationTable,
     pub const signature: u64 = 0x5453595320494249;
     pub const revision_1_02: u32 = (1 << 16) | 2;
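A short sketch (not from this commit) of why the many-item pointer matters, assuming the conventional std.os.uefi.system_table global and its vendor_guid field:

    const uefi = @import("std").os.uefi;

    pub fn main() void {
        const st = uefi.system_table;
        var i: usize = 0;
        // [*]ConfigurationTable supports indexing; *ConfigurationTable did not.
        while (i < st.number_of_table_entries) : (i += 1) {
            const entry = st.configuration_table[i];
            _ = entry.vendor_guid;
        }
    }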

View File

@@ -217,6 +217,7 @@ pub fn DeviceIoControl(
     switch (rc) {
         .SUCCESS => {},
         .PRIVILEGE_NOT_HELD => return error.AccessDenied,
+        .ACCESS_DENIED => return error.AccessDenied,
         .INVALID_PARAMETER => unreachable,
         else => return unexpectedStatus(rc),
     }
@@ -760,6 +761,7 @@ pub const DeleteFileError = error{
     FileNotFound,
     AccessDenied,
     NameTooLong,
+    /// Also known as sharing violation.
     FileBusy,
     Unexpected,
     NotDir,
@@ -824,6 +826,7 @@ pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFil
         .INVALID_PARAMETER => unreachable,
         .FILE_IS_A_DIRECTORY => return error.IsDir,
         .NOT_A_DIRECTORY => return error.NotDir,
+        .SHARING_VIOLATION => return error.FileBusy,
         else => return unexpectedStatus(rc),
     }
 }
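For illustration only (not in the diff): with .SHARING_VIOLATION now mapped, Windows callers can distinguish a busy file. A hypothetical retry policy, assuming error.FileBusy propagates through std.fs deleteFile:

    const std = @import("std");

    fn deleteWithRetry(dir: std.fs.Dir, sub_path: []const u8) !void {
        var attempts: usize = 0;
        while (true) : (attempts += 1) {
            return dir.deleteFile(sub_path) catch |err| switch (err) {
                // Surfaced from STATUS_SHARING_VIOLATION on Windows.
                error.FileBusy => if (attempts < 3) continue else return err,
                else => |e| return e,
            };
        }
    }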

View File

@@ -1,633 +0,0 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2020 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std");
const assert = std.debug.assert;
const testing = std.testing;
const Order = std.math.Order;
const Color = enum(u1) {
Black,
Red,
};
const Red = Color.Red;
const Black = Color.Black;
const ReplaceError = error{NotEqual};
const SortError = error{NotUnique}; // The new comparison function results in duplicates.
/// Insert this into your struct that you want to add to a red-black tree.
/// Do not use a pointer. Turn the *rb.Node results of the functions in rb
/// (after resolving optionals) to your structure using @fieldParentPtr(). Example:
///
/// const Number = struct {
/// node: rb.Node,
/// value: i32,
/// };
/// fn number(node: *rb.Node) Number {
/// return @fieldParentPtr(Number, "node", node);
/// }
pub const Node = struct {
left: ?*Node,
right: ?*Node,
/// parent | color
parent_and_color: usize,
pub fn next(constnode: *Node) ?*Node {
var node = constnode;
if (node.right) |right| {
var n = right;
while (n.left) |left|
n = left;
return n;
}
while (true) {
var parent = node.getParent();
if (parent) |p| {
if (node != p.right)
return p;
node = p;
} else
return null;
}
}
pub fn prev(constnode: *Node) ?*Node {
var node = constnode;
if (node.left) |left| {
var n = left;
while (n.right) |right|
n = right;
return n;
}
while (true) {
var parent = node.getParent();
if (parent) |p| {
if (node != p.left)
return p;
node = p;
} else
return null;
}
}
pub fn isRoot(node: *Node) bool {
return node.getParent() == null;
}
fn isRed(node: *Node) bool {
return node.getColor() == Red;
}
fn isBlack(node: *Node) bool {
return node.getColor() == Black;
}
fn setParent(node: *Node, parent: ?*Node) void {
node.parent_and_color = @ptrToInt(parent) | (node.parent_and_color & 1);
}
fn getParent(node: *Node) ?*Node {
const mask: usize = 1;
comptime {
assert(@alignOf(*Node) >= 2);
}
const maybe_ptr = node.parent_and_color & ~mask;
return if (maybe_ptr == 0) null else @intToPtr(*Node, maybe_ptr);
}
fn setColor(node: *Node, color: Color) void {
const mask: usize = 1;
node.parent_and_color = (node.parent_and_color & ~mask) | @enumToInt(color);
}
fn getColor(node: *Node) Color {
return @intToEnum(Color, @intCast(u1, node.parent_and_color & 1));
}
fn setChild(node: *Node, child: ?*Node, is_left: bool) void {
if (is_left) {
node.left = child;
} else {
node.right = child;
}
}
fn getFirst(nodeconst: *Node) *Node {
var node = nodeconst;
while (node.left) |left| {
node = left;
}
return node;
}
fn getLast(nodeconst: *Node) *Node {
var node = nodeconst;
while (node.right) |right| {
node = right;
}
return node;
}
};
pub const Tree = struct {
root: ?*Node,
compareFn: fn (*Node, *Node, *Tree) Order,
/// Re-sorts a tree with a new compare function
pub fn sort(tree: *Tree, newCompareFn: fn (*Node, *Node, *Tree) Order) SortError!void {
var newTree = Tree.init(newCompareFn);
var node: *Node = undefined;
while (true) {
node = tree.first() orelse break;
tree.remove(node);
if (newTree.insert(node) != null) {
return error.NotUnique; // EEXISTS
}
}
tree.* = newTree;
}
/// If you have a need for a version that caches this, please file a bug.
pub fn first(tree: *Tree) ?*Node {
var node: *Node = tree.root orelse return null;
while (node.left) |left| {
node = left;
}
return node;
}
pub fn last(tree: *Tree) ?*Node {
var node: *Node = tree.root orelse return null;
while (node.right) |right| {
node = right;
}
return node;
}
/// Duplicate keys are not allowed. The item with the same key already in the
/// tree will be returned, and the item will not be inserted.
pub fn insert(tree: *Tree, node_const: *Node) ?*Node {
var node = node_const;
var maybe_key: ?*Node = undefined;
var maybe_parent: ?*Node = undefined;
var is_left: bool = undefined;
maybe_key = doLookup(node, tree, &maybe_parent, &is_left);
if (maybe_key) |key| {
return key;
}
node.left = null;
node.right = null;
node.setColor(Red);
node.setParent(maybe_parent);
if (maybe_parent) |parent| {
parent.setChild(node, is_left);
} else {
tree.root = node;
}
while (node.getParent()) |*parent| {
if (parent.*.isBlack())
break;
// the root is always black
var grandpa = parent.*.getParent() orelse unreachable;
if (parent.* == grandpa.left) {
var maybe_uncle = grandpa.right;
if (maybe_uncle) |uncle| {
if (uncle.isBlack())
break;
parent.*.setColor(Black);
uncle.setColor(Black);
grandpa.setColor(Red);
node = grandpa;
} else {
if (node == parent.*.right) {
rotateLeft(parent.*, tree);
node = parent.*;
parent.* = node.getParent().?; // Just rotated
}
parent.*.setColor(Black);
grandpa.setColor(Red);
rotateRight(grandpa, tree);
}
} else {
var maybe_uncle = grandpa.left;
if (maybe_uncle) |uncle| {
if (uncle.isBlack())
break;
parent.*.setColor(Black);
uncle.setColor(Black);
grandpa.setColor(Red);
node = grandpa;
} else {
if (node == parent.*.left) {
rotateRight(parent.*, tree);
node = parent.*;
parent.* = node.getParent().?; // Just rotated
}
parent.*.setColor(Black);
grandpa.setColor(Red);
rotateLeft(grandpa, tree);
}
}
}
// This was an insert, there is at least one node.
tree.root.?.setColor(Black);
return null;
}
/// lookup searches for the value of key, using binary search. It will
/// return a pointer to the node if it is there, otherwise it will return null.
/// Complexity guaranteed O(log n), where n is the number of nodes book-kept
/// by tree.
pub fn lookup(tree: *Tree, key: *Node) ?*Node {
var parent: ?*Node = undefined;
var is_left: bool = undefined;
return doLookup(key, tree, &parent, &is_left);
}
/// If node is not part of tree, behavior is undefined.
pub fn remove(tree: *Tree, nodeconst: *Node) void {
var node = nodeconst;
// as this has the same value as node, it is unsafe to access node after newnode
var newnode: ?*Node = nodeconst;
var maybe_parent: ?*Node = node.getParent();
var color: Color = undefined;
var next: *Node = undefined;
// This clause is to avoid optionals
if (node.left == null and node.right == null) {
if (maybe_parent) |parent| {
parent.setChild(null, parent.left == node);
} else
tree.root = null;
color = node.getColor();
newnode = null;
} else {
if (node.left == null) {
next = node.right.?; // Not both null as per above
} else if (node.right == null) {
next = node.left.?; // Not both null as per above
} else
next = node.right.?.getFirst(); // Just checked for null above
if (maybe_parent) |parent| {
parent.setChild(next, parent.left == node);
} else
tree.root = next;
if (node.left != null and node.right != null) {
const left = node.left.?;
const right = node.right.?;
color = next.getColor();
next.setColor(node.getColor());
next.left = left;
left.setParent(next);
if (next != right) {
var parent = next.getParent().?; // Was traversed via child node (right/left)
next.setParent(node.getParent());
newnode = next.right;
parent.left = node;
next.right = right;
right.setParent(next);
} else {
next.setParent(maybe_parent);
maybe_parent = next;
newnode = next.right;
}
} else {
color = node.getColor();
newnode = next;
}
}
if (newnode) |n|
n.setParent(maybe_parent);
if (color == Red)
return;
if (newnode) |n| {
n.setColor(Black);
return;
}
while (node == tree.root) {
// If not root, there must be parent
var parent = maybe_parent.?;
if (node == parent.left) {
var sibling = parent.right.?; // Same number of black nodes.
if (sibling.isRed()) {
sibling.setColor(Black);
parent.setColor(Red);
rotateLeft(parent, tree);
sibling = parent.right.?; // Just rotated
}
if ((if (sibling.left) |n| n.isBlack() else true) and
(if (sibling.right) |n| n.isBlack() else true))
{
sibling.setColor(Red);
node = parent;
maybe_parent = parent.getParent();
continue;
}
if (if (sibling.right) |n| n.isBlack() else true) {
sibling.left.?.setColor(Black); // Same number of black nodes.
sibling.setColor(Red);
rotateRight(sibling, tree);
sibling = parent.right.?; // Just rotated
}
sibling.setColor(parent.getColor());
parent.setColor(Black);
sibling.right.?.setColor(Black); // Same number of black nodes.
rotateLeft(parent, tree);
newnode = tree.root;
break;
} else {
var sibling = parent.left.?; // Same number of black nodes.
if (sibling.isRed()) {
sibling.setColor(Black);
parent.setColor(Red);
rotateRight(parent, tree);
sibling = parent.left.?; // Just rotated
}
if ((if (sibling.left) |n| n.isBlack() else true) and
(if (sibling.right) |n| n.isBlack() else true))
{
sibling.setColor(Red);
node = parent;
maybe_parent = parent.getParent();
continue;
}
if (if (sibling.left) |n| n.isBlack() else true) {
sibling.right.?.setColor(Black); // Same number of black nodes
sibling.setColor(Red);
rotateLeft(sibling, tree);
sibling = parent.left.?; // Just rotated
}
sibling.setColor(parent.getColor());
parent.setColor(Black);
sibling.left.?.setColor(Black); // Same number of black nodes
rotateRight(parent, tree);
newnode = tree.root;
break;
}
if (node.isRed())
break;
}
if (newnode) |n|
n.setColor(Black);
}
/// This is a shortcut to avoid removing and re-inserting an item with the same key.
pub fn replace(tree: *Tree, old: *Node, newconst: *Node) !void {
var new = newconst;
// I assume this can get optimized out if the caller already knows.
if (tree.compareFn(old, new, tree) != .eq) return ReplaceError.NotEqual;
if (old.getParent()) |parent| {
parent.setChild(new, parent.left == old);
} else
tree.root = new;
if (old.left) |left|
left.setParent(new);
if (old.right) |right|
right.setParent(new);
new.* = old.*;
}
pub fn init(f: fn (*Node, *Node, *Tree) Order) Tree {
return Tree{
.root = null,
.compareFn = f,
};
}
};
fn rotateLeft(node: *Node, tree: *Tree) void {
var p: *Node = node;
var q: *Node = node.right orelse unreachable;
var parent: *Node = undefined;
if (!p.isRoot()) {
parent = p.getParent().?;
if (parent.left == p) {
parent.left = q;
} else {
parent.right = q;
}
q.setParent(parent);
} else {
tree.root = q;
q.setParent(null);
}
p.setParent(q);
p.right = q.left;
if (p.right) |right| {
right.setParent(p);
}
q.left = p;
}
fn rotateRight(node: *Node, tree: *Tree) void {
var p: *Node = node;
var q: *Node = node.left orelse unreachable;
var parent: *Node = undefined;
if (!p.isRoot()) {
parent = p.getParent().?;
if (parent.left == p) {
parent.left = q;
} else {
parent.right = q;
}
q.setParent(parent);
} else {
tree.root = q;
q.setParent(null);
}
p.setParent(q);
p.left = q.right;
if (p.left) |left| {
left.setParent(p);
}
q.right = p;
}
fn doLookup(key: *Node, tree: *Tree, pparent: *?*Node, is_left: *bool) ?*Node {
var maybe_node: ?*Node = tree.root;
pparent.* = null;
is_left.* = false;
while (maybe_node) |node| {
const res = tree.compareFn(node, key, tree);
if (res == .eq) {
return node;
}
pparent.* = node;
switch (res) {
.gt => {
is_left.* = true;
maybe_node = node.left;
},
.lt => {
is_left.* = false;
maybe_node = node.right;
},
.eq => unreachable, // handled above
}
}
return null;
}
const testNumber = struct {
node: Node,
value: usize,
};
fn testGetNumber(node: *Node) *testNumber {
return @fieldParentPtr(testNumber, "node", node);
}
fn testCompare(l: *Node, r: *Node, contextIgnored: *Tree) Order {
var left = testGetNumber(l);
var right = testGetNumber(r);
if (left.value < right.value) {
return .lt;
} else if (left.value == right.value) {
return .eq;
} else if (left.value > right.value) {
return .gt;
}
unreachable;
}
fn testCompareReverse(l: *Node, r: *Node, contextIgnored: *Tree) Order {
return testCompare(r, l, contextIgnored);
}
test "rb" {
if (@import("builtin").arch == .aarch64) {
// TODO https://github.com/ziglang/zig/issues/3288
return error.SkipZigTest;
}
var tree = Tree.init(testCompare);
var ns: [10]testNumber = undefined;
ns[0].value = 42;
ns[1].value = 41;
ns[2].value = 40;
ns[3].value = 39;
ns[4].value = 38;
ns[5].value = 39;
ns[6].value = 3453;
ns[7].value = 32345;
ns[8].value = 392345;
ns[9].value = 4;
var dup: testNumber = undefined;
dup.value = 32345;
_ = tree.insert(&ns[1].node);
_ = tree.insert(&ns[2].node);
_ = tree.insert(&ns[3].node);
_ = tree.insert(&ns[4].node);
_ = tree.insert(&ns[5].node);
_ = tree.insert(&ns[6].node);
_ = tree.insert(&ns[7].node);
_ = tree.insert(&ns[8].node);
_ = tree.insert(&ns[9].node);
tree.remove(&ns[3].node);
testing.expect(tree.insert(&dup.node) == &ns[7].node);
try tree.replace(&ns[7].node, &dup.node);
var num: *testNumber = undefined;
num = testGetNumber(tree.first().?);
while (num.node.next() != null) {
testing.expect(testGetNumber(num.node.next().?).value > num.value);
num = testGetNumber(num.node.next().?);
}
}
test "inserting and looking up" {
var tree = Tree.init(testCompare);
var number: testNumber = undefined;
number.value = 1000;
_ = tree.insert(&number.node);
var dup: testNumber = undefined;
//Assert that tuples with identical value fields finds the same pointer
dup.value = 1000;
assert(tree.lookup(&dup.node) == &number.node);
//Assert that tuples with identical values do not clobber when inserted.
_ = tree.insert(&dup.node);
assert(tree.lookup(&dup.node) == &number.node);
assert(tree.lookup(&number.node) != &dup.node);
assert(testGetNumber(tree.lookup(&dup.node).?).value == testGetNumber(&dup.node).value);
//Assert that if looking for a non-existing value, return null.
var non_existing_value: testNumber = undefined;
non_existing_value.value = 1234;
assert(tree.lookup(&non_existing_value.node) == null);
}
test "multiple inserts, followed by calling first and last" {
if (@import("builtin").arch == .aarch64) {
// TODO https://github.com/ziglang/zig/issues/3288
return error.SkipZigTest;
}
var tree = Tree.init(testCompare);
var zeroth: testNumber = undefined;
zeroth.value = 0;
var first: testNumber = undefined;
first.value = 1;
var second: testNumber = undefined;
second.value = 2;
var third: testNumber = undefined;
third.value = 3;
_ = tree.insert(&zeroth.node);
_ = tree.insert(&first.node);
_ = tree.insert(&second.node);
_ = tree.insert(&third.node);
assert(testGetNumber(tree.first().?).value == 0);
assert(testGetNumber(tree.last().?).value == 3);
var lookupNode: testNumber = undefined;
lookupNode.value = 3;
assert(tree.lookup(&lookupNode.node) == &third.node);
tree.sort(testCompareReverse) catch unreachable;
assert(testGetNumber(tree.first().?).value == 3);
assert(testGetNumber(tree.last().?).value == 0);
assert(tree.lookup(&lookupNode.node) == &third.node);
}

View File

@@ -161,7 +161,7 @@ fn usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void
             try fmt.allocPrint(allocator, "{} (default)", .{top_level_step.step.name})
         else
             top_level_step.step.name;
-        try out_stream.print(" {s:22} {}\n", .{ name, top_level_step.description });
+        try out_stream.print(" {s:<27} {}\n", .{ name, top_level_step.description });
     }
     try out_stream.writeAll(
@@ -185,7 +185,7 @@ fn usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void
             Builder.typeIdName(option.type_id),
         });
         defer allocator.free(name);
-        try out_stream.print("{s:24} {}\n", .{ name, option.description });
+        try out_stream.print("{s:<29} {}\n", .{ name, option.description });
     }
 }

View File

@@ -103,6 +103,6 @@ pub fn log(
         log_err_count += 1;
     }
     if (@enumToInt(message_level) <= @enumToInt(std.testing.log_level)) {
-        std.debug.print("[{}] ({}): " ++ format, .{ @tagName(scope), @tagName(message_level) } ++ args);
+        std.debug.print("[{}] ({}): " ++ format ++ "\n", .{ @tagName(scope), @tagName(message_level) } ++ args);
     }
 }

View File

@@ -224,7 +224,7 @@ inline fn initEventLoopAndCallMain() u8 {
     if (std.event.Loop.instance) |loop| {
         if (!@hasDecl(root, "event_loop")) {
             loop.init() catch |err| {
-                std.debug.warn("error: {}\n", .{@errorName(err)});
+                std.log.err("{}", .{@errorName(err)});
                 if (@errorReturnTrace()) |trace| {
                     std.debug.dumpStackTrace(trace.*);
                 }
@@ -270,7 +270,7 @@ pub fn callMain() u8 {
         },
         .ErrorUnion => {
             const result = root.main() catch |err| {
-                std.debug.warn("error: {}\n", .{@errorName(err)});
+                std.log.err("{}", .{@errorName(err)});
                 if (@errorReturnTrace()) |trace| {
                     std.debug.dumpStackTrace(trace.*);
                 }

View File

@@ -14,7 +14,6 @@ pub const AutoArrayHashMap = array_hash_map.AutoArrayHashMap;
 pub const AutoArrayHashMapUnmanaged = array_hash_map.AutoArrayHashMapUnmanaged;
 pub const AutoHashMap = hash_map.AutoHashMap;
 pub const AutoHashMapUnmanaged = hash_map.AutoHashMapUnmanaged;
-pub const BloomFilter = @import("bloom_filter.zig").BloomFilter;
 pub const BufMap = @import("buf_map.zig").BufMap;
 pub const BufSet = @import("buf_set.zig").BufSet;
 pub const ChildProcess = @import("child_process.zig").ChildProcess;
@@ -48,7 +47,6 @@ pub const base64 = @import("base64.zig");
 pub const build = @import("build.zig");
 pub const builtin = @import("builtin.zig");
 pub const c = @import("c.zig");
-pub const cache_hash = @import("cache_hash.zig");
 pub const coff = @import("coff.zig");
 pub const compress = @import("compress.zig");
 pub const crypto = @import("crypto.zig");
@@ -63,7 +61,6 @@ pub const fs = @import("fs.zig");
 pub const hash = @import("hash.zig");
 pub const hash_map = @import("hash_map.zig");
 pub const heap = @import("heap.zig");
-pub const http = @import("http.zig");
 pub const io = @import("io.zig");
 pub const json = @import("json.zig");
 pub const log = @import("log.zig");
@@ -78,7 +75,6 @@ pub const packed_int_array = @import("packed_int_array.zig");
 pub const pdb = @import("pdb.zig");
 pub const process = @import("process.zig");
 pub const rand = @import("rand.zig");
-pub const rb = @import("rb.zig");
 pub const sort = @import("sort.zig");
 pub const ascii = @import("ascii.zig");
 pub const testing = @import("testing.zig");

View File

@@ -75,6 +75,13 @@ pub const Target = struct {
                     else => return ".so",
                 }
             }
+
+            pub fn defaultVersionRange(tag: Tag) Os {
+                return .{
+                    .tag = tag,
+                    .version_range = VersionRange.default(tag),
+                };
+            }
         };
         /// Based on NTDDI version constants from
@@ -290,11 +297,32 @@ pub const Target = struct {
            }
        };
-        pub fn defaultVersionRange(tag: Tag) Os {
-            return .{
-                .tag = tag,
-                .version_range = VersionRange.default(tag),
-            };
-        }
+        pub const TaggedVersionRange = union(enum) {
+            none: void,
+            semver: Version.Range,
+            linux: LinuxVersionRange,
+            windows: WindowsVersion.Range,
+        };
+
+        /// Provides a tagged union. `Target` does not store the tag because it is
+        /// redundant with the OS tag; this function abstracts that part away.
+        pub fn getVersionRange(self: Os) TaggedVersionRange {
+            switch (self.tag) {
+                .linux => return TaggedVersionRange{ .linux = self.version_range.linux },
+                .windows => return TaggedVersionRange{ .windows = self.version_range.windows },
+
+                .freebsd,
+                .macosx,
+                .ios,
+                .tvos,
+                .watchos,
+                .netbsd,
+                .openbsd,
+                .dragonfly,
+                => return TaggedVersionRange{ .semver = self.version_range.semver },
+
+                else => return .none,
+            }
+        }
         /// Checks if system is guaranteed to be at least `version` or older than `version`.
@@ -455,18 +483,9 @@ pub const Target = struct {
                 else => false,
             };
         }
-
-        pub fn oFileExt(abi: Abi) [:0]const u8 {
-            return switch (abi) {
-                .msvc => ".obj",
-                else => ".o",
-            };
-        }
     };
     pub const ObjectFormat = enum {
-        /// TODO Get rid of this one.
-        unknown,
         coff,
         pe,
         elf,
@@ -1116,8 +1135,18 @@ pub const Target = struct {
         return linuxTripleSimple(allocator, self.cpu.arch, self.os.tag, self.abi);
     }
+    pub fn oFileExt_cpu_arch_abi(cpu_arch: Cpu.Arch, abi: Abi) [:0]const u8 {
+        if (cpu_arch.isWasm()) {
+            return ".o.wasm";
+        }
+        switch (abi) {
+            .msvc => return ".obj",
+            else => return ".o",
+        }
+    }
+
     pub fn oFileExt(self: Target) [:0]const u8 {
-        return self.abi.oFileExt();
+        return oFileExt_cpu_arch_abi(self.cpu.arch, self.abi);
     }
     pub fn exeFileExtSimple(cpu_arch: Cpu.Arch, os_tag: Os.Tag) [:0]const u8 {
@@ -1457,6 +1486,27 @@ pub const Target = struct {
             => return result,
         }
     }
+
+    /// Return whether or not the given host target is capable of executing natively executables
+    /// of the other target.
+    pub fn canExecBinariesOf(host_target: Target, binary_target: Target) bool {
+        if (host_target.os.tag != binary_target.os.tag)
+            return false;
+
+        if (host_target.cpu.arch == binary_target.cpu.arch)
+            return true;
+
+        if (host_target.cpu.arch == .x86_64 and binary_target.cpu.arch == .i386)
+            return true;
+
+        if (host_target.cpu.arch == .aarch64 and binary_target.cpu.arch == .arm)
+            return true;
+
+        if (host_target.cpu.arch == .aarch64_be and binary_target.cpu.arch == .armeb)
+            return true;
+
+        return false;
+    }
 };
 test "" {

View File

@@ -38,7 +38,7 @@ pub fn expectError(expected_error: anyerror, actual_error_union: anytype) void {
 /// This function is intended to be used only in tests. When the two values are not
 /// equal, prints diagnostics to stderr to show exactly how they are not equal,
 /// then aborts.
-/// The types must match exactly.
+/// `actual` is casted to the type of `expected`.
 pub fn expectEqual(expected: anytype, actual: @TypeOf(expected)) void {
     switch (@typeInfo(@TypeOf(actual))) {
         .NoReturn,
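An illustrative (non-diff) example of why that doc change matters: the type of `expected` drives the comparison, so literals are usually cast to the concrete type on the expected side.

    const std = @import("std");

    test "expectEqual coerces actual to expected's type" {
        var list = std.ArrayList(u8).init(std.testing.allocator);
        defer list.deinit();
        try list.append(42);
        // Pass the expected value as a concrete usize; `list.items.len` is then
        // compared against it without a comptime_int type mismatch.
        std.testing.expectEqual(@as(usize, 1), list.items.len);
    }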

View File

@@ -64,24 +64,84 @@ pub fn lineDelta(source: []const u8, start: usize, end: usize) isize {
     return line;
 }
-/// Returns the standard file system basename of a binary generated by the Zig compiler.
-pub fn binNameAlloc(
-    allocator: *std.mem.Allocator,
+pub const BinNameOptions = struct {
     root_name: []const u8,
     target: std.Target,
     output_mode: std.builtin.OutputMode,
-    link_mode: ?std.builtin.LinkMode,
-) error{OutOfMemory}![]u8 {
-    switch (output_mode) {
-        .Exe => return std.fmt.allocPrint(allocator, "{}{}", .{ root_name, target.exeFileExt() }),
-        .Lib => {
-            const suffix = switch (link_mode orelse .Static) {
-                .Static => target.staticLibSuffix(),
-                .Dynamic => target.dynamicLibSuffix(),
-            };
-            return std.fmt.allocPrint(allocator, "{}{}{}", .{ target.libPrefix(), root_name, suffix });
-        },
-        .Obj => return std.fmt.allocPrint(allocator, "{}{}", .{ root_name, target.oFileExt() }),
+    link_mode: ?std.builtin.LinkMode = null,
+    object_format: ?std.Target.ObjectFormat = null,
+    version: ?std.builtin.Version = null,
+};
+
+/// Returns the standard file system basename of a binary generated by the Zig compiler.
+pub fn binNameAlloc(allocator: *std.mem.Allocator, options: BinNameOptions) error{OutOfMemory}![]u8 {
+    const root_name = options.root_name;
+    const target = options.target;
+    switch (options.object_format orelse target.getObjectFormat()) {
+        .coff, .pe => switch (options.output_mode) {
+            .Exe => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.exeFileExt() }),
+            .Lib => {
+                const suffix = switch (options.link_mode orelse .Static) {
+                    .Static => ".lib",
+                    .Dynamic => ".dll",
+                };
+                return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, suffix });
+            },
+            .Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.oFileExt() }),
+        },
+        .elf => switch (options.output_mode) {
+            .Exe => return allocator.dupe(u8, root_name),
+            .Lib => {
+                switch (options.link_mode orelse .Static) {
+                    .Static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{
+                        target.libPrefix(), root_name,
+                    }),
+                    .Dynamic => {
+                        if (options.version) |ver| {
+                            return std.fmt.allocPrint(allocator, "{s}{s}.so.{d}.{d}.{d}", .{
+                                target.libPrefix(), root_name, ver.major, ver.minor, ver.patch,
+                            });
+                        } else {
+                            return std.fmt.allocPrint(allocator, "{s}{s}.so", .{
+                                target.libPrefix(), root_name,
+                            });
+                        }
+                    },
+                }
+            },
+            .Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.oFileExt() }),
+        },
+        .macho => switch (options.output_mode) {
+            .Exe => return allocator.dupe(u8, root_name),
+            .Lib => {
+                switch (options.link_mode orelse .Static) {
+                    .Static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{
+                        target.libPrefix(), root_name,
+                    }),
+                    .Dynamic => {
+                        if (options.version) |ver| {
+                            return std.fmt.allocPrint(allocator, "{s}{s}.{d}.{d}.{d}.dylib", .{
+                                target.libPrefix(), root_name, ver.major, ver.minor, ver.patch,
+                            });
+                        } else {
+                            return std.fmt.allocPrint(allocator, "{s}{s}.dylib", .{
+                                target.libPrefix(), root_name,
+                            });
+                        }
+                    },
+                }
+            },
+            .Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.oFileExt() }),
+        },
+        .wasm => switch (options.output_mode) {
+            .Exe => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.exeFileExt() }),
+            .Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.oFileExt() }),
+            .Lib => return std.fmt.allocPrint(allocator, "{s}.wasm", .{root_name}),
+        },
+        .c => return std.fmt.allocPrint(allocator, "{s}.c", .{root_name}),
+        .hex => return std.fmt.allocPrint(allocator, "{s}.ihex", .{root_name}),
+        .raw => return std.fmt.allocPrint(allocator, "{s}.bin", .{root_name}),
     }
 }
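A minimal sketch (not from the commit) of calling the reworked API, assuming it is reachable as std.zig.binNameAlloc as in the hunk above:

    const std = @import("std");

    fn exampleName(gpa: *std.mem.Allocator) ![]u8 {
        // Only the fields without defaults are required; link_mode, object_format
        // and version fall back to null. Caller owns and frees the returned name.
        return std.zig.binNameAlloc(gpa, .{
            .root_name = "hello",
            .target = std.Target.current,
            .output_mode = .Exe,
        });
    }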

View File

@@ -823,6 +823,15 @@ pub const Node = struct {
         }
     }
+    pub fn findFirstWithId(self: *Node, id: Id) ?*Node {
+        if (self.id == id) return self;
+        var child_i: usize = 0;
+        while (self.iterate(child_i)) |child| : (child_i += 1) {
+            if (child.findFirstWithId(id)) |result| return result;
+        }
+        return null;
+    }
+
     pub fn dump(self: *Node, indent: usize) void {
         {
             var i: usize = 0;
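A hypothetical helper (not in the diff) showing the intended depth-first search; it assumes `.FnProto` is a valid std.zig.ast.Node.Id and that the tree's root node exposes its base Node as in the current ast layout.

    const std = @import("std");

    fn firstFnProto(tree: *std.zig.ast.Tree) ?*std.zig.ast.Node {
        // Returns the first function prototype node in source order, if any.
        return tree.root_node.base.findFirstWithId(.FnProto);
    }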

View File

@@ -375,7 +375,7 @@ pub const CrossTarget = struct {
         // `Target.current.os` works when doing `zig build` because Zig generates a build executable using
         // native OS version range. However this will not be accurate otherwise, and
         // will need to be integrated with `std.zig.system.NativeTargetInfo.detect`.
-        var adjusted_os = if (self.os_tag) |os_tag| Target.Os.defaultVersionRange(os_tag) else Target.current.os;
+        var adjusted_os = if (self.os_tag) |os_tag| os_tag.defaultVersionRange() else Target.current.os;
         if (self.os_version_min) |min| switch (min) {
             .none => {},
@@ -466,7 +466,7 @@ pub const CrossTarget = struct {
     }
     pub fn oFileExt(self: CrossTarget) [:0]const u8 {
-        return self.getAbi().oFileExt();
+        return Target.oFileExt_cpu_arch_abi(self.getCpuArch(), self.getAbi());
     }
     pub fn exeFileExt(self: CrossTarget) [:0]const u8 {

View File

@@ -1301,8 +1301,10 @@ test "zig fmt: array literal with hint" {
         \\const a = []u8{
         \\    1, 2,
         \\    3, 4,
-        \\    5, 6, // blah
-        \\    7, 8,
+        \\    5,
+        \\    6, // blah
+        \\    7,
+        \\    8,
         \\};
         \\const a = []u8{
         \\    1, 2,
@@ -3321,6 +3323,326 @@ test "zig fmt: Don't add extra newline after if" {
     );
 }
test "zig fmt: comments in ternary ifs" {
try testCanonical(
\\const x = if (true) {
\\ 1;
\\} else if (false)
\\ // Comment
\\ 0;
\\const y = if (true)
\\ // Comment
\\ 1
\\else
\\ 0;
\\
\\pub extern "c" fn printf(format: [*:0]const u8, ...) c_int;
\\
);
}
test "zig fmt: test comments in field access chain" {
try testCanonical(
\\pub const str = struct {
\\ pub const Thing = more.more //
\\ .more() //
\\ .more().more() //
\\ .more() //
\\ // .more() //
\\ .more() //
\\ .more();
\\ data: Data,
\\};
\\
\\pub const str = struct {
\\ pub const Thing = more.more //
\\ .more() //
\\ // .more() //
\\ // .more() //
\\ // .more() //
\\ .more() //
\\ .more();
\\ data: Data,
\\};
\\
\\pub const str = struct {
\\ pub const Thing = more //
\\ .more //
\\ .more() //
\\ .more();
\\ data: Data,
\\};
\\
);
}
test "zig fmt: Indent comma correctly after multiline string literals in arg list (trailing comma)" {
try testCanonical(
\\fn foo() void {
\\ z.display_message_dialog(
\\ *const [323:0]u8,
\\ \\Message Text
\\ \\------------
\\ \\xxxxxxxxxxxx
\\ \\xxxxxxxxxxxx
\\ ,
\\ g.GtkMessageType.GTK_MESSAGE_WARNING,
\\ null,
\\ );
\\
\\ z.display_message_dialog(*const [323:0]u8,
\\ \\Message Text
\\ \\------------
\\ \\xxxxxxxxxxxx
\\ \\xxxxxxxxxxxx
\\ , g.GtkMessageType.GTK_MESSAGE_WARNING, null);
\\}
\\
);
}
test "zig fmt: Control flow statement as body of blockless if" {
try testCanonical(
\\pub fn main() void {
\\ const zoom_node = if (focused_node == layout_first)
\\ if (it.next()) {
\\ if (!node.view.pending.float and !node.view.pending.fullscreen) break node;
\\ } else null
\\ else
\\ focused_node;
\\
\\ const zoom_node = if (focused_node == layout_first) while (it.next()) |node| {
\\ if (!node.view.pending.float and !node.view.pending.fullscreen) break node;
\\ } else null else
\\ focused_node;
\\
\\ const zoom_node = if (focused_node == layout_first)
\\ if (it.next()) {
\\ if (!node.view.pending.float and !node.view.pending.fullscreen) break node;
\\ } else null;
\\
\\ const zoom_node = if (focused_node == layout_first) while (it.next()) |node| {
\\ if (!node.view.pending.float and !node.view.pending.fullscreen) break node;
\\ };
\\
\\ const zoom_node = if (focused_node == layout_first) for (nodes) |node| {
\\ break node;
\\ };
\\
\\ const zoom_node = if (focused_node == layout_first) switch (nodes) {
\\ 0 => 0,
\\ } else
\\ focused_node;
\\}
\\
);
}
test "zig fmt: " {
try testCanonical(
\\pub fn sendViewTags(self: Self) void {
\\ var it = ViewStack(View).iterator(self.output.views.first, std.math.maxInt(u32));
\\ while (it.next()) |node|
\\ view_tags.append(node.view.current_tags) catch {
\\ c.wl_resource_post_no_memory(self.wl_resource);
\\ log.crit(.river_status, "out of memory", .{});
\\ return;
\\ };
\\}
\\
);
}
test "zig fmt: allow trailing line comments to do manual array formatting" {
try testCanonical(
\\fn foo() void {
\\ self.code.appendSliceAssumeCapacity(&[_]u8{
\\ 0x55, // push rbp
\\ 0x48, 0x89, 0xe5, // mov rbp, rsp
\\ 0x48, 0x81, 0xec, // sub rsp, imm32 (with reloc)
\\ });
\\
\\ di_buf.appendAssumeCapacity(&[_]u8{
\\ 1, DW.TAG_compile_unit, DW.CHILDREN_no, // header
\\ DW.AT_stmt_list, DW_FORM_data4, // form value pairs
\\ DW.AT_low_pc, DW_FORM_addr,
\\ DW.AT_high_pc, DW_FORM_addr,
\\ DW.AT_name, DW_FORM_strp,
\\ DW.AT_comp_dir, DW_FORM_strp,
\\ DW.AT_producer, DW_FORM_strp,
\\ DW.AT_language, DW_FORM_data2,
\\ 0, 0, // sentinel
\\ });
\\
\\ self.code.appendSliceAssumeCapacity(&[_]u8{
\\ 0x55, // push rbp
\\ 0x48, 0x89, 0xe5, // mov rbp, rsp
\\ // How do we handle this?
\\ //0x48, 0x81, 0xec, // sub rsp, imm32 (with reloc)
\\ // Here's a blank line, should that be allowed?
\\
\\ 0x48, 0x89, 0xe5,
\\ 0x33, 0x45,
\\ // Now the comment breaks a single line -- how do we handle this?
\\ 0x88,
\\ });
\\}
\\
);
}
test "zig fmt: multiline string literals should play nice with array initializers" {
try testCanonical(
\\fn main() void {
\\ var a = .{.{.{.{.{.{.{.{
\\ 0,
\\ }}}}}}}};
\\ myFunc(.{
\\ "aaaaaaa", "bbbbbb", "ccccc",
\\ "dddd", ("eee"), ("fff"),
\\ ("gggg"),
\\ // Line comment
\\ \\Multiline String Literals can be quite long
\\ ,
\\ \\Multiline String Literals can be quite long
\\ \\Multiline String Literals can be quite long
\\ ,
\\ \\Multiline String Literals can be quite long
\\ \\Multiline String Literals can be quite long
\\ \\Multiline String Literals can be quite long
\\ \\Multiline String Literals can be quite long
\\ ,
\\ (
\\ \\Multiline String Literals can be quite long
\\ ),
\\ .{
\\ \\xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
\\ \\xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
\\ \\xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
\\ },
\\ .{(
\\ \\xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
\\ )},
\\ .{
\\ "xxxxxxx", "xxx",
\\ (
\\ \\ xxx
\\ ),
\\ "xxx", "xxx",
\\ },
\\ .{ "xxxxxxx", "xxx", "xxx", "xxx" }, .{ "xxxxxxx", "xxx", "xxx", "xxx" },
\\ "aaaaaaa", "bbbbbb", "ccccc", // -
\\ "dddd", ("eee"), ("fff"),
\\ .{
\\ "xxx", "xxx",
\\ (
\\ \\ xxx
\\ ),
\\ "xxxxxxxxxxxxxx", "xxx",
\\ },
\\ .{
\\ (
\\ \\xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
\\ ),
\\ \\xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
\\ },
\\ \\xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
\\ \\xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
\\ });
\\}
\\
);
}
test "zig fmt: use of comments and Multiline string literals may force the parameters over multiple lines" {
try testCanonical(
\\pub fn makeMemUndefined(qzz: []u8) i1 {
\\ cases.add( // fixed bug #2032
\\ "compile diagnostic string for top level decl type",
\\ \\export fn entry() void {
\\ \\ var foo: u32 = @This(){};
\\ \\}
\\ , &[_][]const u8{
\\ "tmp.zig:2:27: error: type 'u32' does not support array initialization",
\\ });
\\ @compileError(
\\ \\ unknown-length pointers and C pointers cannot be hashed deeply.
\\ \\ Consider providing your own hash function.
\\ \\ unknown-length pointers and C pointers cannot be hashed deeply.
\\ \\ Consider providing your own hash function.
\\ );
\\ return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
\\ .MakeMemUndefined, @ptrToInt(qzz.ptr), qzz.len, 0, 0, 0));
\\}
\\
\\// This looks like garbage don't do this
\\const rparen = tree.prevToken(
\\// the first token for the annotation expressions is the left
\\// parenthesis, hence the need for two prevToken
\\ if (fn_proto.getAlignExpr()) |align_expr|
\\ tree.prevToken(tree.prevToken(align_expr.firstToken()))
\\else if (fn_proto.getSectionExpr()) |section_expr|
\\ tree.prevToken(tree.prevToken(section_expr.firstToken()))
\\else if (fn_proto.getCallconvExpr()) |callconv_expr|
\\ tree.prevToken(tree.prevToken(callconv_expr.firstToken()))
\\else switch (fn_proto.return_type) {
\\ .Explicit => |node| node.firstToken(),
\\ .InferErrorSet => |node| tree.prevToken(node.firstToken()),
\\ .Invalid => unreachable,
\\});
\\
);
}
test "zig fmt: single argument trailing commas in @builtins()" {
try testCanonical(
\\pub fn foo(qzz: []u8) i1 {
\\ @panic(
\\ foo,
\\ );
\\ panic(
\\ foo,
\\ );
\\ @panic(
\\ foo,
\\ bar,
\\ );
\\}
\\
);
}
test "zig fmt: trailing comma should force multiline 1 column" {
try testTransform(
\\pub const UUID_NULL: uuid_t = [16]u8{0,0,0,0,};
\\
,
\\pub const UUID_NULL: uuid_t = [16]u8{
\\ 0,
\\ 0,
\\ 0,
\\ 0,
\\};
\\
);
}
test "zig fmt: function params should align nicely" {
try testCanonical(
\\pub fn foo() void {
\\ cases.addRuntimeSafety("slicing operator with sentinel",
\\ \\const std = @import("std");
\\ ++ check_panic_msg ++
\\ \\pub fn main() void {
\\ \\ var buf = [4]u8{'a','b','c',0};
\\ \\ const slice = buf[0..:0];
\\ \\}
\\ );
\\}
\\
);
}
const std = @import("std");
const mem = std.mem;
const warn = std.debug.warn;

View File

@@ -522,7 +522,11 @@ fn renderExpression(
break :blk if (loc.line == 0) op_space else Space.Newline;
};
{
ais.pushIndent();
defer ais.popIndent();
try renderToken(tree, ais, infix_op_node.op_token, after_op_space);
}
ais.pushIndentOneShot();
return renderExpression(allocator, ais, tree, infix_op_node.rhs, space);
},
@@ -710,127 +714,181 @@ fn renderExpression(
.node => |node| tree.nextToken(node.lastToken()),
};
if (exprs.len == 0) {
switch (lhs) {
.dot => |dot| try renderToken(tree, ais, dot, Space.None),
.node => |node| try renderExpression(allocator, ais, tree, node, Space.None),
}
{
if (exprs.len == 0) {
ais.pushIndent();
defer ais.popIndent();
try renderToken(tree, ais, lbrace, Space.None);
}
return renderToken(tree, ais, rtoken, space);
}
if (exprs.len == 1 and tree.token_ids[exprs[0].*.lastToken() + 1] == .RBrace) {
if (exprs.len == 1 and exprs[0].tag != .MultilineStringLiteral and tree.token_ids[exprs[0].*.lastToken() + 1] == .RBrace) {
const expr = exprs[0]; const expr = exprs[0];
switch (lhs) {
.dot => |dot| try renderToken(tree, ais, dot, Space.None),
.node => |node| try renderExpression(allocator, ais, tree, node, Space.None),
}
try renderToken(tree, ais, lbrace, Space.None);
try renderExpression(allocator, ais, tree, expr, Space.None);
return renderToken(tree, ais, rtoken, space);
}
switch (lhs) {
.dot => |dot| try renderToken(tree, ais, dot, Space.None),
.node => |node| try renderExpression(allocator, ais, tree, node, Space.None),
}
// scan to find row size
const maybe_row_size: ?usize = blk: {
if (rowSize(tree, exprs, rtoken) != null) {
var count: usize = 1;
for (exprs) |expr, i| {
if (i + 1 < exprs.len) {
const expr_last_token = expr.lastToken() + 1;
const loc = tree.tokenLocation(tree.token_locs[expr_last_token].end, exprs[i + 1].firstToken());
if (loc.line != 0) break :blk count;
count += 1;
} else {
const expr_last_token = expr.lastToken();
const loc = tree.tokenLocation(tree.token_locs[expr_last_token].end, rtoken);
if (loc.line == 0) {
// all on one line
const src_has_trailing_comma = trailblk: {
const maybe_comma = tree.prevToken(rtoken);
break :trailblk tree.token_ids[maybe_comma] == .Comma;
};
if (src_has_trailing_comma) {
break :blk 1; // force row size 1
} else {
break :blk null; // no newlines
}
}
break :blk count;
}
}
unreachable;
};
if (maybe_row_size) |row_size| {
// A place to store the width of each expression and its column's maximum
var widths = try allocator.alloc(usize, exprs.len + row_size);
defer allocator.free(widths);
mem.set(usize, widths, 0);
var expr_widths = widths[0 .. widths.len - row_size];
var column_widths = widths[widths.len - row_size ..];
// Null ais for counting the printed length of each expression
var counting_stream = std.io.countingOutStream(std.io.null_out_stream);
var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, counting_stream.writer());
for (exprs) |expr, i| {
counting_stream.bytes_written = 0;
try renderExpression(allocator, &auto_indenting_stream, tree, expr, Space.None);
const width = @intCast(usize, counting_stream.bytes_written);
const col = i % row_size;
column_widths[col] = std.math.max(column_widths[col], width);
expr_widths[i] = width;
}
{
ais.pushIndentNextLine();
defer ais.popIndent();
try renderToken(tree, ais, lbrace, Space.Newline);
var col: usize = 1;
var expr_index: usize = 0;
for (exprs) |expr, i| {
while (rowSize(tree, exprs[expr_index..], rtoken)) |row_size| {
if (i + 1 < exprs.len) {
const row_exprs = exprs[expr_index..];
const next_expr = exprs[i + 1];
// A place to store the width of each expression and its column's maximum
var widths = try allocator.alloc(usize, row_exprs.len + row_size);
defer allocator.free(widths);
mem.set(usize, widths, 0);
var expr_newlines = try allocator.alloc(bool, row_exprs.len);
defer allocator.free(expr_newlines);
mem.set(bool, expr_newlines, false);
var expr_widths = widths[0 .. widths.len - row_size];
var column_widths = widths[widths.len - row_size ..];
// Find next row with trailing comment (if any) to end the current section
var section_end = sec_end: {
var this_line_first_expr: usize = 0;
var this_line_size = rowSize(tree, row_exprs, rtoken);
for (row_exprs) |expr, i| {
// Ignore comment on first line of this section
if (i == 0 or tree.tokensOnSameLine(row_exprs[0].firstToken(), expr.lastToken())) continue;
// Track start of line containing comment
if (!tree.tokensOnSameLine(row_exprs[this_line_first_expr].firstToken(), expr.lastToken())) {
this_line_first_expr = i;
this_line_size = rowSize(tree, row_exprs[this_line_first_expr..], rtoken);
}
const maybe_comma = expr.lastToken() + 1;
const maybe_comment = expr.lastToken() + 2;
if (maybe_comment < tree.token_ids.len) {
if (tree.token_ids[maybe_comma] == .Comma and
tree.token_ids[maybe_comment] == .LineComment and
tree.tokensOnSameLine(expr.lastToken(), maybe_comment))
{
var comment_token_loc = tree.token_locs[maybe_comment];
const comment_is_empty = mem.trimRight(u8, tree.tokenSliceLoc(comment_token_loc), " ").len == 2;
if (!comment_is_empty) {
// Found row ending in comment
break :sec_end i - this_line_size.? + 1;
}
}
}
}
break :sec_end row_exprs.len;
};
expr_index += section_end;
const section_exprs = row_exprs[0..section_end];
// Null stream for counting the printed length of each expression
var line_find_stream = std.io.findByteOutStream('\n', std.io.null_out_stream);
var counting_stream = std.io.countingOutStream(line_find_stream.writer());
var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, counting_stream.writer());
// Calculate size of columns in current section
var column_counter: usize = 0;
var single_line = true;
for (section_exprs) |expr, i| {
if (i + 1 < section_exprs.len) {
counting_stream.bytes_written = 0;
line_find_stream.byte_found = false;
try renderExpression(allocator, &auto_indenting_stream, tree, expr, Space.None);
const width = @intCast(usize, counting_stream.bytes_written);
expr_widths[i] = width;
expr_newlines[i] = line_find_stream.byte_found;
if (!line_find_stream.byte_found) {
const column = column_counter % row_size;
column_widths[column] = std.math.max(column_widths[column], width);
const expr_last_token = expr.*.lastToken() + 1;
const next_expr = section_exprs[i + 1];
const loc = tree.tokenLocation(tree.token_locs[expr_last_token].start, next_expr.*.firstToken());
if (loc.line == 0) {
column_counter += 1;
} else {
single_line = false;
column_counter = 0;
}
} else {
single_line = false;
column_counter = 0;
}
} else {
counting_stream.bytes_written = 0;
try renderExpression(allocator, &auto_indenting_stream, tree, expr, Space.None);
const width = @intCast(usize, counting_stream.bytes_written);
expr_widths[i] = width;
expr_newlines[i] = line_find_stream.byte_found;
if (!line_find_stream.byte_found) {
const column = column_counter % row_size;
column_widths[column] = std.math.max(column_widths[column], width);
}
break;
}
}
// Render exprs in current section
column_counter = 0;
var last_col_index: usize = row_size - 1;
for (section_exprs) |expr, i| {
if (i + 1 < section_exprs.len) {
const next_expr = section_exprs[i + 1];
try renderExpression(allocator, ais, tree, expr, Space.None);
const comma = tree.nextToken(expr.*.lastToken());
if (col != row_size) {
if (column_counter != last_col_index) {
if (!expr_newlines[i] and !expr_newlines[i + 1]) {
// Neither the current or next expression is multiline
try renderToken(tree, ais, comma, Space.Space); // ,
assert(column_widths[column_counter % row_size] >= expr_widths[i]);
const padding = column_widths[i % row_size] - expr_widths[i];
const padding = column_widths[column_counter % row_size] - expr_widths[i];
try ais.writer().writeByteNTimes(' ', padding);
col += 1;
column_counter += 1;
continue;
}
col = 1;
}
if (single_line and row_size != 1) {
if (tree.token_ids[tree.nextToken(comma)] != .MultilineStringLiteralLine) {
try renderToken(tree, ais, comma, Space.Space); // ,
try renderToken(tree, ais, comma, Space.Newline); // ,
continue;
}
try renderToken(tree, ais, comma, Space.None); // ,
}
column_counter = 0;
try renderToken(tree, ais, comma, Space.Newline); // ,
try renderExtraNewline(tree, ais, next_expr);
} else {
const maybe_comma = tree.nextToken(expr.*.lastToken());
if (tree.token_ids[maybe_comma] == .Comma) {
try renderExpression(allocator, ais, tree, expr, Space.None); // ,
try renderToken(tree, ais, maybe_comma, Space.Newline); // ,
} else {
try renderExpression(allocator, ais, tree, expr, Space.Comma); // ,
}
}
}
if (expr_index == exprs.len) {
break;
}
}
}
return renderToken(tree, ais, rtoken, space);
} else {
}
// Single line
try renderToken(tree, ais, lbrace, Space.Space);
for (exprs) |expr, i| {
if (i + 1 < exprs.len) {
@@ -844,7 +902,6 @@ fn renderExpression(
}
return renderToken(tree, ais, rtoken, space);
}
},
.StructInitializer, .StructInitializerDot => {
@@ -1004,16 +1061,23 @@ fn renderExpression(
};
if (src_has_trailing_comma) {
try renderToken(tree, ais, lparen, Space.Newline);
{
const params = call.params();
for (params) |param_node, i| {
ais.pushIndent();
defer ais.popIndent();
try renderToken(tree, ais, lparen, Space.Newline); // (
const params = call.params();
for (params) |param_node, i| {
if (i + 1 < params.len) {
const next_node = params[i + 1];
try renderExpression(allocator, ais, tree, param_node, Space.None);
// Unindent the comma for multiline string literals
const maybe_multiline_string = param_node.firstToken();
const is_multiline_string = tree.token_ids[maybe_multiline_string] == .MultilineStringLiteralLine;
if (is_multiline_string) ais.popIndent();
defer if (is_multiline_string) ais.pushIndent();
const comma = tree.nextToken(param_node.lastToken());
try renderToken(tree, ais, comma, Space.Newline); // ,
try renderExtraNewline(tree, ais, next_node);
@@ -1021,6 +1085,7 @@ fn renderExpression(
try renderExpression(allocator, ais, tree, param_node, Space.Comma);
}
}
}
return renderToken(tree, ais, call.rtoken, space);
}
@@ -1028,17 +1093,20 @@ fn renderExpression(
const params = call.params();
for (params) |param_node, i| {
if (param_node.*.tag == .MultilineStringLiteral) ais.pushIndentOneShot();
const maybe_comment = param_node.firstToken() - 1;
const maybe_multiline_string = param_node.firstToken();
if (tree.token_ids[maybe_multiline_string] == .MultilineStringLiteralLine or tree.token_ids[maybe_comment] == .LineComment) {
ais.pushIndentOneShot();
}
try renderExpression(allocator, ais, tree, param_node, Space.None);
if (i + 1 < params.len) {
const next_param = params[i + 1];
const comma = tree.nextToken(param_node.lastToken());
try renderToken(tree, ais, comma, Space.Space);
}
}
return renderToken(tree, ais, call.rtoken, space);
return renderToken(tree, ais, call.rtoken, space); // )
},
.ArrayAccess => {
@@ -1429,7 +1497,7 @@ fn renderExpression(
try renderToken(tree, ais, builtin_call.builtin_token, Space.None); // @name
const src_params_trailing_comma = blk: {
if (builtin_call.params_len < 2) break :blk false;
if (builtin_call.params_len == 0) break :blk false;
const last_node = builtin_call.params()[builtin_call.params_len - 1];
const maybe_comma = tree.nextToken(last_node.lastToken());
break :blk tree.token_ids[maybe_comma] == .Comma;
@@ -1443,6 +1511,10 @@ fn renderExpression(
// render all on one line, no trailing comma
const params = builtin_call.params();
for (params) |param_node, i| {
const maybe_comment = param_node.firstToken() - 1;
if (param_node.*.tag == .MultilineStringLiteral or tree.token_ids[maybe_comment] == .LineComment) {
ais.pushIndentOneShot();
}
try renderExpression(allocator, ais, tree, param_node, Space.None);
if (i + 1 < params.len) {
@@ -1506,7 +1578,8 @@ fn renderExpression(
.Explicit => |node| node.firstToken(),
.InferErrorSet => |node| tree.prevToken(node.firstToken()),
.Invalid => unreachable,
});
},
);
assert(tree.token_ids[rparen] == .RParen);
const src_params_trailing_comma = blk: {
@@ -1758,7 +1831,7 @@ fn renderExpression(
}
if (while_node.payload) |payload| {
const payload_space = Space.Space; //if (while_node.continue_expr != null) Space.Space else block_start_space;
const payload_space = if (while_node.continue_expr != null) Space.Space else block_start_space;
try renderExpression(allocator, ais, tree, payload, payload_space);
}
@@ -1873,7 +1946,12 @@ fn renderExpression(
if (src_has_newline) {
const after_rparen_space = if (if_node.payload == null) Space.Newline else Space.Space;
{
ais.pushIndent();
defer ais.popIndent();
try renderToken(tree, ais, rparen, after_rparen_space); // )
}
if (if_node.payload) |payload| {
try renderExpression(allocator, ais, tree, payload, Space.Newline);
@@ -2558,3 +2636,27 @@ fn copyFixingWhitespace(ais: anytype, slice: []const u8) @TypeOf(ais.*).Error!vo
else => try ais.writer().writeByte(byte),
};
}
fn rowSize(tree: *ast.Tree, exprs: []*ast.Node, rtoken: ast.TokenIndex) ?usize {
const first_token = exprs[0].firstToken();
const first_loc = tree.tokenLocation(tree.token_locs[first_token].start, rtoken);
if (first_loc.line == 0) {
const maybe_comma = tree.prevToken(rtoken);
if (tree.token_ids[maybe_comma] == .Comma)
return 1;
return null; // no newlines
}
var count: usize = 1;
for (exprs) |expr, i| {
if (i + 1 < exprs.len) {
const expr_last_token = expr.lastToken() + 1;
const loc = tree.tokenLocation(tree.token_locs[expr_last_token].start, exprs[i + 1].firstToken());
if (loc.line != 0) return count;
count += 1;
} else {
return count;
}
}
unreachable;
}

View File

@@ -203,7 +203,7 @@ pub const NativeTargetInfo = struct {
/// deinitialization method.
/// TODO Remove the Allocator requirement from this function.
pub fn detect(allocator: *Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
var os = Target.Os.defaultVersionRange(cross_target.getOsTag());
var os = cross_target.getOsTag().defaultVersionRange();
if (cross_target.os_tag == null) {
switch (Target.current.os.tag) {
.linux => {
@@ -393,6 +393,12 @@ pub const NativeTargetInfo = struct {
if (!native_target_has_ld or have_all_info or os_is_non_native) {
return defaultAbiAndDynamicLinker(cpu, os, cross_target);
}
if (cross_target.abi) |abi| {
if (abi.isMusl()) {
// musl implies static linking.
return defaultAbiAndDynamicLinker(cpu, os, cross_target);
}
}
// The current target's ABI cannot be relied on for this. For example, we may build the zig
// compiler for target riscv64-linux-musl and provide a tarball for users to download.
// A user could then run that zig compiler on riscv64-linux-gnu. This use case is well-defined

View File

@@ -1195,6 +1195,7 @@ pub const Tokenizer = struct {
},
.num_dot_hex => switch (c) {
'.' => {
result.id = .IntegerLiteral;
self.index -= 1;
state = .start;
break;
@@ -1758,6 +1759,14 @@ test "correctly parse pointer assignment" {
});
}
test "tokenizer - range literals" {
testTokenize("0...9", &[_]Token.Id{ .IntegerLiteral, .Ellipsis3, .IntegerLiteral });
testTokenize("'0'...'9'", &[_]Token.Id{ .CharLiteral, .Ellipsis3, .CharLiteral });
testTokenize("0x00...0x09", &[_]Token.Id{ .IntegerLiteral, .Ellipsis3, .IntegerLiteral });
testTokenize("0b00...0b11", &[_]Token.Id{ .IntegerLiteral, .Ellipsis3, .IntegerLiteral });
testTokenize("0o00...0o11", &[_]Token.Id{ .IntegerLiteral, .Ellipsis3, .IntegerLiteral });
}
test "tokenizer - number literals decimal" { test "tokenizer - number literals decimal" {
testTokenize("0", &[_]Token.Id{.IntegerLiteral}); testTokenize("0", &[_]Token.Id{.IntegerLiteral});
testTokenize("1", &[_]Token.Id{.IntegerLiteral}); testTokenize("1", &[_]Token.Id{.IntegerLiteral});

View File

@@ -1,59 +0,0 @@
pub const Table = std.StringHashMap(*Package);
/// This should be used for file operations.
root_src_dir: std.fs.Dir,
/// This is for metadata purposes, for example putting into debug information.
root_src_dir_path: []u8,
/// Relative to `root_src_dir` and `root_src_dir_path`.
root_src_path: []u8,
table: Table,
/// No references to `root_src_dir` and `root_src_path` are kept.
pub fn create(
allocator: *mem.Allocator,
base_dir: std.fs.Dir,
/// Relative to `base_dir`.
root_src_dir: []const u8,
/// Relative to `root_src_dir`.
root_src_path: []const u8,
) !*Package {
const ptr = try allocator.create(Package);
errdefer allocator.destroy(ptr);
const root_src_path_dupe = try mem.dupe(allocator, u8, root_src_path);
errdefer allocator.free(root_src_path_dupe);
const root_src_dir_path = try mem.dupe(allocator, u8, root_src_dir);
errdefer allocator.free(root_src_dir_path);
ptr.* = .{
.root_src_dir = try base_dir.openDir(root_src_dir, .{}),
.root_src_dir_path = root_src_dir_path,
.root_src_path = root_src_path_dupe,
.table = Table.init(allocator),
};
return ptr;
}
pub fn destroy(self: *Package) void {
const allocator = self.table.allocator;
self.root_src_dir.close();
allocator.free(self.root_src_path);
allocator.free(self.root_src_dir_path);
{
var it = self.table.iterator();
while (it.next()) |kv| {
allocator.free(kv.key);
}
}
self.table.deinit();
allocator.destroy(self);
}
pub fn add(self: *Package, name: []const u8, package: *Package) !void {
try self.table.ensureCapacity(self.table.items().len + 1);
const name_dupe = try mem.dupe(self.table.allocator, u8, name);
self.table.putAssumeCapacityNoClobber(name_dupe, package);
}
const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;
const Package = @This();

View File

@@ -1,138 +0,0 @@
//! Introspection and determination of system libraries needed by zig.
const std = @import("std");
const mem = std.mem;
const fs = std.fs;
const CacheHash = std.cache_hash.CacheHash;
/// Caller must free result
pub fn testZigInstallPrefix(allocator: *mem.Allocator, test_path: []const u8) ![]u8 {
{
const test_zig_dir = try fs.path.join(allocator, &[_][]const u8{ test_path, "lib", "zig" });
errdefer allocator.free(test_zig_dir);
const test_index_file = try fs.path.join(allocator, &[_][]const u8{ test_zig_dir, "std", "std.zig" });
defer allocator.free(test_index_file);
if (fs.cwd().openFile(test_index_file, .{})) |file| {
file.close();
return test_zig_dir;
} else |err| switch (err) {
error.FileNotFound => {
allocator.free(test_zig_dir);
},
else => |e| return e,
}
}
// Also try without "zig"
const test_zig_dir = try fs.path.join(allocator, &[_][]const u8{ test_path, "lib" });
errdefer allocator.free(test_zig_dir);
const test_index_file = try fs.path.join(allocator, &[_][]const u8{ test_zig_dir, "std", "std.zig" });
defer allocator.free(test_index_file);
const file = try fs.cwd().openFile(test_index_file, .{});
file.close();
return test_zig_dir;
}
/// Caller must free result
pub fn findZigLibDir(allocator: *mem.Allocator) ![]u8 {
const self_exe_path = try fs.selfExePathAlloc(allocator);
defer allocator.free(self_exe_path);
var cur_path: []const u8 = self_exe_path;
while (true) {
const test_dir = fs.path.dirname(cur_path) orelse ".";
if (mem.eql(u8, test_dir, cur_path)) {
break;
}
return testZigInstallPrefix(allocator, test_dir) catch |err| {
cur_path = test_dir;
continue;
};
}
return error.FileNotFound;
}
pub fn resolveZigLibDir(allocator: *mem.Allocator) ![]u8 {
return findZigLibDir(allocator) catch |err| {
std.debug.print(
\\Unable to find zig lib directory: {}.
\\Reinstall Zig or use --zig-install-prefix.
\\
, .{@errorName(err)});
return error.ZigLibDirNotFound;
};
}
/// Caller owns returned memory.
pub fn resolveGlobalCacheDir(allocator: *mem.Allocator) ![]u8 {
const appname = "zig";
if (std.Target.current.os.tag != .windows) {
if (std.os.getenv("XDG_CACHE_HOME")) |cache_root| {
return fs.path.join(allocator, &[_][]const u8{ cache_root, appname });
} else if (std.os.getenv("HOME")) |home| {
return fs.path.join(allocator, &[_][]const u8{ home, ".cache", appname });
}
}
return fs.getAppDataDir(allocator, appname);
}
pub fn openGlobalCacheDir() !fs.Dir {
var buf: [fs.MAX_PATH_BYTES]u8 = undefined;
var fba = std.heap.FixedBufferAllocator.init(&buf);
const path_name = try resolveGlobalCacheDir(&fba.allocator);
return fs.cwd().makeOpenPath(path_name, .{});
}
var compiler_id_mutex = std.Mutex{};
var compiler_id: [16]u8 = undefined;
var compiler_id_computed = false;
pub fn resolveCompilerId(gpa: *mem.Allocator) ![16]u8 {
const held = compiler_id_mutex.acquire();
defer held.release();
if (compiler_id_computed)
return compiler_id;
compiler_id_computed = true;
var cache_dir = try openGlobalCacheDir();
defer cache_dir.close();
var ch = try CacheHash.init(gpa, cache_dir, "exe");
defer ch.release();
const self_exe_path = try fs.selfExePathAlloc(gpa);
defer gpa.free(self_exe_path);
_ = try ch.addFile(self_exe_path, null);
if (try ch.hit()) |digest| {
compiler_id = digest[0..16].*;
return compiler_id;
}
const libs = try std.process.getSelfExeSharedLibPaths(gpa);
defer {
for (libs) |lib| gpa.free(lib);
gpa.free(libs);
}
for (libs) |lib| {
try ch.addFilePost(lib);
}
const digest = ch.final();
compiler_id = digest[0..16].*;
return compiler_id;
}

View File

@@ -1,279 +0,0 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const Module = @import("Module.zig");
const fs = std.fs;
const trace = @import("tracy.zig").trace;
const Package = @import("Package.zig");
const Type = @import("type.zig").Type;
const build_options = @import("build_options");
pub const producer_string = if (std.builtin.is_test) "zig test" else "zig " ++ build_options.version;
pub const Options = struct {
target: std.Target,
output_mode: std.builtin.OutputMode,
link_mode: std.builtin.LinkMode,
object_format: std.builtin.ObjectFormat,
optimize_mode: std.builtin.Mode,
root_name: []const u8,
root_pkg: *const Package,
/// Used for calculating how much space to reserve for symbols in case the binary file
/// does not already have a symbol table.
symbol_count_hint: u64 = 32,
/// Used for calculating how much space to reserve for executable program code in case
/// the binary file does not already have such a section.
program_code_size_hint: u64 = 256 * 1024,
entry_addr: ?u64 = null,
};
pub const File = struct {
tag: Tag,
options: Options,
file: ?fs.File,
allocator: *Allocator,
pub const LinkBlock = union {
elf: Elf.TextBlock,
coff: Coff.TextBlock,
macho: MachO.TextBlock,
c: void,
wasm: void,
};
pub const LinkFn = union {
elf: Elf.SrcFn,
coff: Coff.SrcFn,
macho: MachO.SrcFn,
c: void,
wasm: ?Wasm.FnData,
};
/// For DWARF .debug_info.
pub const DbgInfoTypeRelocsTable = std.HashMapUnmanaged(Type, DbgInfoTypeReloc, Type.hash, Type.eql, std.hash_map.DefaultMaxLoadPercentage);
/// For DWARF .debug_info.
pub const DbgInfoTypeReloc = struct {
/// Offset from `TextBlock.dbg_info_off` (the buffer that is local to a Decl).
/// This is where the .debug_info tag for the type is.
off: u32,
/// Offset from `TextBlock.dbg_info_off` (the buffer that is local to a Decl).
/// List of DW.AT_type / DW.FORM_ref4 that points to the type.
relocs: std.ArrayListUnmanaged(u32),
};
/// Attempts incremental linking, if the file already exists. If
/// incremental linking fails, falls back to truncating the file and
/// rewriting it. A malicious file is detected as incremental link failure
/// and does not cause Illegal Behavior. This operation is not atomic.
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: Options) !*File {
switch (options.object_format) {
.unknown => unreachable,
.coff, .pe => return Coff.openPath(allocator, dir, sub_path, options),
.elf => return Elf.openPath(allocator, dir, sub_path, options),
.macho => return MachO.openPath(allocator, dir, sub_path, options),
.wasm => return Wasm.openPath(allocator, dir, sub_path, options),
.c => return C.openPath(allocator, dir, sub_path, options),
.hex => return error.TODOImplementHex,
.raw => return error.TODOImplementRaw,
}
}
pub fn cast(base: *File, comptime T: type) ?*T {
if (base.tag != T.base_tag)
return null;
return @fieldParentPtr(T, "base", base);
}
pub fn makeWritable(base: *File, dir: fs.Dir, sub_path: []const u8) !void {
switch (base.tag) {
.coff, .elf, .macho => {
if (base.file != null) return;
base.file = try dir.createFile(sub_path, .{
.truncate = false,
.read = true,
.mode = determineMode(base.options),
});
},
.c, .wasm => {},
}
}
pub fn makeExecutable(base: *File) !void {
switch (base.tag) {
.c => unreachable,
.wasm => {},
else => if (base.file) |f| {
f.close();
base.file = null;
},
}
}
/// May be called before or after updateDeclExports but must be called
/// after allocateDeclIndexes for any given Decl.
pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl),
.c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl),
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl),
}
}
pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
.c, .wasm => {},
}
}
/// Must be called before any call to updateDecl or updateDeclExports for
/// any given Decl.
pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl),
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl),
.macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl),
.c, .wasm => {},
}
}
pub fn deinit(base: *File) void {
if (base.file) |f| f.close();
switch (base.tag) {
.coff => @fieldParentPtr(Coff, "base", base).deinit(),
.elf => @fieldParentPtr(Elf, "base", base).deinit(),
.macho => @fieldParentPtr(MachO, "base", base).deinit(),
.c => @fieldParentPtr(C, "base", base).deinit(),
.wasm => @fieldParentPtr(Wasm, "base", base).deinit(),
}
}
pub fn destroy(base: *File) void {
switch (base.tag) {
.coff => {
const parent = @fieldParentPtr(Coff, "base", base);
parent.deinit();
base.allocator.destroy(parent);
},
.elf => {
const parent = @fieldParentPtr(Elf, "base", base);
parent.deinit();
base.allocator.destroy(parent);
},
.macho => {
const parent = @fieldParentPtr(MachO, "base", base);
parent.deinit();
base.allocator.destroy(parent);
},
.c => {
const parent = @fieldParentPtr(C, "base", base);
parent.deinit();
base.allocator.destroy(parent);
},
.wasm => {
const parent = @fieldParentPtr(Wasm, "base", base);
parent.deinit();
base.allocator.destroy(parent);
},
}
}
pub fn flush(base: *File, module: *Module) !void {
const tracy = trace(@src());
defer tracy.end();
try switch (base.tag) {
.coff => @fieldParentPtr(Coff, "base", base).flush(module),
.elf => @fieldParentPtr(Elf, "base", base).flush(module),
.macho => @fieldParentPtr(MachO, "base", base).flush(module),
.c => @fieldParentPtr(C, "base", base).flush(module),
.wasm => @fieldParentPtr(Wasm, "base", base).flush(module),
};
}
pub fn freeDecl(base: *File, decl: *Module.Decl) void {
switch (base.tag) {
.coff => @fieldParentPtr(Coff, "base", base).freeDecl(decl),
.elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl),
.macho => @fieldParentPtr(MachO, "base", base).freeDecl(decl),
.c => unreachable,
.wasm => @fieldParentPtr(Wasm, "base", base).freeDecl(decl),
}
}
pub fn errorFlags(base: *File) ErrorFlags {
return switch (base.tag) {
.coff => @fieldParentPtr(Coff, "base", base).error_flags,
.elf => @fieldParentPtr(Elf, "base", base).error_flags,
.macho => @fieldParentPtr(MachO, "base", base).error_flags,
.c => return .{ .no_entry_point_found = false },
.wasm => return ErrorFlags{},
};
}
/// May be called before or after updateDecl, but must be called after
/// allocateDeclIndexes for any given Decl.
pub fn updateDeclExports(
base: *File,
module: *Module,
decl: *const Module.Decl,
exports: []const *Module.Export,
) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclExports(module, decl, exports),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclExports(module, decl, exports),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclExports(module, decl, exports),
.c => return {},
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclExports(module, decl, exports),
}
}
pub fn getDeclVAddr(base: *File, decl: *const Module.Decl) u64 {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl),
.elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl),
.macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl),
.c => unreachable,
.wasm => unreachable,
}
}
pub const Tag = enum {
coff,
elf,
macho,
c,
wasm,
};
pub const ErrorFlags = struct {
no_entry_point_found: bool = false,
};
pub const C = @import("link/C.zig");
pub const Coff = @import("link/Coff.zig");
pub const Elf = @import("link/Elf.zig");
pub const MachO = @import("link/MachO.zig");
pub const Wasm = @import("link/Wasm.zig");
};
pub fn determineMode(options: Options) fs.File.Mode {
// On common systems with a 0o022 umask, 0o777 will still result in a file created
// with 0o755 permissions, but it works appropriately if the system is configured
// more leniently. As another data point, C's fopen seems to open files with the
// 666 mode.
const executable_mode = if (std.Target.current.os.tag == .windows) 0 else 0o777;
switch (options.output_mode) {
.Lib => return switch (options.link_mode) {
.Dynamic => executable_mode,
.Static => fs.File.default_mode,
},
.Exe => return executable_mode,
.Obj => return fs.File.default_mode,
}
}

View File

@@ -1,792 +0,0 @@
const Coff = @This();
const std = @import("std");
const log = std.log.scoped(.link);
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const fs = std.fs;
const trace = @import("../tracy.zig").trace;
const Module = @import("../Module.zig");
const codegen = @import("../codegen.zig");
const link = @import("../link.zig");
const allocation_padding = 4 / 3;
const minimum_text_block_size = 64 * allocation_padding;
const section_alignment = 4096;
const file_alignment = 512;
const image_base = 0x400_000;
const section_table_size = 2 * 40;
comptime {
std.debug.assert(std.mem.isAligned(image_base, section_alignment));
}
pub const base_tag: link.File.Tag = .coff;
const msdos_stub = @embedFile("msdos-stub.bin");
base: link.File,
ptr_width: enum { p32, p64 },
error_flags: link.File.ErrorFlags = .{},
text_block_free_list: std.ArrayListUnmanaged(*TextBlock) = .{},
last_text_block: ?*TextBlock = null,
/// Section table file pointer.
section_table_offset: u32 = 0,
/// Section data file pointer.
section_data_offset: u32 = 0,
/// Optional header file pointer.
optional_header_offset: u32 = 0,
/// Absolute virtual address of the offset table when the executable is loaded in memory.
offset_table_virtual_address: u32 = 0,
/// Current size of the offset table on disk, must be a multiple of `file_alignment`
offset_table_size: u32 = 0,
/// Contains absolute virtual addresses
offset_table: std.ArrayListUnmanaged(u64) = .{},
/// Free list of offset table indices
offset_table_free_list: std.ArrayListUnmanaged(u32) = .{},
/// Virtual address of the entry point procedure relative to `image_base`
entry_addr: ?u32 = null,
/// Absolute virtual address of the text section when the executable is loaded in memory.
text_section_virtual_address: u32 = 0,
/// Current size of the `.text` section on disk, must be a multiple of `file_alignment`
text_section_size: u32 = 0,
offset_table_size_dirty: bool = false,
text_section_size_dirty: bool = false,
/// This flag is set when the virtual size of the whole image file when loaded in memory has changed
/// and needs to be updated in the optional header.
size_of_image_dirty: bool = false,
pub const TextBlock = struct {
/// Offset of the code relative to the start of the text section
text_offset: u32,
/// Used size of the text block
size: u32,
/// This field is undefined for symbols with size = 0.
offset_table_index: u32,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `TextBlock`.
prev: ?*TextBlock,
next: ?*TextBlock,
pub const empty = TextBlock{
.text_offset = 0,
.size = 0,
.offset_table_index = undefined,
.prev = null,
.next = null,
};
/// Returns how much room there is to grow in virtual address space.
fn capacity(self: TextBlock) u64 {
if (self.next) |next| {
return next.text_offset - self.text_offset;
}
// This is the last block, the capacity is only limited by the address space.
return std.math.maxInt(u32) - self.text_offset;
}
fn freeListEligible(self: TextBlock) bool {
// No need to keep a free list node for the last block.
const next = self.next orelse return false;
const cap = next.text_offset - self.text_offset;
const ideal_cap = self.size * allocation_padding;
if (cap <= ideal_cap) return false;
const surplus = cap - ideal_cap;
return surplus >= minimum_text_block_size;
}
/// Absolute virtual address of the text block when the file is loaded in memory.
fn getVAddr(self: TextBlock, coff: Coff) u32 {
return coff.text_section_virtual_address + self.text_offset;
}
};
pub const SrcFn = void;
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*link.File {
assert(options.object_format == .coff);
const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = link.determineMode(options) });
errdefer file.close();
var coff_file = try allocator.create(Coff);
errdefer allocator.destroy(coff_file);
coff_file.* = openFile(allocator, file, options) catch |err| switch (err) {
error.IncrFailed => try createFile(allocator, file, options),
else => |e| return e,
};
return &coff_file.base;
}
/// Returns error.IncrFailed if incremental update could not be performed.
fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff {
switch (options.output_mode) {
.Exe => {},
.Obj => return error.IncrFailed,
.Lib => return error.IncrFailed,
}
var self: Coff = .{
.base = .{
.file = file,
.tag = .coff,
.options = options,
.allocator = allocator,
},
.ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
32 => .p32,
64 => .p64,
else => return error.UnsupportedELFArchitecture,
},
};
errdefer self.deinit();
// TODO implement reading the PE/COFF file
return error.IncrFailed;
}
/// Truncates the existing file contents and overwrites the contents.
/// Returns an error if `file` is not already open with +read +write +seek abilities.
fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff {
// TODO Write object specific relocations, COFF symbol table, then enable object file output.
switch (options.output_mode) {
.Exe => {},
.Obj => return error.TODOImplementWritingObjFiles,
.Lib => return error.TODOImplementWritingLibFiles,
}
var self: Coff = .{
.base = .{
.tag = .coff,
.options = options,
.allocator = allocator,
.file = file,
},
.ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
32 => .p32,
64 => .p64,
else => return error.UnsupportedCOFFArchitecture,
},
};
errdefer self.deinit();
var coff_file_header_offset: u32 = 0;
if (options.output_mode == .Exe) {
// Write the MS-DOS stub and the PE signature
try self.base.file.?.pwriteAll(msdos_stub ++ "PE\x00\x00", 0);
coff_file_header_offset = msdos_stub.len + 4;
}
// COFF file header
const data_directory_count = 0;
var hdr_data: [112 + data_directory_count * 8 + section_table_size]u8 = undefined;
var index: usize = 0;
const machine = self.base.options.target.cpu.arch.toCoffMachine();
if (machine == .Unknown) {
return error.UnsupportedCOFFArchitecture;
}
std.mem.writeIntLittle(u16, hdr_data[0..2], @enumToInt(machine));
index += 2;
// Number of sections (we only use .got, .text)
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 2);
index += 2;
// TimeDateStamp (u32), PointerToSymbolTable (u32), NumberOfSymbols (u32)
std.mem.set(u8, hdr_data[index..][0..12], 0);
index += 12;
const optional_header_size = switch (options.output_mode) {
.Exe => data_directory_count * 8 + switch (self.ptr_width) {
.p32 => @as(u16, 96),
.p64 => 112,
},
else => 0,
};
const section_table_offset = coff_file_header_offset + 20 + optional_header_size;
const default_offset_table_size = file_alignment;
const default_size_of_code = 0;
self.section_data_offset = std.mem.alignForwardGeneric(u32, self.section_table_offset + section_table_size, file_alignment);
const section_data_relative_virtual_address = std.mem.alignForwardGeneric(u32, self.section_table_offset + section_table_size, section_alignment);
self.offset_table_virtual_address = image_base + section_data_relative_virtual_address;
self.offset_table_size = default_offset_table_size;
self.section_table_offset = section_table_offset;
self.text_section_virtual_address = image_base + section_data_relative_virtual_address + section_alignment;
self.text_section_size = default_size_of_code;
// Size of file when loaded in memory
const size_of_image = std.mem.alignForwardGeneric(u32, self.text_section_virtual_address - image_base + default_size_of_code, section_alignment);
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], optional_header_size);
index += 2;
// Characteristics
var characteristics: u16 = std.coff.IMAGE_FILE_DEBUG_STRIPPED | std.coff.IMAGE_FILE_RELOCS_STRIPPED; // TODO Remove debug info stripped flag when necessary
if (options.output_mode == .Exe) {
characteristics |= std.coff.IMAGE_FILE_EXECUTABLE_IMAGE;
}
switch (self.ptr_width) {
.p32 => characteristics |= std.coff.IMAGE_FILE_32BIT_MACHINE,
.p64 => characteristics |= std.coff.IMAGE_FILE_LARGE_ADDRESS_AWARE,
}
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], characteristics);
index += 2;
assert(index == 20);
try self.base.file.?.pwriteAll(hdr_data[0..index], coff_file_header_offset);
if (options.output_mode == .Exe) {
self.optional_header_offset = coff_file_header_offset + 20;
// Optional header
index = 0;
std.mem.writeIntLittle(u16, hdr_data[0..2], switch (self.ptr_width) {
.p32 => @as(u16, 0x10b),
.p64 => 0x20b,
});
index += 2;
// Linker version (u8 + u8)
std.mem.set(u8, hdr_data[index..][0..2], 0);
index += 2;
// SizeOfCode (UNUSED, u32), SizeOfInitializedData (u32), SizeOfUninitializedData (u32), AddressOfEntryPoint (u32), BaseOfCode (UNUSED, u32)
std.mem.set(u8, hdr_data[index..][0..20], 0);
index += 20;
if (self.ptr_width == .p32) {
// Base of data relative to the image base (UNUSED)
std.mem.set(u8, hdr_data[index..][0..4], 0);
index += 4;
// Image base address
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], image_base);
index += 4;
} else {
// Image base address
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], image_base);
index += 8;
}
// Section alignment
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], section_alignment);
index += 4;
// File alignment
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], file_alignment);
index += 4;
// Required OS version, 6.0 is vista
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 6);
index += 2;
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0);
index += 2;
// Image version
std.mem.set(u8, hdr_data[index..][0..4], 0);
index += 4;
// Required subsystem version, same as OS version
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 6);
index += 2;
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0);
index += 2;
// Reserved zeroes (u32)
std.mem.set(u8, hdr_data[index..][0..4], 0);
index += 4;
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], size_of_image);
index += 4;
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset);
index += 4;
// CheckSum (u32)
std.mem.set(u8, hdr_data[index..][0..4], 0);
index += 4;
// Subsystem, TODO: Let users specify the subsystem, always CUI for now
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 3);
index += 2;
// DLL characteristics
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0x0);
index += 2;
switch (self.ptr_width) {
.p32 => {
// Size of stack reserve + commit
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000_000);
index += 4;
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000);
index += 4;
// Size of heap reserve + commit
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x100_000);
index += 4;
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000);
index += 4;
},
.p64 => {
// Size of stack reserve + commit
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000_000);
index += 8;
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000);
index += 8;
// Size of heap reserve + commit
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x100_000);
index += 8;
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000);
index += 8;
},
}
// Reserved zeroes
std.mem.set(u8, hdr_data[index..][0..4], 0);
index += 4;
// Number of data directories
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], data_directory_count);
index += 4;
// Initialize data directories to zero
std.mem.set(u8, hdr_data[index..][0 .. data_directory_count * 8], 0);
index += data_directory_count * 8;
assert(index == optional_header_size);
}
// Write section table.
// First, the .got section
hdr_data[index..][0..8].* = ".got\x00\x00\x00\x00".*;
index += 8;
if (options.output_mode == .Exe) {
// Virtual size (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_offset_table_size);
index += 4;
// Virtual address (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.offset_table_virtual_address - image_base);
index += 4;
} else {
std.mem.set(u8, hdr_data[index..][0..8], 0);
index += 8;
}
// Size of raw data (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_offset_table_size);
index += 4;
// File pointer to the start of the section
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset);
index += 4;
// Pointer to relocations (u32), PointerToLinenumbers (u32), NumberOfRelocations (u16), NumberOfLinenumbers (u16)
std.mem.set(u8, hdr_data[index..][0..12], 0);
index += 12;
// Section flags
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], std.coff.IMAGE_SCN_CNT_INITIALIZED_DATA | std.coff.IMAGE_SCN_MEM_READ);
index += 4;
// Then, the .text section
hdr_data[index..][0..8].* = ".text\x00\x00\x00".*;
index += 8;
if (options.output_mode == .Exe) {
// Virtual size (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_size_of_code);
index += 4;
// Virtual address (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.text_section_virtual_address - image_base);
index += 4;
} else {
std.mem.set(u8, hdr_data[index..][0..8], 0);
index += 8;
}
// Size of raw data (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_size_of_code);
index += 4;
// File pointer to the start of the section
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset + default_offset_table_size);
index += 4;
// Pointer to relocations (u32), PointerToLinenumbers (u32), NumberOfRelocations (u16), NumberOfLinenumbers (u16)
std.mem.set(u8, hdr_data[index..][0..12], 0);
index += 12;
// Section flags
std.mem.writeIntLittle(
u32,
hdr_data[index..][0..4],
std.coff.IMAGE_SCN_CNT_CODE | std.coff.IMAGE_SCN_MEM_EXECUTE | std.coff.IMAGE_SCN_MEM_READ | std.coff.IMAGE_SCN_MEM_WRITE,
);
index += 4;
assert(index == optional_header_size + section_table_size);
try self.base.file.?.pwriteAll(hdr_data[0..index], self.optional_header_offset);
try self.base.file.?.setEndPos(self.section_data_offset + default_offset_table_size + default_size_of_code);
return self;
}
pub fn allocateDeclIndexes(self: *Coff, decl: *Module.Decl) !void {
try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
if (self.offset_table_free_list.popOrNull()) |i| {
decl.link.coff.offset_table_index = i;
} else {
decl.link.coff.offset_table_index = @intCast(u32, self.offset_table.items.len);
_ = self.offset_table.addOneAssumeCapacity();
const entry_size = self.base.options.target.cpu.arch.ptrBitWidth() / 8;
if (self.offset_table.items.len > self.offset_table_size / entry_size) {
self.offset_table_size_dirty = true;
}
}
self.offset_table.items[decl.link.coff.offset_table_index] = 0;
}
fn allocateTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
const new_block_min_capacity = new_block_size * allocation_padding;
// We use these to indicate our intention to update metadata, placing the new block,
// and possibly removing a free list node.
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
var block_placement: ?*TextBlock = null;
var free_list_removal: ?usize = null;
const vaddr = blk: {
var i: usize = 0;
while (i < self.text_block_free_list.items.len) {
const free_block = self.text_block_free_list.items[i];
const next_block_text_offset = free_block.text_offset + free_block.capacity();
const new_block_text_offset = std.mem.alignForwardGeneric(u64, free_block.getVAddr(self.*) + free_block.size, alignment) - self.text_section_virtual_address;
if (new_block_text_offset < next_block_text_offset and next_block_text_offset - new_block_text_offset >= new_block_min_capacity) {
block_placement = free_block;
const remaining_capacity = next_block_text_offset - new_block_text_offset - new_block_min_capacity;
if (remaining_capacity < minimum_text_block_size) {
free_list_removal = i;
}
break :blk new_block_text_offset + self.text_section_virtual_address;
} else {
if (!free_block.freeListEligible()) {
_ = self.text_block_free_list.swapRemove(i);
} else {
i += 1;
}
continue;
}
} else if (self.last_text_block) |last| {
const new_block_vaddr = std.mem.alignForwardGeneric(u64, last.getVAddr(self.*) + last.size, alignment);
block_placement = last;
break :blk new_block_vaddr;
} else {
break :blk self.text_section_virtual_address;
}
};
const expand_text_section = block_placement == null or block_placement.?.next == null;
if (expand_text_section) {
const needed_size = @intCast(u32, std.mem.alignForwardGeneric(u64, vaddr + new_block_size - self.text_section_virtual_address, file_alignment));
if (needed_size > self.text_section_size) {
const current_text_section_virtual_size = std.mem.alignForwardGeneric(u32, self.text_section_size, section_alignment);
const new_text_section_virtual_size = std.mem.alignForwardGeneric(u32, needed_size, section_alignment);
if (current_text_section_virtual_size != new_text_section_virtual_size) {
self.size_of_image_dirty = true;
// Write new virtual size
var buf: [4]u8 = undefined;
std.mem.writeIntLittle(u32, &buf, new_text_section_virtual_size);
try self.base.file.?.pwriteAll(&buf, self.section_table_offset + 40 + 8);
}
self.text_section_size = needed_size;
self.text_section_size_dirty = true;
}
self.last_text_block = text_block;
}
text_block.text_offset = @intCast(u32, vaddr - self.text_section_virtual_address);
text_block.size = @intCast(u32, new_block_size);
// This function can also reallocate a text block.
// In this case we need to "unplug" it from its previous location before
// plugging it in to its new location.
if (text_block.prev) |prev| {
prev.next = text_block.next;
}
if (text_block.next) |next| {
next.prev = text_block.prev;
}
if (block_placement) |big_block| {
text_block.prev = big_block;
text_block.next = big_block.next;
big_block.next = text_block;
} else {
text_block.prev = null;
text_block.next = null;
}
if (free_list_removal) |i| {
_ = self.text_block_free_list.swapRemove(i);
}
return vaddr;
}
fn growTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
const block_vaddr = text_block.getVAddr(self.*);
const align_ok = std.mem.alignBackwardGeneric(u64, block_vaddr, alignment) == block_vaddr;
const need_realloc = !align_ok or new_block_size > text_block.capacity();
if (!need_realloc) return @as(u64, block_vaddr);
return self.allocateTextBlock(text_block, new_block_size, alignment);
}
fn shrinkTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64) void {
text_block.size = @intCast(u32, new_block_size);
if (text_block.capacity() - text_block.size >= minimum_text_block_size) {
self.text_block_free_list.append(self.base.allocator, text_block) catch {};
}
}
fn freeTextBlock(self: *Coff, text_block: *TextBlock) void {
var already_have_free_list_node = false;
{
var i: usize = 0;
// TODO turn text_block_free_list into a hash map
while (i < self.text_block_free_list.items.len) {
if (self.text_block_free_list.items[i] == text_block) {
_ = self.text_block_free_list.swapRemove(i);
continue;
}
if (self.text_block_free_list.items[i] == text_block.prev) {
already_have_free_list_node = true;
}
i += 1;
}
}
if (self.last_text_block == text_block) {
self.last_text_block = text_block.prev;
}
if (text_block.prev) |prev| {
prev.next = text_block.next;
if (!already_have_free_list_node and prev.freeListEligible()) {
// The free list is heuristics, it doesn't have to be perfect, so we can
// ignore the OOM here.
self.text_block_free_list.append(self.base.allocator, prev) catch {};
}
}
if (text_block.next) |next| {
next.prev = text_block.prev;
}
}
fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
const entry_size = self.base.options.target.cpu.arch.ptrBitWidth() / 8;
const endian = self.base.options.target.cpu.arch.endian();
const offset_table_start = self.section_data_offset;
if (self.offset_table_size_dirty) {
const current_raw_size = self.offset_table_size;
const new_raw_size = self.offset_table_size * 2;
log.debug("growing offset table from raw size {} to {}\n", .{ current_raw_size, new_raw_size });
// Move the text section to a new place in the executable
const current_text_section_start = self.section_data_offset + current_raw_size;
const new_text_section_start = self.section_data_offset + new_raw_size;
const amt = try self.base.file.?.copyRangeAll(current_text_section_start, self.base.file.?, new_text_section_start, self.text_section_size);
if (amt != self.text_section_size) return error.InputOutput;
// Write the new raw size in the .got header
var buf: [8]u8 = undefined;
std.mem.writeIntLittle(u32, buf[0..4], new_raw_size);
try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 16);
// Write the new .text section file offset in the .text section header
std.mem.writeIntLittle(u32, buf[0..4], new_text_section_start);
try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 40 + 20);
const current_virtual_size = std.mem.alignForwardGeneric(u32, self.offset_table_size, section_alignment);
const new_virtual_size = std.mem.alignForwardGeneric(u32, new_raw_size, section_alignment);
// If we had to move in the virtual address space, we need to fix the VAs in the offset table, as well as the virtual address of the `.text` section
// and the virtual size of the `.got` section
if (new_virtual_size != current_virtual_size) {
log.debug("growing offset table from virtual size {} to {}\n", .{ current_virtual_size, new_virtual_size });
self.size_of_image_dirty = true;
const va_offset = new_virtual_size - current_virtual_size;
// Write .got virtual size
std.mem.writeIntLittle(u32, buf[0..4], new_virtual_size);
try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 8);
// Write .text new virtual address
self.text_section_virtual_address = self.text_section_virtual_address + va_offset;
std.mem.writeIntLittle(u32, buf[0..4], self.text_section_virtual_address - image_base);
try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 40 + 12);
// Fix the VAs in the offset table
for (self.offset_table.items) |*va, idx| {
if (va.* != 0) {
va.* += va_offset;
switch (entry_size) {
4 => {
std.mem.writeInt(u32, buf[0..4], @intCast(u32, va.*), endian);
try self.base.file.?.pwriteAll(buf[0..4], offset_table_start + idx * entry_size);
},
8 => {
std.mem.writeInt(u64, &buf, va.*, endian);
try self.base.file.?.pwriteAll(&buf, offset_table_start + idx * entry_size);
},
else => unreachable,
}
}
}
}
self.offset_table_size = new_raw_size;
self.offset_table_size_dirty = false;
}
// Write the new entry
switch (entry_size) {
4 => {
var buf: [4]u8 = undefined;
std.mem.writeInt(u32, &buf, @intCast(u32, self.offset_table.items[index]), endian);
try self.base.file.?.pwriteAll(&buf, offset_table_start + index * entry_size);
},
8 => {
var buf: [8]u8 = undefined;
std.mem.writeInt(u64, &buf, self.offset_table.items[index], endian);
try self.base.file.?.pwriteAll(&buf, offset_table_start + index * entry_size);
},
else => unreachable,
}
}
pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
// TODO COFF/PE debug information
// TODO Implement exports
const tracy = trace(@src());
defer tracy.end();
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
const typed_value = decl.typed_value.most_recent.typed_value;
const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .none);
const code = switch (res) {
.externally_managed => |x| x,
.appended => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
return;
},
};
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
const curr_size = decl.link.coff.size;
if (curr_size != 0) {
const capacity = decl.link.coff.capacity();
const need_realloc = code.len > capacity or
!std.mem.isAlignedGeneric(u32, decl.link.coff.text_offset, required_alignment);
if (need_realloc) {
const curr_vaddr = self.getDeclVAddr(decl);
const vaddr = try self.growTextBlock(&decl.link.coff, code.len, required_alignment);
log.debug("growing {} from 0x{x} to 0x{x}\n", .{ decl.name, curr_vaddr, vaddr });
if (vaddr != curr_vaddr) {
log.debug(" (writing new offset table entry)\n", .{});
self.offset_table.items[decl.link.coff.offset_table_index] = vaddr;
try self.writeOffsetTableEntry(decl.link.coff.offset_table_index);
}
} else if (code.len < curr_size) {
self.shrinkTextBlock(&decl.link.coff, code.len);
}
} else {
const vaddr = try self.allocateTextBlock(&decl.link.coff, code.len, required_alignment);
log.debug("allocated text block for {} at 0x{x} (size: {Bi})\n", .{ std.mem.spanZ(decl.name), vaddr, code.len });
errdefer self.freeTextBlock(&decl.link.coff);
self.offset_table.items[decl.link.coff.offset_table_index] = vaddr;
try self.writeOffsetTableEntry(decl.link.coff.offset_table_index);
}
// Write the code into the file
try self.base.file.?.pwriteAll(code, self.section_data_offset + self.offset_table_size + decl.link.coff.text_offset);
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl, decl_exports);
}
pub fn freeDecl(self: *Coff, decl: *Module.Decl) void {
// Appending to free lists is allowed to fail because the free lists are heuristic-based anyway.
self.freeTextBlock(&decl.link.coff);
self.offset_table_free_list.append(self.base.allocator, decl.link.coff.offset_table_index) catch {};
}
pub fn updateDeclExports(self: *Coff, module: *Module, decl: *const Module.Decl, exports: []const *Module.Export) !void {
for (exports) |exp| {
if (exp.options.section) |section_name| {
if (!std.mem.eql(u8, section_name, ".text")) {
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
);
continue;
}
}
if (std.mem.eql(u8, exp.options.name, "_start")) {
self.entry_addr = decl.link.coff.getVAddr(self.*) - image_base;
} else {
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: Exports other than '_start'", .{}),
);
continue;
}
}
}
pub fn flush(self: *Coff, module: *Module) !void {
if (self.text_section_size_dirty) {
// Write the new raw size in the .text header
var buf: [4]u8 = undefined;
std.mem.writeIntLittle(u32, &buf, self.text_section_size);
try self.base.file.?.pwriteAll(&buf, self.section_table_offset + 40 + 16);
try self.base.file.?.setEndPos(self.section_data_offset + self.offset_table_size + self.text_section_size);
self.text_section_size_dirty = false;
}
if (self.base.options.output_mode == .Exe and self.size_of_image_dirty) {
const new_size_of_image = std.mem.alignForwardGeneric(u32, self.text_section_virtual_address - image_base + self.text_section_size, section_alignment);
var buf: [4]u8 = undefined;
std.mem.writeIntLittle(u32, &buf, new_size_of_image);
try self.base.file.?.pwriteAll(&buf, self.optional_header_offset + 56);
self.size_of_image_dirty = false;
}
if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
log.debug("flushing. no_entry_point_found = true\n", .{});
self.error_flags.no_entry_point_found = true;
} else {
log.debug("flushing. no_entry_point_found = false\n", .{});
self.error_flags.no_entry_point_found = false;
if (self.base.options.output_mode == .Exe) {
// Write AddressOfEntryPoint
var buf: [4]u8 = undefined;
std.mem.writeIntLittle(u32, &buf, self.entry_addr.?);
try self.base.file.?.pwriteAll(&buf, self.optional_header_offset + 16);
}
}
}
pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl) u64 {
return self.text_section_virtual_address + decl.link.coff.text_offset;
}
pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void {
// TODO Implement this
}
pub fn deinit(self: *Coff) void {
self.text_block_free_list.deinit(self.base.allocator);
self.offset_table.deinit(self.base.allocator);
self.offset_table_free_list.deinit(self.base.allocator);
}

View File

@ -1,251 +0,0 @@
const Wasm = @This();
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const fs = std.fs;
const leb = std.debug.leb;
const Module = @import("../Module.zig");
const codegen = @import("../codegen/wasm.zig");
const link = @import("../link.zig");
/// Various magic numbers defined by the wasm spec
const spec = struct {
const magic = [_]u8{ 0x00, 0x61, 0x73, 0x6D }; // \0asm
const version = [_]u8{ 0x01, 0x00, 0x00, 0x00 }; // version 1
const custom_id = 0;
const types_id = 1;
const imports_id = 2;
const funcs_id = 3;
const tables_id = 4;
const memories_id = 5;
const globals_id = 6;
const exports_id = 7;
const start_id = 8;
const elements_id = 9;
const code_id = 10;
const data_id = 11;
};
pub const base_tag = link.File.Tag.wasm;
pub const FnData = struct {
/// Generated code for the type of the function
functype: std.ArrayListUnmanaged(u8) = .{},
/// Generated code for the body of the function
code: std.ArrayListUnmanaged(u8) = .{},
/// Locations in the generated code where function indexes must be filled in.
/// This must be kept ordered by offset.
idx_refs: std.ArrayListUnmanaged(struct { offset: u32, decl: *Module.Decl }) = .{},
};
base: link.File,
/// List of all function Decls to be written to the output file. The index of
/// each Decl in this list at the time of writing the binary is used as the
/// function index.
/// TODO: can/should we access some data structure in Module directly?
funcs: std.ArrayListUnmanaged(*Module.Decl) = .{},
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*link.File {
assert(options.object_format == .wasm);
// TODO: read the file and keep valid parts instead of truncating
const file = try dir.createFile(sub_path, .{ .truncate = true, .read = true });
errdefer file.close();
const wasm = try allocator.create(Wasm);
errdefer allocator.destroy(wasm);
try file.writeAll(&(spec.magic ++ spec.version));
wasm.* = .{
.base = .{
.tag = .wasm,
.options = options,
.file = file,
.allocator = allocator,
},
};
return &wasm.base;
}
pub fn deinit(self: *Wasm) void {
for (self.funcs.items) |decl| {
decl.fn_link.wasm.?.functype.deinit(self.base.allocator);
decl.fn_link.wasm.?.code.deinit(self.base.allocator);
decl.fn_link.wasm.?.idx_refs.deinit(self.base.allocator);
}
self.funcs.deinit(self.base.allocator);
}
// Generate code for the Decl, storing it in memory to be later written to
// the file on flush().
pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
if (decl.typed_value.most_recent.typed_value.ty.zigTypeTag() != .Fn)
return error.TODOImplementNonFnDeclsForWasm;
if (decl.fn_link.wasm) |*fn_data| {
fn_data.functype.items.len = 0;
fn_data.code.items.len = 0;
fn_data.idx_refs.items.len = 0;
} else {
decl.fn_link.wasm = .{};
try self.funcs.append(self.base.allocator, decl);
}
const fn_data = &decl.fn_link.wasm.?;
var managed_functype = fn_data.functype.toManaged(self.base.allocator);
var managed_code = fn_data.code.toManaged(self.base.allocator);
try codegen.genFunctype(&managed_functype, decl);
try codegen.genCode(&managed_code, decl);
fn_data.functype = managed_functype.toUnmanaged();
fn_data.code = managed_code.toUnmanaged();
}
pub fn updateDeclExports(
self: *Wasm,
module: *Module,
decl: *const Module.Decl,
exports: []const *Module.Export,
) !void {}
pub fn freeDecl(self: *Wasm, decl: *Module.Decl) void {
// TODO: remove this assert when non-function Decls are implemented
assert(decl.typed_value.most_recent.typed_value.ty.zigTypeTag() == .Fn);
_ = self.funcs.swapRemove(self.getFuncidx(decl).?);
decl.fn_link.wasm.?.functype.deinit(self.base.allocator);
decl.fn_link.wasm.?.code.deinit(self.base.allocator);
decl.fn_link.wasm.?.idx_refs.deinit(self.base.allocator);
decl.fn_link.wasm = null;
}
pub fn flush(self: *Wasm, module: *Module) !void {
const file = self.base.file.?;
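// Section sizes are computed as `pos - header_offset - header_size`. The wasm
// size field counts everything after itself, including the fixed 5-byte item
// count written by writeVecSectionHeader(), but not the 1-byte section id or
// the 5-byte size field, hence 5 + 1 rather than the full 11 reserved bytes.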
const header_size = 5 + 1;
// No need to rewrite the magic/version header
try file.setEndPos(@sizeOf(@TypeOf(spec.magic ++ spec.version)));
try file.seekTo(@sizeOf(@TypeOf(spec.magic ++ spec.version)));
// Type section
{
const header_offset = try reserveVecSectionHeader(file);
for (self.funcs.items) |decl| {
try file.writeAll(decl.fn_link.wasm.?.functype.items);
}
try writeVecSectionHeader(
file,
header_offset,
spec.types_id,
@intCast(u32, (try file.getPos()) - header_offset - header_size),
@intCast(u32, self.funcs.items.len),
);
}
// Function section
{
const header_offset = try reserveVecSectionHeader(file);
const writer = file.writer();
for (self.funcs.items) |_, typeidx| try leb.writeULEB128(writer, @intCast(u32, typeidx));
try writeVecSectionHeader(
file,
header_offset,
spec.funcs_id,
@intCast(u32, (try file.getPos()) - header_offset - header_size),
@intCast(u32, self.funcs.items.len),
);
}
// Export section
{
const header_offset = try reserveVecSectionHeader(file);
const writer = file.writer();
var count: u32 = 0;
for (module.decl_exports.entries.items) |entry| {
for (entry.value) |exprt| {
// Export name length + name
try leb.writeULEB128(writer, @intCast(u32, exprt.options.name.len));
try writer.writeAll(exprt.options.name);
switch (exprt.exported_decl.typed_value.most_recent.typed_value.ty.zigTypeTag()) {
.Fn => {
// Type of the export
try writer.writeByte(0x00);
// Exported function index
try leb.writeULEB128(writer, self.getFuncidx(exprt.exported_decl).?);
},
else => return error.TODOImplementNonFnDeclsForWasm,
}
count += 1;
}
}
try writeVecSectionHeader(
file,
header_offset,
spec.exports_id,
@intCast(u32, (try file.getPos()) - header_offset - header_size),
count,
);
}
// Code section
{
const header_offset = try reserveVecSectionHeader(file);
const writer = file.writer();
for (self.funcs.items) |decl| {
const fn_data = &decl.fn_link.wasm.?;
// Write the already generated code to the file, inserting
// function indexes where required.
var current: u32 = 0;
for (fn_data.idx_refs.items) |idx_ref| {
try writer.writeAll(fn_data.code.items[current..idx_ref.offset]);
current = idx_ref.offset;
// Use a fixed width here to make calculating the code size
// in codegen.wasm.genCode() simpler.
var buf: [5]u8 = undefined;
leb.writeUnsignedFixed(5, &buf, self.getFuncidx(idx_ref.decl).?);
try writer.writeAll(&buf);
}
try writer.writeAll(fn_data.code.items[current..]);
}
try writeVecSectionHeader(
file,
header_offset,
spec.code_id,
@intCast(u32, (try file.getPos()) - header_offset - header_size),
@intCast(u32, self.funcs.items.len),
);
}
}
/// Get the current index of a given Decl in the function list
/// TODO: we could maintain a hash map to potentially make this faster
fn getFuncidx(self: Wasm, decl: *Module.Decl) ?u32 {
return for (self.funcs.items) |func, idx| {
if (func == decl) break @intCast(u32, idx);
} else null;
}
fn reserveVecSectionHeader(file: fs.File) !u64 {
// section id + fixed leb contents size + fixed leb vector length
const header_size = 1 + 5 + 5;
// TODO: this should be a single lseek(2) call, but fs.File does not
// currently provide a way to do this.
try file.seekBy(header_size);
return (try file.getPos()) - header_size;
}
fn writeVecSectionHeader(file: fs.File, offset: u64, section: u8, size: u32, items: u32) !void {
var buf: [1 + 5 + 5]u8 = undefined;
buf[0] = section;
leb.writeUnsignedFixed(5, buf[1..6], size);
leb.writeUnsignedFixed(5, buf[6..], items);
try file.pwriteAll(&buf, offset);
}

View File

@ -1,293 +0,0 @@
const c = @import("c.zig");
const assert = @import("std").debug.assert;
// we wrap the c module for 3 reasons:
// 1. to avoid accidentally calling the non-thread-safe functions
// 2. patch up some of the types to remove nullability
// 3. some functions have been augmented by zig_llvm.cpp to be more powerful,
// such as ZigLLVMTargetMachineEmitToFile
pub const AttributeIndex = c_uint;
pub const Bool = c_int;
pub const Builder = c.LLVMBuilderRef.Child.Child;
pub const Context = c.LLVMContextRef.Child.Child;
pub const Module = c.LLVMModuleRef.Child.Child;
pub const Value = c.LLVMValueRef.Child.Child;
pub const Type = c.LLVMTypeRef.Child.Child;
pub const BasicBlock = c.LLVMBasicBlockRef.Child.Child;
pub const Attribute = c.LLVMAttributeRef.Child.Child;
pub const Target = c.LLVMTargetRef.Child.Child;
pub const TargetMachine = c.LLVMTargetMachineRef.Child.Child;
pub const TargetData = c.LLVMTargetDataRef.Child.Child;
pub const DIBuilder = c.ZigLLVMDIBuilder;
pub const DIFile = c.ZigLLVMDIFile;
pub const DICompileUnit = c.ZigLLVMDICompileUnit;
pub const ABIAlignmentOfType = c.LLVMABIAlignmentOfType;
pub const AddAttributeAtIndex = c.LLVMAddAttributeAtIndex;
pub const AddModuleCodeViewFlag = c.ZigLLVMAddModuleCodeViewFlag;
pub const AddModuleDebugInfoFlag = c.ZigLLVMAddModuleDebugInfoFlag;
pub const ClearCurrentDebugLocation = c.ZigLLVMClearCurrentDebugLocation;
pub const ConstAllOnes = c.LLVMConstAllOnes;
pub const ConstArray = c.LLVMConstArray;
pub const ConstBitCast = c.LLVMConstBitCast;
pub const ConstIntOfArbitraryPrecision = c.LLVMConstIntOfArbitraryPrecision;
pub const ConstNeg = c.LLVMConstNeg;
pub const ConstStructInContext = c.LLVMConstStructInContext;
pub const DIBuilderFinalize = c.ZigLLVMDIBuilderFinalize;
pub const DisposeBuilder = c.LLVMDisposeBuilder;
pub const DisposeDIBuilder = c.ZigLLVMDisposeDIBuilder;
pub const DisposeMessage = c.LLVMDisposeMessage;
pub const DisposeModule = c.LLVMDisposeModule;
pub const DisposeTargetData = c.LLVMDisposeTargetData;
pub const DisposeTargetMachine = c.LLVMDisposeTargetMachine;
pub const DoubleTypeInContext = c.LLVMDoubleTypeInContext;
pub const DumpModule = c.LLVMDumpModule;
pub const FP128TypeInContext = c.LLVMFP128TypeInContext;
pub const FloatTypeInContext = c.LLVMFloatTypeInContext;
pub const GetEnumAttributeKindForName = c.LLVMGetEnumAttributeKindForName;
pub const GetMDKindIDInContext = c.LLVMGetMDKindIDInContext;
pub const GetUndef = c.LLVMGetUndef;
pub const HalfTypeInContext = c.LLVMHalfTypeInContext;
pub const InitializeAllAsmParsers = c.LLVMInitializeAllAsmParsers;
pub const InitializeAllAsmPrinters = c.LLVMInitializeAllAsmPrinters;
pub const InitializeAllTargetInfos = c.LLVMInitializeAllTargetInfos;
pub const InitializeAllTargetMCs = c.LLVMInitializeAllTargetMCs;
pub const InitializeAllTargets = c.LLVMInitializeAllTargets;
pub const InsertBasicBlockInContext = c.LLVMInsertBasicBlockInContext;
pub const Int128TypeInContext = c.LLVMInt128TypeInContext;
pub const Int16TypeInContext = c.LLVMInt16TypeInContext;
pub const Int1TypeInContext = c.LLVMInt1TypeInContext;
pub const Int32TypeInContext = c.LLVMInt32TypeInContext;
pub const Int64TypeInContext = c.LLVMInt64TypeInContext;
pub const Int8TypeInContext = c.LLVMInt8TypeInContext;
pub const IntPtrTypeForASInContext = c.LLVMIntPtrTypeForASInContext;
pub const IntPtrTypeInContext = c.LLVMIntPtrTypeInContext;
pub const LabelTypeInContext = c.LLVMLabelTypeInContext;
pub const MDNodeInContext = c.LLVMMDNodeInContext;
pub const MDStringInContext = c.LLVMMDStringInContext;
pub const MetadataTypeInContext = c.LLVMMetadataTypeInContext;
pub const PPCFP128TypeInContext = c.LLVMPPCFP128TypeInContext;
pub const SetAlignment = c.LLVMSetAlignment;
pub const SetDataLayout = c.LLVMSetDataLayout;
pub const SetGlobalConstant = c.LLVMSetGlobalConstant;
pub const SetInitializer = c.LLVMSetInitializer;
pub const SetLinkage = c.LLVMSetLinkage;
pub const SetTarget = c.LLVMSetTarget;
pub const SetUnnamedAddr = c.LLVMSetUnnamedAddr;
pub const SetVolatile = c.LLVMSetVolatile;
pub const StructTypeInContext = c.LLVMStructTypeInContext;
pub const TokenTypeInContext = c.LLVMTokenTypeInContext;
pub const X86FP80TypeInContext = c.LLVMX86FP80TypeInContext;
pub const X86MMXTypeInContext = c.LLVMX86MMXTypeInContext;
pub const AddGlobal = LLVMAddGlobal;
extern fn LLVMAddGlobal(M: *Module, Ty: *Type, Name: [*:0]const u8) ?*Value;
pub const ConstStringInContext = LLVMConstStringInContext;
extern fn LLVMConstStringInContext(C: *Context, Str: [*]const u8, Length: c_uint, DontNullTerminate: Bool) ?*Value;
pub const ConstInt = LLVMConstInt;
extern fn LLVMConstInt(IntTy: *Type, N: c_ulonglong, SignExtend: Bool) ?*Value;
pub const BuildLoad = LLVMBuildLoad;
extern fn LLVMBuildLoad(arg0: *Builder, PointerVal: *Value, Name: [*:0]const u8) ?*Value;
pub const ConstNull = LLVMConstNull;
extern fn LLVMConstNull(Ty: *Type) ?*Value;
pub const CreateStringAttribute = LLVMCreateStringAttribute;
extern fn LLVMCreateStringAttribute(
C: *Context,
K: [*]const u8,
KLength: c_uint,
V: [*]const u8,
VLength: c_uint,
) ?*Attribute;
pub const CreateEnumAttribute = LLVMCreateEnumAttribute;
extern fn LLVMCreateEnumAttribute(C: *Context, KindID: c_uint, Val: u64) ?*Attribute;
pub const AddFunction = LLVMAddFunction;
extern fn LLVMAddFunction(M: *Module, Name: [*:0]const u8, FunctionTy: *Type) ?*Value;
pub const CreateCompileUnit = ZigLLVMCreateCompileUnit;
extern fn ZigLLVMCreateCompileUnit(
dibuilder: *DIBuilder,
lang: c_uint,
difile: *DIFile,
producer: [*:0]const u8,
is_optimized: bool,
flags: [*:0]const u8,
runtime_version: c_uint,
split_name: [*:0]const u8,
dwo_id: u64,
emit_debug_info: bool,
) ?*DICompileUnit;
pub const CreateFile = ZigLLVMCreateFile;
extern fn ZigLLVMCreateFile(dibuilder: *DIBuilder, filename: [*:0]const u8, directory: [*:0]const u8) ?*DIFile;
pub const ArrayType = LLVMArrayType;
extern fn LLVMArrayType(ElementType: *Type, ElementCount: c_uint) ?*Type;
pub const CreateDIBuilder = ZigLLVMCreateDIBuilder;
extern fn ZigLLVMCreateDIBuilder(module: *Module, allow_unresolved: bool) ?*DIBuilder;
pub const PointerType = LLVMPointerType;
extern fn LLVMPointerType(ElementType: *Type, AddressSpace: c_uint) ?*Type;
pub const CreateBuilderInContext = LLVMCreateBuilderInContext;
extern fn LLVMCreateBuilderInContext(C: *Context) ?*Builder;
pub const IntTypeInContext = LLVMIntTypeInContext;
extern fn LLVMIntTypeInContext(C: *Context, NumBits: c_uint) ?*Type;
pub const ModuleCreateWithNameInContext = LLVMModuleCreateWithNameInContext;
extern fn LLVMModuleCreateWithNameInContext(ModuleID: [*:0]const u8, C: *Context) ?*Module;
pub const VoidTypeInContext = LLVMVoidTypeInContext;
extern fn LLVMVoidTypeInContext(C: *Context) ?*Type;
pub const ContextCreate = LLVMContextCreate;
extern fn LLVMContextCreate() ?*Context;
pub const ContextDispose = LLVMContextDispose;
extern fn LLVMContextDispose(C: *Context) void;
pub const CopyStringRepOfTargetData = LLVMCopyStringRepOfTargetData;
extern fn LLVMCopyStringRepOfTargetData(TD: *TargetData) ?[*:0]u8;
pub const CreateTargetDataLayout = LLVMCreateTargetDataLayout;
extern fn LLVMCreateTargetDataLayout(T: *TargetMachine) ?*TargetData;
pub const CreateTargetMachine = ZigLLVMCreateTargetMachine;
extern fn ZigLLVMCreateTargetMachine(
T: *Target,
Triple: [*:0]const u8,
CPU: [*:0]const u8,
Features: [*:0]const u8,
Level: CodeGenOptLevel,
Reloc: RelocMode,
CodeModel: CodeModel,
function_sections: bool,
) ?*TargetMachine;
pub const GetHostCPUName = LLVMGetHostCPUName;
extern fn LLVMGetHostCPUName() ?[*:0]u8;
pub const GetNativeFeatures = ZigLLVMGetNativeFeatures;
extern fn ZigLLVMGetNativeFeatures() ?[*:0]u8;
pub const GetElementType = LLVMGetElementType;
extern fn LLVMGetElementType(Ty: *Type) *Type;
pub const TypeOf = LLVMTypeOf;
extern fn LLVMTypeOf(Val: *Value) *Type;
pub const BuildStore = LLVMBuildStore;
extern fn LLVMBuildStore(arg0: *Builder, Val: *Value, Ptr: *Value) ?*Value;
pub const BuildAlloca = LLVMBuildAlloca;
extern fn LLVMBuildAlloca(arg0: *Builder, Ty: *Type, Name: ?[*:0]const u8) ?*Value;
pub const ConstInBoundsGEP = LLVMConstInBoundsGEP;
pub extern fn LLVMConstInBoundsGEP(ConstantVal: *Value, ConstantIndices: [*]*Value, NumIndices: c_uint) ?*Value;
pub const GetTargetFromTriple = LLVMGetTargetFromTriple;
extern fn LLVMGetTargetFromTriple(Triple: [*:0]const u8, T: **Target, ErrorMessage: ?*[*:0]u8) Bool;
pub const VerifyModule = LLVMVerifyModule;
extern fn LLVMVerifyModule(M: *Module, Action: VerifierFailureAction, OutMessage: *?[*:0]u8) Bool;
pub const GetInsertBlock = LLVMGetInsertBlock;
extern fn LLVMGetInsertBlock(Builder: *Builder) *BasicBlock;
pub const FunctionType = LLVMFunctionType;
extern fn LLVMFunctionType(
ReturnType: *Type,
ParamTypes: [*]*Type,
ParamCount: c_uint,
IsVarArg: Bool,
) ?*Type;
pub const GetParam = LLVMGetParam;
extern fn LLVMGetParam(Fn: *Value, Index: c_uint) *Value;
pub const AppendBasicBlockInContext = LLVMAppendBasicBlockInContext;
extern fn LLVMAppendBasicBlockInContext(C: *Context, Fn: *Value, Name: [*:0]const u8) ?*BasicBlock;
pub const PositionBuilderAtEnd = LLVMPositionBuilderAtEnd;
extern fn LLVMPositionBuilderAtEnd(Builder: *Builder, Block: *BasicBlock) void;
pub const AbortProcessAction = VerifierFailureAction.LLVMAbortProcessAction;
pub const PrintMessageAction = VerifierFailureAction.LLVMPrintMessageAction;
pub const ReturnStatusAction = VerifierFailureAction.LLVMReturnStatusAction;
pub const VerifierFailureAction = c.LLVMVerifierFailureAction;
pub const CodeGenLevelNone = CodeGenOptLevel.LLVMCodeGenLevelNone;
pub const CodeGenLevelLess = CodeGenOptLevel.LLVMCodeGenLevelLess;
pub const CodeGenLevelDefault = CodeGenOptLevel.LLVMCodeGenLevelDefault;
pub const CodeGenLevelAggressive = CodeGenOptLevel.LLVMCodeGenLevelAggressive;
pub const CodeGenOptLevel = c.LLVMCodeGenOptLevel;
pub const RelocDefault = RelocMode.LLVMRelocDefault;
pub const RelocStatic = RelocMode.LLVMRelocStatic;
pub const RelocPIC = RelocMode.LLVMRelocPIC;
pub const RelocDynamicNoPic = RelocMode.LLVMRelocDynamicNoPic;
pub const RelocMode = c.LLVMRelocMode;
pub const CodeModelDefault = CodeModel.LLVMCodeModelDefault;
pub const CodeModelJITDefault = CodeModel.LLVMCodeModelJITDefault;
pub const CodeModelSmall = CodeModel.LLVMCodeModelSmall;
pub const CodeModelKernel = CodeModel.LLVMCodeModelKernel;
pub const CodeModelMedium = CodeModel.LLVMCodeModelMedium;
pub const CodeModelLarge = CodeModel.LLVMCodeModelLarge;
pub const CodeModel = c.LLVMCodeModel;
pub const EmitAssembly = EmitOutputType.ZigLLVM_EmitAssembly;
pub const EmitBinary = EmitOutputType.ZigLLVM_EmitBinary;
pub const EmitLLVMIr = EmitOutputType.ZigLLVM_EmitLLVMIr;
pub const EmitOutputType = c.ZigLLVM_EmitOutputType;
pub const CCallConv = CallConv.LLVMCCallConv;
pub const FastCallConv = CallConv.LLVMFastCallConv;
pub const ColdCallConv = CallConv.LLVMColdCallConv;
pub const WebKitJSCallConv = CallConv.LLVMWebKitJSCallConv;
pub const AnyRegCallConv = CallConv.LLVMAnyRegCallConv;
pub const X86StdcallCallConv = CallConv.LLVMX86StdcallCallConv;
pub const X86FastcallCallConv = CallConv.LLVMX86FastcallCallConv;
pub const CallConv = c.LLVMCallConv;
pub const CallAttr = extern enum {
Auto,
NeverTail,
NeverInline,
AlwaysTail,
AlwaysInline,
};
fn removeNullability(comptime T: type) type {
comptime assert(@typeInfo(T).Pointer.size == .C);
return *T.Child;
}
pub const BuildRet = LLVMBuildRet;
extern fn LLVMBuildRet(arg0: *Builder, V: ?*Value) ?*Value;
pub const TargetMachineEmitToFile = ZigLLVMTargetMachineEmitToFile;
extern fn ZigLLVMTargetMachineEmitToFile(
targ_machine_ref: *TargetMachine,
module_ref: *Module,
filename: [*:0]const u8,
output_type: EmitOutputType,
error_message: *[*:0]u8,
is_debug: bool,
is_small: bool,
) bool;
pub const BuildCall = ZigLLVMBuildCall;
extern fn ZigLLVMBuildCall(B: *Builder, Fn: *Value, Args: [*]*Value, NumArgs: c_uint, CC: CallConv, fn_inline: CallAttr, Name: [*:0]const u8) ?*Value;
pub const PrivateLinkage = c.LLVMLinkage.LLVMPrivateLinkage;

View File

@ -1,927 +0,0 @@
const std = @import("std");
const io = std.io;
const fs = std.fs;
const mem = std.mem;
const process = std.process;
const Allocator = mem.Allocator;
const ArrayList = std.ArrayList;
const ast = std.zig.ast;
const Module = @import("Module.zig");
const link = @import("link.zig");
const Package = @import("Package.zig");
const zir = @import("zir.zig");
const build_options = @import("build_options");
pub const max_src_size = 2 * 1024 * 1024 * 1024; // 2 GiB
pub const Color = enum {
Auto,
Off,
On,
};
const usage =
\\Usage: zig [command] [options]
\\
\\Commands:
\\
\\ build-exe [source] Create executable from source or object files
\\ build-lib [source] Create library from source or object files
\\ build-obj [source] Create object from source or assembly
\\ fmt [source] Parse file and render in canonical zig format
\\ targets List available compilation targets
\\ env Print lib path, std path, compiler id and version
\\ version Print version number and exit
\\ zen Print zen of zig and exit
\\
\\
;
pub fn log(
comptime level: std.log.Level,
comptime scope: @TypeOf(.EnumLiteral),
comptime format: []const u8,
args: anytype,
) void {
// Hide anything more verbose than warn unless it was added with `-Dlog=foo`.
if (@enumToInt(level) > @enumToInt(std.log.level) or
@enumToInt(level) > @enumToInt(std.log.Level.warn))
{
const scope_name = @tagName(scope);
const ok = comptime for (build_options.log_scopes) |log_scope| {
if (mem.eql(u8, log_scope, scope_name))
break true;
} else false;
if (!ok)
return;
}
const prefix = "[" ++ @tagName(level) ++ "] " ++ "(" ++ @tagName(scope) ++ "): ";
// Print the message to stderr, silently ignoring any errors
std.debug.print(prefix ++ format ++ "\n", args);
}
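// Example of the override above: messages from `std.log.scoped(.compiler)` at .info
// or .debug level are only printed when the compiler was built with `-Dlog=compiler`.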
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
pub fn main() !void {
const gpa = if (std.builtin.link_libc) std.heap.c_allocator else &general_purpose_allocator.allocator;
defer if (!std.builtin.link_libc) {
_ = general_purpose_allocator.deinit();
};
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
const arena = &arena_instance.allocator;
const args = try process.argsAlloc(arena);
if (args.len <= 1) {
std.debug.print("expected command argument\n\n{}", .{usage});
process.exit(1);
}
const cmd = args[1];
const cmd_args = args[2..];
if (mem.eql(u8, cmd, "build-exe")) {
return buildOutputType(gpa, arena, cmd_args, .Exe);
} else if (mem.eql(u8, cmd, "build-lib")) {
return buildOutputType(gpa, arena, cmd_args, .Lib);
} else if (mem.eql(u8, cmd, "build-obj")) {
return buildOutputType(gpa, arena, cmd_args, .Obj);
} else if (mem.eql(u8, cmd, "fmt")) {
return cmdFmt(gpa, cmd_args);
} else if (mem.eql(u8, cmd, "targets")) {
const info = try std.zig.system.NativeTargetInfo.detect(arena, .{});
const stdout = io.getStdOut().outStream();
return @import("print_targets.zig").cmdTargets(arena, cmd_args, stdout, info.target);
} else if (mem.eql(u8, cmd, "version")) {
try std.io.getStdOut().writeAll(build_options.version ++ "\n");
} else if (mem.eql(u8, cmd, "env")) {
try @import("print_env.zig").cmdEnv(arena, cmd_args, io.getStdOut().outStream());
} else if (mem.eql(u8, cmd, "zen")) {
try io.getStdOut().writeAll(info_zen);
} else if (mem.eql(u8, cmd, "help")) {
try io.getStdOut().writeAll(usage);
} else {
std.debug.print("unknown command: {}\n\n{}", .{ args[1], usage });
process.exit(1);
}
}
const usage_build_generic =
\\Usage: zig build-exe <options> [files]
\\ zig build-lib <options> [files]
\\ zig build-obj <options> [files]
\\
\\Supported file types:
\\ .zig Zig source code
\\ .zir Zig Intermediate Representation code
\\ (planned) .o ELF object file
\\ (planned) .o MACH-O (macOS) object file
\\ (planned) .obj COFF (Windows) object file
\\ (planned) .lib COFF (Windows) static library
\\ (planned) .a ELF static library
\\ (planned) .so ELF shared object (dynamic link)
\\ (planned) .dll Windows Dynamic Link Library
\\ (planned) .dylib MACH-O (macOS) dynamic library
\\ (planned) .s Target-specific assembly source code
\\ (planned) .S Assembly with C preprocessor (requires LLVM extensions)
\\ (planned) .c C source code (requires LLVM extensions)
\\ (planned) .cpp C++ source code (requires LLVM extensions)
\\ Other C++ extensions: .C .cc .cxx
\\
\\General Options:
\\ -h, --help Print this help and exit
\\ --watch Enable compiler REPL
\\ --color [auto|off|on] Enable or disable colored error messages
\\ -femit-bin[=path] (default) output machine code
\\ -fno-emit-bin Do not output machine code
\\
\\Compile Options:
\\ -target [name] <arch><sub>-<os>-<abi> see the targets command
\\ -mcpu [cpu] Specify target CPU and feature set
\\ --name [name] Override output name
\\ --mode [mode] Set the build mode
\\ Debug (default) optimizations off, safety on
\\ ReleaseFast Optimizations on, safety off
\\ ReleaseSafe Optimizations on, safety on
\\ ReleaseSmall Optimize for small binary, safety off
\\ --dynamic Force output to be dynamically linked
\\ --strip Exclude debug symbols
\\ -ofmt=[mode] Override target object format
\\ elf Executable and Linking Format
\\ c Compile to C source code
\\ wasm WebAssembly
\\ pe Portable Executable (Windows)
\\ coff (planned) Common Object File Format (Windows)
\\ macho (planned) macOS relocatables
\\ hex (planned) Intel IHEX
\\ raw (planned) Dump machine code directly
\\
\\Link Options:
\\ -l[lib], --library [lib] Link against system library
\\ --dynamic-linker [path] Set the dynamic interpreter path (usually ld.so)
\\ --version [ver] Dynamic library semver
\\
\\Debug Options (Zig Compiler Development):
\\ -ftime-report Print timing diagnostics
\\ --debug-tokenize verbose tokenization
\\ --debug-ast-tree verbose parsing into an AST (tree view)
\\ --debug-ast-fmt verbose parsing into an AST (render source)
\\ --debug-ir verbose Zig IR
\\ --debug-link verbose linking
\\ --debug-codegen verbose machine code generation
\\
;
const Emit = union(enum) {
no,
yes_default_path,
yes: []const u8,
};
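/// Implements `zig build-exe`, `zig build-lib`, and `zig build-obj`: parses the
/// command line, resolves the cross target, creates the root Package and Module,
/// runs an initial update, and then optionally enters the `--watch` REPL.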
fn buildOutputType(
gpa: *Allocator,
arena: *Allocator,
args: []const []const u8,
output_mode: std.builtin.OutputMode,
) !void {
var color: Color = .Auto;
var build_mode: std.builtin.Mode = .Debug;
var provided_name: ?[]const u8 = null;
var link_mode: ?std.builtin.LinkMode = null;
var root_src_file: ?[]const u8 = null;
var version: std.builtin.Version = .{ .major = 0, .minor = 0, .patch = 0 };
var strip = false;
var watch = false;
var debug_tokenize = false;
var debug_ast_tree = false;
var debug_ast_fmt = false;
var debug_link = false;
var debug_ir = false;
var debug_codegen = false;
var time_report = false;
var emit_bin: Emit = .yes_default_path;
var emit_zir: Emit = .no;
var target_arch_os_abi: []const u8 = "native";
var target_mcpu: ?[]const u8 = null;
var target_dynamic_linker: ?[]const u8 = null;
var target_ofmt: ?[]const u8 = null;
var system_libs = std.ArrayList([]const u8).init(gpa);
defer system_libs.deinit();
{
var i: usize = 0;
while (i < args.len) : (i += 1) {
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
try io.getStdOut().writeAll(usage_build_generic);
process.exit(0);
} else if (mem.eql(u8, arg, "--color")) {
if (i + 1 >= args.len) {
std.debug.print("expected [auto|on|off] after --color\n", .{});
process.exit(1);
}
i += 1;
const next_arg = args[i];
if (mem.eql(u8, next_arg, "auto")) {
color = .Auto;
} else if (mem.eql(u8, next_arg, "on")) {
color = .On;
} else if (mem.eql(u8, next_arg, "off")) {
color = .Off;
} else {
std.debug.print("expected [auto|on|off] after --color, found '{}'\n", .{next_arg});
process.exit(1);
}
} else if (mem.eql(u8, arg, "--mode")) {
if (i + 1 >= args.len) {
std.debug.print("expected [Debug|ReleaseSafe|ReleaseFast|ReleaseSmall] after --mode\n", .{});
process.exit(1);
}
i += 1;
const next_arg = args[i];
if (mem.eql(u8, next_arg, "Debug")) {
build_mode = .Debug;
} else if (mem.eql(u8, next_arg, "ReleaseSafe")) {
build_mode = .ReleaseSafe;
} else if (mem.eql(u8, next_arg, "ReleaseFast")) {
build_mode = .ReleaseFast;
} else if (mem.eql(u8, next_arg, "ReleaseSmall")) {
build_mode = .ReleaseSmall;
} else {
std.debug.print("expected [Debug|ReleaseSafe|ReleaseFast|ReleaseSmall] after --mode, found '{}'\n", .{next_arg});
process.exit(1);
}
} else if (mem.eql(u8, arg, "--name")) {
if (i + 1 >= args.len) {
std.debug.print("expected parameter after --name\n", .{});
process.exit(1);
}
i += 1;
provided_name = args[i];
} else if (mem.eql(u8, arg, "--library")) {
if (i + 1 >= args.len) {
std.debug.print("expected parameter after --library\n", .{});
process.exit(1);
}
i += 1;
try system_libs.append(args[i]);
} else if (mem.eql(u8, arg, "--version")) {
if (i + 1 >= args.len) {
std.debug.print("expected parameter after --version\n", .{});
process.exit(1);
}
i += 1;
version = std.builtin.Version.parse(args[i]) catch |err| {
std.debug.print("unable to parse --version '{}': {}\n", .{ args[i], @errorName(err) });
process.exit(1);
};
} else if (mem.eql(u8, arg, "-target")) {
if (i + 1 >= args.len) {
std.debug.print("expected parameter after -target\n", .{});
process.exit(1);
}
i += 1;
target_arch_os_abi = args[i];
} else if (mem.eql(u8, arg, "-mcpu")) {
if (i + 1 >= args.len) {
std.debug.print("expected parameter after -mcpu\n", .{});
process.exit(1);
}
i += 1;
target_mcpu = args[i];
} else if (mem.startsWith(u8, arg, "-ofmt=")) {
target_ofmt = arg["-ofmt=".len..];
} else if (mem.startsWith(u8, arg, "-mcpu=")) {
target_mcpu = arg["-mcpu=".len..];
} else if (mem.eql(u8, arg, "--dynamic-linker")) {
if (i + 1 >= args.len) {
std.debug.print("expected parameter after --dynamic-linker\n", .{});
process.exit(1);
}
i += 1;
target_dynamic_linker = args[i];
} else if (mem.eql(u8, arg, "--watch")) {
watch = true;
} else if (mem.eql(u8, arg, "-ftime-report")) {
time_report = true;
} else if (mem.eql(u8, arg, "-femit-bin")) {
emit_bin = .yes_default_path;
} else if (mem.startsWith(u8, arg, "-femit-bin=")) {
emit_bin = .{ .yes = arg["-femit-bin=".len..] };
} else if (mem.eql(u8, arg, "-fno-emit-bin")) {
emit_bin = .no;
} else if (mem.eql(u8, arg, "-femit-zir")) {
emit_zir = .yes_default_path;
} else if (mem.startsWith(u8, arg, "-femit-zir=")) {
emit_zir = .{ .yes = arg["-femit-zir=".len..] };
} else if (mem.eql(u8, arg, "-fno-emit-zir")) {
emit_zir = .no;
} else if (mem.eql(u8, arg, "-dynamic")) {
link_mode = .Dynamic;
} else if (mem.eql(u8, arg, "-static")) {
link_mode = .Static;
} else if (mem.eql(u8, arg, "--strip")) {
strip = true;
} else if (mem.eql(u8, arg, "--debug-tokenize")) {
debug_tokenize = true;
} else if (mem.eql(u8, arg, "--debug-ast-tree")) {
debug_ast_tree = true;
} else if (mem.eql(u8, arg, "--debug-ast-fmt")) {
debug_ast_fmt = true;
} else if (mem.eql(u8, arg, "--debug-link")) {
debug_link = true;
} else if (mem.eql(u8, arg, "--debug-ir")) {
debug_ir = true;
} else if (mem.eql(u8, arg, "--debug-codegen")) {
debug_codegen = true;
} else if (mem.startsWith(u8, arg, "-l")) {
try system_libs.append(arg[2..]);
} else {
std.debug.print("unrecognized parameter: '{}'\n", .{arg});
process.exit(1);
}
} else if (mem.endsWith(u8, arg, ".s") or mem.endsWith(u8, arg, ".S")) {
std.debug.print("assembly files not supported yet\n", .{});
process.exit(1);
} else if (mem.endsWith(u8, arg, ".o") or
mem.endsWith(u8, arg, ".obj") or
mem.endsWith(u8, arg, ".a") or
mem.endsWith(u8, arg, ".lib"))
{
std.debug.print("object files and static libraries not supported yet\n", .{});
process.exit(1);
} else if (mem.endsWith(u8, arg, ".c") or
mem.endsWith(u8, arg, ".cpp"))
{
std.debug.print("compilation of C and C++ source code requires LLVM extensions which are not implemented yet\n", .{});
process.exit(1);
} else if (mem.endsWith(u8, arg, ".so") or
mem.endsWith(u8, arg, ".dylib") or
mem.endsWith(u8, arg, ".dll"))
{
std.debug.print("linking against dynamic libraries not yet supported\n", .{});
process.exit(1);
} else if (mem.endsWith(u8, arg, ".zig") or mem.endsWith(u8, arg, ".zir")) {
if (root_src_file) |other| {
std.debug.print("found another zig file '{}' after root source file '{}'\n", .{ arg, other });
process.exit(1);
} else {
root_src_file = arg;
}
} else {
std.debug.print("unrecognized file extension of parameter '{}'\n", .{arg});
}
}
}
const root_name = if (provided_name) |n| n else blk: {
if (root_src_file) |file| {
const basename = fs.path.basename(file);
var it = mem.split(basename, ".");
break :blk it.next() orelse basename;
} else {
std.debug.print("--name [name] not provided and unable to infer\n", .{});
process.exit(1);
}
};
if (system_libs.items.len != 0) {
std.debug.print("linking against system libraries not yet supported\n", .{});
process.exit(1);
}
var diags: std.zig.CrossTarget.ParseOptions.Diagnostics = .{};
const cross_target = std.zig.CrossTarget.parse(.{
.arch_os_abi = target_arch_os_abi,
.cpu_features = target_mcpu,
.dynamic_linker = target_dynamic_linker,
.diagnostics = &diags,
}) catch |err| switch (err) {
error.UnknownCpuModel => {
std.debug.print("Unknown CPU: '{}'\nAvailable CPUs for architecture '{}':\n", .{
diags.cpu_name.?,
@tagName(diags.arch.?),
});
for (diags.arch.?.allCpuModels()) |cpu| {
std.debug.print(" {}\n", .{cpu.name});
}
process.exit(1);
},
error.UnknownCpuFeature => {
std.debug.print(
\\Unknown CPU feature: '{}'
\\Available CPU features for architecture '{}':
\\
, .{
diags.unknown_feature_name,
@tagName(diags.arch.?),
});
for (diags.arch.?.allFeaturesList()) |feature| {
std.debug.print(" {}: {}\n", .{ feature.name, feature.description });
}
process.exit(1);
},
else => |e| return e,
};
var target_info = try std.zig.system.NativeTargetInfo.detect(gpa, cross_target);
if (target_info.cpu_detection_unimplemented) {
// TODO We want to just use detected_info.target, but implementing
// CPU model & feature detection is still TODO, so for now we rely on LLVM.
std.debug.print("CPU features detection is not yet available for this system without LLVM extensions\n", .{});
process.exit(1);
}
const src_path = root_src_file orelse {
std.debug.print("expected at least one file argument", .{});
process.exit(1);
};
const object_format: ?std.Target.ObjectFormat = blk: {
const ofmt = target_ofmt orelse break :blk null;
if (mem.eql(u8, ofmt, "elf")) {
break :blk .elf;
} else if (mem.eql(u8, ofmt, "c")) {
break :blk .c;
} else if (mem.eql(u8, ofmt, "coff")) {
break :blk .coff;
} else if (mem.eql(u8, ofmt, "pe")) {
break :blk .pe;
} else if (mem.eql(u8, ofmt, "macho")) {
break :blk .macho;
} else if (mem.eql(u8, ofmt, "wasm")) {
break :blk .wasm;
} else if (mem.eql(u8, ofmt, "hex")) {
break :blk .hex;
} else if (mem.eql(u8, ofmt, "raw")) {
break :blk .raw;
} else {
std.debug.print("unsupported object format: {}", .{ofmt});
process.exit(1);
}
};
const bin_path = switch (emit_bin) {
.no => {
std.debug.print("-fno-emit-bin not supported yet", .{});
process.exit(1);
},
.yes_default_path => if (object_format != null and object_format.? == .c)
try std.fmt.allocPrint(arena, "{}.c", .{root_name})
else
try std.zig.binNameAlloc(arena, root_name, target_info.target, output_mode, link_mode),
.yes => |p| p,
};
const zir_out_path: ?[]const u8 = switch (emit_zir) {
.no => null,
.yes_default_path => blk: {
if (root_src_file) |rsf| {
if (mem.endsWith(u8, rsf, ".zir")) {
break :blk try std.fmt.allocPrint(arena, "{}.out.zir", .{root_name});
}
}
break :blk try std.fmt.allocPrint(arena, "{}.zir", .{root_name});
},
.yes => |p| p,
};
const root_pkg = try Package.create(gpa, fs.cwd(), ".", src_path);
defer root_pkg.destroy();
var module = try Module.init(gpa, .{
.root_name = root_name,
.target = target_info.target,
.output_mode = output_mode,
.root_pkg = root_pkg,
.bin_file_dir = fs.cwd(),
.bin_file_path = bin_path,
.link_mode = link_mode,
.object_format = object_format,
.optimize_mode = build_mode,
.keep_source_files_loaded = zir_out_path != null,
});
defer module.deinit();
const stdin = std.io.getStdIn().inStream();
const stderr = std.io.getStdErr().outStream();
var repl_buf: [1024]u8 = undefined;
try updateModule(gpa, &module, zir_out_path);
while (watch) {
try stderr.print("🦎 ", .{});
if (output_mode == .Exe) {
try module.makeBinFileExecutable();
}
if (stdin.readUntilDelimiterOrEof(&repl_buf, '\n') catch |err| {
try stderr.print("\nUnable to parse command: {}\n", .{@errorName(err)});
continue;
}) |line| {
const actual_line = mem.trimRight(u8, line, "\r\n ");
if (mem.eql(u8, actual_line, "update")) {
if (output_mode == .Exe) {
try module.makeBinFileWritable();
}
try updateModule(gpa, &module, zir_out_path);
} else if (mem.eql(u8, actual_line, "exit")) {
break;
} else if (mem.eql(u8, actual_line, "help")) {
try stderr.writeAll(repl_help);
} else {
try stderr.print("unknown command: {}\n", .{actual_line});
}
} else {
break;
}
}
}
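/// Performs a single incremental update of the Module, printing any compile errors
/// (or the elapsed time on success), and emits textual ZIR to `zir_out_path` if set.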
fn updateModule(gpa: *Allocator, module: *Module, zir_out_path: ?[]const u8) !void {
var timer = try std.time.Timer.start();
try module.update();
const update_nanos = timer.read();
var errors = try module.getAllErrorsAlloc();
defer errors.deinit(module.gpa);
if (errors.list.len != 0) {
for (errors.list) |full_err_msg| {
std.debug.print("{}:{}:{}: error: {}\n", .{
full_err_msg.src_path,
full_err_msg.line + 1,
full_err_msg.column + 1,
full_err_msg.msg,
});
}
} else {
std.log.scoped(.compiler).info("Update completed in {} ms\n", .{update_nanos / std.time.ns_per_ms});
}
if (zir_out_path) |zop| {
var new_zir_module = try zir.emit(gpa, module.*);
defer new_zir_module.deinit(gpa);
const baf = try io.BufferedAtomicFile.create(gpa, fs.cwd(), zop, .{});
defer baf.destroy();
try new_zir_module.writeToStream(gpa, baf.stream());
try baf.finish();
}
}
const repl_help =
\\Commands:
\\ update Detect changes to source files and update output files.
\\ help Print this text
\\ exit Quit this repl
\\
;
pub const usage_fmt =
\\usage: zig fmt [file]...
\\
\\ Formats the input files and modifies them in-place.
\\ Arguments can be files or directories, which are searched
\\ recursively.
\\
\\Options:
\\ --help Print this help and exit
\\ --color [auto|off|on] Enable or disable colored error messages
\\ --stdin Format code from stdin; output to stdout
\\ --check List non-conforming files and exit with an error
\\ if the list is non-empty
\\
\\
;
const Fmt = struct {
seen: SeenMap,
any_error: bool,
color: Color,
gpa: *Allocator,
out_buffer: std.ArrayList(u8),
const SeenMap = std.AutoHashMap(fs.File.INode, void);
};
pub fn cmdFmt(gpa: *Allocator, args: []const []const u8) !void {
const stderr_file = io.getStdErr();
var color: Color = .Auto;
var stdin_flag: bool = false;
var check_flag: bool = false;
var input_files = ArrayList([]const u8).init(gpa);
{
var i: usize = 0;
while (i < args.len) : (i += 1) {
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "--help")) {
const stdout = io.getStdOut().outStream();
try stdout.writeAll(usage_fmt);
process.exit(0);
} else if (mem.eql(u8, arg, "--color")) {
if (i + 1 >= args.len) {
std.debug.print("expected [auto|on|off] after --color\n", .{});
process.exit(1);
}
i += 1;
const next_arg = args[i];
if (mem.eql(u8, next_arg, "auto")) {
color = .Auto;
} else if (mem.eql(u8, next_arg, "on")) {
color = .On;
} else if (mem.eql(u8, next_arg, "off")) {
color = .Off;
} else {
std.debug.print("expected [auto|on|off] after --color, found '{}'\n", .{next_arg});
process.exit(1);
}
} else if (mem.eql(u8, arg, "--stdin")) {
stdin_flag = true;
} else if (mem.eql(u8, arg, "--check")) {
check_flag = true;
} else {
std.debug.print("unrecognized parameter: '{}'", .{arg});
process.exit(1);
}
} else {
try input_files.append(arg);
}
}
}
if (stdin_flag) {
if (input_files.items.len != 0) {
std.debug.print("cannot use --stdin with positional arguments\n", .{});
process.exit(1);
}
const stdin = io.getStdIn().inStream();
const source_code = try stdin.readAllAlloc(gpa, max_src_size);
defer gpa.free(source_code);
const tree = std.zig.parse(gpa, source_code) catch |err| {
std.debug.print("error parsing stdin: {}\n", .{err});
process.exit(1);
};
defer tree.deinit();
for (tree.errors) |parse_error| {
try printErrMsgToFile(gpa, parse_error, tree, "<stdin>", stderr_file, color);
}
if (tree.errors.len != 0) {
process.exit(1);
}
if (check_flag) {
const anything_changed = try std.zig.render(gpa, io.null_out_stream, tree);
const code = if (anything_changed) @as(u8, 1) else @as(u8, 0);
process.exit(code);
}
const stdout = io.getStdOut().outStream();
_ = try std.zig.render(gpa, stdout, tree);
return;
}
if (input_files.items.len == 0) {
std.debug.print("expected at least one source file argument\n", .{});
process.exit(1);
}
var fmt = Fmt{
.gpa = gpa,
.seen = Fmt.SeenMap.init(gpa),
.any_error = false,
.color = color,
.out_buffer = std.ArrayList(u8).init(gpa),
};
defer fmt.seen.deinit();
defer fmt.out_buffer.deinit();
for (input_files.span()) |file_path| {
// Get the real path here to avoid Windows failing on relative file paths with . or .. in them.
const real_path = fs.realpathAlloc(gpa, file_path) catch |err| {
std.debug.print("unable to open '{}': {}\n", .{ file_path, err });
process.exit(1);
};
defer gpa.free(real_path);
try fmtPath(&fmt, file_path, check_flag, fs.cwd(), real_path);
}
if (fmt.any_error) {
process.exit(1);
}
}
const FmtError = error{
SystemResources,
OperationAborted,
IoPending,
BrokenPipe,
Unexpected,
WouldBlock,
FileClosed,
DestinationAddressRequired,
DiskQuota,
FileTooBig,
InputOutput,
NoSpaceLeft,
AccessDenied,
OutOfMemory,
RenameAcrossMountPoints,
ReadOnlyFileSystem,
LinkQuotaExceeded,
FileBusy,
EndOfStream,
Unseekable,
NotOpenForWriting,
} || fs.File.OpenError;
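/// Formats `sub_path`: tries it as a single file first and, if that fails with
/// IsDir or AccessDenied, falls back to formatting it recursively as a directory.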
fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: fs.Dir, sub_path: []const u8) FmtError!void {
fmtPathFile(fmt, file_path, check_mode, dir, sub_path) catch |err| switch (err) {
error.IsDir, error.AccessDenied => return fmtPathDir(fmt, file_path, check_mode, dir, sub_path),
else => {
std.debug.print("unable to format '{}': {}\n", .{ file_path, err });
fmt.any_error = true;
return;
},
};
}
fn fmtPathDir(
fmt: *Fmt,
file_path: []const u8,
check_mode: bool,
parent_dir: fs.Dir,
parent_sub_path: []const u8,
) FmtError!void {
var dir = try parent_dir.openDir(parent_sub_path, .{ .iterate = true });
defer dir.close();
const stat = try dir.stat();
if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
var dir_it = dir.iterate();
while (try dir_it.next()) |entry| {
const is_dir = entry.kind == .Directory;
if (is_dir or mem.endsWith(u8, entry.name, ".zig")) {
const full_path = try fs.path.join(fmt.gpa, &[_][]const u8{ file_path, entry.name });
defer fmt.gpa.free(full_path);
if (is_dir) {
try fmtPathDir(fmt, full_path, check_mode, dir, entry.name);
} else {
fmtPathFile(fmt, full_path, check_mode, dir, entry.name) catch |err| {
std.debug.print("unable to format '{}': {}\n", .{ full_path, err });
fmt.any_error = true;
return;
};
}
}
}
}
fn fmtPathFile(
fmt: *Fmt,
file_path: []const u8,
check_mode: bool,
dir: fs.Dir,
sub_path: []const u8,
) FmtError!void {
const source_file = try dir.openFile(sub_path, .{});
var file_closed = false;
errdefer if (!file_closed) source_file.close();
const stat = try source_file.stat();
if (stat.kind == .Directory)
return error.IsDir;
const source_code = source_file.readToEndAllocOptions(
fmt.gpa,
max_src_size,
stat.size,
@alignOf(u8),
null,
) catch |err| switch (err) {
error.ConnectionResetByPeer => unreachable,
error.ConnectionTimedOut => unreachable,
error.NotOpenForReading => unreachable,
else => |e| return e,
};
source_file.close();
file_closed = true;
defer fmt.gpa.free(source_code);
// Add to set after no longer possible to get error.IsDir.
if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
const tree = try std.zig.parse(fmt.gpa, source_code);
defer tree.deinit();
for (tree.errors) |parse_error| {
try printErrMsgToFile(fmt.gpa, parse_error, tree, file_path, std.io.getStdErr(), fmt.color);
}
if (tree.errors.len != 0) {
fmt.any_error = true;
return;
}
if (check_mode) {
const anything_changed = try std.zig.render(fmt.gpa, io.null_out_stream, tree);
if (anything_changed) {
std.debug.print("{}\n", .{file_path});
fmt.any_error = true;
}
} else {
// As a heuristic, we reserve enough capacity to match the length of the input source.
try fmt.out_buffer.ensureCapacity(source_code.len);
fmt.out_buffer.items.len = 0;
const writer = fmt.out_buffer.writer();
const anything_changed = try std.zig.render(fmt.gpa, writer, tree);
if (!anything_changed)
return; // Good thing we didn't waste any file system access on this.
var af = try dir.atomicFile(sub_path, .{ .mode = stat.mode });
defer af.deinit();
try af.file.writeAll(fmt.out_buffer.items);
try af.finish();
std.debug.print("{}\n", .{file_path});
}
}
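/// Renders a single parse error to `file` as `path:line:col: error: message`; when
/// color output is enabled, the offending source line is reprinted with a tilde
/// underline beneath the error span.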
fn printErrMsgToFile(
gpa: *mem.Allocator,
parse_error: ast.Error,
tree: *ast.Tree,
path: []const u8,
file: fs.File,
color: Color,
) !void {
const color_on = switch (color) {
.Auto => file.isTty(),
.On => true,
.Off => false,
};
const lok_token = parse_error.loc();
const span_first = lok_token;
const span_last = lok_token;
const first_token = tree.token_locs[span_first];
const last_token = tree.token_locs[span_last];
const start_loc = tree.tokenLocationLoc(0, first_token);
const end_loc = tree.tokenLocationLoc(first_token.end, last_token);
var text_buf = std.ArrayList(u8).init(gpa);
defer text_buf.deinit();
const out_stream = text_buf.outStream();
try parse_error.render(tree.token_ids, out_stream);
const text = text_buf.span();
const stream = file.outStream();
try stream.print("{}:{}:{}: error: {}\n", .{ path, start_loc.line + 1, start_loc.column + 1, text });
if (!color_on) return;
// Print \r and \t as one space each so that column counts line up
for (tree.source[start_loc.line_start..start_loc.line_end]) |byte| {
try stream.writeByte(switch (byte) {
'\r', '\t' => ' ',
else => byte,
});
}
try stream.writeByte('\n');
try stream.writeByteNTimes(' ', start_loc.column);
try stream.writeByteNTimes('~', last_token.end - first_token.start);
try stream.writeByte('\n');
}
pub const info_zen =
\\
\\ * Communicate intent precisely.
\\ * Edge cases matter.
\\ * Favor reading code over writing code.
\\ * Only one obvious way to do things.
\\ * Runtime crashes are better than bugs.
\\ * Compile errors are better than runtime crashes.
\\ * Incremental improvements.
\\ * Avoid local maximums.
\\ * Reduce the amount one must remember.
\\ * Minimize energy spent on coding style.
\\ * Resource deallocation must succeed.
\\ * Together we serve the users.
\\
\\
;

File diff suppressed because it is too large

911
src/Cache.zig Normal file
View File

@ -0,0 +1,911 @@
gpa: *Allocator,
manifest_dir: fs.Dir,
hash: HashHelper = .{},
const Cache = @This();
const std = @import("std");
const crypto = std.crypto;
const fs = std.fs;
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const fmt = std.fmt;
const Allocator = std.mem.Allocator;
/// Be sure to call `Manifest.deinit` after successful initialization.
pub fn obtain(cache: *const Cache) Manifest {
return Manifest{
.cache = cache,
.hash = cache.hash,
.manifest_file = null,
.manifest_dirty = false,
.hex_digest = undefined,
};
}
/// This is 128 bits - even with 2^54 cache entries, the probability of a collision would be under 10^-6
pub const bin_digest_len = 16;
pub const hex_digest_len = bin_digest_len * 2;
const manifest_file_size_max = 50 * 1024 * 1024;
/// The type used for hashing file contents. Currently, this is SipHash128(1, 3), because it
/// provides enough collision resistance for the Manifest use cases, while being one of our
/// fastest options right now.
pub const Hasher = crypto.auth.siphash.SipHash128(1, 3);
/// Initial state, that can be copied.
pub const hasher_init: Hasher = Hasher.init(&[_]u8{0} ** Hasher.minimum_key_length);
pub const File = struct {
path: ?[]const u8,
max_file_size: ?usize,
stat: fs.File.Stat,
bin_digest: [bin_digest_len]u8,
contents: ?[]const u8,
pub fn deinit(self: *File, allocator: *Allocator) void {
if (self.path) |owned_slice| {
allocator.free(owned_slice);
self.path = null;
}
if (self.contents) |contents| {
allocator.free(contents);
self.contents = null;
}
self.* = undefined;
}
};
pub const HashHelper = struct {
hasher: Hasher = hasher_init,
/// Record a slice of bytes as a dependency of the process being cached
pub fn addBytes(hh: *HashHelper, bytes: []const u8) void {
hh.hasher.update(mem.asBytes(&bytes.len));
hh.hasher.update(bytes);
}
pub fn addOptionalBytes(hh: *HashHelper, optional_bytes: ?[]const u8) void {
hh.add(optional_bytes != null);
hh.addBytes(optional_bytes orelse return);
}
pub fn addListOfBytes(hh: *HashHelper, list_of_bytes: []const []const u8) void {
hh.add(list_of_bytes.len);
for (list_of_bytes) |bytes| hh.addBytes(bytes);
}
pub fn addStringSet(hh: *HashHelper, hm: std.StringArrayHashMapUnmanaged(void)) void {
const entries = hm.items();
hh.add(entries.len);
for (entries) |entry| {
hh.addBytes(entry.key);
}
}
/// Convert the input value into bytes and record it as a dependency of the process being cached.
pub fn add(hh: *HashHelper, x: anytype) void {
switch (@TypeOf(x)) {
std.builtin.Version => {
hh.add(x.major);
hh.add(x.minor);
hh.add(x.patch);
},
std.Target.Os.TaggedVersionRange => {
switch (x) {
.linux => |linux| {
hh.add(linux.range.min);
hh.add(linux.range.max);
hh.add(linux.glibc);
},
.windows => |windows| {
hh.add(windows.min);
hh.add(windows.max);
},
.semver => |semver| {
hh.add(semver.min);
hh.add(semver.max);
},
.none => {},
}
},
else => switch (@typeInfo(@TypeOf(x))) {
.Bool, .Int, .Enum, .Array => hh.addBytes(mem.asBytes(&x)),
else => @compileError("unable to hash type " ++ @typeName(@TypeOf(x))),
},
}
}
pub fn addOptional(hh: *HashHelper, optional: anytype) void {
hh.add(optional != null);
hh.add(optional orelse return);
}
/// Returns a hex encoded hash of the inputs, without modifying state.
pub fn peek(hh: HashHelper) [hex_digest_len]u8 {
var copy = hh;
return copy.final();
}
pub fn peekBin(hh: HashHelper) [bin_digest_len]u8 {
var copy = hh;
var bin_digest: [bin_digest_len]u8 = undefined;
copy.hasher.final(&bin_digest);
return bin_digest;
}
/// Returns a hex encoded hash of the inputs, mutating the state of the hasher.
pub fn final(hh: *HashHelper) [hex_digest_len]u8 {
var bin_digest: [bin_digest_len]u8 = undefined;
hh.hasher.final(&bin_digest);
var out_digest: [hex_digest_len]u8 = undefined;
_ = std.fmt.bufPrint(&out_digest, "{x}", .{bin_digest}) catch unreachable;
return out_digest;
}
};
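// A minimal usage sketch of HashHelper (illustrative only; the inputs here are
// arbitrary assumptions, not real cache data). peek() hashes the accumulated state
// without consuming it, so it must agree with a later final() over the same inputs.
test "HashHelper peek matches final" {
var hh: HashHelper = .{};
hh.addBytes("example input");
hh.add(@as(u32, 42));
hh.add(std.builtin.Version{ .major = 0, .minor = 6, .patch = 0 });
const peeked = hh.peek();
const finished = hh.final();
testing.expectEqualSlices(u8, &peeked, &finished);
}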
pub const Lock = struct {
manifest_file: fs.File,
pub fn release(lock: *Lock) void {
lock.manifest_file.close();
lock.* = undefined;
}
};
/// Manifest manages project-local `zig-cache` directories.
/// This is not a general-purpose cache.
/// It is designed to be fast and simple, not to withstand attacks using specially-crafted input.
pub const Manifest = struct {
cache: *const Cache,
/// Current state for incremental hashing.
hash: HashHelper,
manifest_file: ?fs.File,
manifest_dirty: bool,
files: std.ArrayListUnmanaged(File) = .{},
hex_digest: [hex_digest_len]u8,
/// Add a file as a dependency of the process being cached. When `hit` is
/// called, the file's contents will be checked to ensure that it matches
/// the contents from previous times.
///
/// Max file size will be used to determine the amount of space the file contents
/// are allowed to take up in memory. If max_file_size is null, then the contents
/// will not be loaded into memory.
///
/// Returns the index of the entry in the `files` array list. You can use it
/// to access the contents of the file after calling `hit()` like so:
///
/// ```
/// var file_contents = cache_hash.files.items[file_index].contents.?;
/// ```
pub fn addFile(self: *Manifest, file_path: []const u8, max_file_size: ?usize) !usize {
assert(self.manifest_file == null);
try self.files.ensureCapacity(self.cache.gpa, self.files.items.len + 1);
const resolved_path = try fs.path.resolve(self.cache.gpa, &[_][]const u8{file_path});
const idx = self.files.items.len;
self.files.addOneAssumeCapacity().* = .{
.path = resolved_path,
.contents = null,
.max_file_size = max_file_size,
.stat = undefined,
.bin_digest = undefined,
};
self.hash.addBytes(resolved_path);
return idx;
}
pub fn addOptionalFile(self: *Manifest, optional_file_path: ?[]const u8) !void {
self.hash.add(optional_file_path != null);
const file_path = optional_file_path orelse return;
_ = try self.addFile(file_path, null);
}
pub fn addListOfFiles(self: *Manifest, list_of_files: []const []const u8) !void {
self.hash.add(list_of_files.len);
for (list_of_files) |file_path| {
_ = try self.addFile(file_path, null);
}
}
/// Check the cache to see if the input exists in it. If it exists, returns `true`.
/// A hex encoding of its hash is available by calling `final`.
///
/// This function will also acquire an exclusive lock on the manifest file. This means
/// that a process holding a Manifest will block any other process attempting to
/// acquire the lock.
///
/// The lock on the manifest file is released when `deinit` is called. As another
/// option, one may call `toOwnedLock` to obtain a smaller object which can represent
/// the lock. `deinit` is safe to call whether or not `toOwnedLock` has been called.
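///
/// A minimal usage sketch (assuming a `cache: Cache` set up as in the tests below;
/// the input path is hypothetical):
///
/// ```
/// var man = cache.obtain();
/// defer man.deinit();
/// man.hash.addBytes("some build option");
/// _ = try man.addFile("input.zig", null);
/// if (try man.hit()) {
///     // Reuse the artifact keyed by man.final().
/// } else {
///     // Rebuild, then record the inputs for next time.
///     try man.writeManifest();
/// }
/// ```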
pub fn hit(self: *Manifest) !bool {
assert(self.manifest_file == null);
const ext = ".txt";
var manifest_file_path: [self.hex_digest.len + ext.len]u8 = undefined;
var bin_digest: [bin_digest_len]u8 = undefined;
self.hash.hasher.final(&bin_digest);
_ = std.fmt.bufPrint(&self.hex_digest, "{x}", .{bin_digest}) catch unreachable;
self.hash.hasher = hasher_init;
self.hash.hasher.update(&bin_digest);
mem.copy(u8, &manifest_file_path, &self.hex_digest);
manifest_file_path[self.hex_digest.len..][0..ext.len].* = ext.*;
if (self.files.items.len != 0) {
self.manifest_file = try self.cache.manifest_dir.createFile(&manifest_file_path, .{
.read = true,
.truncate = false,
.lock = .Exclusive,
});
} else {
// If there are no file inputs, we check whether the manifest file exists
// instead of comparing the hashes of the files used for the cached item.
self.manifest_file = self.cache.manifest_dir.openFile(&manifest_file_path, .{
.read = true,
.write = true,
.lock = .Exclusive,
}) catch |err| switch (err) {
error.FileNotFound => {
self.manifest_dirty = true;
self.manifest_file = try self.cache.manifest_dir.createFile(&manifest_file_path, .{
.read = true,
.truncate = false,
.lock = .Exclusive,
});
return false;
},
else => |e| return e,
};
}
const file_contents = try self.manifest_file.?.inStream().readAllAlloc(self.cache.gpa, manifest_file_size_max);
defer self.cache.gpa.free(file_contents);
const input_file_count = self.files.items.len;
var any_file_changed = false;
var line_iter = mem.tokenize(file_contents, "\n");
var idx: usize = 0;
while (line_iter.next()) |line| {
defer idx += 1;
const cache_hash_file = if (idx < input_file_count) &self.files.items[idx] else blk: {
const new = try self.files.addOne(self.cache.gpa);
new.* = .{
.path = null,
.contents = null,
.max_file_size = null,
.stat = undefined,
.bin_digest = undefined,
};
break :blk new;
};
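// Each manifest line has the form "<size> <inode> <mtime_ns> <hex digest> <file path>",
// matching what `writeManifest` emits below.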
var iter = mem.tokenize(line, " ");
const size = iter.next() orelse return error.InvalidFormat;
const inode = iter.next() orelse return error.InvalidFormat;
const mtime_nsec_str = iter.next() orelse return error.InvalidFormat;
const digest_str = iter.next() orelse return error.InvalidFormat;
const file_path = iter.rest();
cache_hash_file.stat.size = fmt.parseInt(u64, size, 10) catch return error.InvalidFormat;
cache_hash_file.stat.inode = fmt.parseInt(fs.File.INode, inode, 10) catch return error.InvalidFormat;
cache_hash_file.stat.mtime = fmt.parseInt(i64, mtime_nsec_str, 10) catch return error.InvalidFormat;
std.fmt.hexToBytes(&cache_hash_file.bin_digest, digest_str) catch return error.InvalidFormat;
if (file_path.len == 0) {
return error.InvalidFormat;
}
if (cache_hash_file.path) |p| {
if (!mem.eql(u8, file_path, p)) {
return error.InvalidFormat;
}
}
if (cache_hash_file.path == null) {
cache_hash_file.path = try self.cache.gpa.dupe(u8, file_path);
}
const this_file = fs.cwd().openFile(cache_hash_file.path.?, .{ .read = true }) catch {
return error.CacheUnavailable;
};
defer this_file.close();
const actual_stat = try this_file.stat();
const size_match = actual_stat.size == cache_hash_file.stat.size;
const mtime_match = actual_stat.mtime == cache_hash_file.stat.mtime;
const inode_match = actual_stat.inode == cache_hash_file.stat.inode;
if (!size_match or !mtime_match or !inode_match) {
self.manifest_dirty = true;
cache_hash_file.stat = actual_stat;
if (isProblematicTimestamp(cache_hash_file.stat.mtime)) {
cache_hash_file.stat.mtime = 0;
cache_hash_file.stat.inode = 0;
}
var actual_digest: [bin_digest_len]u8 = undefined;
try hashFile(this_file, &actual_digest);
if (!mem.eql(u8, &cache_hash_file.bin_digest, &actual_digest)) {
cache_hash_file.bin_digest = actual_digest;
// keep going until we have the input file digests
any_file_changed = true;
}
}
if (!any_file_changed) {
self.hash.hasher.update(&cache_hash_file.bin_digest);
}
}
if (any_file_changed) {
// cache miss
// keep the manifest file open
self.unhit(bin_digest, input_file_count);
return false;
}
if (idx < input_file_count) {
self.manifest_dirty = true;
while (idx < input_file_count) : (idx += 1) {
const ch_file = &self.files.items[idx];
try self.populateFileHash(ch_file);
}
return false;
}
return true;
}
pub fn unhit(self: *Manifest, bin_digest: [bin_digest_len]u8, input_file_count: usize) void {
// Reset the hash.
self.hash.hasher = hasher_init;
self.hash.hasher.update(&bin_digest);
// Remove files not in the initial hash.
for (self.files.items[input_file_count..]) |*file| {
file.deinit(self.cache.gpa);
}
self.files.shrinkRetainingCapacity(input_file_count);
for (self.files.items) |file| {
self.hash.hasher.update(&file.bin_digest);
}
}
fn populateFileHash(self: *Manifest, ch_file: *File) !void {
const file = try fs.cwd().openFile(ch_file.path.?, .{});
defer file.close();
ch_file.stat = try file.stat();
if (isProblematicTimestamp(ch_file.stat.mtime)) {
ch_file.stat.mtime = 0;
ch_file.stat.inode = 0;
}
if (ch_file.max_file_size) |max_file_size| {
if (ch_file.stat.size > max_file_size) {
return error.FileTooBig;
}
const contents = try self.cache.gpa.alloc(u8, @intCast(usize, ch_file.stat.size));
errdefer self.cache.gpa.free(contents);
// Hash while reading from disk, to keep the contents in the CPU cache while
// doing the hashing.
var hasher = hasher_init;
var off: usize = 0;
while (true) {
// give me everything you've got, captain
const bytes_read = try file.read(contents[off..]);
if (bytes_read == 0) break;
hasher.update(contents[off..][0..bytes_read]);
off += bytes_read;
}
hasher.final(&ch_file.bin_digest);
ch_file.contents = contents;
} else {
try hashFile(file, &ch_file.bin_digest);
}
self.hash.hasher.update(&ch_file.bin_digest);
}
/// Add a file as a dependency of the process being cached, after the initial hash has
/// been calculated. This is useful for processes that do not know all the files they
/// depend on ahead of time. For example, a source file that can import other files
/// will need to be recompiled if an imported file is changed.
pub fn addFilePostFetch(self: *Manifest, file_path: []const u8, max_file_size: usize) ![]const u8 {
assert(self.manifest_file != null);
const resolved_path = try fs.path.resolve(self.cache.gpa, &[_][]const u8{file_path});
errdefer self.cache.gpa.free(resolved_path);
const new_ch_file = try self.files.addOne(self.cache.gpa);
new_ch_file.* = .{
.path = resolved_path,
.max_file_size = max_file_size,
.stat = undefined,
.bin_digest = undefined,
.contents = null,
};
errdefer self.files.shrinkRetainingCapacity(self.files.items.len - 1);
try self.populateFileHash(new_ch_file);
return new_ch_file.contents.?;
}
/// Add a file as a dependency of the process being cached, after the initial hash has
/// been calculated. This is useful for processes that do not know all the files they
/// depend on ahead of time. For example, a source file that can import other files
/// will need to be recompiled if an imported file is changed.
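/// For example, a compiler invocation can call this for every `@import`ed file it
/// discovers while compiling, so that a change to any of them invalidates the cache
/// entry (see the tests below).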
pub fn addFilePost(self: *Manifest, file_path: []const u8) !void {
assert(self.manifest_file != null);
const resolved_path = try fs.path.resolve(self.cache.gpa, &[_][]const u8{file_path});
errdefer self.cache.gpa.free(resolved_path);
const new_ch_file = try self.files.addOne(self.cache.gpa);
new_ch_file.* = .{
.path = resolved_path,
.max_file_size = null,
.stat = undefined,
.bin_digest = undefined,
.contents = null,
};
errdefer self.files.shrinkRetainingCapacity(self.files.items.len - 1);
try self.populateFileHash(new_ch_file);
}
pub fn addDepFilePost(self: *Manifest, dir: fs.Dir, dep_file_basename: []const u8) !void {
assert(self.manifest_file != null);
const dep_file_contents = try dir.readFileAlloc(self.cache.gpa, dep_file_basename, manifest_file_size_max);
defer self.cache.gpa.free(dep_file_contents);
var error_buf = std.ArrayList(u8).init(self.cache.gpa);
defer error_buf.deinit();
var it: @import("DepTokenizer.zig") = .{ .bytes = dep_file_contents };
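// The dep file is in the Makefile dependency format (as emitted by e.g. `clang -MD`),
// such as "foo.o: foo.c foo.h"; each prerequisite becomes another cache input.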
// Skip first token: target.
switch (it.next() orelse return) { // Empty dep file OK.
.target, .target_must_resolve, .prereq => {},
else => |err| {
try err.printError(error_buf.writer());
std.log.err("failed parsing {}: {}", .{ dep_file_basename, error_buf.items });
return error.InvalidDepFile;
},
}
// Process zero or more prereqs.
// Clang is invoked in single-source mode so we never get more targets.
while (true) {
switch (it.next() orelse return) {
.target, .target_must_resolve => return,
.prereq => |bytes| try self.addFilePost(bytes),
else => |err| {
try err.printError(error_buf.writer());
std.log.err("failed parsing {}: {}", .{ dep_file_basename, error_buf.items });
return error.InvalidDepFile;
},
}
}
}
/// Returns a hex encoded hash of the inputs.
pub fn final(self: *Manifest) [hex_digest_len]u8 {
assert(self.manifest_file != null);
// We don't close the manifest file yet, because we want to
// keep it locked until the API user is done using it.
// We also don't write out the manifest yet, because until
// cache_release is called we still might be working on creating
// the artifacts to cache.
var bin_digest: [bin_digest_len]u8 = undefined;
self.hash.hasher.final(&bin_digest);
var out_digest: [hex_digest_len]u8 = undefined;
_ = std.fmt.bufPrint(&out_digest, "{x}", .{bin_digest}) catch unreachable;
return out_digest;
}
pub fn writeManifest(self: *Manifest) !void {
const manifest_file = self.manifest_file.?;
if (!self.manifest_dirty) return;
var contents = std.ArrayList(u8).init(self.cache.gpa);
defer contents.deinit();
const writer = contents.writer();
var encoded_digest: [hex_digest_len]u8 = undefined;
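// One line per input file: size, inode, mtime (ns), hex digest, path.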
for (self.files.items) |file| {
_ = std.fmt.bufPrint(&encoded_digest, "{x}", .{file.bin_digest}) catch unreachable;
try writer.print("{d} {d} {d} {s} {s}\n", .{
file.stat.size,
file.stat.inode,
file.stat.mtime,
&encoded_digest,
file.path,
});
}
try manifest_file.setEndPos(contents.items.len);
try manifest_file.pwriteAll(contents.items, 0);
self.manifest_dirty = false;
}
/// Obtain only the data needed to maintain a lock on the manifest file.
/// The `Manifest` remains safe to deinit.
/// Don't forget to call `writeManifest` before this!
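///
/// A sketch of the intended call order:
///
/// ```
/// try man.writeManifest();
/// var lock = man.toOwnedLock();
/// man.deinit(); // safe; the lock outlives the Manifest
/// // ... use the cached artifact ...
/// lock.release();
/// ```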
pub fn toOwnedLock(self: *Manifest) Lock {
const manifest_file = self.manifest_file.?;
self.manifest_file = null;
return Lock{ .manifest_file = manifest_file };
}
/// Releases the manifest file and frees any memory the Manifest was using.
/// `Manifest.hit` must be called first.
/// Don't forget to call `writeManifest` before this!
pub fn deinit(self: *Manifest) void {
if (self.manifest_file) |file| {
file.close();
}
for (self.files.items) |*file| {
file.deinit(self.cache.gpa);
}
self.files.deinit(self.cache.gpa);
}
};
fn hashFile(file: fs.File, bin_digest: []u8) !void {
var buf: [1024]u8 = undefined;
var hasher = hasher_init;
while (true) {
const bytes_read = try file.read(&buf);
if (bytes_read == 0) break;
hasher.update(buf[0..bytes_read]);
}
hasher.final(bin_digest);
}
/// If the wall clock time, rounded to the same precision as the
/// mtime, is equal to the mtime, then we cannot rely on this mtime
/// yet. We will instead save an mtime value that indicates the hash
/// must be unconditionally computed.
/// This function recognizes the precision of mtime by looking at trailing
/// zero bits of the seconds and nanoseconds.
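/// For example, if a file is written and hashed within the same second on a
/// filesystem that stores mtime with one-second precision, the rounded wall clock
/// equals the mtime, so a later write within that same second could go undetected;
/// such timestamps are treated as unusable.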
fn isProblematicTimestamp(fs_clock: i128) bool {
const wall_clock = std.time.nanoTimestamp();
// We have to break the nanoseconds into seconds and remainder nanoseconds
// to detect precision of seconds, because looking at the zero bits in base
// 2 would not detect precision of the seconds value.
const fs_sec = @intCast(i64, @divFloor(fs_clock, std.time.ns_per_s));
const fs_nsec = @intCast(i64, @mod(fs_clock, std.time.ns_per_s));
var wall_sec = @intCast(i64, @divFloor(wall_clock, std.time.ns_per_s));
var wall_nsec = @intCast(i64, @mod(wall_clock, std.time.ns_per_s));
// First make all the least significant zero bits in the fs_clock, also zero bits in the wall clock.
if (fs_nsec == 0) {
wall_nsec = 0;
if (fs_sec == 0) {
wall_sec = 0;
} else {
wall_sec &= @as(i64, -1) << @intCast(u6, @ctz(i64, fs_sec));
}
} else {
wall_nsec &= @as(i64, -1) << @intCast(u6, @ctz(i64, fs_nsec));
}
return wall_nsec == fs_nsec and wall_sec == fs_sec;
}
test "cache file and then recall it" {
if (std.Target.current.os.tag == .wasi) {
// https://github.com/ziglang/zig/issues/5437
return error.SkipZigTest;
}
const cwd = fs.cwd();
const temp_file = "test.txt";
const temp_manifest_dir = "temp_manifest_dir";
const ts = std.time.nanoTimestamp();
try cwd.writeFile(temp_file, "Hello, world!\n");
while (isProblematicTimestamp(ts)) {
std.time.sleep(1);
}
var digest1: [hex_digest_len]u8 = undefined;
var digest2: [hex_digest_len]u8 = undefined;
{
var cache = Cache{
.gpa = testing.allocator,
.manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
};
defer cache.manifest_dir.close();
{
var ch = cache.obtain();
defer ch.deinit();
ch.hash.add(true);
ch.hash.add(@as(u16, 1234));
ch.hash.addBytes("1234");
_ = try ch.addFile(temp_file, null);
// There should be nothing in the cache
testing.expectEqual(false, try ch.hit());
digest1 = ch.final();
try ch.writeManifest();
}
{
var ch = cache.obtain();
defer ch.deinit();
ch.hash.add(true);
ch.hash.add(@as(u16, 1234));
ch.hash.addBytes("1234");
_ = try ch.addFile(temp_file, null);
// Cache hit! We just "built" the same file
testing.expect(try ch.hit());
digest2 = ch.final();
try ch.writeManifest();
}
testing.expectEqual(digest1, digest2);
}
try cwd.deleteTree(temp_manifest_dir);
try cwd.deleteFile(temp_file);
}
test "give problematic timestamp" {
var fs_clock = std.time.nanoTimestamp();
// to make it problematic, we make it only accurate to the second
fs_clock = @divTrunc(fs_clock, std.time.ns_per_s);
fs_clock *= std.time.ns_per_s;
testing.expect(isProblematicTimestamp(fs_clock));
}
test "give nonproblematic timestamp" {
testing.expect(!isProblematicTimestamp(std.time.nanoTimestamp() - std.time.ns_per_s));
}
test "check that changing a file makes cache fail" {
if (std.Target.current.os.tag == .wasi) {
// https://github.com/ziglang/zig/issues/5437
return error.SkipZigTest;
}
const cwd = fs.cwd();
const temp_file = "cache_hash_change_file_test.txt";
const temp_manifest_dir = "cache_hash_change_file_manifest_dir";
const original_temp_file_contents = "Hello, world!\n";
const updated_temp_file_contents = "Hello, world; but updated!\n";
try cwd.deleteTree(temp_manifest_dir);
try cwd.deleteTree(temp_file);
const ts = std.time.nanoTimestamp();
try cwd.writeFile(temp_file, original_temp_file_contents);
while (isProblematicTimestamp(ts)) {
std.time.sleep(1);
}
var digest1: [hex_digest_len]u8 = undefined;
var digest2: [hex_digest_len]u8 = undefined;
{
var cache = Cache{
.gpa = testing.allocator,
.manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
};
defer cache.manifest_dir.close();
{
var ch = cache.obtain();
defer ch.deinit();
ch.hash.addBytes("1234");
const temp_file_idx = try ch.addFile(temp_file, 100);
// There should be nothing in the cache
testing.expectEqual(false, try ch.hit());
testing.expect(mem.eql(u8, original_temp_file_contents, ch.files.items[temp_file_idx].contents.?));
digest1 = ch.final();
try ch.writeManifest();
}
try cwd.writeFile(temp_file, updated_temp_file_contents);
{
var ch = cache.obtain();
defer ch.deinit();
ch.hash.addBytes("1234");
const temp_file_idx = try ch.addFile(temp_file, 100);
// A file that we depend on has been updated, so the cache should not contain an entry for it
testing.expectEqual(false, try ch.hit());
// The cache system does not keep the contents of re-hashed input files.
testing.expect(ch.files.items[temp_file_idx].contents == null);
digest2 = ch.final();
try ch.writeManifest();
}
testing.expect(!mem.eql(u8, digest1[0..], digest2[0..]));
}
try cwd.deleteTree(temp_manifest_dir);
try cwd.deleteTree(temp_file);
}
test "no file inputs" {
if (std.Target.current.os.tag == .wasi) {
// https://github.com/ziglang/zig/issues/5437
return error.SkipZigTest;
}
const cwd = fs.cwd();
const temp_manifest_dir = "no_file_inputs_manifest_dir";
defer cwd.deleteTree(temp_manifest_dir) catch {};
var digest1: [hex_digest_len]u8 = undefined;
var digest2: [hex_digest_len]u8 = undefined;
var cache = Cache{
.gpa = testing.allocator,
.manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
};
defer cache.manifest_dir.close();
{
var ch = cache.obtain();
defer ch.deinit();
ch.hash.addBytes("1234");
// There should be nothing in the cache
testing.expectEqual(false, try ch.hit());
digest1 = ch.final();
try ch.writeManifest();
}
{
var ch = cache.obtain();
defer ch.deinit();
ch.hash.addBytes("1234");
testing.expect(try ch.hit());
digest2 = ch.final();
try ch.writeManifest();
}
testing.expectEqual(digest1, digest2);
}
test "Manifest with files added after initial hash work" {
if (std.Target.current.os.tag == .wasi) {
// https://github.com/ziglang/zig/issues/5437
return error.SkipZigTest;
}
const cwd = fs.cwd();
const temp_file1 = "cache_hash_post_file_test1.txt";
const temp_file2 = "cache_hash_post_file_test2.txt";
const temp_manifest_dir = "cache_hash_post_file_manifest_dir";
const ts1 = std.time.nanoTimestamp();
try cwd.writeFile(temp_file1, "Hello, world!\n");
try cwd.writeFile(temp_file2, "Hello world the second!\n");
while (isProblematicTimestamp(ts1)) {
std.time.sleep(1);
}
var digest1: [hex_digest_len]u8 = undefined;
var digest2: [hex_digest_len]u8 = undefined;
var digest3: [hex_digest_len]u8 = undefined;
{
var cache = Cache{
.gpa = testing.allocator,
.manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
};
defer cache.manifest_dir.close();
{
var ch = cache.obtain();
defer ch.deinit();
ch.hash.addBytes("1234");
_ = try ch.addFile(temp_file1, null);
// There should be nothing in the cache
testing.expectEqual(false, try ch.hit());
_ = try ch.addFilePost(temp_file2);
digest1 = ch.final();
try ch.writeManifest();
}
{
var ch = cache.obtain();
defer ch.deinit();
ch.hash.addBytes("1234");
_ = try ch.addFile(temp_file1, null);
testing.expect(try ch.hit());
digest2 = ch.final();
try ch.writeManifest();
}
testing.expect(mem.eql(u8, &digest1, &digest2));
// Modify the file added after initial hash
const ts2 = std.time.nanoTimestamp();
try cwd.writeFile(temp_file2, "Hello world the second, updated\n");
while (isProblematicTimestamp(ts2)) {
std.time.sleep(1);
}
{
var ch = cache.obtain();
defer ch.deinit();
ch.hash.addBytes("1234");
_ = try ch.addFile(temp_file1, null);
// A file that we depend on has been updated, so the cache should not contain an entry for it
testing.expectEqual(false, try ch.hit());
_ = try ch.addFilePost(temp_file2);
digest3 = ch.final();
try ch.writeManifest();
}
testing.expect(!mem.eql(u8, &digest1, &digest3));
}
try cwd.deleteTree(temp_manifest_dir);
try cwd.deleteFile(temp_file1);
try cwd.deleteFile(temp_file2);
}

src/Compilation.zig: new file, 2882 lines (diff suppressed because it is too large).

src/DepTokenizer.zig
@@ -1,361 +1,405 @@
const Tokenizer = @This();
index: usize = 0,
bytes: []const u8,
state: State = .lhs,
const std = @import("std"); const std = @import("std");
const testing = std.testing; const testing = std.testing;
const assert = std.debug.assert;
pub const Tokenizer = struct { pub fn next(self: *Tokenizer) ?Token {
arena: std.heap.ArenaAllocator, var start = self.index;
index: usize, var must_resolve = false;
bytes: []const u8,
error_text: []const u8,
state: State,
pub fn init(allocator: *std.mem.Allocator, bytes: []const u8) Tokenizer {
return Tokenizer{
.arena = std.heap.ArenaAllocator.init(allocator),
.index = 0,
.bytes = bytes,
.error_text = "",
.state = State{ .lhs = {} },
};
}
pub fn deinit(self: *Tokenizer) void {
self.arena.deinit();
}
pub fn next(self: *Tokenizer) Error!?Token {
while (self.index < self.bytes.len) { while (self.index < self.bytes.len) {
const char = self.bytes[self.index]; const char = self.bytes[self.index];
while (true) {
switch (self.state) { switch (self.state) {
.lhs => switch (char) { .lhs => switch (char) {
'\t', '\n', '\r', ' ' => { '\t', '\n', '\r', ' ' => {
// silently ignore whitespace // silently ignore whitespace
break; // advance self.index += 1;
}, },
else => { else => {
self.state = State{ .target = try std.ArrayListSentineled(u8, 0).initSize(&self.arena.allocator, 0) }; start = self.index;
self.state = .target;
}, },
}, },
.target => |*target| switch (char) { .target => switch (char) {
'\t', '\n', '\r', ' ' => { '\t', '\n', '\r', ' ' => {
return self.errorIllegalChar(self.index, char, "invalid target", .{}); return errorIllegalChar(.invalid_target, self.index, char);
}, },
'$' => { '$' => {
self.state = State{ .target_dollar_sign = target.* }; self.state = .target_dollar_sign;
break; // advance self.index += 1;
}, },
'\\' => { '\\' => {
self.state = State{ .target_reverse_solidus = target.* }; self.state = .target_reverse_solidus;
break; // advance self.index += 1;
}, },
':' => { ':' => {
self.state = State{ .target_colon = target.* }; self.state = .target_colon;
break; // advance self.index += 1;
}, },
else => { else => {
try target.append(char); self.index += 1;
break; // advance
}, },
}, },
.target_reverse_solidus => |*target| switch (char) { .target_reverse_solidus => switch (char) {
'\t', '\n', '\r' => { '\t', '\n', '\r' => {
return self.errorIllegalChar(self.index, char, "bad target escape", .{}); return errorIllegalChar(.bad_target_escape, self.index, char);
}, },
' ', '#', '\\' => { ' ', '#', '\\' => {
try target.append(char); must_resolve = true;
self.state = State{ .target = target.* }; self.state = .target;
break; // advance self.index += 1;
}, },
'$' => { '$' => {
try target.appendSlice(self.bytes[self.index - 1 .. self.index]); self.state = .target_dollar_sign;
self.state = State{ .target_dollar_sign = target.* }; self.index += 1;
break; // advance
}, },
else => { else => {
try target.appendSlice(self.bytes[self.index - 1 .. self.index + 1]); self.state = .target;
self.state = State{ .target = target.* }; self.index += 1;
break; // advance
}, },
}, },
.target_dollar_sign => |*target| switch (char) { .target_dollar_sign => switch (char) {
'$' => { '$' => {
try target.append(char); must_resolve = true;
self.state = State{ .target = target.* }; self.state = .target;
break; // advance self.index += 1;
}, },
else => { else => {
return self.errorIllegalChar(self.index, char, "expecting '$'", .{}); return errorIllegalChar(.expected_dollar_sign, self.index, char);
}, },
}, },
.target_colon => |*target| switch (char) { .target_colon => switch (char) {
'\n', '\r' => { '\n', '\r' => {
const bytes = target.span(); const bytes = self.bytes[start .. self.index - 1];
if (bytes.len != 0) { if (bytes.len != 0) {
self.state = State{ .lhs = {} }; self.state = .lhs;
return Token{ .id = .target, .bytes = bytes }; return finishTarget(must_resolve, bytes);
} }
// silently ignore null target // silently ignore null target
self.state = State{ .lhs = {} }; self.state = .lhs;
continue;
}, },
'\\' => { '\\' => {
self.state = State{ .target_colon_reverse_solidus = target.* }; self.state = .target_colon_reverse_solidus;
break; // advance self.index += 1;
}, },
else => { else => {
const bytes = target.span(); const bytes = self.bytes[start .. self.index - 1];
if (bytes.len != 0) { if (bytes.len != 0) {
self.state = State{ .rhs = {} }; self.state = .rhs;
return Token{ .id = .target, .bytes = bytes }; return finishTarget(must_resolve, bytes);
} }
// silently ignore null target // silently ignore null target
self.state = State{ .lhs = {} }; self.state = .lhs;
continue;
}, },
}, },
.target_colon_reverse_solidus => |*target| switch (char) { .target_colon_reverse_solidus => switch (char) {
'\n', '\r' => { '\n', '\r' => {
const bytes = target.span(); const bytes = self.bytes[start .. self.index - 2];
if (bytes.len != 0) { if (bytes.len != 0) {
self.state = State{ .lhs = {} }; self.state = .lhs;
return Token{ .id = .target, .bytes = bytes }; return finishTarget(must_resolve, bytes);
} }
// silently ignore null target // silently ignore null target
self.state = State{ .lhs = {} }; self.state = .lhs;
continue;
}, },
else => { else => {
try target.appendSlice(self.bytes[self.index - 2 .. self.index + 1]); self.state = .target;
self.state = State{ .target = target.* };
break;
}, },
}, },
.rhs => switch (char) { .rhs => switch (char) {
'\t', ' ' => { '\t', ' ' => {
// silently ignore horizontal whitespace // silently ignore horizontal whitespace
break; // advance self.index += 1;
}, },
'\n', '\r' => { '\n', '\r' => {
self.state = State{ .lhs = {} }; self.state = .lhs;
continue;
}, },
'\\' => { '\\' => {
self.state = State{ .rhs_continuation = {} }; self.state = .rhs_continuation;
break; // advance self.index += 1;
}, },
'"' => { '"' => {
self.state = State{ .prereq_quote = try std.ArrayListSentineled(u8, 0).initSize(&self.arena.allocator, 0) }; self.state = .prereq_quote;
break; // advance self.index += 1;
start = self.index;
}, },
else => { else => {
self.state = State{ .prereq = try std.ArrayListSentineled(u8, 0).initSize(&self.arena.allocator, 0) }; start = self.index;
self.state = .prereq;
}, },
}, },
.rhs_continuation => switch (char) { .rhs_continuation => switch (char) {
'\n' => { '\n' => {
self.state = State{ .rhs = {} }; self.state = .rhs;
break; // advance self.index += 1;
}, },
'\r' => { '\r' => {
self.state = State{ .rhs_continuation_linefeed = {} }; self.state = .rhs_continuation_linefeed;
break; // advance self.index += 1;
}, },
else => { else => {
return self.errorIllegalChar(self.index, char, "continuation expecting end-of-line", .{}); return errorIllegalChar(.continuation_eol, self.index, char);
}, },
}, },
.rhs_continuation_linefeed => switch (char) { .rhs_continuation_linefeed => switch (char) {
'\n' => { '\n' => {
self.state = State{ .rhs = {} }; self.state = .rhs;
break; // advance
},
else => {
return self.errorIllegalChar(self.index, char, "continuation expecting end-of-line", .{});
},
},
.prereq_quote => |*prereq| switch (char) {
'"' => {
const bytes = prereq.span();
self.index += 1; self.index += 1;
self.state = State{ .rhs = {} };
return Token{ .id = .prereq, .bytes = bytes };
}, },
else => { else => {
try prereq.append(char); return errorIllegalChar(.continuation_eol, self.index, char);
break; // advance
}, },
}, },
.prereq => |*prereq| switch (char) { .prereq_quote => switch (char) {
'"' => {
self.index += 1;
self.state = .rhs;
return Token{ .prereq = self.bytes[start .. self.index - 1] };
},
else => {
self.index += 1;
},
},
.prereq => switch (char) {
'\t', ' ' => { '\t', ' ' => {
const bytes = prereq.span(); self.state = .rhs;
self.state = State{ .rhs = {} }; return Token{ .prereq = self.bytes[start..self.index] };
return Token{ .id = .prereq, .bytes = bytes };
}, },
'\n', '\r' => { '\n', '\r' => {
const bytes = prereq.span(); self.state = .lhs;
self.state = State{ .lhs = {} }; return Token{ .prereq = self.bytes[start..self.index] };
return Token{ .id = .prereq, .bytes = bytes };
}, },
'\\' => { '\\' => {
self.state = State{ .prereq_continuation = prereq.* }; self.state = .prereq_continuation;
break; // advance self.index += 1;
}, },
else => { else => {
try prereq.append(char);
break; // advance
},
},
.prereq_continuation => |*prereq| switch (char) {
'\n' => {
const bytes = prereq.span();
self.index += 1; self.index += 1;
self.state = State{ .rhs = {} }; },
return Token{ .id = .prereq, .bytes = bytes }; },
.prereq_continuation => switch (char) {
'\n' => {
self.index += 1;
self.state = .rhs;
return Token{ .prereq = self.bytes[start .. self.index - 2] };
}, },
'\r' => { '\r' => {
self.state = State{ .prereq_continuation_linefeed = prereq.* }; self.state = .prereq_continuation_linefeed;
break; // advance self.index += 1;
}, },
else => { else => {
// not continuation // not continuation
try prereq.appendSlice(self.bytes[self.index - 1 .. self.index + 1]); self.state = .prereq;
self.state = State{ .prereq = prereq.* };
break; // advance
},
},
.prereq_continuation_linefeed => |prereq| switch (char) {
'\n' => {
const bytes = prereq.span();
self.index += 1; self.index += 1;
self.state = State{ .rhs = {} }; },
return Token{ .id = .prereq, .bytes = bytes }; },
.prereq_continuation_linefeed => switch (char) {
'\n' => {
self.index += 1;
self.state = .rhs;
return Token{ .prereq = self.bytes[start .. self.index - 1] };
}, },
else => { else => {
return self.errorIllegalChar(self.index, char, "continuation expecting end-of-line", .{}); return errorIllegalChar(.continuation_eol, self.index, char);
}, },
}, },
} }
} } else {
self.index += 1;
}
// eof, handle maybe incomplete token
if (self.index == 0) return null;
const idx = self.index - 1;
switch (self.state) { switch (self.state) {
.lhs, .lhs,
.rhs, .rhs,
.rhs_continuation, .rhs_continuation,
.rhs_continuation_linefeed, .rhs_continuation_linefeed,
=> {}, => return null,
.target => |target| { .target => {
return self.errorPosition(idx, target.span(), "incomplete target", .{}); return errorPosition(.incomplete_target, start, self.bytes[start..]);
}, },
.target_reverse_solidus, .target_reverse_solidus,
.target_dollar_sign, .target_dollar_sign,
=> { => {
const index = self.index - 1; const idx = self.index - 1;
return self.errorIllegalChar(idx, self.bytes[idx], "incomplete escape", .{}); return errorIllegalChar(.incomplete_escape, idx, self.bytes[idx]);
}, },
.target_colon => |target| { .target_colon => {
const bytes = target.span(); const bytes = self.bytes[start .. self.index - 1];
if (bytes.len != 0) { if (bytes.len != 0) {
self.index += 1; self.index += 1;
self.state = State{ .rhs = {} }; self.state = .rhs;
return Token{ .id = .target, .bytes = bytes }; return finishTarget(must_resolve, bytes);
} }
// silently ignore null target // silently ignore null target
self.state = State{ .lhs = {} }; self.state = .lhs;
},
.target_colon_reverse_solidus => |target| {
const bytes = target.span();
if (bytes.len != 0) {
self.index += 1;
self.state = State{ .rhs = {} };
return Token{ .id = .target, .bytes = bytes };
}
// silently ignore null target
self.state = State{ .lhs = {} };
},
.prereq_quote => |prereq| {
return self.errorPosition(idx, prereq.span(), "incomplete quoted prerequisite", .{});
},
.prereq => |prereq| {
const bytes = prereq.span();
self.state = State{ .lhs = {} };
return Token{ .id = .prereq, .bytes = bytes };
},
.prereq_continuation => |prereq| {
const bytes = prereq.span();
self.state = State{ .lhs = {} };
return Token{ .id = .prereq, .bytes = bytes };
},
.prereq_continuation_linefeed => |prereq| {
const bytes = prereq.span();
self.state = State{ .lhs = {} };
return Token{ .id = .prereq, .bytes = bytes };
},
}
return null; return null;
},
.target_colon_reverse_solidus => {
const bytes = self.bytes[start .. self.index - 2];
if (bytes.len != 0) {
self.index += 1;
self.state = .rhs;
return finishTarget(must_resolve, bytes);
}
// silently ignore null target
self.state = .lhs;
return null;
},
.prereq_quote => {
return errorPosition(.incomplete_quoted_prerequisite, start, self.bytes[start..]);
},
.prereq => {
self.state = .lhs;
return Token{ .prereq = self.bytes[start..] };
},
.prereq_continuation => {
self.state = .lhs;
return Token{ .prereq = self.bytes[start .. self.index - 1] };
},
.prereq_continuation_linefeed => {
self.state = .lhs;
return Token{ .prereq = self.bytes[start .. self.index - 2] };
},
}
}
unreachable;
} }
fn errorf(self: *Tokenizer, comptime fmt: []const u8, args: anytype) Error { fn errorPosition(comptime id: @TagType(Token), index: usize, bytes: []const u8) Token {
self.error_text = try std.fmt.allocPrintZ(&self.arena.allocator, fmt, args); return @unionInit(Token, @tagName(id), .{ .index = index, .bytes = bytes });
return Error.InvalidInput;
} }
fn errorPosition(self: *Tokenizer, position: usize, bytes: []const u8, comptime fmt: []const u8, args: anytype) Error { fn errorIllegalChar(comptime id: @TagType(Token), index: usize, char: u8) Token {
var buffer = try std.ArrayListSentineled(u8, 0).initSize(&self.arena.allocator, 0); return @unionInit(Token, @tagName(id), .{ .index = index, .char = char });
try buffer.outStream().print(fmt, args);
try buffer.appendSlice(" '");
var out = makeOutput(std.ArrayListSentineled(u8, 0).appendSlice, &buffer);
try printCharValues(&out, bytes);
try buffer.appendSlice("'");
try buffer.outStream().print(" at position {}", .{position - (bytes.len - 1)});
self.error_text = buffer.span();
return Error.InvalidInput;
} }
fn errorIllegalChar(self: *Tokenizer, position: usize, char: u8, comptime fmt: []const u8, args: anytype) Error { fn finishTarget(must_resolve: bool, bytes: []const u8) Token {
var buffer = try std.ArrayListSentineled(u8, 0).initSize(&self.arena.allocator, 0); return if (must_resolve)
try buffer.appendSlice("illegal char "); .{ .target_must_resolve = bytes }
try printUnderstandableChar(&buffer, char); else
try buffer.outStream().print(" at position {}", .{position}); .{ .target = bytes };
if (fmt.len != 0) try buffer.outStream().print(": " ++ fmt, args);
self.error_text = buffer.span();
return Error.InvalidInput;
} }
const Error = error{ const State = enum {
OutOfMemory, lhs,
InvalidInput,
};
const State = union(enum) {
lhs: void,
target: std.ArrayListSentineled(u8, 0),
target_reverse_solidus: std.ArrayListSentineled(u8, 0),
target_dollar_sign: std.ArrayListSentineled(u8, 0),
target_colon: std.ArrayListSentineled(u8, 0),
target_colon_reverse_solidus: std.ArrayListSentineled(u8, 0),
rhs: void,
rhs_continuation: void,
rhs_continuation_linefeed: void,
prereq_quote: std.ArrayListSentineled(u8, 0),
prereq: std.ArrayListSentineled(u8, 0),
prereq_continuation: std.ArrayListSentineled(u8, 0),
prereq_continuation_linefeed: std.ArrayListSentineled(u8, 0),
};
const Token = struct {
id: ID,
bytes: []const u8,
const ID = enum {
target, target,
target_reverse_solidus,
target_dollar_sign,
target_colon,
target_colon_reverse_solidus,
rhs,
rhs_continuation,
rhs_continuation_linefeed,
prereq_quote,
prereq, prereq,
prereq_continuation,
prereq_continuation_linefeed,
}; };
pub const Token = union(enum) {
target: []const u8,
target_must_resolve: []const u8,
prereq: []const u8,
incomplete_quoted_prerequisite: IndexAndBytes,
incomplete_target: IndexAndBytes,
invalid_target: IndexAndChar,
bad_target_escape: IndexAndChar,
expected_dollar_sign: IndexAndChar,
continuation_eol: IndexAndChar,
incomplete_escape: IndexAndChar,
pub const IndexAndChar = struct {
index: usize,
char: u8,
}; };
pub const IndexAndBytes = struct {
index: usize,
bytes: []const u8,
};
/// Resolve escapes in target. Only valid with .target_must_resolve.
pub fn resolve(self: Token, writer: anytype) @TypeOf(writer).Error!void {
const bytes = self.target_must_resolve; // resolve called on incorrect token
var state: enum { start, escape, dollar } = .start;
for (bytes) |c| {
switch (state) {
.start => {
switch (c) {
'\\' => state = .escape,
'$' => state = .dollar,
else => try writer.writeByte(c),
}
},
.escape => {
switch (c) {
' ', '#', '\\' => {},
'$' => {
try writer.writeByte('\\');
state = .dollar;
continue;
},
else => try writer.writeByte('\\'),
}
try writer.writeByte(c);
state = .start;
},
.dollar => {
try writer.writeByte('$');
switch (c) {
'$' => {},
else => try writer.writeByte(c),
}
state = .start;
},
}
}
}
pub fn printError(self: Token, writer: anytype) @TypeOf(writer).Error!void {
switch (self) {
.target, .target_must_resolve, .prereq => unreachable, // not an error
.incomplete_quoted_prerequisite,
.incomplete_target,
=> |index_and_bytes| {
try writer.print("{} '", .{self.errStr()});
if (self == .incomplete_target) {
const tmp = Token{ .target_must_resolve = index_and_bytes.bytes };
try tmp.resolve(writer);
} else {
try printCharValues(writer, index_and_bytes.bytes);
}
try writer.print("' at position {}", .{index_and_bytes.index});
},
.invalid_target,
.bad_target_escape,
.expected_dollar_sign,
.continuation_eol,
.incomplete_escape,
=> |index_and_char| {
try writer.writeAll("illegal char ");
try printUnderstandableChar(writer, index_and_char.char);
try writer.print(" at position {}: {}", .{ index_and_char.index, self.errStr() });
},
}
}
fn errStr(self: Token) []const u8 {
return switch (self) {
.target, .target_must_resolve, .prereq => unreachable, // not an error
.incomplete_quoted_prerequisite => "incomplete quoted prerequisite",
.incomplete_target => "incomplete target",
.invalid_target => "invalid target",
.bad_target_escape => "bad target escape",
.expected_dollar_sign => "expecting '$'",
.continuation_eol => "continuation expecting end-of-line",
.incomplete_escape => "incomplete escape",
};
}
}; };
test "empty file" { test "empty file" {
@ -750,16 +794,16 @@ test "error incomplete target" {
); );
try depTokenizer("\\ foo.o", try depTokenizer("\\ foo.o",
\\ERROR: incomplete target ' foo.o' at position 1 \\ERROR: incomplete target ' foo.o' at position 0
); );
try depTokenizer("\\#foo.o", try depTokenizer("\\#foo.o",
\\ERROR: incomplete target '#foo.o' at position 1 \\ERROR: incomplete target '#foo.o' at position 0
); );
try depTokenizer("\\\\foo.o", try depTokenizer("\\\\foo.o",
\\ERROR: incomplete target '\foo.o' at position 1 \\ERROR: incomplete target '\foo.o' at position 0
); );
try depTokenizer("$$foo.o", try depTokenizer("$$foo.o",
\\ERROR: incomplete target '$foo.o' at position 1 \\ERROR: incomplete target '$foo.o' at position 0
); );
} }
@ -836,33 +880,40 @@ test "error prereq - continuation expecting end-of-line" {
// - tokenize input, emit textual representation, and compare to expect // - tokenize input, emit textual representation, and compare to expect
fn depTokenizer(input: []const u8, expect: []const u8) !void { fn depTokenizer(input: []const u8, expect: []const u8) !void {
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator); var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
const arena = &arena_allocator.allocator; const arena = &arena_allocator.allocator;
defer arena_allocator.deinit(); defer arena_allocator.deinit();
var it = Tokenizer.init(arena, input); var it: Tokenizer = .{ .bytes = input };
var buffer = try std.ArrayListSentineled(u8, 0).initSize(arena, 0); var buffer = try std.ArrayListSentineled(u8, 0).initSize(arena, 0);
var resolve_buf = std.ArrayList(u8).init(arena);
var i: usize = 0; var i: usize = 0;
while (true) { while (it.next()) |token| {
const r = it.next() catch |err| {
switch (err) {
Tokenizer.Error.InvalidInput => {
if (i != 0) try buffer.appendSlice("\n"); if (i != 0) try buffer.appendSlice("\n");
try buffer.appendSlice("ERROR: "); switch (token) {
try buffer.appendSlice(it.error_text); .target, .prereq => |bytes| {
}, try buffer.appendSlice(@tagName(token));
else => return err,
}
break;
};
const token = r orelse break;
if (i != 0) try buffer.appendSlice("\n");
try buffer.appendSlice(@tagName(token.id));
try buffer.appendSlice(" = {"); try buffer.appendSlice(" = {");
for (token.bytes) |b| { for (bytes) |b| {
try buffer.append(printable_char_tab[b]); try buffer.append(printable_char_tab[b]);
} }
try buffer.appendSlice("}"); try buffer.appendSlice("}");
},
.target_must_resolve => {
try buffer.appendSlice("target = {");
try token.resolve(resolve_buf.writer());
for (resolve_buf.items) |b| {
try buffer.append(printable_char_tab[b]);
}
resolve_buf.items.len = 0;
try buffer.appendSlice("}");
},
else => {
try buffer.appendSlice("ERROR: ");
try token.printError(buffer.outStream());
break;
},
}
i += 1; i += 1;
} }
const got: []const u8 = buffer.span(); const got: []const u8 = buffer.span();
@ -872,13 +923,13 @@ fn depTokenizer(input: []const u8, expect: []const u8) !void {
return; return;
} }
var out = makeOutput(std.fs.File.write, try std.io.getStdErr()); const out = std.io.getStdErr().writer();
try out.write("\n"); try out.writeAll("\n");
try printSection(&out, "<<<< input", input); try printSection(out, "<<<< input", input);
try printSection(&out, "==== expect", expect); try printSection(out, "==== expect", expect);
try printSection(&out, ">>>> got", got); try printSection(out, ">>>> got", got);
try printRuler(&out); try printRuler(out);
testing.expect(false); testing.expect(false);
} }
@ -887,29 +938,29 @@ fn printSection(out: anytype, label: []const u8, bytes: []const u8) !void {
try printLabel(out, label, bytes); try printLabel(out, label, bytes);
try hexDump(out, bytes); try hexDump(out, bytes);
try printRuler(out); try printRuler(out);
try out.write(bytes); try out.writeAll(bytes);
try out.write("\n"); try out.writeAll("\n");
} }
fn printLabel(out: anytype, label: []const u8, bytes: []const u8) !void { fn printLabel(out: anytype, label: []const u8, bytes: []const u8) !void {
var buf: [80]u8 = undefined; var buf: [80]u8 = undefined;
var text = try std.fmt.bufPrint(buf[0..], "{} {} bytes ", .{ label, bytes.len }); var text = try std.fmt.bufPrint(buf[0..], "{} {} bytes ", .{ label, bytes.len });
try out.write(text); try out.writeAll(text);
var i: usize = text.len; var i: usize = text.len;
const end = 79; const end = 79;
while (i < 79) : (i += 1) { while (i < 79) : (i += 1) {
try out.write([_]u8{label[0]}); try out.writeAll(&[_]u8{label[0]});
} }
try out.write("\n"); try out.writeAll("\n");
} }
fn printRuler(out: anytype) !void { fn printRuler(out: anytype) !void {
var i: usize = 0; var i: usize = 0;
const end = 79; const end = 79;
while (i < 79) : (i += 1) { while (i < 79) : (i += 1) {
try out.write("-"); try out.writeAll("-");
} }
try out.write("\n"); try out.writeAll("\n");
} }
fn hexDump(out: anytype, bytes: []const u8) !void { fn hexDump(out: anytype, bytes: []const u8) !void {
@ -924,116 +975,90 @@ fn hexDump(out: anytype, bytes: []const u8) !void {
const n = bytes.len & 0x0f; const n = bytes.len & 0x0f;
if (n > 0) { if (n > 0) {
try printDecValue(out, offset, 8); try printDecValue(out, offset, 8);
try out.write(":"); try out.writeAll(":");
try out.write(" "); try out.writeAll(" ");
var end1 = std.math.min(offset + n, offset + 8); var end1 = std.math.min(offset + n, offset + 8);
for (bytes[offset..end1]) |b| { for (bytes[offset..end1]) |b| {
try out.write(" "); try out.writeAll(" ");
try printHexValue(out, b, 2); try printHexValue(out, b, 2);
} }
var end2 = offset + n; var end2 = offset + n;
if (end2 > end1) { if (end2 > end1) {
try out.write(" "); try out.writeAll(" ");
for (bytes[end1..end2]) |b| { for (bytes[end1..end2]) |b| {
try out.write(" "); try out.writeAll(" ");
try printHexValue(out, b, 2); try printHexValue(out, b, 2);
} }
} }
const short = 16 - n; const short = 16 - n;
var i: usize = 0; var i: usize = 0;
while (i < short) : (i += 1) { while (i < short) : (i += 1) {
try out.write(" "); try out.writeAll(" ");
} }
if (end2 > end1) { if (end2 > end1) {
try out.write(" |"); try out.writeAll(" |");
} else { } else {
try out.write(" |"); try out.writeAll(" |");
} }
try printCharValues(out, bytes[offset..end2]); try printCharValues(out, bytes[offset..end2]);
try out.write("|\n"); try out.writeAll("|\n");
offset += n; offset += n;
} }
try printDecValue(out, offset, 8); try printDecValue(out, offset, 8);
try out.write(":"); try out.writeAll(":");
try out.write("\n"); try out.writeAll("\n");
} }
fn hexDump16(out: anytype, offset: usize, bytes: []const u8) !void { fn hexDump16(out: anytype, offset: usize, bytes: []const u8) !void {
try printDecValue(out, offset, 8); try printDecValue(out, offset, 8);
try out.write(":"); try out.writeAll(":");
try out.write(" "); try out.writeAll(" ");
for (bytes[0..8]) |b| { for (bytes[0..8]) |b| {
try out.write(" "); try out.writeAll(" ");
try printHexValue(out, b, 2); try printHexValue(out, b, 2);
} }
try out.write(" "); try out.writeAll(" ");
for (bytes[8..16]) |b| { for (bytes[8..16]) |b| {
try out.write(" "); try out.writeAll(" ");
try printHexValue(out, b, 2); try printHexValue(out, b, 2);
} }
try out.write(" |"); try out.writeAll(" |");
try printCharValues(out, bytes); try printCharValues(out, bytes);
try out.write("|\n"); try out.writeAll("|\n");
} }
fn printDecValue(out: anytype, value: u64, width: u8) !void { fn printDecValue(out: anytype, value: u64, width: u8) !void {
var buffer: [20]u8 = undefined; var buffer: [20]u8 = undefined;
const len = std.fmt.formatIntBuf(buffer[0..], value, 10, false, width); const len = std.fmt.formatIntBuf(buffer[0..], value, 10, false, .{ .width = width, .fill = '0' });
try out.write(buffer[0..len]); try out.writeAll(buffer[0..len]);
} }
fn printHexValue(out: anytype, value: u64, width: u8) !void { fn printHexValue(out: anytype, value: u64, width: u8) !void {
var buffer: [16]u8 = undefined; var buffer: [16]u8 = undefined;
const len = std.fmt.formatIntBuf(buffer[0..], value, 16, false, width); const len = std.fmt.formatIntBuf(buffer[0..], value, 16, false, .{ .width = width, .fill = '0' });
try out.write(buffer[0..len]); try out.writeAll(buffer[0..len]);
} }
fn printCharValues(out: anytype, bytes: []const u8) !void { fn printCharValues(out: anytype, bytes: []const u8) !void {
for (bytes) |b| { for (bytes) |b| {
try out.write(&[_]u8{printable_char_tab[b]}); try out.writeAll(&[_]u8{printable_char_tab[b]});
} }
} }
fn printUnderstandableChar(buffer: *std.ArrayListSentineled(u8, 0), char: u8) !void { fn printUnderstandableChar(out: anytype, char: u8) !void {
if (!std.ascii.isPrint(char) or char == ' ') { if (!std.ascii.isPrint(char) or char == ' ') {
try buffer.outStream().print("\\x{X:2}", .{char}); try out.print("\\x{X:0>2}", .{char});
} else { } else {
try buffer.appendSlice("'"); try out.print("'{c}'", .{printable_char_tab[char]});
try buffer.append(printable_char_tab[char]);
try buffer.appendSlice("'");
} }
} }
// zig fmt: off // zig fmt: off
const printable_char_tab: []const u8 = const printable_char_tab: [256]u8 = (
"................................ !\"#$%&'()*+,-./0123456789:;<=>?" ++ "................................ !\"#$%&'()*+,-./0123456789:;<=>?" ++
"@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~." ++ "@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~." ++
"................................................................" ++ "................................................................" ++
"................................................................"; "................................................................"
// zig fmt: on ).*;
comptime {
std.debug.assert(printable_char_tab.len == 256);
}
// Make an output var that wraps a context and output function.
// output: must be a function that takes a `self` idiom parameter
// and a bytes parameter
// context: must be that self
fn makeOutput(comptime output: anytype, context: anytype) Output(output, @TypeOf(context)) {
return Output(output, @TypeOf(context)){
.context = context,
};
}
fn Output(comptime output_func: anytype, comptime Context: type) type {
return struct {
context: Context,
pub const output = output_func;
fn write(self: @This(), bytes: []const u8) !void {
try output_func(self.context, bytes);
}
};
}

src/Module.zig
@@ -1,4 +1,6 @@
const Module = @This();
const std = @import("std"); const std = @import("std");
const Compilation = @import("Compilation.zig");
const mem = std.mem; const mem = std.mem;
const Allocator = std.mem.Allocator; const Allocator = std.mem.Allocator;
const ArrayListUnmanaged = std.ArrayListUnmanaged; const ArrayListUnmanaged = std.ArrayListUnmanaged;
@ -14,25 +16,24 @@ const Package = @import("Package.zig");
const link = @import("link.zig"); const link = @import("link.zig");
const ir = @import("ir.zig"); const ir = @import("ir.zig");
const zir = @import("zir.zig"); const zir = @import("zir.zig");
const Module = @This();
const Inst = ir.Inst; const Inst = ir.Inst;
const Body = ir.Body; const Body = ir.Body;
const ast = std.zig.ast; const ast = std.zig.ast;
const trace = @import("tracy.zig").trace; const trace = @import("tracy.zig").trace;
const liveness = @import("liveness.zig");
const astgen = @import("astgen.zig"); const astgen = @import("astgen.zig");
const zir_sema = @import("zir_sema.zig"); const zir_sema = @import("zir_sema.zig");
/// General-purpose allocator. Used for both temporary and long-term storage. /// General-purpose allocator. Used for both temporary and long-term storage.
gpa: *Allocator, gpa: *Allocator,
/// Pointer to externally managed resource. comp: *Compilation,
/// Where our incremental compilation metadata serialization will go.
zig_cache_artifact_directory: Compilation.Directory,
/// Pointer to externally managed resource. `null` if there is no zig file being compiled.
root_pkg: *Package, root_pkg: *Package,
/// Module owns this resource. /// Module owns this resource.
/// The `Scope` is either a `Scope.ZIRModule` or `Scope.File`. /// The `Scope` is either a `Scope.ZIRModule` or `Scope.File`.
root_scope: *Scope, root_scope: *Scope,
bin_file: *link.File,
bin_file_dir: std.fs.Dir,
bin_file_path: []const u8,
/// It's rare for a decl to be exported, so we save memory by having a sparse map of /// It's rare for a decl to be exported, so we save memory by having a sparse map of
/// Decl pointers to details about them being exported. /// Decl pointers to details about them being exported.
/// The Export memory is owned by the `export_owners` table; the slice itself is owned by this table. /// The Export memory is owned by the `export_owners` table; the slice itself is owned by this table.
@ -47,55 +48,42 @@ symbol_exports: std.StringArrayHashMapUnmanaged(*Export) = .{},
export_owners: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{}, export_owners: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
/// Maps fully qualified namespaced names to the Decl struct for them. /// Maps fully qualified namespaced names to the Decl struct for them.
decl_table: std.ArrayHashMapUnmanaged(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql, false) = .{}, decl_table: std.ArrayHashMapUnmanaged(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql, false) = .{},
link_error_flags: link.File.ErrorFlags = .{},
work_queue: std.fifo.LinearFifo(WorkItem, .Dynamic),
/// We optimize memory usage for a compilation with no compile errors by storing the /// We optimize memory usage for a compilation with no compile errors by storing the
/// error messages and mapping outside of `Decl`. /// error messages and mapping outside of `Decl`.
/// The ErrorMsg memory is owned by the decl, using Module's allocator. /// The ErrorMsg memory is owned by the decl, using Module's general purpose allocator.
/// Note that a Decl can succeed but the Fn it represents can fail. In this case, /// Note that a Decl can succeed but the Fn it represents can fail. In this case,
/// a Decl can have a failed_decls entry but have analysis status of success. /// a Decl can have a failed_decls entry but have analysis status of success.
failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{}, failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *Compilation.ErrorMsg) = .{},
/// Using a map here for consistency with the other fields here. /// Using a map here for consistency with the other fields here.
/// The ErrorMsg memory is owned by the `Scope`, using Module's allocator. /// The ErrorMsg memory is owned by the `Scope`, using Module's general purpose allocator.
failed_files: std.AutoArrayHashMapUnmanaged(*Scope, *ErrorMsg) = .{}, failed_files: std.AutoArrayHashMapUnmanaged(*Scope, *Compilation.ErrorMsg) = .{},
/// Using a map here for consistency with the other fields here. /// Using a map here for consistency with the other fields here.
/// The ErrorMsg memory is owned by the `Export`, using Module's allocator. /// The ErrorMsg memory is owned by the `Export`, using Module's general purpose allocator.
failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *ErrorMsg) = .{}, failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *Compilation.ErrorMsg) = .{},
next_anon_name_index: usize = 0,
/// Candidates for deletion. After a semantic analysis update completes, this list
/// contains Decls that need to be deleted if they end up having no references to them.
deletion_set: ArrayListUnmanaged(*Decl) = .{},
/// Error tags and their values, tag names are duped with mod.gpa.
global_error_set: std.StringHashMapUnmanaged(u16) = .{},
/// Incrementing integer used to compare against the corresponding Decl /// Incrementing integer used to compare against the corresponding Decl
/// field to determine whether a Decl's status applies to an ongoing update, or a /// field to determine whether a Decl's status applies to an ongoing update, or a
/// previous analysis. /// previous analysis.
generation: u32 = 0, generation: u32 = 0,
next_anon_name_index: usize = 0, stage1_flags: packed struct {
have_winmain: bool = false,
/// Candidates for deletion. After a semantic analysis update completes, this list have_wwinmain: bool = false,
/// contains Decls that need to be deleted if they end up having no references to them. have_winmain_crt_startup: bool = false,
deletion_set: std.ArrayListUnmanaged(*Decl) = .{}, have_wwinmain_crt_startup: bool = false,
have_dllmain_crt_startup: bool = false,
/// Owned by Module. have_c_main: bool = false,
root_name: []u8, reserved: u2 = 0,
keep_source_files_loaded: bool, } = .{},
/// Error tags and their values, tag names are duped with mod.gpa.
global_error_set: std.StringHashMapUnmanaged(u16) = .{},
pub const InnerError = error{ OutOfMemory, AnalysisFail };
const WorkItem = union(enum) {
/// Write the machine code for a Decl to the output file.
codegen_decl: *Decl,
/// The Decl needs to be analyzed and possibly export itself.
/// It may have already be analyzed, or it may have been determined
/// to be outdated; in this case perform semantic analysis again.
analyze_decl: *Decl,
/// The source file containing the Decl has been updated, and so the
/// Decl may need its line number information updated in the debug info.
update_line_number: *Decl,
};
pub const Export = struct { pub const Export = struct {
options: std.builtin.ExportOptions, options: std.builtin.ExportOptions,
@ -622,7 +610,7 @@ pub const Scope = struct {
pub fn getSource(self: *File, module: *Module) ![:0]const u8 { pub fn getSource(self: *File, module: *Module) ![:0]const u8 {
switch (self.source) { switch (self.source) {
.unloaded => { .unloaded => {
const source = try module.root_pkg.root_src_dir.readFileAllocOptions( const source = try module.root_pkg.root_src_directory.handle.readFileAllocOptions(
module.gpa, module.gpa,
self.sub_file_path, self.sub_file_path,
std.math.maxInt(u32), std.math.maxInt(u32),
@ -720,7 +708,7 @@ pub const Scope = struct {
pub fn getSource(self: *ZIRModule, module: *Module) ![:0]const u8 { pub fn getSource(self: *ZIRModule, module: *Module) ![:0]const u8 {
switch (self.source) { switch (self.source) {
.unloaded => { .unloaded => {
const source = try module.root_pkg.root_src_dir.readFileAllocOptions( const source = try module.root_pkg.root_src_directory.handle.readFileAllocOptions(
module.gpa, module.gpa,
self.sub_file_path, self.sub_file_path,
std.math.maxInt(u32), std.math.maxInt(u32),
@ -818,117 +806,14 @@ pub const Scope = struct {
}; };
}; };
pub const AllErrors = struct { pub const InnerError = error{ OutOfMemory, AnalysisFail };
arena: std.heap.ArenaAllocator.State,
list: []const Message,
pub const Message = struct {
src_path: []const u8,
line: usize,
column: usize,
byte_offset: usize,
msg: []const u8,
};
pub fn deinit(self: *AllErrors, gpa: *Allocator) void {
self.arena.promote(gpa).deinit();
}
fn add(
arena: *std.heap.ArenaAllocator,
errors: *std.ArrayList(Message),
sub_file_path: []const u8,
source: []const u8,
simple_err_msg: ErrorMsg,
) !void {
const loc = std.zig.findLineColumn(source, simple_err_msg.byte_offset);
try errors.append(.{
.src_path = try arena.allocator.dupe(u8, sub_file_path),
.msg = try arena.allocator.dupe(u8, simple_err_msg.msg),
.byte_offset = simple_err_msg.byte_offset,
.line = loc.line,
.column = loc.column,
});
}
};
pub const InitOptions = struct {
target: std.Target,
root_name: []const u8,
root_pkg: *Package,
output_mode: std.builtin.OutputMode,
bin_file_dir: ?std.fs.Dir = null,
bin_file_path: []const u8,
link_mode: ?std.builtin.LinkMode = null,
object_format: ?std.builtin.ObjectFormat = null,
optimize_mode: std.builtin.Mode = .Debug,
keep_source_files_loaded: bool = false,
};
pub fn init(gpa: *Allocator, options: InitOptions) !Module {
const root_name = try gpa.dupe(u8, options.root_name);
errdefer gpa.free(root_name);
const bin_file_dir = options.bin_file_dir orelse std.fs.cwd();
const bin_file = try link.File.openPath(gpa, bin_file_dir, options.bin_file_path, .{
.root_name = root_name,
.root_pkg = options.root_pkg,
.target = options.target,
.output_mode = options.output_mode,
.link_mode = options.link_mode orelse .Static,
.object_format = options.object_format orelse options.target.getObjectFormat(),
.optimize_mode = options.optimize_mode,
});
errdefer bin_file.destroy();
const root_scope = blk: {
if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zig")) {
const root_scope = try gpa.create(Scope.File);
root_scope.* = .{
.sub_file_path = options.root_pkg.root_src_path,
.source = .{ .unloaded = {} },
.contents = .{ .not_available = {} },
.status = .never_loaded,
.root_container = .{
.file_scope = root_scope,
.decls = .{},
},
};
break :blk &root_scope.base;
} else if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zir")) {
const root_scope = try gpa.create(Scope.ZIRModule);
root_scope.* = .{
.sub_file_path = options.root_pkg.root_src_path,
.source = .{ .unloaded = {} },
.contents = .{ .not_available = {} },
.status = .never_loaded,
.decls = .{},
};
break :blk &root_scope.base;
} else {
unreachable;
}
};
return Module{
.gpa = gpa,
.root_name = root_name,
.root_pkg = options.root_pkg,
.root_scope = root_scope,
.bin_file_dir = bin_file_dir,
.bin_file_path = options.bin_file_path,
.bin_file = bin_file,
.work_queue = std.fifo.LinearFifo(WorkItem, .Dynamic).init(gpa),
.keep_source_files_loaded = options.keep_source_files_loaded,
};
}
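A hedged usage sketch of the removed `InitOptions`/`init` entry point (the root name, output path, and `buildExample` helper are invented; `update` is the driver function shown a little further down):

const std = @import("std");
const Module = @import("Module.zig");
const Package = @import("Package.zig");

// Hypothetical caller of the old API; every name here is illustrative.
pub fn buildExample(gpa: *std.mem.Allocator, root_pkg: *Package) !void {
    var module = try Module.init(gpa, .{
        .target = std.Target.current, // assumption: compile for the host
        .root_name = "example", // invented project name
        .root_pkg = root_pkg,
        .output_mode = .Exe,
        .bin_file_path = "example", // invented output path
        // remaining options fall back to the defaults listed above
    });
    defer module.deinit();
    try module.update();
}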
pub fn deinit(self: *Module) void {
self.bin_file.destroy();
const gpa = self.gpa;
self.gpa.free(self.root_name);
self.zig_cache_artifact_directory.handle.close();
self.deletion_set.deinit(gpa);
self.work_queue.deinit();
for (self.decl_table.items()) |entry| {
entry.value.destroy(gpa);
@ -969,7 +854,6 @@ pub fn deinit(self: *Module) void {
gpa.free(entry.key);
}
self.global_error_set.deinit(gpa);
self.* = undefined;
}
fn freeExportList(gpa: *Allocator, export_list: []*Export) void {
@ -980,204 +864,6 @@ fn freeExportList(gpa: *Allocator, export_list: []*Export) void {
gpa.free(export_list);
}
pub fn target(self: Module) std.Target {
return self.bin_file.options.target;
}
pub fn optimizeMode(self: Module) std.builtin.Mode {
return self.bin_file.options.optimize_mode;
}
/// Detect changes to source files, perform semantic analysis, and update the output files.
pub fn update(self: *Module) !void {
const tracy = trace(@src());
defer tracy.end();
self.generation += 1;
// TODO Use the cache hash file system to detect which source files changed.
// Until then we simulate a full cache miss. Source files could have been loaded for any reason;
// to force a refresh we unload now.
if (self.root_scope.cast(Scope.File)) |zig_file| {
zig_file.unload(self.gpa);
self.analyzeContainer(&zig_file.root_container) catch |err| switch (err) {
error.AnalysisFail => {
assert(self.totalErrorCount() != 0);
},
else => |e| return e,
};
} else if (self.root_scope.cast(Scope.ZIRModule)) |zir_module| {
zir_module.unload(self.gpa);
self.analyzeRootZIRModule(zir_module) catch |err| switch (err) {
error.AnalysisFail => {
assert(self.totalErrorCount() != 0);
},
else => |e| return e,
};
}
try self.performAllTheWork();
// Process the deletion set.
while (self.deletion_set.popOrNull()) |decl| {
if (decl.dependants.items().len != 0) {
decl.deletion_flag = false;
continue;
}
try self.deleteDecl(decl);
}
// This is needed before reading the error flags.
try self.bin_file.flush(self);
self.link_error_flags = self.bin_file.errorFlags();
// If there are any errors, we anticipate the source files being loaded
// to report error messages. Otherwise we unload all source files to save memory.
if (self.totalErrorCount() == 0 and !self.keep_source_files_loaded) {
self.root_scope.unload(self.gpa);
}
}
/// Having the file open for writing is problematic as far as executing the
/// binary is concerned. This will remove the write flag, or close the file,
/// or whatever is needed so that it can be executed.
/// After this, one must call `makeBinFileWritable` before calling `update`.
pub fn makeBinFileExecutable(self: *Module) !void {
return self.bin_file.makeExecutable();
}
pub fn makeBinFileWritable(self: *Module) !void {
return self.bin_file.makeWritable(self.bin_file_dir, self.bin_file_path);
}
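The two calls above form a small protocol around executing the produced binary; a minimal sketch of the intended order, assuming an already-initialized `module` (the run step itself is elided, and `rebuildAndRun` is a made-up helper):

const Module = @import("Module.zig");

// Hypothetical incremental loop built only from the calls shown above.
fn rebuildAndRun(module: *Module) !void {
    try module.update(); // write or refresh the output binary
    try module.makeBinFileExecutable(); // give up the write handle so the OS will run it
    // ... spawn and wait on the binary here (elided) ...
    try module.makeBinFileWritable(); // reopen for writing before the next update
    try module.update();
}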
pub fn totalErrorCount(self: *Module) usize {
const total = self.failed_decls.items().len +
self.failed_files.items().len +
self.failed_exports.items().len;
return if (total == 0) @boolToInt(self.link_error_flags.no_entry_point_found) else total;
}
pub fn getAllErrorsAlloc(self: *Module) !AllErrors {
var arena = std.heap.ArenaAllocator.init(self.gpa);
errdefer arena.deinit();
var errors = std.ArrayList(AllErrors.Message).init(self.gpa);
defer errors.deinit();
for (self.failed_files.items()) |entry| {
const scope = entry.key;
const err_msg = entry.value;
const source = try scope.getSource(self);
try AllErrors.add(&arena, &errors, scope.subFilePath(), source, err_msg.*);
}
for (self.failed_decls.items()) |entry| {
const decl = entry.key;
const err_msg = entry.value;
const source = try decl.scope.getSource(self);
try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*);
}
for (self.failed_exports.items()) |entry| {
const decl = entry.key.owner_decl;
const err_msg = entry.value;
const source = try decl.scope.getSource(self);
try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*);
}
if (errors.items.len == 0 and self.link_error_flags.no_entry_point_found) {
try errors.append(.{
.src_path = self.root_pkg.root_src_path,
.line = 0,
.column = 0,
.byte_offset = 0,
.msg = try std.fmt.allocPrint(&arena.allocator, "no entry point found", .{}),
});
}
assert(errors.items.len == self.totalErrorCount());
return AllErrors{
.list = try arena.allocator.dupe(AllErrors.Message, errors.items),
.arena = arena.state,
};
}
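A minimal consumer sketch for `getAllErrorsAlloc` and the `AllErrors.Message` fields shown earlier; `printAllErrors`, `gpa`, and `module` are placeholders, not part of this codebase:

const std = @import("std");
const Module = @import("Module.zig");

// Placeholder helper; `gpa` and `module` are assumed to exist in the caller.
fn printAllErrors(gpa: *std.mem.Allocator, module: *Module) !void {
    var all_errors = try module.getAllErrorsAlloc();
    defer all_errors.deinit(gpa);
    for (all_errors.list) |msg| {
        // line and column are 0-based, so add 1 for human-readable output.
        std.debug.print("{}:{}:{}: error: {}\n", .{
            msg.src_path, msg.line + 1, msg.column + 1, msg.msg,
        });
    }
}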
pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
while (self.work_queue.readItem()) |work_item| switch (work_item) {
.codegen_decl => |decl| switch (decl.analysis) {
.unreferenced => unreachable,
.in_progress => unreachable,
.outdated => unreachable,
.sema_failure,
.codegen_failure,
.dependency_failure,
.sema_failure_retryable,
=> continue,
.complete, .codegen_failure_retryable => {
if (decl.typed_value.most_recent.typed_value.val.cast(Value.Payload.Function)) |payload| {
switch (payload.func.analysis) {
.queued => self.analyzeFnBody(decl, payload.func) catch |err| switch (err) {
error.AnalysisFail => {
assert(payload.func.analysis != .in_progress);
continue;
},
error.OutOfMemory => return error.OutOfMemory,
},
.in_progress => unreachable,
.sema_failure, .dependency_failure => continue,
.success => {},
}
// Here we tack on additional allocations to the Decl's arena. The allocations are
// lifetime annotations in the ZIR.
var decl_arena = decl.typed_value.most_recent.arena.?.promote(self.gpa);
defer decl.typed_value.most_recent.arena.?.* = decl_arena.state;
log.debug("analyze liveness of {}\n", .{decl.name});
try liveness.analyze(self.gpa, &decl_arena.allocator, payload.func.analysis.success);
}
assert(decl.typed_value.most_recent.typed_value.ty.hasCodeGenBits());
self.bin_file.updateDecl(self, decl) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
decl.analysis = .dependency_failure;
},
else => {
try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
self.gpa,
decl.src(),
"unable to codegen: {}",
.{@errorName(err)},
));
decl.analysis = .codegen_failure_retryable;
},
};
},
},
.analyze_decl => |decl| {
self.ensureDeclAnalyzed(decl) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => continue,
};
},
.update_line_number => |decl| {
self.bin_file.updateDeclLineNumber(self, decl) catch |err| {
try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
self.gpa,
decl.src(),
"unable to update line number: {}",
.{@errorName(err)},
));
decl.analysis = .codegen_failure_retryable;
};
},
};
}
pub fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void {
const tracy = trace(@src());
defer tracy.end();
@ -1227,7 +913,7 @@ pub fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void {
error.AnalysisFail => return error.AnalysisFail,
else => {
try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
- self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
+ self.failed_decls.putAssumeCapacityNoClobber(decl, try Compilation.ErrorMsg.create(
self.gpa,
decl.src(),
"unable to analyze: {}",
@ -1457,10 +1143,10 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
// We don't fully codegen the decl until later, but we do need to reserve a global
// offset table index for it. This allows us to codegen decls out of dependency order,
// increasing how many computations can be done in parallel.
- try self.bin_file.allocateDeclIndexes(decl);
- try self.work_queue.writeItem(.{ .codegen_decl = decl });
+ try self.comp.bin_file.allocateDeclIndexes(decl);
+ try self.comp.work_queue.writeItem(.{ .codegen_decl = decl });
} else if (prev_type_has_bits) {
- self.bin_file.freeDecl(decl);
+ self.comp.bin_file.freeDecl(decl);
}
if (fn_proto.getExternExportInlineToken()) |maybe_export_token| {
@ -1708,7 +1394,7 @@ fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module {
if (zir_module.error_msg) |src_err_msg| {
self.failed_files.putAssumeCapacityNoClobber(
&root_scope.base,
- try ErrorMsg.create(self.gpa, src_err_msg.byte_offset, "{}", .{src_err_msg.msg}),
+ try Compilation.ErrorMsg.create(self.gpa, src_err_msg.byte_offset, "{}", .{src_err_msg.msg}),
);
root_scope.status = .unloaded_parse_failure;
return error.AnalysisFail;
@ -1752,7 +1438,7 @@ fn getAstTree(self: *Module, container_scope: *Scope.Container) !*ast.Tree {
defer msg.deinit();
try parse_err.render(tree.token_ids, msg.outStream());
- const err_msg = try self.gpa.create(ErrorMsg);
+ const err_msg = try self.gpa.create(Compilation.ErrorMsg);
err_msg.* = .{
.msg = msg.toOwnedSlice(),
.byte_offset = tree.token_locs[parse_err.loc()].start,
@ -1776,7 +1462,7 @@ fn getAstTree(self: *Module, container_scope: *Scope.Container) !*ast.Tree {
}
}
- fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
+ pub fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
const tracy = trace(@src());
defer tracy.end();
@ -1785,7 +1471,7 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
const tree = try self.getAstTree(container_scope);
const decls = tree.root_node.decls();
- try self.work_queue.ensureUnusedCapacity(decls.len);
+ try self.comp.work_queue.ensureUnusedCapacity(decls.len);
try container_scope.decls.ensureCapacity(self.gpa, decls.len);
// Keep track of the decls that we expect to see in this file so that
@ -1814,21 +1500,21 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
decl.src_index = decl_i;
if (deleted_decls.remove(decl) == null) {
decl.analysis = .sema_failure;
- const err_msg = try ErrorMsg.create(self.gpa, tree.token_locs[name_tok].start, "redefinition of '{}'", .{decl.name});
+ const err_msg = try Compilation.ErrorMsg.create(self.gpa, tree.token_locs[name_tok].start, "redefinition of '{}'", .{decl.name});
errdefer err_msg.destroy(self.gpa);
try self.failed_decls.putNoClobber(self.gpa, decl, err_msg);
} else {
if (!srcHashEql(decl.contents_hash, contents_hash)) {
try self.markOutdatedDecl(decl);
decl.contents_hash = contents_hash;
- } else switch (self.bin_file.tag) {
+ } else switch (self.comp.bin_file.tag) {
.coff => {
// TODO Implement for COFF
},
.elf => if (decl.fn_link.elf.len != 0) {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
- self.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
+ self.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
},
.macho => {
// TODO Implement for MachO
@ -1841,7 +1527,7 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
container_scope.decls.putAssumeCapacity(new_decl, {});
if (fn_proto.getExternExportInlineToken()) |maybe_export_token| {
if (tree.token_ids[maybe_export_token] == .Keyword_export) {
- self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
+ self.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
}
}
}
@ -1856,7 +1542,7 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
decl.src_index = decl_i;
if (deleted_decls.remove(decl) == null) {
decl.analysis = .sema_failure;
- const err_msg = try ErrorMsg.create(self.gpa, name_loc.start, "redefinition of '{}'", .{decl.name});
+ const err_msg = try Compilation.ErrorMsg.create(self.gpa, name_loc.start, "redefinition of '{}'", .{decl.name});
errdefer err_msg.destroy(self.gpa);
try self.failed_decls.putNoClobber(self.gpa, decl, err_msg);
} else if (!srcHashEql(decl.contents_hash, contents_hash)) {
@ -1868,7 +1554,7 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
container_scope.decls.putAssumeCapacity(new_decl, {});
if (var_decl.getExternExportToken()) |maybe_export_token| {
if (tree.token_ids[maybe_export_token] == .Keyword_export) {
- self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
+ self.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
}
}
}
@ -1882,7 +1568,7 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
container_scope.decls.putAssumeCapacity(new_decl, {});
- self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
+ self.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
} else if (src_decl.castTag(.ContainerField)) |container_field| {
log.err("TODO: analyze container field", .{});
} else if (src_decl.castTag(.TestDecl)) |test_decl| {
@ -1901,12 +1587,12 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
}
}
- fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void {
+ pub fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void {
// We may be analyzing it for the first time, or this may be
// an incremental update. This code handles both cases.
const src_module = try self.getSrcModule(root_scope);
- try self.work_queue.ensureUnusedCapacity(src_module.decls.len);
+ try self.comp.work_queue.ensureUnusedCapacity(src_module.decls.len);
try root_scope.decls.ensureCapacity(self.gpa, src_module.decls.len);
var exports_to_resolve = std.ArrayList(*zir.Decl).init(self.gpa);
@ -1954,7 +1640,7 @@ fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void {
}
}
- fn deleteDecl(self: *Module, decl: *Decl) !void {
+ pub fn deleteDecl(self: *Module, decl: *Decl) !void {
try self.deletion_set.ensureCapacity(self.gpa, self.deletion_set.items.len + decl.dependencies.items().len);
// Remove from the namespace it resides in. In the case of an anonymous Decl it will
@ -1988,7 +1674,7 @@ fn deleteDecl(self: *Module, decl: *Decl) !void {
entry.value.destroy(self.gpa);
}
self.deleteDeclExports(decl);
- self.bin_file.freeDecl(decl);
+ self.comp.bin_file.freeDecl(decl);
decl.destroy(self.gpa);
}
@ -2016,7 +1702,7 @@ fn deleteDeclExports(self: *Module, decl: *Decl) void {
self.decl_exports.removeAssertDiscard(exp.exported_decl);
}
}
- if (self.bin_file.cast(link.File.Elf)) |elf| {
+ if (self.comp.bin_file.cast(link.File.Elf)) |elf| {
elf.deleteExport(exp.link);
}
if (self.failed_exports.remove(exp)) |entry| {
@ -2029,7 +1715,7 @@ fn deleteDeclExports(self: *Module, decl: *Decl) void {
self.gpa.free(kv.value);
}
- fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
+ pub fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
const tracy = trace(@src());
defer tracy.end();
@ -2060,7 +1746,7 @@ fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
fn markOutdatedDecl(self: *Module, decl: *Decl) !void {
log.debug("mark {} outdated\n", .{decl.name});
- try self.work_queue.writeItem(.{ .analyze_decl = decl });
+ try self.comp.work_queue.writeItem(.{ .analyze_decl = decl });
if (self.failed_decls.remove(decl)) |entry| {
entry.value.destroy(self.gpa);
}
@ -2082,14 +1768,14 @@ fn allocateNewDecl(
.analysis = .unreferenced,
.deletion_flag = false,
.contents_hash = contents_hash,
- .link = switch (self.bin_file.tag) {
+ .link = switch (self.comp.bin_file.tag) {
.coff => .{ .coff = link.File.Coff.TextBlock.empty },
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.TextBlock.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = {} },
},
- .fn_link = switch (self.bin_file.tag) {
+ .fn_link = switch (self.comp.bin_file.tag) {
.coff => .{ .coff = {} },
.elf => .{ .elf = link.File.Elf.SrcFn.empty },
.macho => .{ .macho = link.File.MachO.SrcFn.empty },
@ -2206,7 +1892,7 @@ pub fn analyzeExport(self: *Module, scope: *Scope, src: usize, borrowed_symbol_n
if (self.symbol_exports.get(symbol_name)) |_| {
try self.failed_exports.ensureCapacity(self.gpa, self.failed_exports.items().len + 1);
- self.failed_exports.putAssumeCapacityNoClobber(new_export, try ErrorMsg.create(
+ self.failed_exports.putAssumeCapacityNoClobber(new_export, try Compilation.ErrorMsg.create(
self.gpa,
src,
"exported symbol collision: {}",
@ -2218,11 +1904,11 @@ pub fn analyzeExport(self: *Module, scope: *Scope, src: usize, borrowed_symbol_n
}
try self.symbol_exports.putNoClobber(self.gpa, symbol_name, new_export);
- self.bin_file.updateDeclExports(self, exported_decl, de_gop.entry.value) catch |err| switch (err) {
+ self.comp.bin_file.updateDeclExports(self, exported_decl, de_gop.entry.value) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => {
try self.failed_exports.ensureCapacity(self.gpa, self.failed_exports.items().len + 1);
- self.failed_exports.putAssumeCapacityNoClobber(new_export, try ErrorMsg.create(
+ self.failed_exports.putAssumeCapacityNoClobber(new_export, try Compilation.ErrorMsg.create(
self.gpa,
src,
"unable to export: {}",
@ -2502,8 +2188,8 @@ pub fn createAnonymousDecl(
// We should be able to further improve the compiler to not omit Decls which are only referenced at
// compile-time and not runtime.
if (typed_value.ty.hasCodeGenBits()) {
- try self.bin_file.allocateDeclIndexes(new_decl);
- try self.work_queue.writeItem(.{ .codegen_decl = new_decl });
+ try self.comp.bin_file.allocateDeclIndexes(new_decl);
+ try self.comp.work_queue.writeItem(.{ .codegen_decl = new_decl });
}
return new_decl;
@ -2756,7 +2442,7 @@ pub fn cmpNumeric(
} else if (rhs_ty_tag == .ComptimeFloat) {
break :x lhs.ty;
}
- if (lhs.ty.floatBits(self.target()) >= rhs.ty.floatBits(self.target())) {
+ if (lhs.ty.floatBits(self.getTarget()) >= rhs.ty.floatBits(self.getTarget())) {
break :x lhs.ty;
} else {
break :x rhs.ty;
@ -2815,7 +2501,7 @@ pub fn cmpNumeric(
} else if (lhs_is_float) {
dest_float_type = lhs.ty;
} else {
- const int_info = lhs.ty.intInfo(self.target());
+ const int_info = lhs.ty.intInfo(self.getTarget());
lhs_bits = int_info.bits + @boolToInt(!int_info.signed and dest_int_is_signed);
}
@ -2850,7 +2536,7 @@ pub fn cmpNumeric(
} else if (rhs_is_float) {
dest_float_type = rhs.ty;
} else {
- const int_info = rhs.ty.intInfo(self.target());
+ const int_info = rhs.ty.intInfo(self.getTarget());
rhs_bits = int_info.bits + @boolToInt(!int_info.signed and dest_int_is_signed);
}
@ -2915,13 +2601,13 @@ pub fn resolvePeerTypes(self: *Module, scope: *Scope, instructions: []*Inst) !Ty
next_inst.ty.isInt() and
prev_inst.ty.isSignedInt() == next_inst.ty.isSignedInt())
{
- if (prev_inst.ty.intInfo(self.target()).bits < next_inst.ty.intInfo(self.target()).bits) {
+ if (prev_inst.ty.intInfo(self.getTarget()).bits < next_inst.ty.intInfo(self.getTarget()).bits) {
prev_inst = next_inst;
}
continue;
}
if (prev_inst.ty.isFloat() and next_inst.ty.isFloat()) {
- if (prev_inst.ty.floatBits(self.target()) < next_inst.ty.floatBits(self.target())) {
+ if (prev_inst.ty.floatBits(self.getTarget()) < next_inst.ty.floatBits(self.getTarget())) {
prev_inst = next_inst;
}
continue;
@ -2989,8 +2675,8 @@ pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst
if (inst.ty.zigTypeTag() == .Int and dest_type.zigTypeTag() == .Int) {
assert(inst.value() == null); // handled above
- const src_info = inst.ty.intInfo(self.target());
- const dst_info = dest_type.intInfo(self.target());
+ const src_info = inst.ty.intInfo(self.getTarget());
+ const dst_info = dest_type.intInfo(self.getTarget());
if ((src_info.signed == dst_info.signed and dst_info.bits >= src_info.bits) or
// small enough unsigned ints can get casted to large enough signed ints
(src_info.signed and !dst_info.signed and dst_info.bits > src_info.bits))
@ -3004,8 +2690,8 @@ pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst
if (inst.ty.zigTypeTag() == .Float and dest_type.zigTypeTag() == .Float) {
assert(inst.value() == null); // handled above
- const src_bits = inst.ty.floatBits(self.target());
- const dst_bits = dest_type.floatBits(self.target());
+ const src_bits = inst.ty.floatBits(self.getTarget());
+ const dst_bits = dest_type.floatBits(self.getTarget());
if (dst_bits >= src_bits) {
const b = try self.requireRuntimeBlock(scope, inst.src);
return self.addUnOp(b, inst.src, dest_type, .floatcast, inst);
@ -3027,14 +2713,14 @@ pub fn coerceNum(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !?*
}
return self.fail(scope, inst.src, "TODO float to int", .{});
} else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
- if (!val.intFitsInType(dest_type, self.target())) {
+ if (!val.intFitsInType(dest_type, self.getTarget())) {
return self.fail(scope, inst.src, "type {} cannot represent integer value {}", .{ inst.ty, val });
}
return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
}
} else if (dst_zig_tag == .ComptimeFloat or dst_zig_tag == .Float) {
if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
- const res = val.floatCast(scope.arena(), dest_type, self.target()) catch |err| switch (err) {
+ const res = val.floatCast(scope.arena(), dest_type, self.getTarget()) catch |err| switch (err) {
error.Overflow => return self.fail(
scope,
inst.src,
@ -3087,7 +2773,7 @@ fn coerceArrayPtrToSlice(self: *Module, scope: *Scope, dest_type: Type, inst: *I
pub fn fail(self: *Module, scope: *Scope, src: usize, comptime format: []const u8, args: anytype) InnerError {
@setCold(true);
- const err_msg = try ErrorMsg.create(self.gpa, src, format, args);
+ const err_msg = try Compilation.ErrorMsg.create(self.gpa, src, format, args);
return self.failWithOwnedErrorMsg(scope, src, err_msg);
}
@ -3115,7 +2801,7 @@ pub fn failNode(
return self.fail(scope, src, format, args);
}
- fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *ErrorMsg) InnerError {
+ fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Compilation.ErrorMsg) InnerError {
{
errdefer err_msg.destroy(self.gpa);
try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
@ -3181,36 +2867,6 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult
return .no_match;
}
pub const ErrorMsg = struct {
byte_offset: usize,
msg: []const u8,
pub fn create(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: anytype) !*ErrorMsg {
const self = try gpa.create(ErrorMsg);
errdefer gpa.destroy(self);
self.* = try init(gpa, byte_offset, format, args);
return self;
}
/// Assumes the ErrorMsg struct and msg were both allocated with allocator.
pub fn destroy(self: *ErrorMsg, gpa: *Allocator) void {
self.deinit(gpa);
gpa.destroy(self);
}
pub fn init(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: anytype) !ErrorMsg {
return ErrorMsg{
.byte_offset = byte_offset,
.msg = try std.fmt.allocPrint(gpa, format, args),
};
}
pub fn deinit(self: *ErrorMsg, gpa: *Allocator) void {
gpa.free(self.msg);
self.* = undefined;
}
};
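A short sketch of the ownership contract documented on `create`/`destroy` above; the byte offset, format string, and `reportCollision` helper are made up for illustration:

const std = @import("std");

// Illustrative only; `gpa` is any *std.mem.Allocator and 42 is an invented byte offset.
fn reportCollision(gpa: *std.mem.Allocator, name: []const u8) !void {
    const err_msg = try ErrorMsg.create(gpa, 42, "exported symbol collision: {}", .{name});
    defer err_msg.destroy(gpa); // frees the formatted message and the struct itself
    std.debug.print("error at byte {}: {}\n", .{ err_msg.byte_offset, err_msg.msg });
}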
fn srcHashEql(a: std.zig.SrcHash, b: std.zig.SrcHash) bool {
return @bitCast(u128, a) == @bitCast(u128, b);
}
@ -3274,7 +2930,7 @@ pub fn intSub(allocator: *Allocator, lhs: Value, rhs: Value) !Value {
pub fn floatAdd(self: *Module, scope: *Scope, float_type: Type, src: usize, lhs: Value, rhs: Value) !Value {
var bit_count = switch (float_type.tag()) {
.comptime_float => 128,
- else => float_type.floatBits(self.target()),
+ else => float_type.floatBits(self.getTarget()),
};
const allocator = scope.arena();
@ -3308,7 +2964,7 @@ pub fn floatAdd(self: *Module, scope: *Scope, float_type: Type, src: usize, lhs:
pub fn floatSub(self: *Module, scope: *Scope, float_type: Type, src: usize, lhs: Value, rhs: Value) !Value {
var bit_count = switch (float_type.tag()) {
.comptime_float => 128,
- else => float_type.floatBits(self.target()),
+ else => float_type.floatBits(self.getTarget()),
};
const allocator = scope.arena();
@ -3579,3 +3235,11 @@ pub fn safetyPanic(mod: *Module, block: *Scope.Block, src: usize, panic_id: Pani
_ = try mod.addNoOp(block, src, Type.initTag(.void), .breakpoint);
return mod.addNoOp(block, src, Type.initTag(.noreturn), .unreach);
}
pub fn getTarget(self: Module) Target {
return self.comp.bin_file.options.target;
}
pub fn optimizeMode(self: Module) std.builtin.Mode {
return self.comp.bin_file.options.optimize_mode;
}

src/Package.zig Normal file

@ -0,0 +1,62 @@
pub const Table = std.StringHashMapUnmanaged(*Package);
root_src_directory: Compilation.Directory,
/// Relative to `root_src_directory`. May contain path separators.
root_src_path: []const u8,
table: Table = .{},
parent: ?*Package = null,
const std = @import("std");
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const Package = @This();
const Compilation = @import("Compilation.zig");
/// No references to `root_src_dir` and `root_src_path` are kept.
pub fn create(
gpa: *Allocator,
base_directory: Compilation.Directory,
/// Relative to `base_directory`.
root_src_dir: []const u8,
/// Relative to `root_src_dir`.
root_src_path: []const u8,
) !*Package {
const ptr = try gpa.create(Package);
errdefer gpa.destroy(ptr);
const root_src_dir_path = try base_directory.join(gpa, &[_][]const u8{root_src_dir});
errdefer gpa.free(root_src_dir_path);
const root_src_path_dupe = try mem.dupe(gpa, u8, root_src_path);
errdefer gpa.free(root_src_path_dupe);
ptr.* = .{
.root_src_directory = .{
.path = root_src_dir_path,
.handle = try base_directory.handle.openDir(root_src_dir, .{}),
},
.root_src_path = root_src_path_dupe,
};
return ptr;
}
pub fn destroy(pkg: *Package, gpa: *Allocator) void {
pkg.root_src_directory.handle.close();
gpa.free(pkg.root_src_path);
if (pkg.root_src_directory.path) |p| gpa.free(p);
{
var it = pkg.table.iterator();
while (it.next()) |kv| {
gpa.free(kv.key);
}
}
pkg.table.deinit(gpa);
gpa.destroy(pkg);
}
pub fn add(pkg: *Package, gpa: *Allocator, name: []const u8, package: *Package) !void {
try pkg.table.ensureCapacity(gpa, pkg.table.items().len + 1);
const name_dupe = try mem.dupe(gpa, u8, name);
pkg.table.putAssumeCapacityNoClobber(name_dupe, package);
}
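A hedged usage sketch for the new `Package` API above; the directory layout and package names are invented, and the direct construction of a `Compilation.Directory` with a null `path` is an assumption about that type:

const std = @import("std");
const Package = @import("Package.zig");
const Compilation = @import("Compilation.zig");

pub fn examplePackages(gpa: *std.mem.Allocator) !void {
    // Assumption: Compilation.Directory is a (path, handle) pair that accepts a null path.
    const cwd: Compilation.Directory = .{ .path = null, .handle = std.fs.cwd() };

    // Invented layout: ./src/main.zig plus a dependency at ./deps/foo/foo.zig.
    const root_pkg = try Package.create(gpa, cwd, "src", "main.zig");
    // destroy() closes the handle and frees the duped strings and table keys,
    // but it does not destroy child packages, so foo_pkg gets its own defer.
    defer root_pkg.destroy(gpa);

    const foo_pkg = try Package.create(gpa, cwd, "deps/foo", "foo.zig");
    defer foo_pkg.destroy(gpa);

    try root_pkg.add(gpa, "foo", foo_pkg);
}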


@ -1,196 +0,0 @@
/*
BLAKE2 reference source code package - reference C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#ifndef BLAKE2_H
#define BLAKE2_H
#include <stddef.h>
#include <stdint.h>
#if defined(_MSC_VER)
#define BLAKE2_PACKED(x) __pragma(pack(push, 1)) x __pragma(pack(pop))
#else
#define BLAKE2_PACKED(x) x __attribute__((packed))
#endif
#if defined(__cplusplus)
extern "C" {
#endif
enum blake2s_constant
{
BLAKE2S_BLOCKBYTES = 64,
BLAKE2S_OUTBYTES = 32,
BLAKE2S_KEYBYTES = 32,
BLAKE2S_SALTBYTES = 8,
BLAKE2S_PERSONALBYTES = 8
};
enum blake2b_constant
{
BLAKE2B_BLOCKBYTES = 128,
BLAKE2B_OUTBYTES = 64,
BLAKE2B_KEYBYTES = 64,
BLAKE2B_SALTBYTES = 16,
BLAKE2B_PERSONALBYTES = 16
};
typedef struct blake2s_state__
{
uint32_t h[8];
uint32_t t[2];
uint32_t f[2];
uint8_t buf[BLAKE2S_BLOCKBYTES];
size_t buflen;
size_t outlen;
uint8_t last_node;
} blake2s_state;
typedef struct blake2b_state__
{
uint64_t h[8];
uint64_t t[2];
uint64_t f[2];
uint8_t buf[BLAKE2B_BLOCKBYTES];
size_t buflen;
size_t outlen;
uint8_t last_node;
} blake2b_state;
typedef struct blake2sp_state__
{
blake2s_state S[8][1];
blake2s_state R[1];
uint8_t buf[8 * BLAKE2S_BLOCKBYTES];
size_t buflen;
size_t outlen;
} blake2sp_state;
typedef struct blake2bp_state__
{
blake2b_state S[4][1];
blake2b_state R[1];
uint8_t buf[4 * BLAKE2B_BLOCKBYTES];
size_t buflen;
size_t outlen;
} blake2bp_state;
BLAKE2_PACKED(struct blake2s_param__
{
uint8_t digest_length; /* 1 */
uint8_t key_length; /* 2 */
uint8_t fanout; /* 3 */
uint8_t depth; /* 4 */
uint32_t leaf_length; /* 8 */
uint32_t node_offset; /* 12 */
uint16_t xof_length; /* 14 */
uint8_t node_depth; /* 15 */
uint8_t inner_length; /* 16 */
/* uint8_t reserved[0]; */
uint8_t salt[BLAKE2S_SALTBYTES]; /* 24 */
uint8_t personal[BLAKE2S_PERSONALBYTES]; /* 32 */
});
typedef struct blake2s_param__ blake2s_param;
BLAKE2_PACKED(struct blake2b_param__
{
uint8_t digest_length; /* 1 */
uint8_t key_length; /* 2 */
uint8_t fanout; /* 3 */
uint8_t depth; /* 4 */
uint32_t leaf_length; /* 8 */
uint32_t node_offset; /* 12 */
uint32_t xof_length; /* 16 */
uint8_t node_depth; /* 17 */
uint8_t inner_length; /* 18 */
uint8_t reserved[14]; /* 32 */
uint8_t salt[BLAKE2B_SALTBYTES]; /* 48 */
uint8_t personal[BLAKE2B_PERSONALBYTES]; /* 64 */
});
typedef struct blake2b_param__ blake2b_param;
typedef struct blake2xs_state__
{
blake2s_state S[1];
blake2s_param P[1];
} blake2xs_state;
typedef struct blake2xb_state__
{
blake2b_state S[1];
blake2b_param P[1];
} blake2xb_state;
/* Padded structs result in a compile-time error */
enum {
BLAKE2_DUMMY_1 = 1/(sizeof(blake2s_param) == BLAKE2S_OUTBYTES),
BLAKE2_DUMMY_2 = 1/(sizeof(blake2b_param) == BLAKE2B_OUTBYTES)
};
/* Streaming API */
int blake2s_init( blake2s_state *S, size_t outlen );
int blake2s_init_key( blake2s_state *S, size_t outlen, const void *key, size_t keylen );
int blake2s_init_param( blake2s_state *S, const blake2s_param *P );
int blake2s_update( blake2s_state *S, const void *in, size_t inlen );
int blake2s_final( blake2s_state *S, void *out, size_t outlen );
int blake2b_init( blake2b_state *S, size_t outlen );
int blake2b_init_key( blake2b_state *S, size_t outlen, const void *key, size_t keylen );
int blake2b_init_param( blake2b_state *S, const blake2b_param *P );
int blake2b_update( blake2b_state *S, const void *in, size_t inlen );
int blake2b_final( blake2b_state *S, void *out, size_t outlen );
int blake2sp_init( blake2sp_state *S, size_t outlen );
int blake2sp_init_key( blake2sp_state *S, size_t outlen, const void *key, size_t keylen );
int blake2sp_update( blake2sp_state *S, const void *in, size_t inlen );
int blake2sp_final( blake2sp_state *S, void *out, size_t outlen );
int blake2bp_init( blake2bp_state *S, size_t outlen );
int blake2bp_init_key( blake2bp_state *S, size_t outlen, const void *key, size_t keylen );
int blake2bp_update( blake2bp_state *S, const void *in, size_t inlen );
int blake2bp_final( blake2bp_state *S, void *out, size_t outlen );
/* Variable output length API */
int blake2xs_init( blake2xs_state *S, const size_t outlen );
int blake2xs_init_key( blake2xs_state *S, const size_t outlen, const void *key, size_t keylen );
int blake2xs_update( blake2xs_state *S, const void *in, size_t inlen );
int blake2xs_final(blake2xs_state *S, void *out, size_t outlen);
int blake2xb_init( blake2xb_state *S, const size_t outlen );
int blake2xb_init_key( blake2xb_state *S, const size_t outlen, const void *key, size_t keylen );
int blake2xb_update( blake2xb_state *S, const void *in, size_t inlen );
int blake2xb_final(blake2xb_state *S, void *out, size_t outlen);
/* Simple API */
int blake2s( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen );
int blake2b( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen );
int blake2sp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen );
int blake2bp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen );
int blake2xs( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen );
int blake2xb( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen );
/* This is simply an alias for blake2b */
int blake2( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen );
#if defined(__cplusplus)
}
#endif
#endif


@ -1,539 +0,0 @@
/*
BLAKE2 reference source code package - reference C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include "blake2.h"
/*
BLAKE2 reference source code package - reference C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#ifndef BLAKE2_IMPL_H
#define BLAKE2_IMPL_H
#include <stdint.h>
#include <string.h>
#if !defined(__cplusplus) && (!defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901L)
#if defined(_MSC_VER)
#define BLAKE2_INLINE __inline
#elif defined(__GNUC__)
#define BLAKE2_INLINE __inline__
#else
#define BLAKE2_INLINE
#endif
#else
#define BLAKE2_INLINE inline
#endif
static BLAKE2_INLINE uint32_t load32( const void *src )
{
#if defined(NATIVE_LITTLE_ENDIAN)
uint32_t w;
memcpy(&w, src, sizeof w);
return w;
#else
const uint8_t *p = ( const uint8_t * )src;
return (( uint32_t )( p[0] ) << 0) |
(( uint32_t )( p[1] ) << 8) |
(( uint32_t )( p[2] ) << 16) |
(( uint32_t )( p[3] ) << 24) ;
#endif
}
static BLAKE2_INLINE uint64_t load64( const void *src )
{
#if defined(NATIVE_LITTLE_ENDIAN)
uint64_t w;
memcpy(&w, src, sizeof w);
return w;
#else
const uint8_t *p = ( const uint8_t * )src;
return (( uint64_t )( p[0] ) << 0) |
(( uint64_t )( p[1] ) << 8) |
(( uint64_t )( p[2] ) << 16) |
(( uint64_t )( p[3] ) << 24) |
(( uint64_t )( p[4] ) << 32) |
(( uint64_t )( p[5] ) << 40) |
(( uint64_t )( p[6] ) << 48) |
(( uint64_t )( p[7] ) << 56) ;
#endif
}
static BLAKE2_INLINE uint16_t load16( const void *src )
{
#if defined(NATIVE_LITTLE_ENDIAN)
uint16_t w;
memcpy(&w, src, sizeof w);
return w;
#else
const uint8_t *p = ( const uint8_t * )src;
return ( uint16_t )((( uint32_t )( p[0] ) << 0) |
(( uint32_t )( p[1] ) << 8));
#endif
}
static BLAKE2_INLINE void store16( void *dst, uint16_t w )
{
#if defined(NATIVE_LITTLE_ENDIAN)
memcpy(dst, &w, sizeof w);
#else
uint8_t *p = ( uint8_t * )dst;
*p++ = ( uint8_t )w; w >>= 8;
*p++ = ( uint8_t )w;
#endif
}
static BLAKE2_INLINE void store32( void *dst, uint32_t w )
{
#if defined(NATIVE_LITTLE_ENDIAN)
memcpy(dst, &w, sizeof w);
#else
uint8_t *p = ( uint8_t * )dst;
p[0] = (uint8_t)(w >> 0);
p[1] = (uint8_t)(w >> 8);
p[2] = (uint8_t)(w >> 16);
p[3] = (uint8_t)(w >> 24);
#endif
}
static BLAKE2_INLINE void store64( void *dst, uint64_t w )
{
#if defined(NATIVE_LITTLE_ENDIAN)
memcpy(dst, &w, sizeof w);
#else
uint8_t *p = ( uint8_t * )dst;
p[0] = (uint8_t)(w >> 0);
p[1] = (uint8_t)(w >> 8);
p[2] = (uint8_t)(w >> 16);
p[3] = (uint8_t)(w >> 24);
p[4] = (uint8_t)(w >> 32);
p[5] = (uint8_t)(w >> 40);
p[6] = (uint8_t)(w >> 48);
p[7] = (uint8_t)(w >> 56);
#endif
}
static BLAKE2_INLINE uint64_t load48( const void *src )
{
const uint8_t *p = ( const uint8_t * )src;
return (( uint64_t )( p[0] ) << 0) |
(( uint64_t )( p[1] ) << 8) |
(( uint64_t )( p[2] ) << 16) |
(( uint64_t )( p[3] ) << 24) |
(( uint64_t )( p[4] ) << 32) |
(( uint64_t )( p[5] ) << 40) ;
}
static BLAKE2_INLINE void store48( void *dst, uint64_t w )
{
uint8_t *p = ( uint8_t * )dst;
p[0] = (uint8_t)(w >> 0);
p[1] = (uint8_t)(w >> 8);
p[2] = (uint8_t)(w >> 16);
p[3] = (uint8_t)(w >> 24);
p[4] = (uint8_t)(w >> 32);
p[5] = (uint8_t)(w >> 40);
}
static BLAKE2_INLINE uint32_t rotr32( const uint32_t w, const unsigned c )
{
return ( w >> c ) | ( w << ( 32 - c ) );
}
static BLAKE2_INLINE uint64_t rotr64( const uint64_t w, const unsigned c )
{
return ( w >> c ) | ( w << ( 64 - c ) );
}
/* prevents compiler optimizing out memset() */
static BLAKE2_INLINE void secure_zero_memory(void *v, size_t n)
{
static void *(*const volatile memset_v)(void *, int, size_t) = &memset;
memset_v(v, 0, n);
}
#endif
static const uint64_t blake2b_IV[8] =
{
0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL,
0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL,
0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL
};
static const uint8_t blake2b_sigma[12][16] =
{
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } ,
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } ,
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } ,
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } ,
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 } ,
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } ,
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 } ,
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 } ,
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 } ,
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0 } ,
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } ,
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
};
static void blake2b_set_lastnode( blake2b_state *S )
{
S->f[1] = (uint64_t)-1;
}
/* Some helper functions, not necessarily useful */
static int blake2b_is_lastblock( const blake2b_state *S )
{
return S->f[0] != 0;
}
static void blake2b_set_lastblock( blake2b_state *S )
{
if( S->last_node ) blake2b_set_lastnode( S );
S->f[0] = (uint64_t)-1;
}
static void blake2b_increment_counter( blake2b_state *S, const uint64_t inc )
{
S->t[0] += inc;
S->t[1] += ( S->t[0] < inc );
}
static void blake2b_init0( blake2b_state *S )
{
size_t i;
memset( S, 0, sizeof( blake2b_state ) );
for( i = 0; i < 8; ++i ) S->h[i] = blake2b_IV[i];
}
/* init xors IV with input parameter block */
int blake2b_init_param( blake2b_state *S, const blake2b_param *P )
{
const uint8_t *p = ( const uint8_t * )( P );
size_t i;
blake2b_init0( S );
/* IV XOR ParamBlock */
for( i = 0; i < 8; ++i )
S->h[i] ^= load64( p + sizeof( S->h[i] ) * i );
S->outlen = P->digest_length;
return 0;
}
int blake2b_init( blake2b_state *S, size_t outlen )
{
blake2b_param P[1];
if ( ( !outlen ) || ( outlen > BLAKE2B_OUTBYTES ) ) return -1;
P->digest_length = (uint8_t)outlen;
P->key_length = 0;
P->fanout = 1;
P->depth = 1;
store32( &P->leaf_length, 0 );
store32( &P->node_offset, 0 );
store32( &P->xof_length, 0 );
P->node_depth = 0;
P->inner_length = 0;
memset( P->reserved, 0, sizeof( P->reserved ) );
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2b_init_param( S, P );
}
int blake2b_init_key( blake2b_state *S, size_t outlen, const void *key, size_t keylen )
{
blake2b_param P[1];
if ( ( !outlen ) || ( outlen > BLAKE2B_OUTBYTES ) ) return -1;
if ( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;
P->digest_length = (uint8_t)outlen;
P->key_length = (uint8_t)keylen;
P->fanout = 1;
P->depth = 1;
store32( &P->leaf_length, 0 );
store32( &P->node_offset, 0 );
store32( &P->xof_length, 0 );
P->node_depth = 0;
P->inner_length = 0;
memset( P->reserved, 0, sizeof( P->reserved ) );
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
if( blake2b_init_param( S, P ) < 0 ) return -1;
{
uint8_t block[BLAKE2B_BLOCKBYTES];
memset( block, 0, BLAKE2B_BLOCKBYTES );
memcpy( block, key, keylen );
blake2b_update( S, block, BLAKE2B_BLOCKBYTES );
secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
}
return 0;
}
#define G(r,i,a,b,c,d) \
do { \
a = a + b + m[blake2b_sigma[r][2*i+0]]; \
d = rotr64(d ^ a, 32); \
c = c + d; \
b = rotr64(b ^ c, 24); \
a = a + b + m[blake2b_sigma[r][2*i+1]]; \
d = rotr64(d ^ a, 16); \
c = c + d; \
b = rotr64(b ^ c, 63); \
} while(0)
#define ROUND(r) \
do { \
G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \
G(r,2,v[ 2],v[ 6],v[10],v[14]); \
G(r,3,v[ 3],v[ 7],v[11],v[15]); \
G(r,4,v[ 0],v[ 5],v[10],v[15]); \
G(r,5,v[ 1],v[ 6],v[11],v[12]); \
G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \
} while(0)
static void blake2b_compress( blake2b_state *S, const uint8_t block[BLAKE2B_BLOCKBYTES] )
{
uint64_t m[16];
uint64_t v[16];
size_t i;
for( i = 0; i < 16; ++i ) {
m[i] = load64( block + i * sizeof( m[i] ) );
}
for( i = 0; i < 8; ++i ) {
v[i] = S->h[i];
}
v[ 8] = blake2b_IV[0];
v[ 9] = blake2b_IV[1];
v[10] = blake2b_IV[2];
v[11] = blake2b_IV[3];
v[12] = blake2b_IV[4] ^ S->t[0];
v[13] = blake2b_IV[5] ^ S->t[1];
v[14] = blake2b_IV[6] ^ S->f[0];
v[15] = blake2b_IV[7] ^ S->f[1];
ROUND( 0 );
ROUND( 1 );
ROUND( 2 );
ROUND( 3 );
ROUND( 4 );
ROUND( 5 );
ROUND( 6 );
ROUND( 7 );
ROUND( 8 );
ROUND( 9 );
ROUND( 10 );
ROUND( 11 );
for( i = 0; i < 8; ++i ) {
S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];
}
}
#undef G
#undef ROUND
int blake2b_update( blake2b_state *S, const void *pin, size_t inlen )
{
const unsigned char * in = (const unsigned char *)pin;
if( inlen > 0 )
{
size_t left = S->buflen;
size_t fill = BLAKE2B_BLOCKBYTES - left;
if( inlen > fill )
{
S->buflen = 0;
memcpy( S->buf + left, in, fill ); /* Fill buffer */
blake2b_increment_counter( S, BLAKE2B_BLOCKBYTES );
blake2b_compress( S, S->buf ); /* Compress */
in += fill; inlen -= fill;
while(inlen > BLAKE2B_BLOCKBYTES) {
blake2b_increment_counter(S, BLAKE2B_BLOCKBYTES);
blake2b_compress( S, in );
in += BLAKE2B_BLOCKBYTES;
inlen -= BLAKE2B_BLOCKBYTES;
}
}
memcpy( S->buf + S->buflen, in, inlen );
S->buflen += inlen;
}
return 0;
}
int blake2b_final( blake2b_state *S, void *out, size_t outlen )
{
uint8_t buffer[BLAKE2B_OUTBYTES] = {0};
size_t i;
if( out == NULL || outlen < S->outlen )
return -1;
if( blake2b_is_lastblock( S ) )
return -1;
blake2b_increment_counter( S, S->buflen );
blake2b_set_lastblock( S );
memset( S->buf + S->buflen, 0, BLAKE2B_BLOCKBYTES - S->buflen ); /* Padding */
blake2b_compress( S, S->buf );
for( i = 0; i < 8; ++i ) /* Output full hash to temp buffer */
store64( buffer + sizeof( S->h[i] ) * i, S->h[i] );
memcpy( out, buffer, S->outlen );
secure_zero_memory(buffer, sizeof(buffer));
return 0;
}
/* inlen, at least, should be uint64_t. Others can be size_t. */
int blake2b( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen )
{
blake2b_state S[1];
/* Verify parameters */
if ( NULL == in && inlen > 0 ) return -1;
if ( NULL == out ) return -1;
if( NULL == key && keylen > 0 ) return -1;
if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
if( keylen > BLAKE2B_KEYBYTES ) return -1;
if( keylen > 0 )
{
if( blake2b_init_key( S, outlen, key, keylen ) < 0 ) return -1;
}
else
{
if( blake2b_init( S, outlen ) < 0 ) return -1;
}
blake2b_update( S, ( const uint8_t * )in, inlen );
blake2b_final( S, out, outlen );
return 0;
}
int blake2( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen ) {
return blake2b(out, outlen, in, inlen, key, keylen);
}
#if defined(SUPERCOP)
int crypto_hash( unsigned char *out, unsigned char *in, unsigned long long inlen )
{
return blake2b( out, BLAKE2B_OUTBYTES, in, inlen, NULL, 0 );
}
#endif
#if defined(BLAKE2B_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
int main( void )
{
uint8_t key[BLAKE2B_KEYBYTES];
uint8_t buf[BLAKE2_KAT_LENGTH];
size_t i, step;
for( i = 0; i < BLAKE2B_KEYBYTES; ++i )
key[i] = ( uint8_t )i;
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
buf[i] = ( uint8_t )i;
/* Test simple API */
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
{
uint8_t hash[BLAKE2B_OUTBYTES];
blake2b( hash, BLAKE2B_OUTBYTES, buf, i, key, BLAKE2B_KEYBYTES );
if( 0 != memcmp( hash, blake2b_keyed_kat[i], BLAKE2B_OUTBYTES ) )
{
goto fail;
}
}
/* Test streaming API */
for(step = 1; step < BLAKE2B_BLOCKBYTES; ++step) {
for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
uint8_t hash[BLAKE2B_OUTBYTES];
blake2b_state S;
uint8_t * p = buf;
size_t mlen = i;
int err = 0;
if( (err = blake2b_init_key(&S, BLAKE2B_OUTBYTES, key, BLAKE2B_KEYBYTES)) < 0 ) {
goto fail;
}
while (mlen >= step) {
if ( (err = blake2b_update(&S, p, step)) < 0 ) {
goto fail;
}
mlen -= step;
p += step;
}
if ( (err = blake2b_update(&S, p, mlen)) < 0) {
goto fail;
}
if ( (err = blake2b_final(&S, hash, BLAKE2B_OUTBYTES)) < 0) {
goto fail;
}
if (0 != memcmp(hash, blake2b_keyed_kat[i], BLAKE2B_OUTBYTES)) {
goto fail;
}
}
}
puts( "ok" );
return 0;
fail:
puts("error");
return -1;
}
#endif


@ -1,595 +0,0 @@
/*
* Copyright (c) 2018 Andrew Kelley
*
* This file is part of zig, which is MIT licensed.
* See http://opensource.org/licenses/MIT
*/
#include "stage2.h"
#include "cache_hash.hpp"
#include "all_types.hpp"
#include "buffer.hpp"
#include "os.hpp"
#include <stdio.h>
void cache_init(CacheHash *ch, Buf *manifest_dir) {
int rc = blake2b_init(&ch->blake, 48);
assert(rc == 0);
ch->files = {};
ch->manifest_dir = manifest_dir;
ch->manifest_file_path = nullptr;
ch->manifest_dirty = false;
ch->force_check_manifest = false;
ch->b64_digest = BUF_INIT;
}
void cache_mem(CacheHash *ch, const char *ptr, size_t len) {
assert(ch->manifest_file_path == nullptr);
assert(ptr != nullptr);
blake2b_update(&ch->blake, ptr, len);
}
void cache_slice(CacheHash *ch, Slice<const char> slice) {
// mix the length into the hash so that two juxtaposed cached slices can't collide
cache_usize(ch, slice.len);
cache_mem(ch, slice.ptr, slice.len);
}
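The comment above is the reason `cache_slice` hashes the length first; a tiny, self-contained Zig illustration (separate from the C++ code being removed here) of the collision that length framing prevents:

const std = @import("std");
const assert = std.debug.assert;

pub fn main() void {
    // With no framing, ("ab", "c") and ("a", "bc") feed a hasher the exact same bytes.
    assert(std.mem.eql(u8, "ab" ++ "c", "a" ++ "bc"));
    // Prefixing every piece with its length (the idea behind cache_usize above)
    // keeps the two sequences distinct: "2ab1c" vs "1a2bc".
    assert(!std.mem.eql(u8, "2" ++ "ab" ++ "1" ++ "c", "1" ++ "a" ++ "2" ++ "bc"));
}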
void cache_str(CacheHash *ch, const char *ptr) {
// + 1 to include the null byte
cache_mem(ch, ptr, strlen(ptr) + 1);
}
void cache_int(CacheHash *ch, int x) {
assert(ch->manifest_file_path == nullptr);
// + 1 to include the null byte
uint8_t buf[sizeof(int) + 1];
memcpy(buf, &x, sizeof(int));
buf[sizeof(int)] = 0;
blake2b_update(&ch->blake, buf, sizeof(int) + 1);
}
void cache_usize(CacheHash *ch, size_t x) {
assert(ch->manifest_file_path == nullptr);
// + 1 to include the null byte
uint8_t buf[sizeof(size_t) + 1];
memcpy(buf, &x, sizeof(size_t));
buf[sizeof(size_t)] = 0;
blake2b_update(&ch->blake, buf, sizeof(size_t) + 1);
}
void cache_bool(CacheHash *ch, bool x) {
assert(ch->manifest_file_path == nullptr);
blake2b_update(&ch->blake, &x, 1);
}
void cache_buf(CacheHash *ch, Buf *buf) {
assert(ch->manifest_file_path == nullptr);
assert(buf != nullptr);
// + 1 to include the null byte
blake2b_update(&ch->blake, buf_ptr(buf), buf_len(buf) + 1);
}
void cache_buf_opt(CacheHash *ch, Buf *buf) {
assert(ch->manifest_file_path == nullptr);
if (buf == nullptr) {
cache_str(ch, "");
cache_str(ch, "");
} else {
cache_buf(ch, buf);
}
}
void cache_list_of_link_lib(CacheHash *ch, LinkLib **ptr, size_t len) {
assert(ch->manifest_file_path == nullptr);
for (size_t i = 0; i < len; i += 1) {
LinkLib *lib = ptr[i];
if (lib->provided_explicitly) {
cache_buf(ch, lib->name);
}
}
cache_str(ch, "");
}
void cache_list_of_buf(CacheHash *ch, Buf **ptr, size_t len) {
assert(ch->manifest_file_path == nullptr);
for (size_t i = 0; i < len; i += 1) {
Buf *buf = ptr[i];
cache_buf(ch, buf);
}
cache_str(ch, "");
}
void cache_list_of_file(CacheHash *ch, Buf **ptr, size_t len) {
assert(ch->manifest_file_path == nullptr);
for (size_t i = 0; i < len; i += 1) {
Buf *buf = ptr[i];
cache_file(ch, buf);
}
cache_str(ch, "");
}
void cache_list_of_str(CacheHash *ch, const char **ptr, size_t len) {
assert(ch->manifest_file_path == nullptr);
for (size_t i = 0; i < len; i += 1) {
const char *s = ptr[i];
cache_str(ch, s);
}
cache_str(ch, "");
}
void cache_file(CacheHash *ch, Buf *file_path) {
assert(ch->manifest_file_path == nullptr);
assert(file_path != nullptr);
Buf *resolved_path = buf_alloc();
*resolved_path = os_path_resolve(&file_path, 1);
CacheHashFile *chf = ch->files.add_one();
chf->path = resolved_path;
cache_buf(ch, resolved_path);
}
void cache_file_opt(CacheHash *ch, Buf *file_path) {
assert(ch->manifest_file_path == nullptr);
if (file_path == nullptr) {
cache_str(ch, "");
cache_str(ch, "");
} else {
cache_file(ch, file_path);
}
}
// Ported from std/base64.zig
static uint8_t base64_fs_alphabet[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
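// This is the URL/filename-safe base64 alphabet ('-' and '_' instead of '+' and '/');
// no '=' padding is ever emitted or accepted (see the asserts below).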
static void base64_encode(Slice<uint8_t> dest, Slice<uint8_t> source) {
size_t dest_len = ((source.len + 2) / 3) * 4;
assert(dest.len == dest_len);
size_t i = 0;
size_t out_index = 0;
for (; i + 2 < source.len; i += 3) {
dest.ptr[out_index] = base64_fs_alphabet[(source.ptr[i] >> 2) & 0x3f];
out_index += 1;
dest.ptr[out_index] = base64_fs_alphabet[((source.ptr[i] & 0x3) << 4) | ((source.ptr[i + 1] & 0xf0) >> 4)];
out_index += 1;
dest.ptr[out_index] = base64_fs_alphabet[((source.ptr[i + 1] & 0xf) << 2) | ((source.ptr[i + 2] & 0xc0) >> 6)];
out_index += 1;
dest.ptr[out_index] = base64_fs_alphabet[source.ptr[i + 2] & 0x3f];
out_index += 1;
}
// Assert that we never need pad characters.
assert(i == source.len);
}
// Ported from std/base64.zig
static Error base64_decode(Slice<uint8_t> dest, Slice<uint8_t> source) {
if (source.len % 4 != 0)
return ErrorInvalidFormat;
if (dest.len != (source.len / 4) * 3)
return ErrorInvalidFormat;
    // In Zig this table is computed at comptime; in C++ it's not worth the trouble,
    // so it is built at runtime instead.
uint8_t char_to_index[256];
bool char_in_alphabet[256] = {0};
for (size_t i = 0; i < 64; i += 1) {
uint8_t c = base64_fs_alphabet[i];
assert(!char_in_alphabet[c]);
char_in_alphabet[c] = true;
char_to_index[c] = i;
}
size_t src_cursor = 0;
size_t dest_cursor = 0;
for (;src_cursor < source.len; src_cursor += 4) {
if (!char_in_alphabet[source.ptr[src_cursor + 0]]) return ErrorInvalidFormat;
if (!char_in_alphabet[source.ptr[src_cursor + 1]]) return ErrorInvalidFormat;
if (!char_in_alphabet[source.ptr[src_cursor + 2]]) return ErrorInvalidFormat;
if (!char_in_alphabet[source.ptr[src_cursor + 3]]) return ErrorInvalidFormat;
dest.ptr[dest_cursor + 0] = (char_to_index[source.ptr[src_cursor + 0]] << 2) | (char_to_index[source.ptr[src_cursor + 1]] >> 4);
dest.ptr[dest_cursor + 1] = (char_to_index[source.ptr[src_cursor + 1]] << 4) | (char_to_index[source.ptr[src_cursor + 2]] >> 2);
dest.ptr[dest_cursor + 2] = (char_to_index[source.ptr[src_cursor + 2]] << 6) | (char_to_index[source.ptr[src_cursor + 3]]);
dest_cursor += 3;
}
assert(src_cursor == source.len);
assert(dest_cursor == dest.len);
return ErrorNone;
}
static Error hash_file(uint8_t *digest, OsFile handle, Buf *contents) {
Error err;
if (contents) {
buf_resize(contents, 0);
}
blake2b_state blake;
int rc = blake2b_init(&blake, 48);
assert(rc == 0);
for (;;) {
uint8_t buf[4096];
size_t amt = 4096;
if ((err = os_file_read(handle, buf, &amt)))
return err;
if (amt == 0) {
rc = blake2b_final(&blake, digest, 48);
assert(rc == 0);
return ErrorNone;
}
blake2b_update(&blake, buf, amt);
if (contents) {
buf_append_mem(contents, (char*)buf, amt);
}
}
}
// If the wall clock time, rounded to the same precision as the
// mtime, is equal to the mtime, then we cannot rely on this mtime
// yet. We will instead save an mtime value that indicates the hash
// must be unconditionally computed.
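// For example, if a file's mtime has only whole-second precision (nsec == 0) and the
// current wall-clock second equals that mtime, the file could still be rewritten within
// the same second without its mtime changing, so the mtime cannot be trusted yet.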
static bool is_problematic_timestamp(const OsTimeStamp *fs_clock) {
OsTimeStamp wall_clock = os_timestamp_calendar();
    // First, zero out the same low-order bits in the wall clock that are zero in the
    // fs_clock, so that both timestamps are compared at the filesystem's precision.
if (fs_clock->nsec == 0) {
wall_clock.nsec = 0;
if (fs_clock->sec == 0) {
wall_clock.sec = 0;
} else {
wall_clock.sec &= (-1ull) << ctzll(fs_clock->sec);
}
} else {
wall_clock.nsec &= (-1ull) << ctzll(fs_clock->nsec);
}
return wall_clock.nsec == fs_clock->nsec && wall_clock.sec == fs_clock->sec;
}
static Error populate_file_hash(CacheHash *ch, CacheHashFile *chf, Buf *contents) {
Error err;
assert(chf->path != nullptr);
OsFile this_file;
if ((err = os_file_open_r(chf->path, &this_file, &chf->attr)))
return err;
if (is_problematic_timestamp(&chf->attr.mtime)) {
chf->attr.mtime.sec = 0;
chf->attr.mtime.nsec = 0;
chf->attr.inode = 0;
}
if ((err = hash_file(chf->bin_digest, this_file, contents))) {
os_file_close(&this_file);
return err;
}
os_file_close(&this_file);
blake2b_update(&ch->blake, chf->bin_digest, 48);
return ErrorNone;
}
Error cache_hit(CacheHash *ch, Buf *out_digest) {
Error err;
uint8_t bin_digest[48];
int rc = blake2b_final(&ch->blake, bin_digest, 48);
assert(rc == 0);
buf_resize(&ch->b64_digest, 64);
base64_encode(buf_to_slice(&ch->b64_digest), {bin_digest, 48});
if (ch->files.length == 0 && !ch->force_check_manifest) {
buf_resize(out_digest, 64);
base64_encode(buf_to_slice(out_digest), {bin_digest, 48});
return ErrorNone;
}
rc = blake2b_init(&ch->blake, 48);
assert(rc == 0);
blake2b_update(&ch->blake, bin_digest, 48);
ch->manifest_file_path = buf_alloc();
os_path_join(ch->manifest_dir, &ch->b64_digest, ch->manifest_file_path);
buf_append_str(ch->manifest_file_path, ".txt");
if ((err = os_make_path(ch->manifest_dir)))
return err;
if ((err = os_file_open_lock_rw(ch->manifest_file_path, &ch->manifest_file)))
return err;
Buf line_buf = BUF_INIT;
buf_resize(&line_buf, 512);
if ((err = os_file_read_all(ch->manifest_file, &line_buf))) {
os_file_close(&ch->manifest_file);
return err;
}
size_t input_file_count = ch->files.length;
bool any_file_changed = false;
Error return_code = ErrorNone;
size_t file_i = 0;
SplitIterator line_it = memSplit(buf_to_slice(&line_buf), str("\n"));
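    // Each manifest line has the form:
    //   <inode> <mtime_sec> <mtime_nsec> <base64_digest> <file_path>
    // e.g. "654321 1601449200 0 <64 base64 chars> /path/to/input.zig" (values are illustrative).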
for (;; file_i += 1) {
Optional<Slice<uint8_t>> opt_line = SplitIterator_next(&line_it);
CacheHashFile *chf;
if (file_i < input_file_count) {
chf = &ch->files.at(file_i);
} else if (any_file_changed) {
// cache miss.
// keep the manifest file open with the rw lock
// reset the hash
rc = blake2b_init(&ch->blake, 48);
assert(rc == 0);
blake2b_update(&ch->blake, bin_digest, 48);
ch->files.resize(input_file_count);
// bring the hash up to the input file hashes
for (file_i = 0; file_i < input_file_count; file_i += 1) {
blake2b_update(&ch->blake, ch->files.at(file_i).bin_digest, 48);
}
// caller can notice that out_digest is unmodified.
return return_code;
} else if (!opt_line.is_some) {
break;
} else {
chf = ch->files.add_one();
chf->path = nullptr;
}
if (!opt_line.is_some)
break;
SplitIterator it = memSplit(opt_line.value, str(" "));
Optional<Slice<uint8_t>> opt_inode = SplitIterator_next(&it);
if (!opt_inode.is_some) {
return_code = ErrorInvalidFormat;
break;
}
chf->attr.inode = strtoull((const char *)opt_inode.value.ptr, nullptr, 10);
Optional<Slice<uint8_t>> opt_mtime_sec = SplitIterator_next(&it);
if (!opt_mtime_sec.is_some) {
return_code = ErrorInvalidFormat;
break;
}
chf->attr.mtime.sec = strtoull((const char *)opt_mtime_sec.value.ptr, nullptr, 10);
Optional<Slice<uint8_t>> opt_mtime_nsec = SplitIterator_next(&it);
if (!opt_mtime_nsec.is_some) {
return_code = ErrorInvalidFormat;
break;
}
chf->attr.mtime.nsec = strtoull((const char *)opt_mtime_nsec.value.ptr, nullptr, 10);
Optional<Slice<uint8_t>> opt_digest = SplitIterator_next(&it);
if (!opt_digest.is_some) {
return_code = ErrorInvalidFormat;
break;
}
if ((err = base64_decode({chf->bin_digest, 48}, opt_digest.value))) {
return_code = ErrorInvalidFormat;
break;
}
Slice<uint8_t> file_path = SplitIterator_rest(&it);
if (file_path.len == 0) {
return_code = ErrorInvalidFormat;
break;
}
Buf *this_path = buf_create_from_slice(file_path);
if (chf->path != nullptr && !buf_eql_buf(this_path, chf->path)) {
return_code = ErrorInvalidFormat;
break;
}
chf->path = this_path;
// if the mtime matches we can trust the digest
OsFile this_file;
OsFileAttr actual_attr;
if ((err = os_file_open_r(chf->path, &this_file, &actual_attr))) {
            fprintf(stderr, "Unable to open %s: %s\n", buf_ptr(chf->path), err_str(err));
os_file_close(&ch->manifest_file);
return ErrorCacheUnavailable;
}
if (chf->attr.mtime.sec == actual_attr.mtime.sec &&
chf->attr.mtime.nsec == actual_attr.mtime.nsec &&
chf->attr.inode == actual_attr.inode)
{
os_file_close(&this_file);
} else {
// we have to recompute the digest.
// later we'll rewrite the manifest with the new mtime/digest values
ch->manifest_dirty = true;
chf->attr = actual_attr;
if (is_problematic_timestamp(&actual_attr.mtime)) {
chf->attr.mtime.sec = 0;
chf->attr.mtime.nsec = 0;
chf->attr.inode = 0;
}
uint8_t actual_digest[48];
if ((err = hash_file(actual_digest, this_file, nullptr))) {
os_file_close(&this_file);
os_file_close(&ch->manifest_file);
return err;
}
os_file_close(&this_file);
if (memcmp(chf->bin_digest, actual_digest, 48) != 0) {
memcpy(chf->bin_digest, actual_digest, 48);
// keep going until we have the input file digests
any_file_changed = true;
}
}
if (!any_file_changed) {
blake2b_update(&ch->blake, chf->bin_digest, 48);
}
}
if (file_i < input_file_count || file_i == 0 || return_code != ErrorNone) {
// manifest file is empty or missing entries, so this is a cache miss
ch->manifest_dirty = true;
for (; file_i < input_file_count; file_i += 1) {
CacheHashFile *chf = &ch->files.at(file_i);
if ((err = populate_file_hash(ch, chf, nullptr))) {
fprintf(stderr, "Unable to hash %s: %s\n", buf_ptr(chf->path), err_str(err));
os_file_close(&ch->manifest_file);
return ErrorCacheUnavailable;
}
}
if (return_code != ErrorNone && return_code != ErrorInvalidFormat) {
os_file_close(&ch->manifest_file);
}
return return_code;
}
// Cache Hit
return cache_final(ch, out_digest);
}
Error cache_add_file_fetch(CacheHash *ch, Buf *resolved_path, Buf *contents) {
Error err;
assert(ch->manifest_file_path != nullptr);
CacheHashFile *chf = ch->files.add_one();
chf->path = resolved_path;
if ((err = populate_file_hash(ch, chf, contents))) {
os_file_close(&ch->manifest_file);
return err;
}
return ErrorNone;
}
Error cache_add_file(CacheHash *ch, Buf *path) {
Buf *resolved_path = buf_alloc();
*resolved_path = os_path_resolve(&path, 1);
return cache_add_file_fetch(ch, resolved_path, nullptr);
}
Error cache_add_dep_file(CacheHash *ch, Buf *dep_file_path, bool verbose) {
Error err;
Buf *contents = buf_alloc();
if ((err = os_fetch_file_path(dep_file_path, contents))) {
if (err == ErrorFileNotFound)
return err;
if (verbose) {
fprintf(stderr, "%s: unable to read .d file: %s\n", err_str(err), buf_ptr(dep_file_path));
}
return ErrorReadingDepFile;
}
auto it = stage2_DepTokenizer_init(buf_ptr(contents), buf_len(contents));
// skip first token: target
{
auto result = stage2_DepTokenizer_next(&it);
switch (result.type_id) {
case stage2_DepNextResult::error:
if (verbose) {
fprintf(stderr, "%s: failed processing .d file: %s\n", result.textz, buf_ptr(dep_file_path));
}
err = ErrorInvalidDepFile;
goto finish;
case stage2_DepNextResult::null:
err = ErrorNone;
goto finish;
case stage2_DepNextResult::target:
case stage2_DepNextResult::prereq:
err = ErrorNone;
break;
}
}
    // Process zero or more prerequisites.
// clang is invoked in single-source mode so we never get more targets.
for (;;) {
auto result = stage2_DepTokenizer_next(&it);
switch (result.type_id) {
case stage2_DepNextResult::error:
if (verbose) {
fprintf(stderr, "%s: failed processing .d file: %s\n", result.textz, buf_ptr(dep_file_path));
}
err = ErrorInvalidDepFile;
goto finish;
case stage2_DepNextResult::null:
case stage2_DepNextResult::target:
err = ErrorNone;
goto finish;
case stage2_DepNextResult::prereq:
break;
}
auto textbuf = buf_alloc();
buf_init_from_str(textbuf, result.textz);
if ((err = cache_add_file(ch, textbuf))) {
if (verbose) {
fprintf(stderr, "unable to add %s to cache: %s\n", result.textz, err_str(err));
fprintf(stderr, "when processing .d file: %s\n", buf_ptr(dep_file_path));
}
goto finish;
}
}
finish:
stage2_DepTokenizer_deinit(&it);
return err;
}
static Error write_manifest_file(CacheHash *ch) {
Error err;
Buf contents = BUF_INIT;
buf_resize(&contents, 0);
uint8_t encoded_digest[65];
encoded_digest[64] = 0;
for (size_t i = 0; i < ch->files.length; i += 1) {
CacheHashFile *chf = &ch->files.at(i);
base64_encode({encoded_digest, 64}, {chf->bin_digest, 48});
buf_appendf(&contents, "%" ZIG_PRI_u64 " %" ZIG_PRI_u64 " %" ZIG_PRI_u64 " %s %s\n",
chf->attr.inode, chf->attr.mtime.sec, chf->attr.mtime.nsec, encoded_digest, buf_ptr(chf->path));
}
if ((err = os_file_overwrite(ch->manifest_file, &contents)))
return err;
return ErrorNone;
}
Error cache_final(CacheHash *ch, Buf *out_digest) {
assert(ch->manifest_file_path != nullptr);
// We don't close the manifest file yet, because we want to
// keep it locked until the API user is done using it.
// We also don't write out the manifest yet, because until
// cache_release is called we still might be working on creating
// the artifacts to cache.
uint8_t bin_digest[48];
int rc = blake2b_final(&ch->blake, bin_digest, 48);
assert(rc == 0);
buf_resize(out_digest, 64);
base64_encode(buf_to_slice(out_digest), {bin_digest, 48});
return ErrorNone;
}
void cache_release(CacheHash *ch) {
assert(ch->manifest_file_path != nullptr);
Error err;
if (ch->manifest_dirty) {
if ((err = write_manifest_file(ch))) {
fprintf(stderr, "Warning: Unable to write cache file '%s': %s\n",
buf_ptr(ch->manifest_file_path), err_str(err));
}
}
os_file_close(&ch->manifest_file);
}

View File

@ -1,83 +0,0 @@
/*
* Copyright (c) 2018 Andrew Kelley
*
* This file is part of zig, which is MIT licensed.
* See http://opensource.org/licenses/MIT
*/
#ifndef ZIG_CACHE_HASH_HPP
#define ZIG_CACHE_HASH_HPP
#include "blake2.h"
#include "os.hpp"
struct LinkLib;
struct CacheHashFile {
Buf *path;
OsFileAttr attr;
uint8_t bin_digest[48];
Buf *contents;
};
struct CacheHash {
blake2b_state blake;
ZigList<CacheHashFile> files;
Buf *manifest_dir;
Buf *manifest_file_path;
Buf b64_digest;
OsFile manifest_file;
bool manifest_dirty;
bool force_check_manifest;
};
// Always call this first to set up.
void cache_init(CacheHash *ch, Buf *manifest_dir);
// Next, use the hash population functions to add the initial parameters.
void cache_mem(CacheHash *ch, const char *ptr, size_t len);
void cache_slice(CacheHash *ch, Slice<const char> slice);
void cache_str(CacheHash *ch, const char *ptr);
void cache_int(CacheHash *ch, int x);
void cache_bool(CacheHash *ch, bool x);
void cache_usize(CacheHash *ch, size_t x);
void cache_buf(CacheHash *ch, Buf *buf);
void cache_buf_opt(CacheHash *ch, Buf *buf);
void cache_list_of_link_lib(CacheHash *ch, LinkLib **ptr, size_t len);
void cache_list_of_buf(CacheHash *ch, Buf **ptr, size_t len);
void cache_list_of_file(CacheHash *ch, Buf **ptr, size_t len);
void cache_list_of_str(CacheHash *ch, const char **ptr, size_t len);
void cache_file(CacheHash *ch, Buf *path);
void cache_file_opt(CacheHash *ch, Buf *path);
// Then call cache_hit when you're ready to see if you can skip the next step.
// out_b64_digest will be left unchanged if it was a cache miss.
// If you got a cache hit, the next step is cache_release.
// From this point on, there is a lock on the input params. Release
// the lock with cache_release.
// Set force_check_manifest if you plan to add files later, but have not
// added any files before calling cache_hit. CacheHash::b64_digest becomes
// available for use after this call, even in the case of a miss, and it
// is a hash of the input parameters only.
// If this function returns ErrorInvalidFormat, that error may be treated
// as a cache miss.
Error ATTRIBUTE_MUST_USE cache_hit(CacheHash *ch, Buf *out_b64_digest);
// If you did not get a cache hit, call this function for every file
// that is depended on, and then finish with cache_final.
Error ATTRIBUTE_MUST_USE cache_add_file(CacheHash *ch, Buf *path);
// This opens a file created by -MD -MF args to Clang
Error ATTRIBUTE_MUST_USE cache_add_dep_file(CacheHash *ch, Buf *path, bool verbose);
// This variant of cache_add_file returns the file contents.
// Also the file path argument must be already resolved.
Error ATTRIBUTE_MUST_USE cache_add_file_fetch(CacheHash *ch, Buf *resolved_path, Buf *contents);
// out_b64_digest will be the same thing that cache_hit returns if you got a cache hit
Error ATTRIBUTE_MUST_USE cache_final(CacheHash *ch, Buf *out_b64_digest);
// Until this function is called, no one will be able to get a lock on your input params.
void cache_release(CacheHash *ch);
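// Illustrative usage sketch based on the comments above. Identifiers such as
// `manifest_dir`, `some_param`, `input_file` and `dep_file` are placeholders:
//
//   CacheHash ch;
//   cache_init(&ch, manifest_dir);
//   cache_buf(&ch, some_param);                  // mix in the input parameters
//   cache_file(&ch, input_file);                 // and the known input files
//   Buf digest = BUF_INIT;
//   buf_resize(&digest, 0);
//   if ((err = cache_hit(&ch, &digest)) && err != ErrorInvalidFormat)
//       return err;                              // ErrorInvalidFormat may be treated as a miss
//   if (buf_len(&digest) == 0) {                 // cache miss
//       if ((err = cache_add_file(&ch, dep_file))) return err;
//       if ((err = cache_final(&ch, &digest))) return err;
//   }
//   // ... create or reuse the artifacts named by `digest` ...
//   cache_release(&ch);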
#endif

View File

@ -7,9 +7,7 @@ pub const CliArg = struct {
name: []const u8, name: []const u8,
syntax: Syntax, syntax: Syntax,
/// TODO we're going to want to change this when we start shipping self-hosted because this causes zig_equivalent: @import("main.zig").ClangArgIterator.ZigEquivalent,
/// all the functions in stage2.zig to get exported.
zig_equivalent: @import("stage2.zig").ClangArgIterator.ZigEquivalent,
/// Prefixed by "-" /// Prefixed by "-"
pd1: bool = false, pd1: bool = false,

View File

@ -7,7 +7,7 @@ flagpd1("CC"),
.{ .{
.name = "E", .name = "E",
.syntax = .flag, .syntax = .flag,
.zig_equivalent = .pp_or_asm, .zig_equivalent = .preprocess_only,
.pd1 = true, .pd1 = true,
.pd2 = false, .pd2 = false,
.psl = false, .psl = false,
@ -96,7 +96,7 @@ flagpd1("Qy"),
.{ .{
.name = "S", .name = "S",
.syntax = .flag, .syntax = .flag,
.zig_equivalent = .pp_or_asm, .zig_equivalent = .asm_only,
.pd1 = true, .pd1 = true,
.pd2 = false, .pd2 = false,
.psl = false, .psl = false,
@ -199,7 +199,7 @@ sepd1("Zlinker-input"),
.{ .{
.name = "E", .name = "E",
.syntax = .flag, .syntax = .flag,
.zig_equivalent = .pp_or_asm, .zig_equivalent = .preprocess_only,
.pd1 = true, .pd1 = true,
.pd2 = false, .pd2 = false,
.psl = true, .psl = true,
@ -1512,7 +1512,7 @@ flagpsl("MT"),
.{ .{
.name = "assemble", .name = "assemble",
.syntax = .flag, .syntax = .flag,
.zig_equivalent = .pp_or_asm, .zig_equivalent = .asm_only,
.pd1 = false, .pd1 = false,
.pd2 = true, .pd2 = true,
.psl = false, .psl = false,
@ -1840,7 +1840,7 @@ flagpsl("MT"),
.{ .{
.name = "preprocess", .name = "preprocess",
.syntax = .flag, .syntax = .flag,
.zig_equivalent = .pp_or_asm, .zig_equivalent = .preprocess_only,
.pd1 = false, .pd1 = false,
.pd2 = true, .pd2 = true,
.psl = false, .psl = false,

View File

@ -1,68 +0,0 @@
/*
* Copyright (c) 2015 Andrew Kelley
*
* This file is part of zig, which is MIT licensed.
* See http://opensource.org/licenses/MIT
*/
#ifndef ZIG_CODEGEN_HPP
#define ZIG_CODEGEN_HPP
#include "parser.hpp"
#include "errmsg.hpp"
#include "target.hpp"
#include "stage2.h"
#include <stdio.h>
CodeGen *codegen_create(Buf *main_pkg_path, Buf *root_src_path, const ZigTarget *target,
OutType out_type, BuildMode build_mode, Buf *zig_lib_dir,
Stage2LibCInstallation *libc, Buf *cache_dir, bool is_test_build, Stage2ProgressNode *progress_node);
CodeGen *create_child_codegen(CodeGen *parent_gen, Buf *root_src_path, OutType out_type,
Stage2LibCInstallation *libc, const char *name, Stage2ProgressNode *progress_node);
void codegen_set_clang_argv(CodeGen *codegen, const char **args, size_t len);
void codegen_set_llvm_argv(CodeGen *codegen, const char **args, size_t len);
void codegen_set_each_lib_rpath(CodeGen *codegen, bool each_lib_rpath);
void codegen_set_strip(CodeGen *codegen, bool strip);
void codegen_set_errmsg_color(CodeGen *codegen, ErrColor err_color);
void codegen_set_out_name(CodeGen *codegen, Buf *out_name);
void codegen_add_lib_dir(CodeGen *codegen, const char *dir);
void codegen_add_forbidden_lib(CodeGen *codegen, Buf *lib);
LinkLib *codegen_add_link_lib(CodeGen *codegen, Buf *lib);
void codegen_add_framework(CodeGen *codegen, const char *name);
void codegen_add_rpath(CodeGen *codegen, const char *name);
void codegen_set_rdynamic(CodeGen *g, bool rdynamic);
void codegen_set_linker_script(CodeGen *g, const char *linker_script);
void codegen_set_test_filter(CodeGen *g, Buf *filter);
void codegen_set_test_name_prefix(CodeGen *g, Buf *prefix);
void codegen_set_lib_version(CodeGen *g, bool is_versioned, size_t major, size_t minor, size_t patch);
void codegen_add_time_event(CodeGen *g, const char *name);
void codegen_print_timing_report(CodeGen *g, FILE *f);
void codegen_link(CodeGen *g);
void zig_link_add_compiler_rt(CodeGen *g, Stage2ProgressNode *progress_node);
void codegen_build_and_link(CodeGen *g);
ZigPackage *codegen_create_package(CodeGen *g, const char *root_src_dir, const char *root_src_path,
const char *pkg_path);
void codegen_add_assembly(CodeGen *g, Buf *path);
void codegen_add_object(CodeGen *g, Buf *object_path);
void codegen_translate_c(CodeGen *g, Buf *full_path);
Buf *codegen_generate_builtin_source(CodeGen *g);
TargetSubsystem detect_subsystem(CodeGen *g);
void codegen_release_caches(CodeGen *codegen);
bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type);
bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn, bool is_async);
ATTRIBUTE_NORETURN
void codegen_report_errors_and_exit(CodeGen *g);
void codegen_switch_sub_prog_node(CodeGen *g, Stage2ProgressNode *node);
#endif

View File

@ -8,7 +8,8 @@ const Value = @import("value.zig").Value;
const TypedValue = @import("TypedValue.zig"); const TypedValue = @import("TypedValue.zig");
const link = @import("link.zig"); const link = @import("link.zig");
const Module = @import("Module.zig"); const Module = @import("Module.zig");
const ErrorMsg = Module.ErrorMsg; const Compilation = @import("Compilation.zig");
const ErrorMsg = Compilation.ErrorMsg;
const Target = std.Target; const Target = std.Target;
const Allocator = mem.Allocator; const Allocator = mem.Allocator;
const trace = @import("tracy.zig").trace; const trace = @import("tracy.zig").trace;
@ -50,7 +51,7 @@ pub const Result = union(enum) {
appended: void, appended: void,
/// The value is available externally, `code` is unused. /// The value is available externally, `code` is unused.
externally_managed: []const u8, externally_managed: []const u8,
fail: *Module.ErrorMsg, fail: *ErrorMsg,
}; };
pub const GenerateSymbolError = error{ pub const GenerateSymbolError = error{

125
src/codegen/llvm.zig Normal file
View File

@ -0,0 +1,125 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
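/// Renders `target` as an LLVM triple of the form arch-vendor-os-abi, with the vendor
/// always "unknown" (e.g. `x86_64-unknown-linux-gnu`; example only). The returned slice
/// is allocated with `allocator` and owned by the caller.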
pub fn targetTriple(allocator: *Allocator, target: std.Target) ![]u8 {
const llvm_arch = switch (target.cpu.arch) {
.arm => "arm",
.armeb => "armeb",
.aarch64 => "aarch64",
.aarch64_be => "aarch64_be",
.aarch64_32 => "aarch64_32",
.arc => "arc",
.avr => "avr",
.bpfel => "bpfel",
.bpfeb => "bpfeb",
.hexagon => "hexagon",
.mips => "mips",
.mipsel => "mipsel",
.mips64 => "mips64",
.mips64el => "mips64el",
.msp430 => "msp430",
.powerpc => "powerpc",
.powerpc64 => "powerpc64",
.powerpc64le => "powerpc64le",
.r600 => "r600",
.amdgcn => "amdgcn",
.riscv32 => "riscv32",
.riscv64 => "riscv64",
.sparc => "sparc",
.sparcv9 => "sparcv9",
.sparcel => "sparcel",
.s390x => "s390x",
.tce => "tce",
.tcele => "tcele",
.thumb => "thumb",
.thumbeb => "thumbeb",
.i386 => "i386",
.x86_64 => "x86_64",
.xcore => "xcore",
.nvptx => "nvptx",
.nvptx64 => "nvptx64",
.le32 => "le32",
.le64 => "le64",
.amdil => "amdil",
.amdil64 => "amdil64",
.hsail => "hsail",
.hsail64 => "hsail64",
.spir => "spir",
.spir64 => "spir64",
.kalimba => "kalimba",
.shave => "shave",
.lanai => "lanai",
.wasm32 => "wasm32",
.wasm64 => "wasm64",
.renderscript32 => "renderscript32",
.renderscript64 => "renderscript64",
.ve => "ve",
.spu_2 => return error.LLVMBackendDoesNotSupportSPUMarkII,
};
// TODO Add a sub-arch for some architectures depending on CPU features.
const llvm_os = switch (target.os.tag) {
.freestanding => "unknown",
.ananas => "ananas",
.cloudabi => "cloudabi",
.dragonfly => "dragonfly",
.freebsd => "freebsd",
.fuchsia => "fuchsia",
.ios => "ios",
.kfreebsd => "kfreebsd",
.linux => "linux",
.lv2 => "lv2",
.macosx => "macosx",
.netbsd => "netbsd",
.openbsd => "openbsd",
.solaris => "solaris",
.windows => "windows",
.haiku => "haiku",
.minix => "minix",
.rtems => "rtems",
.nacl => "nacl",
.cnk => "cnk",
.aix => "aix",
.cuda => "cuda",
.nvcl => "nvcl",
.amdhsa => "amdhsa",
.ps4 => "ps4",
.elfiamcu => "elfiamcu",
.tvos => "tvos",
.watchos => "watchos",
.mesa3d => "mesa3d",
.contiki => "contiki",
.amdpal => "amdpal",
.hermit => "hermit",
.hurd => "hurd",
.wasi => "wasi",
.emscripten => "emscripten",
.uefi => "windows",
.other => "unknown",
};
const llvm_abi = switch (target.abi) {
.none => "unknown",
.gnu => "gnu",
.gnuabin32 => "gnuabin32",
.gnuabi64 => "gnuabi64",
.gnueabi => "gnueabi",
.gnueabihf => "gnueabihf",
.gnux32 => "gnux32",
.code16 => "code16",
.eabi => "eabi",
.eabihf => "eabihf",
.android => "android",
.musl => "musl",
.musleabi => "musleabi",
.musleabihf => "musleabihf",
.msvc => "msvc",
.itanium => "itanium",
.cygnus => "cygnus",
.coreclr => "coreclr",
.simulator => "simulator",
.macabi => "macabi",
};
return std.fmt.allocPrint(allocator, "{}-unknown-{}-{}", .{ llvm_arch, llvm_os, llvm_abi });
}

View File

@ -5,7 +5,8 @@ const assert = std.debug.assert;
const leb = std.debug.leb; const leb = std.debug.leb;
const mem = std.mem; const mem = std.mem;
const Decl = @import("../Module.zig").Decl; const Module = @import("../Module.zig");
const Decl = Module.Decl;
const Inst = @import("../ir.zig").Inst; const Inst = @import("../ir.zig").Inst;
const Type = @import("../type.zig").Type; const Type = @import("../type.zig").Type;
const Value = @import("../value.zig").Value; const Value = @import("../value.zig").Value;

View File

@ -1,196 +0,0 @@
#include "cache_hash.hpp"
#include "os.hpp"
#include "compiler.hpp"
#include <stdio.h>
Error get_compiler_id(Buf **result) {
static Buf saved_compiler_id = BUF_INIT;
if (saved_compiler_id.list.length != 0) {
*result = &saved_compiler_id;
return ErrorNone;
}
Error err;
Buf *manifest_dir = buf_alloc();
os_path_join(get_global_cache_dir(), buf_create_from_str("exe"), manifest_dir);
CacheHash cache_hash;
CacheHash *ch = &cache_hash;
cache_init(ch, manifest_dir);
Buf self_exe_path = BUF_INIT;
if ((err = os_self_exe_path(&self_exe_path)))
return err;
cache_file(ch, &self_exe_path);
buf_resize(&saved_compiler_id, 0);
if ((err = cache_hit(ch, &saved_compiler_id))) {
if (err != ErrorInvalidFormat)
return err;
}
if (buf_len(&saved_compiler_id) != 0) {
cache_release(ch);
*result = &saved_compiler_id;
return ErrorNone;
}
ZigList<Buf *> lib_paths = {};
if ((err = os_self_exe_shared_libs(lib_paths)))
return err;
#if defined(ZIG_OS_DARWIN)
    // On macOS, only add the self exe path (the first entry).
Buf *lib_path = lib_paths.at(0);
if ((err = cache_add_file(ch, lib_path)))
return err;
#else
for (size_t i = 0; i < lib_paths.length; i += 1) {
Buf *lib_path = lib_paths.at(i);
if ((err = cache_add_file(ch, lib_path)))
return err;
}
#endif
if ((err = cache_final(ch, &saved_compiler_id)))
return err;
cache_release(ch);
*result = &saved_compiler_id;
return ErrorNone;
}
static bool test_zig_install_prefix(Buf *test_path, Buf *out_zig_lib_dir) {
{
Buf *test_zig_dir = buf_sprintf("%s" OS_SEP "lib" OS_SEP "zig", buf_ptr(test_path));
Buf *test_index_file = buf_sprintf("%s" OS_SEP "std" OS_SEP "std.zig", buf_ptr(test_zig_dir));
int err;
bool exists;
if ((err = os_file_exists(test_index_file, &exists))) {
exists = false;
}
if (exists) {
buf_init_from_buf(out_zig_lib_dir, test_zig_dir);
return true;
}
}
// Also try without "zig"
{
Buf *test_zig_dir = buf_sprintf("%s" OS_SEP "lib", buf_ptr(test_path));
Buf *test_index_file = buf_sprintf("%s" OS_SEP "std" OS_SEP "std.zig", buf_ptr(test_zig_dir));
int err;
bool exists;
if ((err = os_file_exists(test_index_file, &exists))) {
exists = false;
}
if (exists) {
buf_init_from_buf(out_zig_lib_dir, test_zig_dir);
return true;
}
}
return false;
}
static int find_zig_lib_dir(Buf *out_path) {
int err;
Buf self_exe_path = BUF_INIT;
buf_resize(&self_exe_path, 0);
if (!(err = os_self_exe_path(&self_exe_path))) {
Buf *cur_path = &self_exe_path;
for (;;) {
Buf *test_dir = buf_alloc();
os_path_dirname(cur_path, test_dir);
if (buf_eql_buf(test_dir, cur_path)) {
break;
}
if (test_zig_install_prefix(test_dir, out_path)) {
return 0;
}
cur_path = test_dir;
}
}
return ErrorFileNotFound;
}
Buf *get_zig_lib_dir(void) {
static Buf saved_lib_dir = BUF_INIT;
if (saved_lib_dir.list.length != 0)
return &saved_lib_dir;
buf_resize(&saved_lib_dir, 0);
int err;
if ((err = find_zig_lib_dir(&saved_lib_dir))) {
fprintf(stderr, "Unable to find zig lib directory\n");
exit(EXIT_FAILURE);
}
return &saved_lib_dir;
}
Buf *get_zig_std_dir(Buf *zig_lib_dir) {
static Buf saved_std_dir = BUF_INIT;
if (saved_std_dir.list.length != 0)
return &saved_std_dir;
buf_resize(&saved_std_dir, 0);
os_path_join(zig_lib_dir, buf_create_from_str("std"), &saved_std_dir);
return &saved_std_dir;
}
Buf *get_zig_special_dir(Buf *zig_lib_dir) {
static Buf saved_special_dir = BUF_INIT;
if (saved_special_dir.list.length != 0)
return &saved_special_dir;
buf_resize(&saved_special_dir, 0);
os_path_join(get_zig_std_dir(zig_lib_dir), buf_sprintf("special"), &saved_special_dir);
return &saved_special_dir;
}
Buf *get_global_cache_dir(void) {
static Buf saved_global_cache_dir = BUF_INIT;
if (saved_global_cache_dir.list.length != 0)
return &saved_global_cache_dir;
buf_resize(&saved_global_cache_dir, 0);
Buf app_data_dir = BUF_INIT;
Error err;
if ((err = os_get_app_data_dir(&app_data_dir, "zig"))) {
fprintf(stderr, "Unable to get application data dir: %s\n", err_str(err));
exit(1);
}
os_path_join(&app_data_dir, buf_create_from_str("stage1"), &saved_global_cache_dir);
buf_deinit(&app_data_dir);
return &saved_global_cache_dir;
}
FileExt classify_file_ext(const char *filename_ptr, size_t filename_len) {
if (mem_ends_with_str(filename_ptr, filename_len, ".c")) {
return FileExtC;
} else if (mem_ends_with_str(filename_ptr, filename_len, ".C") ||
mem_ends_with_str(filename_ptr, filename_len, ".cc") ||
mem_ends_with_str(filename_ptr, filename_len, ".cpp") ||
mem_ends_with_str(filename_ptr, filename_len, ".cxx"))
{
return FileExtCpp;
} else if (mem_ends_with_str(filename_ptr, filename_len, ".ll")) {
return FileExtLLVMIr;
} else if (mem_ends_with_str(filename_ptr, filename_len, ".bc")) {
return FileExtLLVMBitCode;
} else if (mem_ends_with_str(filename_ptr, filename_len, ".s") ||
mem_ends_with_str(filename_ptr, filename_len, ".S"))
{
return FileExtAsm;
}
// TODO look for .so, .so.X, .so.X.Y, .so.X.Y.Z
return FileExtUnknown;
}

View File

@ -1,24 +0,0 @@
/*
* Copyright (c) 2018 Andrew Kelley
*
* This file is part of zig, which is MIT licensed.
* See http://opensource.org/licenses/MIT
*/
#ifndef ZIG_COMPILER_HPP
#define ZIG_COMPILER_HPP
#include "all_types.hpp"
Error get_compiler_id(Buf **result);
Buf *get_zig_lib_dir(void);
Buf *get_zig_special_dir(Buf *zig_lib_dir);
Buf *get_zig_std_dir(Buf *zig_lib_dir);
Buf *get_global_cache_dir(void);
FileExt classify_file_ext(const char *filename_ptr, size_t filename_len);
#endif

View File

@ -1,3 +1,6 @@
pub const have_llvm = true;
pub const version: []const u8 = "@ZIG_VERSION@"; pub const version: []const u8 = "@ZIG_VERSION@";
pub const log_scopes: []const []const u8 = &[_][]const u8{}; pub const log_scopes: []const []const u8 = &[_][]const u8{};
pub const zir_dumps: []const []const u8 = &[_][]const u8{};
pub const enable_tracy = false; pub const enable_tracy = false;
pub const is_stage1 = true;

View File

@ -1,392 +0,0 @@
/*
* Copyright (c) 2019 Andrew Kelley
*
* This file is part of zig, which is MIT licensed.
* See http://opensource.org/licenses/MIT
*/
#include "glibc.hpp"
#include "compiler.hpp"
#include "cache_hash.hpp"
#include "codegen.hpp"
static const ZigGLibCLib glibc_libs[] = {
{"c", 6},
{"m", 6},
{"pthread", 0},
{"dl", 2},
{"rt", 1},
{"ld", 2},
{"util", 1},
};
Error glibc_load_metadata(ZigGLibCAbi **out_result, Buf *zig_lib_dir, bool verbose) {
Error err;
ZigGLibCAbi *glibc_abi = heap::c_allocator.create<ZigGLibCAbi>();
glibc_abi->vers_txt_path = buf_sprintf("%s" OS_SEP "libc" OS_SEP "glibc" OS_SEP "vers.txt", buf_ptr(zig_lib_dir));
glibc_abi->fns_txt_path = buf_sprintf("%s" OS_SEP "libc" OS_SEP "glibc" OS_SEP "fns.txt", buf_ptr(zig_lib_dir));
glibc_abi->abi_txt_path = buf_sprintf("%s" OS_SEP "libc" OS_SEP "glibc" OS_SEP "abi.txt", buf_ptr(zig_lib_dir));
glibc_abi->version_table.init(16);
Buf *vers_txt_contents = buf_alloc();
if ((err = os_fetch_file_path(glibc_abi->vers_txt_path, vers_txt_contents))) {
if (verbose) {
fprintf(stderr, "Unable to read %s: %s\n", buf_ptr(glibc_abi->vers_txt_path), err_str(err));
}
return err;
}
Buf *fns_txt_contents = buf_alloc();
if ((err = os_fetch_file_path(glibc_abi->fns_txt_path, fns_txt_contents))) {
if (verbose) {
fprintf(stderr, "Unable to read %s: %s\n", buf_ptr(glibc_abi->fns_txt_path), err_str(err));
}
return err;
}
Buf *abi_txt_contents = buf_alloc();
if ((err = os_fetch_file_path(glibc_abi->abi_txt_path, abi_txt_contents))) {
if (verbose) {
fprintf(stderr, "Unable to read %s: %s\n", buf_ptr(glibc_abi->abi_txt_path), err_str(err));
}
return err;
}
{
SplitIterator it = memSplit(buf_to_slice(vers_txt_contents), str("\r\n"));
for (;;) {
Optional<Slice<uint8_t>> opt_component = SplitIterator_next(&it);
if (!opt_component.is_some) break;
Buf *ver_buf = buf_create_from_slice(opt_component.value);
Stage2SemVer *this_ver = glibc_abi->all_versions.add_one();
if ((err = target_parse_glibc_version(this_ver, buf_ptr(ver_buf)))) {
if (verbose) {
fprintf(stderr, "Unable to parse glibc version '%s': %s\n", buf_ptr(ver_buf), err_str(err));
}
return err;
}
}
}
{
SplitIterator it = memSplit(buf_to_slice(fns_txt_contents), str("\r\n"));
for (;;) {
Optional<Slice<uint8_t>> opt_component = SplitIterator_next(&it);
if (!opt_component.is_some) break;
SplitIterator line_it = memSplit(opt_component.value, str(" "));
Optional<Slice<uint8_t>> opt_fn_name = SplitIterator_next(&line_it);
if (!opt_fn_name.is_some) {
if (verbose) {
fprintf(stderr, "%s: Expected function name\n", buf_ptr(glibc_abi->fns_txt_path));
}
return ErrorInvalidFormat;
}
Optional<Slice<uint8_t>> opt_lib_name = SplitIterator_next(&line_it);
if (!opt_lib_name.is_some) {
if (verbose) {
fprintf(stderr, "%s: Expected lib name\n", buf_ptr(glibc_abi->fns_txt_path));
}
return ErrorInvalidFormat;
}
Buf *this_fn_name = buf_create_from_slice(opt_fn_name.value);
Buf *this_lib_name = buf_create_from_slice(opt_lib_name.value);
glibc_abi->all_functions.append({ this_fn_name, glibc_lib_find(buf_ptr(this_lib_name)) });
}
}
{
SplitIterator it = memSplit(buf_to_slice(abi_txt_contents), str("\r\n"));
ZigGLibCVerList *ver_list_base = nullptr;
int line_num = 0;
for (;;) {
if (ver_list_base == nullptr) {
line_num += 1;
Optional<Slice<uint8_t>> opt_line = SplitIterator_next_separate(&it);
if (!opt_line.is_some) break;
ver_list_base = heap::c_allocator.allocate<ZigGLibCVerList>(glibc_abi->all_functions.length);
SplitIterator line_it = memSplit(opt_line.value, str(" "));
for (;;) {
ZigTarget *target = heap::c_allocator.create<ZigTarget>();
Optional<Slice<uint8_t>> opt_target = SplitIterator_next(&line_it);
if (!opt_target.is_some) break;
SplitIterator component_it = memSplit(opt_target.value, str("-"));
Optional<Slice<uint8_t>> opt_arch = SplitIterator_next(&component_it);
assert(opt_arch.is_some);
Optional<Slice<uint8_t>> opt_os = SplitIterator_next(&component_it);
assert(opt_os.is_some); // it's always "linux" so we ignore it
Optional<Slice<uint8_t>> opt_abi = SplitIterator_next(&component_it);
assert(opt_abi.is_some);
err = target_parse_arch(&target->arch, (char*)opt_arch.value.ptr, opt_arch.value.len);
assert(err == ErrorNone);
target->os = OsLinux;
err = target_parse_abi(&target->abi, (char*)opt_abi.value.ptr, opt_abi.value.len);
if (err != ErrorNone) {
fprintf(stderr, "Error parsing %s:%d: %s\n", buf_ptr(glibc_abi->abi_txt_path),
line_num, err_str(err));
fprintf(stderr, "arch: '%.*s', os: '%.*s', abi: '%.*s'\n",
(int)opt_arch.value.len, (const char*)opt_arch.value.ptr,
(int)opt_os.value.len, (const char*)opt_os.value.ptr,
(int)opt_abi.value.len, (const char*)opt_abi.value.ptr);
fprintf(stderr, "parsed from target: '%.*s'\n",
(int)opt_target.value.len, (const char*)opt_target.value.ptr);
fprintf(stderr, "parsed from line:\n%.*s\n", (int)opt_line.value.len, opt_line.value.ptr);
fprintf(stderr, "Zig installation appears to be corrupted.\n");
exit(1);
}
glibc_abi->version_table.put(target, ver_list_base);
}
continue;
}
for (size_t fn_i = 0; fn_i < glibc_abi->all_functions.length; fn_i += 1) {
ZigGLibCVerList *ver_list = &ver_list_base[fn_i];
line_num += 1;
Optional<Slice<uint8_t>> opt_line = SplitIterator_next_separate(&it);
assert(opt_line.is_some);
SplitIterator line_it = memSplit(opt_line.value, str(" "));
for (;;) {
Optional<Slice<uint8_t>> opt_ver = SplitIterator_next(&line_it);
if (!opt_ver.is_some) break;
assert(ver_list->len < 8); // increase the array len in the type
unsigned long ver_index = strtoul(buf_ptr(buf_create_from_slice(opt_ver.value)), nullptr, 10);
assert(ver_index < 255); // use a bigger integer in the type
ver_list->versions[ver_list->len] = ver_index;
ver_list->len += 1;
}
}
ver_list_base = nullptr;
}
}
*out_result = glibc_abi;
return ErrorNone;
}
Error glibc_build_dummies_and_maps(CodeGen *g, const ZigGLibCAbi *glibc_abi, const ZigTarget *target,
Buf **out_dir, bool verbose, Stage2ProgressNode *progress_node)
{
Error err;
Buf *cache_dir = get_global_cache_dir();
CacheHash *cache_hash = heap::c_allocator.create<CacheHash>();
Buf *manifest_dir = buf_sprintf("%s" OS_SEP CACHE_HASH_SUBDIR, buf_ptr(cache_dir));
cache_init(cache_hash, manifest_dir);
Buf *compiler_id;
if ((err = get_compiler_id(&compiler_id))) {
if (verbose) {
fprintf(stderr, "unable to get compiler id: %s\n", err_str(err));
}
return err;
}
cache_buf(cache_hash, compiler_id);
cache_int(cache_hash, target->arch);
cache_int(cache_hash, target->abi);
cache_int(cache_hash, target->glibc_or_darwin_version->major);
cache_int(cache_hash, target->glibc_or_darwin_version->minor);
cache_int(cache_hash, target->glibc_or_darwin_version->patch);
Buf digest = BUF_INIT;
buf_resize(&digest, 0);
if ((err = cache_hit(cache_hash, &digest))) {
// Treat an invalid format error as a cache miss.
if (err != ErrorInvalidFormat)
return err;
}
// We should always get a cache hit because there are no
// files in the input hash.
assert(buf_len(&digest) != 0);
Buf *dummy_dir = buf_alloc();
os_path_join(manifest_dir, &digest, dummy_dir);
if ((err = os_make_path(dummy_dir)))
return err;
Buf *test_if_exists_path = buf_alloc();
os_path_join(dummy_dir, buf_create_from_str("ok"), test_if_exists_path);
bool hit;
if ((err = os_file_exists(test_if_exists_path, &hit)))
return err;
if (hit) {
*out_dir = dummy_dir;
return ErrorNone;
}
ZigGLibCVerList *ver_list_base = glibc_abi->version_table.get(target);
uint8_t target_ver_index = 0;
for (;target_ver_index < glibc_abi->all_versions.length; target_ver_index += 1) {
const Stage2SemVer *this_ver = &glibc_abi->all_versions.at(target_ver_index);
if (this_ver->major == target->glibc_or_darwin_version->major &&
this_ver->minor == target->glibc_or_darwin_version->minor &&
this_ver->patch == target->glibc_or_darwin_version->patch)
{
break;
}
}
if (target_ver_index == glibc_abi->all_versions.length) {
if (verbose) {
fprintf(stderr, "Unrecognized glibc version: %d.%d.%d\n",
target->glibc_or_darwin_version->major,
target->glibc_or_darwin_version->minor,
target->glibc_or_darwin_version->patch);
}
return ErrorUnknownABI;
}
Buf *map_file_path = buf_sprintf("%s" OS_SEP "all.map", buf_ptr(dummy_dir));
Buf *map_contents = buf_alloc();
for (uint8_t ver_i = 0; ver_i < glibc_abi->all_versions.length; ver_i += 1) {
const Stage2SemVer *ver = &glibc_abi->all_versions.at(ver_i);
if (ver->patch == 0) {
buf_appendf(map_contents, "GLIBC_%d.%d { };\n", ver->major, ver->minor);
} else {
buf_appendf(map_contents, "GLIBC_%d.%d.%d { };\n", ver->major, ver->minor, ver->patch);
}
}
if ((err = os_write_file(map_file_path, map_contents))) {
if (verbose) {
            fprintf(stderr, "unable to write %s: %s\n", buf_ptr(map_file_path), err_str(err));
}
return err;
}
for (size_t lib_i = 0; lib_i < array_length(glibc_libs); lib_i += 1) {
const ZigGLibCLib *lib = &glibc_libs[lib_i];
Buf *zig_file_path = buf_sprintf("%s" OS_SEP "%s.zig", buf_ptr(dummy_dir), lib->name);
Buf *zig_body = buf_alloc();
Buf *zig_footer = buf_alloc();
buf_appendf(zig_body, "comptime {\n");
buf_appendf(zig_body, " asm (\n");
for (size_t fn_i = 0; fn_i < glibc_abi->all_functions.length; fn_i += 1) {
const ZigGLibCFn *libc_fn = &glibc_abi->all_functions.at(fn_i);
if (libc_fn->lib != lib) continue;
ZigGLibCVerList *ver_list = &ver_list_base[fn_i];
            // Pick the default symbol version:
            // - If there are no versions, don't emit the symbol at all.
            // - Otherwise take the greatest version that is <= the target version.
            // - If none of the versions is <= the target version, don't pick any
            //   default version.
if (ver_list->len == 0) continue;
uint8_t chosen_def_ver_index = 255;
for (uint8_t ver_i = 0; ver_i < ver_list->len; ver_i += 1) {
uint8_t ver_index = ver_list->versions[ver_i];
if ((chosen_def_ver_index == 255 || ver_index > chosen_def_ver_index) &&
target_ver_index >= ver_index)
{
chosen_def_ver_index = ver_index;
}
}
for (uint8_t ver_i = 0; ver_i < ver_list->len; ver_i += 1) {
uint8_t ver_index = ver_list->versions[ver_i];
Buf *stub_name;
const Stage2SemVer *ver = &glibc_abi->all_versions.at(ver_index);
const char *sym_name = buf_ptr(libc_fn->name);
if (ver->patch == 0) {
stub_name = buf_sprintf("%s_%d_%d", sym_name, ver->major, ver->minor);
} else {
stub_name = buf_sprintf("%s_%d_%d_%d", sym_name, ver->major, ver->minor, ver->patch);
}
buf_appendf(zig_footer, "export fn %s() void {}\n", buf_ptr(stub_name));
// Default symbol version definition vs normal symbol version definition
const char *at_sign_str = (chosen_def_ver_index != 255 &&
ver_index == chosen_def_ver_index) ? "@@" : "@";
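                // For a hypothetical symbol `exit` at GLIBC_2.2.5 this generates, e.g.:
                //   .symver exit_2_2_5, exit@@GLIBC_2.2.5
                // (with a single '@' when this version is not the chosen default).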
if (ver->patch == 0) {
buf_appendf(zig_body, " \\\\ .symver %s, %s%sGLIBC_%d.%d\n",
buf_ptr(stub_name), sym_name, at_sign_str, ver->major, ver->minor);
} else {
buf_appendf(zig_body, " \\\\ .symver %s, %s%sGLIBC_%d.%d.%d\n",
buf_ptr(stub_name), sym_name, at_sign_str, ver->major, ver->minor, ver->patch);
}
// Hide the stub to keep the symbol table clean
buf_appendf(zig_body, " \\\\ .hidden %s\n", buf_ptr(stub_name));
}
}
buf_appendf(zig_body, " );\n");
buf_appendf(zig_body, "}\n");
buf_append_buf(zig_body, zig_footer);
if ((err = os_write_file(zig_file_path, zig_body))) {
if (verbose) {
                fprintf(stderr, "unable to write %s: %s\n", buf_ptr(zig_file_path), err_str(err));
}
return err;
}
bool is_ld = (strcmp(lib->name, "ld") == 0);
CodeGen *child_gen = create_child_codegen(g, zig_file_path, OutTypeLib, nullptr, lib->name, progress_node);
codegen_set_lib_version(child_gen, true, lib->sover, 0, 0);
child_gen->is_dynamic = true;
child_gen->is_dummy_so = true;
child_gen->version_script_path = map_file_path;
child_gen->enable_cache = false;
child_gen->output_dir = dummy_dir;
if (is_ld) {
assert(g->zig_target->standard_dynamic_linker_path != nullptr);
Buf *ld_basename = buf_alloc();
os_path_split(buf_create_from_str(g->zig_target->standard_dynamic_linker_path),
nullptr, ld_basename);
child_gen->override_soname = ld_basename;
}
codegen_build_and_link(child_gen);
}
if ((err = os_write_file(test_if_exists_path, buf_alloc()))) {
if (verbose) {
            fprintf(stderr, "unable to write %s: %s\n", buf_ptr(test_if_exists_path), err_str(err));
}
return err;
}
*out_dir = dummy_dir;
return ErrorNone;
}
uint32_t hash_glibc_target(const ZigTarget *x) {
return x->arch * (uint32_t)3250106448 +
x->os * (uint32_t)542534372 +
x->abi * (uint32_t)59162639;
}
bool eql_glibc_target(const ZigTarget *a, const ZigTarget *b) {
return a->arch == b->arch &&
a->os == b->os &&
a->abi == b->abi;
}
size_t glibc_lib_count(void) {
return array_length(glibc_libs);
}
const ZigGLibCLib *glibc_lib_enum(size_t index) {
assert(index < array_length(glibc_libs));
return &glibc_libs[index];
}
const ZigGLibCLib *glibc_lib_find(const char *name) {
for (size_t i = 0; i < array_length(glibc_libs); i += 1) {
if (strcmp(glibc_libs[i].name, name) == 0) {
return &glibc_libs[i];
}
}
return nullptr;
}

View File

@ -1,50 +0,0 @@
/*
* Copyright (c) 2019 Andrew Kelley
*
* This file is part of zig, which is MIT licensed.
* See http://opensource.org/licenses/MIT
*/
#ifndef ZIG_GLIBC_HPP
#define ZIG_GLIBC_HPP
#include "all_types.hpp"
struct ZigGLibCLib {
const char *name;
uint8_t sover;
};
struct ZigGLibCFn {
Buf *name;
const ZigGLibCLib *lib;
};
struct ZigGLibCVerList {
    uint8_t versions[8]; // 8 is just the max number; we know statically that it's big enough
uint8_t len;
};
uint32_t hash_glibc_target(const ZigTarget *x);
bool eql_glibc_target(const ZigTarget *a, const ZigTarget *b);
struct ZigGLibCAbi {
Buf *abi_txt_path;
Buf *vers_txt_path;
Buf *fns_txt_path;
ZigList<Stage2SemVer> all_versions;
ZigList<ZigGLibCFn> all_functions;
    // The value is a pointer to all_functions.length ZigGLibCVerList items; item i is
    // the version list for all_functions.at(i), and its entries index into all_versions.
HashMap<const ZigTarget *, ZigGLibCVerList *, hash_glibc_target, eql_glibc_target> version_table;
};
Error glibc_load_metadata(ZigGLibCAbi **out_result, Buf *zig_lib_dir, bool verbose);
Error glibc_build_dummies_and_maps(CodeGen *codegen, const ZigGLibCAbi *glibc_abi, const ZigTarget *target,
Buf **out_dir, bool verbose, Stage2ProgressNode *progress_node);
size_t glibc_lib_count(void);
const ZigGLibCLib *glibc_lib_enum(size_t index);
const ZigGLibCLib *glibc_lib_find(const char *name);
#endif

956
src/glibc.zig Normal file
View File

@ -0,0 +1,956 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const mem = std.mem;
const path = std.fs.path;
const assert = std.debug.assert;
const target_util = @import("target.zig");
const Compilation = @import("Compilation.zig");
const build_options = @import("build_options");
const trace = @import("tracy.zig").trace;
const Cache = @import("Cache.zig");
const Package = @import("Package.zig");
pub const Lib = struct {
name: []const u8,
sover: u8,
};
pub const Fn = struct {
name: []const u8,
lib: *const Lib,
};
pub const VerList = struct {
    /// 7 is just the max number; we know statically that it's big enough.
versions: [7]u8,
len: u8,
};
pub const ABI = struct {
all_versions: []const std.builtin.Version,
all_functions: []const Fn,
    /// The value is a pointer to all_functions.len `VerList` items; item i is the version
    /// list for all_functions[i], and its entries are indices into all_versions.
version_table: std.AutoHashMapUnmanaged(target_util.ArchOsAbi, [*]VerList),
arena_state: std.heap.ArenaAllocator.State,
pub fn destroy(abi: *ABI, gpa: *Allocator) void {
abi.version_table.deinit(gpa);
abi.arena_state.promote(gpa).deinit(); // Frees the ABI memory too.
}
};
pub const libs = [_]Lib{
.{ .name = "c", .sover = 6 },
.{ .name = "m", .sover = 6 },
.{ .name = "pthread", .sover = 0 },
.{ .name = "dl", .sover = 2 },
.{ .name = "rt", .sover = 1 },
.{ .name = "ld", .sover = 2 },
.{ .name = "util", .sover = 1 },
};
pub const LoadMetaDataError = error{
    /// The files that ship with the Zig compiler could not be read, or contained malformed data.
ZigInstallationCorrupt,
OutOfMemory,
};
/// This function will emit a log error when there is a problem with the zig installation and then return
/// `error.ZigInstallationCorrupt`.
pub fn loadMetaData(gpa: *Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError!*ABI {
const tracy = trace(@src());
defer tracy.end();
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
errdefer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
var all_versions = std.ArrayListUnmanaged(std.builtin.Version){};
var all_functions = std.ArrayListUnmanaged(Fn){};
var version_table = std.AutoHashMapUnmanaged(target_util.ArchOsAbi, [*]VerList){};
errdefer version_table.deinit(gpa);
var glibc_dir = zig_lib_dir.openDir("libc" ++ path.sep_str ++ "glibc", .{}) catch |err| {
std.log.err("unable to open glibc dir: {}", .{@errorName(err)});
return error.ZigInstallationCorrupt;
};
defer glibc_dir.close();
const max_txt_size = 500 * 1024; // Bigger than this and something is definitely borked.
const vers_txt_contents = glibc_dir.readFileAlloc(gpa, "vers.txt", max_txt_size) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => {
std.log.err("unable to read vers.txt: {}", .{@errorName(err)});
return error.ZigInstallationCorrupt;
},
};
defer gpa.free(vers_txt_contents);
// Arena allocated because the result contains references to function names.
const fns_txt_contents = glibc_dir.readFileAlloc(arena, "fns.txt", max_txt_size) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => {
std.log.err("unable to read fns.txt: {}", .{@errorName(err)});
return error.ZigInstallationCorrupt;
},
};
const abi_txt_contents = glibc_dir.readFileAlloc(gpa, "abi.txt", max_txt_size) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => {
std.log.err("unable to read abi.txt: {}", .{@errorName(err)});
return error.ZigInstallationCorrupt;
},
};
defer gpa.free(abi_txt_contents);
{
var it = mem.tokenize(vers_txt_contents, "\r\n");
var line_i: usize = 1;
while (it.next()) |line| : (line_i += 1) {
const prefix = "GLIBC_";
if (!mem.startsWith(u8, line, prefix)) {
std.log.err("vers.txt:{}: expected 'GLIBC_' prefix", .{line_i});
return error.ZigInstallationCorrupt;
}
const adjusted_line = line[prefix.len..];
const ver = std.builtin.Version.parse(adjusted_line) catch |err| {
std.log.err("vers.txt:{}: unable to parse glibc version '{}': {}", .{ line_i, line, @errorName(err) });
return error.ZigInstallationCorrupt;
};
try all_versions.append(arena, ver);
}
}
{
var file_it = mem.tokenize(fns_txt_contents, "\r\n");
var line_i: usize = 1;
while (file_it.next()) |line| : (line_i += 1) {
var line_it = mem.tokenize(line, " ");
const fn_name = line_it.next() orelse {
std.log.err("fns.txt:{}: expected function name", .{line_i});
return error.ZigInstallationCorrupt;
};
const lib_name = line_it.next() orelse {
std.log.err("fns.txt:{}: expected library name", .{line_i});
return error.ZigInstallationCorrupt;
};
const lib = findLib(lib_name) orelse {
std.log.err("fns.txt:{}: unknown library name: {}", .{ line_i, lib_name });
return error.ZigInstallationCorrupt;
};
try all_functions.append(arena, .{
.name = fn_name,
.lib = lib,
});
}
}
{
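        // abi.txt is organized as groups: a line of space-separated target triples
        // (e.g. "x86_64-linux-gnu"; illustrative), followed by one line per function
        // in fns.txt listing the indices (into vers.txt) of the glibc versions that
        // provide it for those targets.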
var file_it = mem.split(abi_txt_contents, "\n");
var line_i: usize = 0;
while (true) {
const ver_list_base: []VerList = blk: {
const line = file_it.next() orelse break;
if (line.len == 0) break;
line_i += 1;
const ver_list_base = try arena.alloc(VerList, all_functions.items.len);
var line_it = mem.tokenize(line, " ");
while (line_it.next()) |target_string| {
var component_it = mem.tokenize(target_string, "-");
const arch_name = component_it.next() orelse {
std.log.err("abi.txt:{}: expected arch name", .{line_i});
return error.ZigInstallationCorrupt;
};
const os_name = component_it.next() orelse {
std.log.err("abi.txt:{}: expected OS name", .{line_i});
return error.ZigInstallationCorrupt;
};
const abi_name = component_it.next() orelse {
std.log.err("abi.txt:{}: expected ABI name", .{line_i});
return error.ZigInstallationCorrupt;
};
const arch_tag = std.meta.stringToEnum(std.Target.Cpu.Arch, arch_name) orelse {
std.log.err("abi.txt:{}: unrecognized arch: '{}'", .{ line_i, arch_name });
return error.ZigInstallationCorrupt;
};
if (!mem.eql(u8, os_name, "linux")) {
std.log.err("abi.txt:{}: expected OS 'linux', found '{}'", .{ line_i, os_name });
return error.ZigInstallationCorrupt;
}
const abi_tag = std.meta.stringToEnum(std.Target.Abi, abi_name) orelse {
std.log.err("abi.txt:{}: unrecognized ABI: '{}'", .{ line_i, abi_name });
return error.ZigInstallationCorrupt;
};
const triple = target_util.ArchOsAbi{
.arch = arch_tag,
.os = .linux,
.abi = abi_tag,
};
try version_table.put(gpa, triple, ver_list_base.ptr);
}
break :blk ver_list_base;
};
for (ver_list_base) |*ver_list| {
const line = file_it.next() orelse {
std.log.err("abi.txt:{}: missing version number line", .{line_i});
return error.ZigInstallationCorrupt;
};
line_i += 1;
ver_list.* = .{
.versions = undefined,
.len = 0,
};
var line_it = mem.tokenize(line, " ");
while (line_it.next()) |version_index_string| {
if (ver_list.len >= ver_list.versions.len) {
// If this happens with legit data, increase the array len in the type.
std.log.err("abi.txt:{}: too many versions", .{line_i});
return error.ZigInstallationCorrupt;
}
const version_index = std.fmt.parseInt(u8, version_index_string, 10) catch |err| {
// If this happens with legit data, increase the size of the integer type in the struct.
std.log.err("abi.txt:{}: unable to parse version: {}", .{ line_i, @errorName(err) });
return error.ZigInstallationCorrupt;
};
ver_list.versions[ver_list.len] = version_index;
ver_list.len += 1;
}
}
}
}
const abi = try arena.create(ABI);
abi.* = .{
.all_versions = all_versions.items,
.all_functions = all_functions.items,
.version_table = version_table,
.arena_state = arena_allocator.state,
};
return abi;
}
fn findLib(name: []const u8) ?*const Lib {
for (libs) |*lib| {
if (mem.eql(u8, lib.name, name)) {
return lib;
}
}
return null;
}
pub const CRTFile = enum {
crti_o,
crtn_o,
scrt1_o,
libc_nonshared_a,
};
pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
const gpa = comp.gpa;
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
switch (crt_file) {
.crti_o => {
var args = std.ArrayList([]const u8).init(arena);
try add_include_dirs(comp, arena, &args);
try args.appendSlice(&[_][]const u8{
"-D_LIBC_REENTRANT",
"-include",
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-modules.h"),
"-DMODULE_NAME=libc",
"-Wno-nonportable-include-path",
"-include",
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-symbols.h"),
"-DTOP_NAMESPACE=glibc",
"-DASSEMBLER",
"-g",
"-Wa,--noexecstack",
});
return comp.build_crt_file("crti", .Obj, &[1]Compilation.CSourceFile{
.{
.src_path = try start_asm_path(comp, arena, "crti.S"),
.extra_flags = args.items,
},
});
},
.crtn_o => {
var args = std.ArrayList([]const u8).init(arena);
try add_include_dirs(comp, arena, &args);
try args.appendSlice(&[_][]const u8{
"-D_LIBC_REENTRANT",
"-DMODULE_NAME=libc",
"-DTOP_NAMESPACE=glibc",
"-DASSEMBLER",
"-g",
"-Wa,--noexecstack",
});
return comp.build_crt_file("crtn", .Obj, &[1]Compilation.CSourceFile{
.{
.src_path = try start_asm_path(comp, arena, "crtn.S"),
.extra_flags = args.items,
},
});
},
.scrt1_o => {
const start_os: Compilation.CSourceFile = blk: {
var args = std.ArrayList([]const u8).init(arena);
try add_include_dirs(comp, arena, &args);
try args.appendSlice(&[_][]const u8{
"-D_LIBC_REENTRANT",
"-include",
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-modules.h"),
"-DMODULE_NAME=libc",
"-Wno-nonportable-include-path",
"-include",
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-symbols.h"),
"-DPIC",
"-DSHARED",
"-DTOP_NAMESPACE=glibc",
"-DASSEMBLER",
"-g",
"-Wa,--noexecstack",
});
break :blk .{
.src_path = try start_asm_path(comp, arena, "start.S"),
.extra_flags = args.items,
};
};
const abi_note_o: Compilation.CSourceFile = blk: {
var args = std.ArrayList([]const u8).init(arena);
try args.appendSlice(&[_][]const u8{
"-I",
try lib_path(comp, arena, lib_libc_glibc ++ "csu"),
});
try add_include_dirs(comp, arena, &args);
try args.appendSlice(&[_][]const u8{
"-D_LIBC_REENTRANT",
"-DMODULE_NAME=libc",
"-DTOP_NAMESPACE=glibc",
"-DASSEMBLER",
"-g",
"-Wa,--noexecstack",
});
break :blk .{
.src_path = try lib_path(comp, arena, lib_libc_glibc ++ "csu" ++ path.sep_str ++ "abi-note.S"),
.extra_flags = args.items,
};
};
return comp.build_crt_file("Scrt1", .Obj, &[_]Compilation.CSourceFile{ start_os, abi_note_o });
},
.libc_nonshared_a => {
const deps = [_][]const u8{
lib_libc_glibc ++ "stdlib" ++ path.sep_str ++ "atexit.c",
lib_libc_glibc ++ "stdlib" ++ path.sep_str ++ "at_quick_exit.c",
lib_libc_glibc ++ "io" ++ path.sep_str ++ "stat.c",
lib_libc_glibc ++ "io" ++ path.sep_str ++ "fstat.c",
lib_libc_glibc ++ "io" ++ path.sep_str ++ "lstat.c",
lib_libc_glibc ++ "io" ++ path.sep_str ++ "stat64.c",
lib_libc_glibc ++ "io" ++ path.sep_str ++ "fstat64.c",
lib_libc_glibc ++ "io" ++ path.sep_str ++ "lstat64.c",
lib_libc_glibc ++ "io" ++ path.sep_str ++ "fstatat.c",
lib_libc_glibc ++ "io" ++ path.sep_str ++ "fstatat64.c",
lib_libc_glibc ++ "io" ++ path.sep_str ++ "mknod.c",
lib_libc_glibc ++ "io" ++ path.sep_str ++ "mknodat.c",
lib_libc_glibc ++ "nptl" ++ path.sep_str ++ "pthread_atfork.c",
lib_libc_glibc ++ "debug" ++ path.sep_str ++ "stack_chk_fail_local.c",
};
var c_source_files: [deps.len + 1]Compilation.CSourceFile = undefined;
c_source_files[0] = blk: {
var args = std.ArrayList([]const u8).init(arena);
try args.appendSlice(&[_][]const u8{
"-std=gnu11",
"-fgnu89-inline",
"-g",
"-O2",
"-fmerge-all-constants",
"-fno-stack-protector",
"-fmath-errno",
"-fno-stack-protector",
"-I",
try lib_path(comp, arena, lib_libc_glibc ++ "csu"),
});
try add_include_dirs(comp, arena, &args);
try args.appendSlice(&[_][]const u8{
"-DSTACK_PROTECTOR_LEVEL=0",
"-fPIC",
"-fno-stack-protector",
"-ftls-model=initial-exec",
"-D_LIBC_REENTRANT",
"-include",
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-modules.h"),
"-DMODULE_NAME=libc",
"-Wno-nonportable-include-path",
"-include",
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-symbols.h"),
"-DPIC",
"-DLIBC_NONSHARED=1",
"-DTOP_NAMESPACE=glibc",
});
break :blk .{
.src_path = try lib_path(comp, arena, lib_libc_glibc ++ "csu" ++ path.sep_str ++ "elf-init.c"),
.extra_flags = args.items,
};
};
for (deps) |dep, i| {
var args = std.ArrayList([]const u8).init(arena);
try args.appendSlice(&[_][]const u8{
"-std=gnu11",
"-fgnu89-inline",
"-g",
"-O2",
"-fmerge-all-constants",
"-fno-stack-protector",
"-fmath-errno",
"-ftls-model=initial-exec",
"-Wno-ignored-attributes",
});
try add_include_dirs(comp, arena, &args);
try args.appendSlice(&[_][]const u8{
"-D_LIBC_REENTRANT",
"-include",
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-modules.h"),
"-DMODULE_NAME=libc",
"-Wno-nonportable-include-path",
"-include",
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-symbols.h"),
"-DPIC",
"-DLIBC_NONSHARED=1",
"-DTOP_NAMESPACE=glibc",
});
c_source_files[i + 1] = .{
.src_path = try lib_path(comp, arena, dep),
.extra_flags = args.items,
};
}
return comp.build_crt_file("c_nonshared", .Lib, &c_source_files);
},
}
}
fn start_asm_path(comp: *Compilation, arena: *Allocator, basename: []const u8) ![]const u8 {
const arch = comp.getTarget().cpu.arch;
const is_ppc = arch == .powerpc or arch == .powerpc64 or arch == .powerpc64le;
const is_aarch64 = arch == .aarch64 or arch == .aarch64_be;
const is_sparc = arch == .sparc or arch == .sparcel or arch == .sparcv9;
const is_64 = arch.ptrBitWidth() == 64;
const s = path.sep_str;
var result = std.ArrayList(u8).init(arena);
try result.appendSlice(comp.zig_lib_directory.path.?);
try result.appendSlice(s ++ "libc" ++ s ++ "glibc" ++ s ++ "sysdeps" ++ s);
if (is_sparc) {
if (is_64) {
try result.appendSlice("sparc" ++ s ++ "sparc64");
} else {
try result.appendSlice("sparc" ++ s ++ "sparc32");
}
} else if (arch.isARM()) {
try result.appendSlice("arm");
} else if (arch.isMIPS()) {
try result.appendSlice("mips");
} else if (arch == .x86_64) {
try result.appendSlice("x86_64");
} else if (arch == .i386) {
try result.appendSlice("i386");
} else if (is_aarch64) {
try result.appendSlice("aarch64");
} else if (arch.isRISCV()) {
try result.appendSlice("riscv");
} else if (is_ppc) {
if (is_64) {
try result.appendSlice("powerpc" ++ s ++ "powerpc64");
} else {
try result.appendSlice("powerpc" ++ s ++ "powerpc32");
}
}
try result.appendSlice(s);
try result.appendSlice(basename);
return result.items;
}
fn add_include_dirs(comp: *Compilation, arena: *Allocator, args: *std.ArrayList([]const u8)) error{OutOfMemory}!void {
const target = comp.getTarget();
const arch = target.cpu.arch;
const opt_nptl: ?[]const u8 = if (target.os.tag == .linux) "nptl" else "htl";
const glibc = try lib_path(comp, arena, lib_libc ++ "glibc");
const s = path.sep_str;
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "include"));
if (target.os.tag == .linux) {
try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix" ++ s ++ "sysv" ++ s ++ "linux"));
}
if (opt_nptl) |nptl| {
try add_include_dirs_arch(arena, args, arch, nptl, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps"));
}
if (target.os.tag == .linux) {
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++
"unix" ++ s ++ "sysv" ++ s ++ "linux" ++ s ++ "generic"));
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++
"unix" ++ s ++ "sysv" ++ s ++ "linux" ++ s ++ "include"));
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++
"unix" ++ s ++ "sysv" ++ s ++ "linux"));
}
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, lib_libc_glibc ++ "sysdeps", nptl }));
}
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "pthread"));
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix" ++ s ++ "sysv"));
try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix"));
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix"));
try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps"));
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "generic"));
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, lib_libc ++ "glibc" }));
try args.append("-I");
try args.append(try std.fmt.allocPrint(arena, "{}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{}-{}-{}", .{
comp.zig_lib_directory.path.?, @tagName(arch), @tagName(target.os.tag), @tagName(target.abi),
}));
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc ++ "include" ++ s ++ "generic-glibc"));
try args.append("-I");
try args.append(try std.fmt.allocPrint(arena, "{}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{}-linux-any", .{
comp.zig_lib_directory.path.?, @tagName(arch),
}));
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc ++ "include" ++ s ++ "any-linux-any"));
}
fn add_include_dirs_arch(
arena: *Allocator,
args: *std.ArrayList([]const u8),
arch: std.Target.Cpu.Arch,
opt_nptl: ?[]const u8,
dir: []const u8,
) error{OutOfMemory}!void {
const is_x86 = arch == .i386 or arch == .x86_64;
const is_aarch64 = arch == .aarch64 or arch == .aarch64_be;
const is_ppc = arch == .powerpc or arch == .powerpc64 or arch == .powerpc64le;
const is_sparc = arch == .sparc or arch == .sparcel or arch == .sparcv9;
const is_64 = arch.ptrBitWidth() == 64;
const s = path.sep_str;
if (is_x86) {
if (arch == .x86_64) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "x86_64", nptl }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "x86_64" }));
}
} else if (arch == .i386) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "i386", nptl }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "i386" }));
}
}
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "x86", nptl }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "x86" }));
}
} else if (arch.isARM()) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "arm", nptl }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "arm" }));
}
} else if (arch.isMIPS()) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "mips", nptl }));
} else {
if (is_64) {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "mips" ++ s ++ "mips64" }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "mips" ++ s ++ "mips32" }));
}
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "mips" }));
}
} else if (is_sparc) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "sparc", nptl }));
} else {
if (is_64) {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "sparc" ++ s ++ "sparc64" }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "sparc" ++ s ++ "sparc32" }));
}
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "sparc" }));
}
} else if (is_aarch64) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "aarch64", nptl }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "aarch64" }));
}
} else if (is_ppc) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "powerpc", nptl }));
} else {
if (is_64) {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "powerpc" ++ s ++ "powerpc64" }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "powerpc" ++ s ++ "powerpc32" }));
}
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "powerpc" }));
}
} else if (arch.isRISCV()) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "riscv", nptl }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "riscv" }));
}
}
}
fn path_from_lib(comp: *Compilation, arena: *Allocator, sub_path: []const u8) ![]const u8 {
return path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, sub_path });
}
const lib_libc = "libc" ++ path.sep_str;
const lib_libc_glibc = lib_libc ++ "glibc" ++ path.sep_str;
fn lib_path(comp: *Compilation, arena: *Allocator, sub_path: []const u8) ![]const u8 {
return path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, sub_path });
}
pub const BuiltSharedObjects = struct {
lock: Cache.Lock,
dir_path: []u8,
pub fn deinit(self: *BuiltSharedObjects, gpa: *Allocator) void {
self.lock.release();
gpa.free(self.dir_path);
self.* = undefined;
}
};
const all_map_basename = "all.map";
// TODO Turn back on zig fmt when https://github.com/ziglang/zig/issues/5948 is implemented.
// zig fmt: off
pub fn buildSharedObjects(comp: *Compilation) !void {
const tracy = trace(@src());
defer tracy.end();
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
const target = comp.getTarget();
const target_version = target.os.version_range.linux.glibc;
// Use the global cache directory.
var cache_parent: Cache = .{
.gpa = comp.gpa,
.manifest_dir = try comp.global_cache_directory.handle.makeOpenPath("h", .{}),
};
defer cache_parent.manifest_dir.close();
var cache = cache_parent.obtain();
defer cache.deinit();
cache.hash.addBytes(build_options.version);
cache.hash.addBytes(comp.zig_lib_directory.path orelse ".");
cache.hash.add(target.cpu.arch);
cache.hash.add(target.abi);
cache.hash.add(target_version);
const hit = try cache.hit();
const digest = cache.final();
const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest });
// Even if we get a hit, it doesn't guarantee that we finished the job last time.
// We use the presence of an "ok" file to determine if it is a true hit.
var o_directory: Compilation.Directory = .{
.handle = try comp.global_cache_directory.handle.makeOpenPath(o_sub_path, .{}),
.path = try path.join(arena, &[_][]const u8{ comp.global_cache_directory.path.?, o_sub_path }),
};
defer o_directory.handle.close();
const ok_basename = "ok";
const actual_hit = if (hit) blk: {
o_directory.handle.access(ok_basename, .{}) catch |err| switch (err) {
error.FileNotFound => break :blk false,
else => |e| return e,
};
break :blk true;
} else false;
if (!actual_hit) {
const metadata = try loadMetaData(comp.gpa, comp.zig_lib_directory.handle);
defer metadata.destroy(comp.gpa);
const ver_list_base = metadata.version_table.get(.{
.arch = target.cpu.arch,
.os = target.os.tag,
.abi = target.abi,
}) orelse return error.GLibCUnavailableForThisTarget;
const target_ver_index = for (metadata.all_versions) |ver, i| {
switch (ver.order(target_version)) {
.eq => break i,
.lt => continue,
.gt => {
// TODO Expose via compile error mechanism instead of log.
std.log.warn("invalid target glibc version: {}", .{target_version});
return error.InvalidTargetGLibCVersion;
},
}
} else blk: {
const latest_index = metadata.all_versions.len - 1;
std.log.warn("zig cannot build new glibc version {}; providing instead {}", .{
target_version, metadata.all_versions[latest_index],
});
break :blk latest_index;
};
{
var map_contents = std.ArrayList(u8).init(arena);
for (metadata.all_versions) |ver| {
if (ver.patch == 0) {
try map_contents.writer().print("GLIBC_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
} else {
try map_contents.writer().print("GLIBC_{d}.{d}.{d} {{ }};\n", .{ ver.major, ver.minor, ver.patch });
}
}
try o_directory.handle.writeFile(all_map_basename, map_contents.items);
map_contents.deinit(); // The most recent allocation of an arena can be freed :)
}
var zig_body = std.ArrayList(u8).init(comp.gpa);
defer zig_body.deinit();
for (libs) |*lib| {
zig_body.shrinkRetainingCapacity(0);
for (metadata.all_functions) |*libc_fn, fn_i| {
if (libc_fn.lib != lib) continue;
const ver_list = ver_list_base[fn_i];
// Pick the default symbol version:
// - If there are no versions, don't emit the symbol at all.
// - Otherwise take the greatest version that is <= the target version.
// - If none of them is <= the target version, don't pick any default version.
if (ver_list.len == 0) continue;
var chosen_def_ver_index: u8 = 255;
{
var ver_i: u8 = 0;
while (ver_i < ver_list.len) : (ver_i += 1) {
const ver_index = ver_list.versions[ver_i];
if ((chosen_def_ver_index == 255 or ver_index > chosen_def_ver_index) and
target_ver_index >= ver_index)
{
chosen_def_ver_index = ver_index;
}
}
}
{
var ver_i: u8 = 0;
while (ver_i < ver_list.len) : (ver_i += 1) {
// Example:
// .globl _Exit_2_2_5
// .type _Exit_2_2_5, @function;
// .symver _Exit_2_2_5, _Exit@@GLIBC_2.2.5
// .hidden _Exit_2_2_5
// _Exit_2_2_5:
const ver_index = ver_list.versions[ver_i];
const ver = metadata.all_versions[ver_index];
const sym_name = libc_fn.name;
// Default symbol version definition vs normal symbol version definition
const want_two_ats = chosen_def_ver_index != 255 and ver_index == chosen_def_ver_index;
const at_sign_str = "@@"[0 .. @boolToInt(want_two_ats) + @as(usize, 1)];
if (ver.patch == 0) {
const sym_plus_ver = try std.fmt.allocPrint(
arena, "{s}_{d}_{d}",
.{sym_name, ver.major, ver.minor},
);
try zig_body.writer().print(
\\.globl {s}
\\.type {s}, @function;
\\.symver {s}, {s}{s}GLIBC_{d}.{d}
\\.hidden {s}
\\{s}:
\\
, .{
sym_plus_ver,
sym_plus_ver,
sym_plus_ver, sym_name, at_sign_str, ver.major, ver.minor,
sym_plus_ver,
sym_plus_ver,
});
} else {
const sym_plus_ver = try std.fmt.allocPrint(arena, "{s}_{d}_{d}_{d}",
.{sym_name, ver.major, ver.minor, ver.patch},
);
try zig_body.writer().print(
\\.globl {s}
\\.type {s}, @function;
\\.symver {s}, {s}{s}GLIBC_{d}.{d}.{d}
\\.hidden {s}
\\{s}:
\\
, .{
sym_plus_ver,
sym_plus_ver,
sym_plus_ver, sym_name, at_sign_str, ver.major, ver.minor, ver.patch,
sym_plus_ver,
sym_plus_ver,
});
}
}
}
}
var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "pthread", etc.
const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable;
try o_directory.handle.writeFile(asm_file_basename, zig_body.items);
try buildSharedLib(comp, arena, comp.global_cache_directory, o_directory, asm_file_basename, lib);
}
// No need to write the manifest because there are no file inputs associated with this cache hash.
// However we do need to write the ok file now.
if (o_directory.handle.createFile(ok_basename, .{})) |file| {
file.close();
} else |err| {
std.log.warn("glibc shared objects: failed to mark completion: {}", .{@errorName(err)});
}
}
assert(comp.glibc_so_files == null);
comp.glibc_so_files = BuiltSharedObjects{
.lock = cache.toOwnedLock(),
.dir_path = try path.join(comp.gpa, &[_][]const u8{ comp.global_cache_directory.path.?, o_sub_path }),
};
}
// zig fmt: on
fn buildSharedLib(
comp: *Compilation,
arena: *Allocator,
zig_cache_directory: Compilation.Directory,
bin_directory: Compilation.Directory,
asm_file_basename: []const u8,
lib: *const Lib,
) !void {
const tracy = trace(@src());
defer tracy.end();
const emit_bin = Compilation.EmitLoc{
.directory = bin_directory,
.basename = try std.fmt.allocPrint(arena, "lib{s}.so.{d}", .{ lib.name, lib.sover }),
};
const version: std.builtin.Version = .{ .major = lib.sover, .minor = 0, .patch = 0 };
const ld_basename = path.basename(comp.getTarget().standardDynamicLinkerPath().get().?);
const override_soname = if (mem.eql(u8, lib.name, "ld")) ld_basename else null;
const map_file_path = try path.join(arena, &[_][]const u8{ bin_directory.path.?, all_map_basename });
const c_source_files = [1]Compilation.CSourceFile{
.{
.src_path = try path.join(arena, &[_][]const u8{ bin_directory.path.?, asm_file_basename }),
},
};
const sub_compilation = try Compilation.create(comp.gpa, .{
.local_cache_directory = zig_cache_directory,
.global_cache_directory = comp.global_cache_directory,
.zig_lib_directory = comp.zig_lib_directory,
.target = comp.getTarget(),
.root_name = lib.name,
.root_pkg = null,
.output_mode = .Lib,
.link_mode = .Dynamic,
.rand = comp.rand,
.libc_installation = comp.bin_file.options.libc_installation,
.emit_bin = emit_bin,
.optimize_mode = comp.bin_file.options.optimize_mode,
.want_sanitize_c = false,
.want_stack_check = false,
.want_valgrind = false,
.emit_h = null,
.strip = comp.bin_file.options.strip,
.is_native_os = false,
.self_exe_path = comp.self_exe_path,
.verbose_cc = comp.verbose_cc,
.verbose_link = comp.bin_file.options.verbose_link,
.verbose_tokenize = comp.verbose_tokenize,
.verbose_ast = comp.verbose_ast,
.verbose_ir = comp.verbose_ir,
.verbose_llvm_ir = comp.verbose_llvm_ir,
.verbose_cimport = comp.verbose_cimport,
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
.clang_passthrough_mode = comp.clang_passthrough_mode,
.version = version,
.version_script = map_file_path,
.override_soname = override_soname,
.c_source_files = &c_source_files,
.is_compiler_rt_or_libc = true,
});
defer sub_compilation.destroy();
try sub_compilation.updateSubCompilation();
}
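The default-version selection loop in buildSharedObjects reduces to one rule: among the versions a symbol appears in, pick the greatest one that does not exceed the target glibc version, and fall back to "no default" (index 255) when every listed version is newer. A minimal standalone sketch of that rule, using plain u8 version indices instead of the real metadata tables (hypothetical helper, not part of glibc.zig):

const std = @import("std");

/// Sketch only: mirrors the chosen_def_ver_index logic above, where 255 means
/// "no default symbol version chosen".
fn chooseDefaultVersion(versions: []const u8, target_ver_index: u8) u8 {
    var chosen: u8 = 255;
    for (versions) |ver_index| {
        if ((chosen == 255 or ver_index > chosen) and target_ver_index >= ver_index) {
            chosen = ver_index;
        }
    }
    return chosen;
}

pub fn main() void {
    // Greatest version <= target wins; an all-newer list yields no default.
    std.debug.assert(chooseDefaultVersion(&[_]u8{ 1, 3, 7 }, 5) == 3);
    std.debug.assert(chooseDefaultVersion(&[_]u8{ 6, 7 }, 5) == 255);
}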

(File diff suppressed because it is too large.)

src/introspect.zig (new file)
@ -0,0 +1,75 @@
const std = @import("std");
const mem = std.mem;
const fs = std.fs;
const Compilation = @import("Compilation.zig");
/// Returns the Directory for the sub_path that worked, or `null` if none did.
/// The path of the returned Directory is relative to `base`.
/// The handle of the returned Directory is open.
fn testZigInstallPrefix(base_dir: fs.Dir) ?Compilation.Directory {
const test_index_file = "std" ++ fs.path.sep_str ++ "std.zig";
zig_dir: {
// Try lib/zig/std/std.zig
const lib_zig = "lib" ++ fs.path.sep_str ++ "zig";
var test_zig_dir = base_dir.openDir(lib_zig, .{}) catch break :zig_dir;
const file = test_zig_dir.openFile(test_index_file, .{}) catch {
test_zig_dir.close();
break :zig_dir;
};
file.close();
return Compilation.Directory{ .handle = test_zig_dir, .path = lib_zig };
}
// Try lib/std/std.zig
var test_zig_dir = base_dir.openDir("lib", .{}) catch return null;
const file = test_zig_dir.openFile(test_index_file, .{}) catch {
test_zig_dir.close();
return null;
};
file.close();
return Compilation.Directory{ .handle = test_zig_dir, .path = "lib" };
}
/// Both the directory handle and the path are newly allocated resources which the caller now owns.
pub fn findZigLibDir(gpa: *mem.Allocator) !Compilation.Directory {
const self_exe_path = try fs.selfExePathAlloc(gpa);
defer gpa.free(self_exe_path);
return findZigLibDirFromSelfExe(gpa, self_exe_path);
}
/// Both the directory handle and the path are newly allocated resources which the caller now owns.
pub fn findZigLibDirFromSelfExe(
allocator: *mem.Allocator,
self_exe_path: []const u8,
) error{ OutOfMemory, FileNotFound }!Compilation.Directory {
const cwd = fs.cwd();
var cur_path: []const u8 = self_exe_path;
while (fs.path.dirname(cur_path)) |dirname| : (cur_path = dirname) {
var base_dir = cwd.openDir(dirname, .{}) catch continue;
defer base_dir.close();
const sub_directory = testZigInstallPrefix(base_dir) orelse continue;
return Compilation.Directory{
.handle = sub_directory.handle,
.path = try fs.path.join(allocator, &[_][]const u8{ dirname, sub_directory.path.? }),
};
}
return error.FileNotFound;
}
/// Caller owns returned memory.
pub fn resolveGlobalCacheDir(allocator: *mem.Allocator) ![]u8 {
const appname = "zig";
if (std.Target.current.os.tag != .windows) {
if (std.os.getenv("XDG_CACHE_HOME")) |cache_root| {
return fs.path.join(allocator, &[_][]const u8{ cache_root, appname });
} else if (std.os.getenv("HOME")) |home| {
return fs.path.join(allocator, &[_][]const u8{ home, ".cache", appname });
}
}
return fs.getAppDataDir(allocator, appname);
}
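resolveGlobalCacheDir encodes a simple precedence on non-Windows hosts: prefer $XDG_CACHE_HOME/zig, then fall back to $HOME/.cache/zig, and otherwise defer to std.fs.getAppDataDir. A hedged usage sketch, assuming the file above is reachable as a relative import named introspect.zig:

const std = @import("std");
const introspect = @import("introspect.zig"); // assumed relative import path

pub fn main() !void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    // e.g. "/home/user/.cache/zig" on Linux when only HOME is set.
    const cache_dir = try introspect.resolveGlobalCacheDir(&arena.allocator);
    std.debug.print("global cache dir: {s}\n", .{cache_dir});
}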

@ -4,6 +4,7 @@ const Target = std.Target;
const fs = std.fs;
const Allocator = std.mem.Allocator;
const Batch = std.event.Batch;
const build_options = @import("build_options");
const is_darwin = Target.current.isDarwin();
const is_windows = Target.current.os.tag == .windows;
@ -13,6 +14,8 @@ const log = std.log.scoped(.libc_installation);
usingnamespace @import("windows_sdk.zig");
// TODO https://github.com/ziglang/zig/issues/6345
/// See the render function implementation for documentation of the fields.
pub const LibCInstallation = struct {
include_dir: ?[]const u8 = null,
@ -168,6 +171,8 @@ pub const LibCInstallation = struct {
var self: LibCInstallation = .{};
if (is_windows) {
if (!build_options.have_llvm)
return error.WindowsSdkNotFound;
var sdk: *ZigWindowsSDK = undefined;
switch (zig_find_windows_sdk(&sdk)) {
.None => {

src/libcxx.zig (new file)
@ -0,0 +1,319 @@
const std = @import("std");
const path = std.fs.path;
const assert = std.debug.assert;
const target_util = @import("target.zig");
const Compilation = @import("Compilation.zig");
const build_options = @import("build_options");
const trace = @import("tracy.zig").trace;
const libcxxabi_files = [_][]const u8{
"src/abort_message.cpp",
"src/cxa_aux_runtime.cpp",
"src/cxa_default_handlers.cpp",
"src/cxa_demangle.cpp",
"src/cxa_exception.cpp",
"src/cxa_exception_storage.cpp",
"src/cxa_guard.cpp",
"src/cxa_handlers.cpp",
"src/cxa_noexception.cpp",
"src/cxa_personality.cpp",
"src/cxa_thread_atexit.cpp",
"src/cxa_vector.cpp",
"src/cxa_virtual.cpp",
"src/fallback_malloc.cpp",
"src/private_typeinfo.cpp",
"src/stdlib_exception.cpp",
"src/stdlib_new_delete.cpp",
"src/stdlib_stdexcept.cpp",
"src/stdlib_typeinfo.cpp",
};
const libcxx_files = [_][]const u8{
"src/algorithm.cpp",
"src/any.cpp",
"src/atomic.cpp",
"src/barrier.cpp",
"src/bind.cpp",
"src/charconv.cpp",
"src/chrono.cpp",
"src/condition_variable.cpp",
"src/condition_variable_destructor.cpp",
"src/debug.cpp",
"src/exception.cpp",
"src/experimental/memory_resource.cpp",
"src/filesystem/directory_iterator.cpp",
"src/filesystem/int128_builtins.cpp",
"src/filesystem/operations.cpp",
"src/functional.cpp",
"src/future.cpp",
"src/hash.cpp",
"src/ios.cpp",
"src/iostream.cpp",
"src/locale.cpp",
"src/memory.cpp",
"src/mutex.cpp",
"src/mutex_destructor.cpp",
"src/new.cpp",
"src/optional.cpp",
"src/random.cpp",
"src/random_shuffle.cpp",
"src/regex.cpp",
"src/shared_mutex.cpp",
"src/stdexcept.cpp",
"src/string.cpp",
"src/strstream.cpp",
"src/support/solaris/xlocale.cpp",
"src/support/win32/locale_win32.cpp",
"src/support/win32/support.cpp",
"src/support/win32/thread_win32.cpp",
"src/system_error.cpp",
"src/thread.cpp",
"src/typeinfo.cpp",
"src/utility.cpp",
"src/valarray.cpp",
"src/variant.cpp",
"src/vector.cpp",
};
pub fn buildLibCXX(comp: *Compilation) !void {
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
const tracy = trace(@src());
defer tracy.end();
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
const root_name = "c++";
const output_mode = .Lib;
const link_mode = .Static;
const target = comp.getTarget();
const basename = try std.zig.binNameAlloc(arena, .{
.root_name = root_name,
.target = target,
.output_mode = output_mode,
.link_mode = link_mode,
});
const emit_bin = Compilation.EmitLoc{
.directory = null, // Put it in the cache directory.
.basename = basename,
};
const cxxabi_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", "include" });
const cxx_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" });
var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
try c_source_files.ensureCapacity(libcxx_files.len);
for (libcxx_files) |cxx_src| {
var cflags = std.ArrayList([]const u8).init(arena);
if (target.os.tag == .windows) {
// Filesystem stuff isn't supported on Windows.
if (std.mem.startsWith(u8, cxx_src, "src/filesystem/"))
continue;
} else {
if (std.mem.startsWith(u8, cxx_src, "src/support/win32/"))
continue;
}
try cflags.append("-DNDEBUG");
try cflags.append("-D_LIBCPP_BUILDING_LIBRARY");
try cflags.append("-D_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER");
try cflags.append("-DLIBCXX_BUILDING_LIBCXXABI");
try cflags.append("-D_LIBCXXABI_DISABLE_VISIBILITY_ANNOTATIONS");
try cflags.append("-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS");
if (target.abi.isMusl()) {
try cflags.append("-D_LIBCPP_HAS_MUSL_LIBC");
}
try cflags.append("-I");
try cflags.append(cxx_include_path);
try cflags.append("-I");
try cflags.append(cxxabi_include_path);
try cflags.append("-O3");
try cflags.append("-DNDEBUG");
if (target_util.supports_fpic(target)) {
try cflags.append("-fPIC");
}
try cflags.append("-nostdinc++");
try cflags.append("-fvisibility-inlines-hidden");
try cflags.append("-std=c++14");
try cflags.append("-Wno-user-defined-literals");
c_source_files.appendAssumeCapacity(.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", cxx_src }),
.extra_flags = cflags.items,
});
}
const sub_compilation = try Compilation.create(comp.gpa, .{
.local_cache_directory = comp.global_cache_directory,
.global_cache_directory = comp.global_cache_directory,
.zig_lib_directory = comp.zig_lib_directory,
.target = target,
.root_name = root_name,
.root_pkg = null,
.output_mode = output_mode,
.rand = comp.rand,
.libc_installation = comp.bin_file.options.libc_installation,
.emit_bin = emit_bin,
.optimize_mode = comp.bin_file.options.optimize_mode,
.link_mode = link_mode,
.want_sanitize_c = false,
.want_stack_check = false,
.want_valgrind = false,
.want_pic = comp.bin_file.options.pic,
.emit_h = null,
.strip = comp.bin_file.options.strip,
.is_native_os = comp.bin_file.options.is_native_os,
.self_exe_path = comp.self_exe_path,
.c_source_files = c_source_files.items,
.verbose_cc = comp.verbose_cc,
.verbose_link = comp.bin_file.options.verbose_link,
.verbose_tokenize = comp.verbose_tokenize,
.verbose_ast = comp.verbose_ast,
.verbose_ir = comp.verbose_ir,
.verbose_llvm_ir = comp.verbose_llvm_ir,
.verbose_cimport = comp.verbose_cimport,
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
.clang_passthrough_mode = comp.clang_passthrough_mode,
.link_libc = true,
});
defer sub_compilation.destroy();
try sub_compilation.updateSubCompilation();
assert(comp.libcxx_static_lib == null);
comp.libcxx_static_lib = Compilation.CRTFile{
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(
comp.gpa,
&[_][]const u8{basename},
),
.lock = sub_compilation.bin_file.toOwnedLock(),
};
}
pub fn buildLibCXXABI(comp: *Compilation) !void {
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
const tracy = trace(@src());
defer tracy.end();
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
const root_name = "c++abi";
const output_mode = .Lib;
const link_mode = .Static;
const target = comp.getTarget();
const basename = try std.zig.binNameAlloc(arena, .{
.root_name = root_name,
.target = target,
.output_mode = output_mode,
.link_mode = link_mode,
});
const emit_bin = Compilation.EmitLoc{
.directory = null, // Put it in the cache directory.
.basename = basename,
};
const cxxabi_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", "include" });
const cxx_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" });
var c_source_files: [libcxxabi_files.len]Compilation.CSourceFile = undefined;
for (libcxxabi_files) |cxxabi_src, i| {
var cflags = std.ArrayList([]const u8).init(arena);
try cflags.append("-DHAVE___CXA_THREAD_ATEXIT_IMPL");
try cflags.append("-D_LIBCPP_DISABLE_EXTERN_TEMPLATE");
try cflags.append("-D_LIBCPP_ENABLE_CXX17_REMOVED_UNEXPECTED_FUNCTIONS");
try cflags.append("-D_LIBCXXABI_BUILDING_LIBRARY");
try cflags.append("-D_LIBCXXABI_DISABLE_VISIBILITY_ANNOTATIONS");
try cflags.append("-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS");
if (target.abi.isMusl()) {
try cflags.append("-D_LIBCPP_HAS_MUSL_LIBC");
}
try cflags.append("-I");
try cflags.append(cxxabi_include_path);
try cflags.append("-I");
try cflags.append(cxx_include_path);
try cflags.append("-O3");
try cflags.append("-DNDEBUG");
if (target_util.supports_fpic(target)) {
try cflags.append("-fPIC");
}
try cflags.append("-nostdinc++");
try cflags.append("-fstrict-aliasing");
try cflags.append("-funwind-tables");
try cflags.append("-D_DEBUG");
try cflags.append("-UNDEBUG");
try cflags.append("-std=c++11");
c_source_files[i] = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", cxxabi_src }),
.extra_flags = cflags.items,
};
}
const sub_compilation = try Compilation.create(comp.gpa, .{
.local_cache_directory = comp.global_cache_directory,
.global_cache_directory = comp.global_cache_directory,
.zig_lib_directory = comp.zig_lib_directory,
.target = target,
.root_name = root_name,
.root_pkg = null,
.output_mode = output_mode,
.rand = comp.rand,
.libc_installation = comp.bin_file.options.libc_installation,
.emit_bin = emit_bin,
.optimize_mode = comp.bin_file.options.optimize_mode,
.link_mode = link_mode,
.want_sanitize_c = false,
.want_stack_check = false,
.want_valgrind = false,
.want_pic = comp.bin_file.options.pic,
.emit_h = null,
.strip = comp.bin_file.options.strip,
.is_native_os = comp.bin_file.options.is_native_os,
.self_exe_path = comp.self_exe_path,
.c_source_files = &c_source_files,
.verbose_cc = comp.verbose_cc,
.verbose_link = comp.bin_file.options.verbose_link,
.verbose_tokenize = comp.verbose_tokenize,
.verbose_ast = comp.verbose_ast,
.verbose_ir = comp.verbose_ir,
.verbose_llvm_ir = comp.verbose_llvm_ir,
.verbose_cimport = comp.verbose_cimport,
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
.clang_passthrough_mode = comp.clang_passthrough_mode,
.link_libc = true,
});
defer sub_compilation.destroy();
try sub_compilation.updateSubCompilation();
assert(comp.libcxxabi_static_lib == null);
comp.libcxxabi_static_lib = Compilation.CRTFile{
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(
comp.gpa,
&[_][]const u8{basename},
),
.lock = sub_compilation.bin_file.toOwnedLock(),
};
}
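The per-source filtering in buildLibCXX boils down to a small predicate: on Windows the src/filesystem/ sources are skipped, and on every other OS the src/support/win32/ shims are skipped. A standalone sketch of that rule (hypothetical helper, not part of libcxx.zig):

const std = @import("std");

/// Sketch: returns whether a libcxx source should be compiled for the given
/// OS, mirroring the skip rules in buildLibCXX.
fn wantLibCxxSource(os_tag: std.Target.Os.Tag, src: []const u8) bool {
    if (os_tag == .windows) {
        // Filesystem sources are not supported on Windows.
        return !std.mem.startsWith(u8, src, "src/filesystem/");
    }
    return !std.mem.startsWith(u8, src, "src/support/win32/");
}

pub fn main() void {
    std.debug.assert(!wantLibCxxSource(.windows, "src/filesystem/operations.cpp"));
    std.debug.assert(wantLibCxxSource(.linux, "src/filesystem/operations.cpp"));
    std.debug.assert(!wantLibCxxSource(.linux, "src/support/win32/support.cpp"));
}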

src/libunwind.zig (new file)
@ -0,0 +1,135 @@
const std = @import("std");
const path = std.fs.path;
const assert = std.debug.assert;
const target_util = @import("target.zig");
const Compilation = @import("Compilation.zig");
const build_options = @import("build_options");
const trace = @import("tracy.zig").trace;
pub fn buildStaticLib(comp: *Compilation) !void {
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
const tracy = trace(@src());
defer tracy.end();
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
const root_name = "unwind";
const output_mode = .Lib;
const link_mode = .Static;
const target = comp.getTarget();
const basename = try std.zig.binNameAlloc(arena, .{
.root_name = root_name,
.target = target,
.output_mode = output_mode,
.link_mode = link_mode,
});
const emit_bin = Compilation.EmitLoc{
.directory = null, // Put it in the cache directory.
.basename = basename,
};
const unwind_src_list = [_][]const u8{
"libunwind" ++ path.sep_str ++ "src" ++ path.sep_str ++ "libunwind.cpp",
"libunwind" ++ path.sep_str ++ "src" ++ path.sep_str ++ "Unwind-EHABI.cpp",
"libunwind" ++ path.sep_str ++ "src" ++ path.sep_str ++ "Unwind-seh.cpp",
"libunwind" ++ path.sep_str ++ "src" ++ path.sep_str ++ "UnwindLevel1.c",
"libunwind" ++ path.sep_str ++ "src" ++ path.sep_str ++ "UnwindLevel1-gcc-ext.c",
"libunwind" ++ path.sep_str ++ "src" ++ path.sep_str ++ "Unwind-sjlj.c",
"libunwind" ++ path.sep_str ++ "src" ++ path.sep_str ++ "UnwindRegistersRestore.S",
"libunwind" ++ path.sep_str ++ "src" ++ path.sep_str ++ "UnwindRegistersSave.S",
};
var c_source_files: [unwind_src_list.len]Compilation.CSourceFile = undefined;
for (unwind_src_list) |unwind_src, i| {
var cflags = std.ArrayList([]const u8).init(arena);
switch (Compilation.classifyFileExt(unwind_src)) {
.c => {
try cflags.append("-std=c99");
},
.cpp => {
try cflags.appendSlice(&[_][]const u8{
"-fno-rtti",
"-I",
try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" }),
});
},
.assembly => {},
else => unreachable, // You can see the entire list of files just above.
}
try cflags.append("-I");
try cflags.append(try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libunwind", "include" }));
if (target_util.supports_fpic(target)) {
try cflags.append("-fPIC");
}
try cflags.append("-D_LIBUNWIND_DISABLE_VISIBILITY_ANNOTATIONS");
try cflags.append("-Wa,--noexecstack");
// This macro is intentionally always defined: it asks whether libunwind should be built
// only for the target specified by the compiler defines. Since we pass -target, the
// compiler defines will be correct.
try cflags.append("-D_LIBUNWIND_IS_NATIVE_ONLY");
if (comp.bin_file.options.optimize_mode == .Debug) {
try cflags.append("-D_DEBUG");
}
if (comp.bin_file.options.single_threaded) {
try cflags.append("-D_LIBUNWIND_HAS_NO_THREADS");
}
try cflags.append("-Wno-bitwise-conditional-parentheses");
c_source_files[i] = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{unwind_src}),
.extra_flags = cflags.items,
};
}
const sub_compilation = try Compilation.create(comp.gpa, .{
.local_cache_directory = comp.global_cache_directory,
.global_cache_directory = comp.global_cache_directory,
.zig_lib_directory = comp.zig_lib_directory,
.target = target,
.root_name = root_name,
.root_pkg = null,
.output_mode = output_mode,
.rand = comp.rand,
.libc_installation = comp.bin_file.options.libc_installation,
.emit_bin = emit_bin,
.optimize_mode = comp.bin_file.options.optimize_mode,
.link_mode = link_mode,
.want_sanitize_c = false,
.want_stack_check = false,
.want_valgrind = false,
.want_pic = comp.bin_file.options.pic,
.emit_h = null,
.strip = comp.bin_file.options.strip,
.is_native_os = comp.bin_file.options.is_native_os,
.self_exe_path = comp.self_exe_path,
.c_source_files = &c_source_files,
.verbose_cc = comp.verbose_cc,
.verbose_link = comp.bin_file.options.verbose_link,
.verbose_tokenize = comp.verbose_tokenize,
.verbose_ast = comp.verbose_ast,
.verbose_ir = comp.verbose_ir,
.verbose_llvm_ir = comp.verbose_llvm_ir,
.verbose_cimport = comp.verbose_cimport,
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
.clang_passthrough_mode = comp.clang_passthrough_mode,
.link_libc = true,
});
defer sub_compilation.destroy();
try sub_compilation.updateSubCompilation();
assert(comp.libunwind_static_lib == null);
comp.libunwind_static_lib = Compilation.CRTFile{
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(
comp.gpa,
&[_][]const u8{basename},
),
.lock = sub_compilation.bin_file.toOwnedLock(),
};
}
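classifyFileExt is used above only to pick between C, C++, and assembly flags, and for the fixed unwind_src_list that choice reduces to the file extension. A hypothetical stand-in that illustrates the mapping (not the real Compilation.classifyFileExt, which recognizes more kinds):

const std = @import("std");

const FileKind = enum { c, cpp, assembly };

/// Sketch: covers only the three extensions that appear in unwind_src_list.
fn classify(name: []const u8) FileKind {
    if (std.mem.endsWith(u8, name, ".cpp")) return .cpp;
    if (std.mem.endsWith(u8, name, ".S")) return .assembly;
    return .c;
}

pub fn main() void {
    std.debug.assert(classify("UnwindLevel1.c") == .c);
    std.debug.assert(classify("libunwind.cpp") == .cpp);
    std.debug.assert(classify("UnwindRegistersSave.S") == .assembly);
}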

(File diff suppressed because it is too large.)

src/link.zig (new file)
@ -0,0 +1,549 @@
const std = @import("std");
const mem = std.mem;
const Allocator = std.mem.Allocator;
const fs = std.fs;
const log = std.log.scoped(.link);
const assert = std.debug.assert;
const Compilation = @import("Compilation.zig");
const Module = @import("Module.zig");
const trace = @import("tracy.zig").trace;
const Package = @import("Package.zig");
const Type = @import("type.zig").Type;
const Cache = @import("Cache.zig");
const build_options = @import("build_options");
const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
pub const producer_string = if (std.builtin.is_test) "zig test" else "zig " ++ build_options.version;
pub const Emit = struct {
/// Where the output will go.
directory: Compilation.Directory,
/// Path to the output file, relative to `directory`.
sub_path: []const u8,
};
pub const Options = struct {
/// This is `null` when -fno-emit-bin is used. When `openPath` or `flush` is called,
/// it will have already been null-checked.
emit: ?Emit,
target: std.Target,
output_mode: std.builtin.OutputMode,
link_mode: std.builtin.LinkMode,
object_format: std.builtin.ObjectFormat,
optimize_mode: std.builtin.Mode,
machine_code_model: std.builtin.CodeModel,
root_name: []const u8,
/// Not every Compilation compiles .zig code! For example you could do `zig build-exe foo.o`.
module: ?*Module,
dynamic_linker: ?[]const u8,
/// Used for calculating how much space to reserve for symbols in case the binary file
/// does not already have a symbol table.
symbol_count_hint: u64 = 32,
/// Used for calculating how much space to reserve for executable program code in case
/// the binary file does not already have such a section.
program_code_size_hint: u64 = 256 * 1024,
entry_addr: ?u64 = null,
stack_size_override: ?u64,
/// Set to `true` to omit debug info.
strip: bool,
/// If this is true then this link code is responsible for outputting an object
/// file and then using LLD to link it together with the link options and other objects.
/// Otherwise (depending on `use_llvm`) this link code directly outputs and updates the final binary.
use_lld: bool,
/// If this is true then this link code is responsible for making an LLVM IR Module,
/// outputting it to an object file, and then linking that together with link options and
/// other objects.
/// Otherwise (depending on `use_lld`) this link code directly outputs and updates the final binary.
use_llvm: bool,
link_libc: bool,
link_libcpp: bool,
function_sections: bool,
eh_frame_hdr: bool,
rdynamic: bool,
z_nodelete: bool,
z_defs: bool,
bind_global_refs_locally: bool,
is_native_os: bool,
pic: bool,
valgrind: bool,
stack_check: bool,
single_threaded: bool,
verbose_link: bool,
dll_export_fns: bool,
error_return_tracing: bool,
is_compiler_rt_or_libc: bool,
parent_compilation_link_libc: bool,
each_lib_rpath: bool,
disable_lld_caching: bool,
is_test: bool,
gc_sections: ?bool = null,
allow_shlib_undefined: ?bool,
subsystem: ?std.Target.SubSystem,
linker_script: ?[]const u8,
version_script: ?[]const u8,
override_soname: ?[]const u8,
llvm_cpu_features: ?[*:0]const u8,
/// Extra args passed directly to LLD. Ignored when not linking with LLD.
extra_lld_args: []const []const u8,
objects: []const []const u8,
framework_dirs: []const []const u8,
frameworks: []const []const u8,
system_libs: std.StringArrayHashMapUnmanaged(void),
lib_dirs: []const []const u8,
rpath_list: []const []const u8,
version: ?std.builtin.Version,
libc_installation: ?*const LibCInstallation,
pub fn effectiveOutputMode(options: Options) std.builtin.OutputMode {
return if (options.use_lld) .Obj else options.output_mode;
}
};
pub const File = struct {
tag: Tag,
options: Options,
file: ?fs.File,
allocator: *Allocator,
/// When linking with LLD, this linker code will output an object file only at
/// this location, and then this path can be placed on the LLD linker line.
intermediary_basename: ?[]const u8 = null,
/// Prevents other processes from clobbering files in the output directory
/// of this linking operation.
lock: ?Cache.Lock = null,
pub const LinkBlock = union {
elf: Elf.TextBlock,
coff: Coff.TextBlock,
macho: MachO.TextBlock,
c: void,
wasm: void,
};
pub const LinkFn = union {
elf: Elf.SrcFn,
coff: Coff.SrcFn,
macho: MachO.SrcFn,
c: void,
wasm: ?Wasm.FnData,
};
/// For DWARF .debug_info.
pub const DbgInfoTypeRelocsTable = std.HashMapUnmanaged(Type, DbgInfoTypeReloc, Type.hash, Type.eql, std.hash_map.DefaultMaxLoadPercentage);
/// For DWARF .debug_info.
pub const DbgInfoTypeReloc = struct {
/// Offset from `TextBlock.dbg_info_off` (the buffer that is local to a Decl).
/// This is where the .debug_info tag for the type is.
off: u32,
/// Offset from `TextBlock.dbg_info_off` (the buffer that is local to a Decl).
/// List of DW.AT_type / DW.FORM_ref4 that points to the type.
relocs: std.ArrayListUnmanaged(u32),
};
/// Attempts incremental linking, if the file already exists. If
/// incremental linking fails, falls back to truncating the file and
/// rewriting it. A malicious file is detected as incremental link failure
/// and does not cause Illegal Behavior. This operation is not atomic.
pub fn openPath(allocator: *Allocator, options: Options) !*File {
const use_stage1 = build_options.is_stage1 and options.use_llvm;
if (use_stage1 or options.emit == null) {
return switch (options.object_format) {
.coff, .pe => &(try Coff.createEmpty(allocator, options)).base,
.elf => &(try Elf.createEmpty(allocator, options)).base,
.macho => &(try MachO.createEmpty(allocator, options)).base,
.wasm => &(try Wasm.createEmpty(allocator, options)).base,
.c => unreachable, // Reported error earlier.
.hex => return error.HexObjectFormatUnimplemented,
.raw => return error.RawObjectFormatUnimplemented,
};
}
const emit = options.emit.?;
const use_lld = build_options.have_llvm and options.use_lld; // comptime known false when !have_llvm
const sub_path = if (use_lld) blk: {
if (options.module == null) {
// No point in opening a file; we would not write anything to it. Initialize an empty linker instead.
return switch (options.object_format) {
.coff, .pe => &(try Coff.createEmpty(allocator, options)).base,
.elf => &(try Elf.createEmpty(allocator, options)).base,
.macho => &(try MachO.createEmpty(allocator, options)).base,
.wasm => &(try Wasm.createEmpty(allocator, options)).base,
.c => unreachable, // Reported error earlier.
.hex => return error.HexObjectFormatUnimplemented,
.raw => return error.RawObjectFormatUnimplemented,
};
}
// Open a temporary object file, not the final output file because we want to link with LLD.
break :blk try std.fmt.allocPrint(allocator, "{s}{s}", .{ emit.sub_path, options.target.oFileExt() });
} else emit.sub_path;
errdefer if (use_lld) allocator.free(sub_path);
const file: *File = switch (options.object_format) {
.coff, .pe => &(try Coff.openPath(allocator, sub_path, options)).base,
.elf => &(try Elf.openPath(allocator, sub_path, options)).base,
.macho => &(try MachO.openPath(allocator, sub_path, options)).base,
.wasm => &(try Wasm.openPath(allocator, sub_path, options)).base,
.c => &(try C.openPath(allocator, sub_path, options)).base,
.hex => return error.HexObjectFormatUnimplemented,
.raw => return error.RawObjectFormatUnimplemented,
};
if (use_lld) {
file.intermediary_basename = sub_path;
}
return file;
}
pub fn cast(base: *File, comptime T: type) ?*T {
if (base.tag != T.base_tag)
return null;
return @fieldParentPtr(T, "base", base);
}
pub fn makeWritable(base: *File) !void {
switch (base.tag) {
.coff, .elf, .macho => {
if (base.file != null) return;
const emit = base.options.emit orelse return;
base.file = try emit.directory.handle.createFile(emit.sub_path, .{
.truncate = false,
.read = true,
.mode = determineMode(base.options),
});
},
.c, .wasm => {},
}
}
pub fn makeExecutable(base: *File) !void {
switch (base.tag) {
.coff, .elf, .macho => if (base.file) |f| {
if (base.intermediary_basename != null) {
// The file we have open is not the final file that we want to
// make executable, so we don't have to close it.
return;
}
f.close();
base.file = null;
},
.c, .wasm => {},
}
}
/// May be called before or after updateDeclExports but must be called
/// after allocateDeclIndexes for any given Decl.
pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl),
.c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl),
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl),
}
}
pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
.c, .wasm => {},
}
}
/// Must be called before any call to updateDecl or updateDeclExports for
/// any given Decl.
pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl),
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl),
.macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl),
.c, .wasm => {},
}
}
pub fn releaseLock(self: *File) void {
if (self.lock) |*lock| {
lock.release();
self.lock = null;
}
}
pub fn toOwnedLock(self: *File) Cache.Lock {
const lock = self.lock.?;
self.lock = null;
return lock;
}
pub fn destroy(base: *File) void {
base.releaseLock();
if (base.file) |f| f.close();
if (base.intermediary_basename) |sub_path| base.allocator.free(sub_path);
switch (base.tag) {
.coff => {
const parent = @fieldParentPtr(Coff, "base", base);
parent.deinit();
base.allocator.destroy(parent);
},
.elf => {
const parent = @fieldParentPtr(Elf, "base", base);
parent.deinit();
base.allocator.destroy(parent);
},
.macho => {
const parent = @fieldParentPtr(MachO, "base", base);
parent.deinit();
base.allocator.destroy(parent);
},
.c => {
const parent = @fieldParentPtr(C, "base", base);
parent.deinit();
base.allocator.destroy(parent);
},
.wasm => {
const parent = @fieldParentPtr(Wasm, "base", base);
parent.deinit();
base.allocator.destroy(parent);
},
}
}
/// Commit pending changes and write headers. Takes into account final output mode
/// and `use_lld`, not only `effectiveOutputMode`.
pub fn flush(base: *File, comp: *Compilation) !void {
const emit = base.options.emit orelse return; // -fno-emit-bin
if (comp.clang_preprocessor_mode == .yes) {
// TODO: avoid extra link step when it's just 1 object file (the `zig cc -c` case)
// Until then, we do `lld -r -o output.o input.o` even though the output is the same
// as the input. For the preprocessing case (`zig cc -E -o foo`) we copy the file
// to the final location. See also the corresponding TODO in Coff linking.
const full_out_path = try emit.directory.join(comp.gpa, &[_][]const u8{emit.sub_path});
defer comp.gpa.free(full_out_path);
assert(comp.c_object_table.count() == 1);
const the_entry = comp.c_object_table.items()[0];
const cached_pp_file_path = the_entry.key.status.success.object_path;
try fs.cwd().copyFile(cached_pp_file_path, fs.cwd(), full_out_path, .{});
return;
}
const use_lld = build_options.have_llvm and base.options.use_lld;
if (use_lld and base.options.output_mode == .Lib and base.options.link_mode == .Static and
!base.options.target.isWasm())
{
return base.linkAsArchive(comp);
}
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).flush(comp),
.elf => return @fieldParentPtr(Elf, "base", base).flush(comp),
.macho => return @fieldParentPtr(MachO, "base", base).flush(comp),
.c => return @fieldParentPtr(C, "base", base).flush(comp),
.wasm => return @fieldParentPtr(Wasm, "base", base).flush(comp),
}
}
/// Commit pending changes and write headers. Works based on `effectiveOutputMode`
/// rather than final output mode.
pub fn flushModule(base: *File, comp: *Compilation) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).flushModule(comp),
.elf => return @fieldParentPtr(Elf, "base", base).flushModule(comp),
.macho => return @fieldParentPtr(MachO, "base", base).flushModule(comp),
.c => return @fieldParentPtr(C, "base", base).flushModule(comp),
.wasm => return @fieldParentPtr(Wasm, "base", base).flushModule(comp),
}
}
pub fn freeDecl(base: *File, decl: *Module.Decl) void {
switch (base.tag) {
.coff => @fieldParentPtr(Coff, "base", base).freeDecl(decl),
.elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl),
.macho => @fieldParentPtr(MachO, "base", base).freeDecl(decl),
.c => unreachable,
.wasm => @fieldParentPtr(Wasm, "base", base).freeDecl(decl),
}
}
pub fn errorFlags(base: *File) ErrorFlags {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).error_flags,
.elf => return @fieldParentPtr(Elf, "base", base).error_flags,
.macho => return @fieldParentPtr(MachO, "base", base).error_flags,
.c => return .{ .no_entry_point_found = false },
.wasm => return ErrorFlags{},
}
}
/// May be called before or after updateDecl, but must be called after
/// allocateDeclIndexes for any given Decl.
pub fn updateDeclExports(
base: *File,
module: *Module,
decl: *const Module.Decl,
exports: []const *Module.Export,
) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclExports(module, decl, exports),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclExports(module, decl, exports),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclExports(module, decl, exports),
.c => return {},
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclExports(module, decl, exports),
}
}
pub fn getDeclVAddr(base: *File, decl: *const Module.Decl) u64 {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl),
.elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl),
.macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl),
.c => unreachable,
.wasm => unreachable,
}
}
fn linkAsArchive(base: *File, comp: *Compilation) !void {
const tracy = trace(@src());
defer tracy.end();
var arena_allocator = std.heap.ArenaAllocator.init(base.allocator);
defer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
const directory = base.options.emit.?.directory; // Just an alias to make it shorter to type.
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (base.options.module) |module| blk: {
const use_stage1 = build_options.is_stage1 and base.options.use_llvm;
if (use_stage1) {
const obj_basename = try std.zig.binNameAlloc(arena, .{
.root_name = base.options.root_name,
.target = base.options.target,
.output_mode = .Obj,
});
const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
}
try base.flushModule(comp);
const obj_basename = base.intermediary_basename.?;
const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
} else null;
// This function follows the same pattern as link.Elf.linkWithLLD, so for more insight
// into what is going on here, read that function body; it is better commented.
const id_symlink_basename = "llvm-ar.id";
base.releaseLock();
var ch = comp.cache_parent.obtain();
defer ch.deinit();
try ch.addListOfFiles(base.options.objects);
for (comp.c_object_table.items()) |entry| {
_ = try ch.addFile(entry.key.status.success.object_path, null);
}
try ch.addOptionalFile(module_obj_path);
// We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
_ = try ch.hit();
const digest = ch.final();
var prev_digest_buf: [digest.len]u8 = undefined;
const prev_digest: []u8 = directory.handle.readLink(id_symlink_basename, &prev_digest_buf) catch |err| b: {
log.debug("archive new_digest={} readlink error: {}", .{ digest, @errorName(err) });
break :b prev_digest_buf[0..0];
};
if (mem.eql(u8, prev_digest, &digest)) {
log.debug("archive digest={} match - skipping invocation", .{digest});
base.lock = ch.toOwnedLock();
return;
}
// We are about to change the output file to be different, so we invalidate the build hash now.
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
error.FileNotFound => {},
else => |e| return e,
};
var object_files = std.ArrayList([*:0]const u8).init(base.allocator);
defer object_files.deinit();
try object_files.ensureCapacity(base.options.objects.len + comp.c_object_table.items().len + 1);
for (base.options.objects) |obj_path| {
object_files.appendAssumeCapacity(try arena.dupeZ(u8, obj_path));
}
for (comp.c_object_table.items()) |entry| {
object_files.appendAssumeCapacity(try arena.dupeZ(u8, entry.key.status.success.object_path));
}
if (module_obj_path) |p| {
object_files.appendAssumeCapacity(try arena.dupeZ(u8, p));
}
const full_out_path = try directory.join(arena, &[_][]const u8{base.options.emit.?.sub_path});
const full_out_path_z = try arena.dupeZ(u8, full_out_path);
if (base.options.verbose_link) {
std.debug.print("ar rcs {}", .{full_out_path_z});
for (object_files.items) |arg| {
std.debug.print(" {}", .{arg});
}
std.debug.print("\n", .{});
}
const llvm = @import("llvm.zig");
const os_type = @import("target.zig").osToLLVM(base.options.target.os.tag);
const bad = llvm.WriteArchive(full_out_path_z, object_files.items.ptr, object_files.items.len, os_type);
if (bad) return error.UnableToWriteArchive;
directory.handle.symLink(&digest, id_symlink_basename, .{}) catch |err| {
std.log.warn("failed to save archive hash digest symlink: {}", .{@errorName(err)});
};
ch.writeManifest() catch |err| {
std.log.warn("failed to write cache manifest when archiving: {}", .{@errorName(err)});
};
base.lock = ch.toOwnedLock();
}
pub const Tag = enum {
coff,
elf,
macho,
c,
wasm,
};
pub const ErrorFlags = struct {
no_entry_point_found: bool = false,
};
pub const C = @import("link/C.zig");
pub const Coff = @import("link/Coff.zig");
pub const Elf = @import("link/Elf.zig");
pub const MachO = @import("link/MachO.zig");
pub const Wasm = @import("link/Wasm.zig");
};
pub fn determineMode(options: Options) fs.File.Mode {
// On common systems with a 0o022 umask, 0o777 will still result in a file created
// with 0o755 permissions, but it works appropriately if the system is configured
// more leniently. As another data point, C's fopen seems to open files with mode
// 0o666.
const executable_mode = if (std.Target.current.os.tag == .windows) 0 else 0o777;
switch (options.effectiveOutputMode()) {
.Lib => return switch (options.link_mode) {
.Dynamic => executable_mode,
.Static => fs.File.default_mode,
},
.Exe => return executable_mode,
.Obj => return fs.File.default_mode,
}
}
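The permission reasoning in determineMode is ordinary umask arithmetic: a requested mode of 0o777 combined with the common 0o022 umask yields 0o755 on disk, because the kernel clears the umask bits at file-creation time. A one-line sketch of that arithmetic:

const std = @import("std");

pub fn main() void {
    const requested: u32 = 0o777;
    const umask: u32 = 0o022;
    // Mode actually applied to the created file.
    const effective = requested & ~umask;
    std.debug.assert(effective == 0o755);
}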

(Some files were not shown because too many files have changed in this diff.)