mirror of
https://github.com/ziglang/zig.git
synced 2025-12-06 06:13:07 +00:00
Merge pull request #6250 from ziglang/stage2-zig-cc
move `zig cc`, `zig translate-c`, `zig libc`, main(), and linking from stage1 to stage2
This commit is contained in:
commit
fe117d9961
149
CMakeLists.txt
149
CMakeLists.txt
@ -51,7 +51,6 @@ message("Configuring zig version ${ZIG_VERSION}")
|
||||
|
||||
set(ZIG_STATIC off CACHE BOOL "Attempt to build a static zig executable (not compatible with glibc)")
|
||||
set(ZIG_STATIC_LLVM off CACHE BOOL "Prefer linking against static LLVM libraries")
|
||||
set(ZIG_ENABLE_MEM_PROFILE off CACHE BOOL "Activate memory usage instrumentation")
|
||||
set(ZIG_PREFER_CLANG_CPP_DYLIB off CACHE BOOL "Try to link against -lclang-cpp")
|
||||
set(ZIG_WORKAROUND_4799 off CACHE BOOL "workaround for https://github.com/ziglang/zig/issues/4799")
|
||||
set(ZIG_WORKAROUND_POLLY_SO off CACHE STRING "workaround for https://github.com/ziglang/zig/issues/4799")
|
||||
@ -72,11 +71,6 @@ string(REGEX REPLACE "\\\\" "\\\\\\\\" ZIG_LIBC_INCLUDE_DIR_ESCAPED "${ZIG_LIBC_
|
||||
|
||||
option(ZIG_TEST_COVERAGE "Build Zig with test coverage instrumentation" OFF)
|
||||
|
||||
# Zig no longer has embedded LLD. This option is kept for package maintainers
|
||||
# so that they don't have to update their scripts in case we ever re-introduce
|
||||
# LLD to the tree. This option does nothing.
|
||||
option(ZIG_FORCE_EXTERNAL_LLD "does nothing" OFF)
|
||||
|
||||
set(ZIG_TARGET_TRIPLE "native" CACHE STRING "arch-os-abi to output binaries for")
|
||||
set(ZIG_TARGET_MCPU "baseline" CACHE STRING "-mcpu parameter to output binaries for")
|
||||
set(ZIG_EXECUTABLE "" CACHE STRING "(when cross compiling) path to already-built zig binary")
|
||||
@ -101,7 +95,7 @@ if(APPLE AND ZIG_WORKAROUND_4799)
|
||||
list(APPEND LLVM_LIBRARIES "-Wl,${CMAKE_PREFIX_PATH}/lib/libPolly.a" "-Wl,${CMAKE_PREFIX_PATH}/lib/libPollyPPCG.a" "-Wl,${CMAKE_PREFIX_PATH}/lib/libPollyISL.a")
|
||||
endif()
|
||||
|
||||
set(ZIG_CPP_LIB_DIR "${CMAKE_BINARY_DIR}/zig_cpp")
|
||||
set(ZIG_CPP_LIB_DIR "${CMAKE_BINARY_DIR}/zigcpp")
|
||||
|
||||
# Handle multi-config builds and place each into a common lib. The VS generator
|
||||
# for example will append a Debug folder by default if not explicitly specified.
|
||||
@ -267,53 +261,45 @@ include_directories("${CMAKE_SOURCE_DIR}/deps/dbg-macro")
|
||||
|
||||
find_package(Threads)
|
||||
|
||||
# CMake doesn't let us create an empty executable, so we hang on to this one separately.
|
||||
set(ZIG_MAIN_SRC "${CMAKE_SOURCE_DIR}/src/main.cpp")
|
||||
# This is our shim which will be replaced by stage1.zig.
|
||||
set(ZIG0_SOURCES
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/zig0.cpp"
|
||||
)
|
||||
|
||||
# This is our shim which will be replaced by libstage2 written in Zig.
|
||||
set(ZIG0_SHIM_SRC "${CMAKE_SOURCE_DIR}/src/stage2.cpp")
|
||||
|
||||
if(ZIG_ENABLE_MEM_PROFILE)
|
||||
set(ZIG_SOURCES_MEM_PROFILE "${CMAKE_SOURCE_DIR}/src/mem_profile.cpp")
|
||||
endif()
|
||||
|
||||
set(ZIG_SOURCES
|
||||
"${CMAKE_SOURCE_DIR}/src/analyze.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/ast_render.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/bigfloat.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/bigint.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/buffer.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/cache_hash.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/codegen.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/compiler.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/dump_analysis.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/errmsg.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/error.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/glibc.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/heap.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/ir.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/ir_print.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/link.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/mem.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/os.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/parser.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/range_set.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/target.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/tokenizer.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/util.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/softfloat_ext.cpp"
|
||||
"${ZIG_SOURCES_MEM_PROFILE}"
|
||||
set(STAGE1_SOURCES
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/analyze.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/ast_render.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/bigfloat.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/bigint.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/buffer.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/codegen.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/dump_analysis.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/errmsg.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/error.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/heap.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/ir.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/ir_print.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/mem.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/os.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/parser.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/range_set.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/stage1.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/target.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/tokenizer.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/util.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/softfloat_ext.cpp"
|
||||
)
|
||||
set(OPTIMIZED_C_SOURCES
|
||||
"${CMAKE_SOURCE_DIR}/src/blake2b.c"
|
||||
"${CMAKE_SOURCE_DIR}/src/parse_f128.c"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/parse_f128.c"
|
||||
)
|
||||
set(ZIG_CPP_SOURCES
|
||||
# These are planned to stay even when we are self-hosted.
|
||||
"${CMAKE_SOURCE_DIR}/src/zig_llvm.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/zig_clang.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/zig_clang_driver.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/zig_clang_cc1_main.cpp"
|
||||
"${CMAKE_SOURCE_DIR}/src/zig_clang_cc1as_main.cpp"
|
||||
# https://github.com/ziglang/zig/issues/6363
|
||||
"${CMAKE_SOURCE_DIR}/src/windows_sdk.cpp"
|
||||
)
|
||||
|
||||
@ -334,7 +320,7 @@ set(ZIG_STD_DEST "${ZIG_LIB_DIR}/std")
|
||||
set(ZIG_CONFIG_H_OUT "${CMAKE_BINARY_DIR}/config.h")
|
||||
set(ZIG_CONFIG_ZIG_OUT "${CMAKE_BINARY_DIR}/config.zig")
|
||||
configure_file (
|
||||
"${CMAKE_SOURCE_DIR}/src/config.h.in"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1/config.h.in"
|
||||
"${ZIG_CONFIG_H_OUT}"
|
||||
)
|
||||
configure_file (
|
||||
@ -346,6 +332,7 @@ include_directories(
|
||||
${CMAKE_SOURCE_DIR}
|
||||
${CMAKE_BINARY_DIR}
|
||||
"${CMAKE_SOURCE_DIR}/src"
|
||||
"${CMAKE_SOURCE_DIR}/src/stage1"
|
||||
)
|
||||
|
||||
# These have to go before the -Wno- flags
|
||||
@ -411,18 +398,19 @@ if(ZIG_TEST_COVERAGE)
|
||||
set(EXE_LDFLAGS "${EXE_LDFLAGS} -fprofile-arcs -ftest-coverage")
|
||||
endif()
|
||||
|
||||
add_library(zig_cpp STATIC ${ZIG_CPP_SOURCES})
|
||||
set_target_properties(zig_cpp PROPERTIES
|
||||
add_library(zigcpp STATIC ${ZIG_CPP_SOURCES})
|
||||
set_target_properties(zigcpp PROPERTIES
|
||||
COMPILE_FLAGS ${EXE_CFLAGS}
|
||||
)
|
||||
|
||||
target_link_libraries(zig_cpp LINK_PUBLIC
|
||||
target_link_libraries(zigcpp LINK_PUBLIC
|
||||
${CLANG_LIBRARIES}
|
||||
${LLD_LIBRARIES}
|
||||
${LLVM_LIBRARIES}
|
||||
${CMAKE_THREAD_LIBS_INIT}
|
||||
)
|
||||
if(ZIG_WORKAROUND_POLLY_SO)
|
||||
target_link_libraries(zig_cpp LINK_PUBLIC "-Wl,${ZIG_WORKAROUND_POLLY_SO}")
|
||||
target_link_libraries(zigcpp LINK_PUBLIC "-Wl,${ZIG_WORKAROUND_POLLY_SO}")
|
||||
endif()
|
||||
|
||||
add_library(opt_c_util STATIC ${OPTIMIZED_C_SOURCES})
|
||||
@ -430,68 +418,67 @@ set_target_properties(opt_c_util PROPERTIES
|
||||
COMPILE_FLAGS "${OPTIMIZED_C_FLAGS}"
|
||||
)
|
||||
|
||||
add_library(zigcompiler STATIC ${ZIG_SOURCES})
|
||||
set_target_properties(zigcompiler PROPERTIES
|
||||
add_library(zigstage1 STATIC ${STAGE1_SOURCES})
|
||||
set_target_properties(zigstage1 PROPERTIES
|
||||
COMPILE_FLAGS ${EXE_CFLAGS}
|
||||
LINK_FLAGS ${EXE_LDFLAGS}
|
||||
)
|
||||
target_link_libraries(zigcompiler LINK_PUBLIC
|
||||
zig_cpp
|
||||
target_link_libraries(zigstage1 LINK_PUBLIC
|
||||
opt_c_util
|
||||
${SOFTFLOAT_LIBRARIES}
|
||||
${CMAKE_THREAD_LIBS_INIT}
|
||||
zigcpp
|
||||
)
|
||||
if(NOT MSVC)
|
||||
target_link_libraries(zigcompiler LINK_PUBLIC ${LIBXML2})
|
||||
target_link_libraries(zigstage1 LINK_PUBLIC ${LIBXML2})
|
||||
endif()
|
||||
|
||||
if(ZIG_DIA_GUIDS_LIB)
|
||||
target_link_libraries(zigcompiler LINK_PUBLIC ${ZIG_DIA_GUIDS_LIB})
|
||||
target_link_libraries(zigstage1 LINK_PUBLIC ${ZIG_DIA_GUIDS_LIB})
|
||||
endif()
|
||||
|
||||
if(MSVC OR MINGW)
|
||||
target_link_libraries(zigcompiler LINK_PUBLIC version)
|
||||
target_link_libraries(zigstage1 LINK_PUBLIC version)
|
||||
endif()
|
||||
|
||||
add_executable(zig0 "${ZIG_MAIN_SRC}" "${ZIG0_SHIM_SRC}")
|
||||
add_executable(zig0 ${ZIG0_SOURCES})
|
||||
set_target_properties(zig0 PROPERTIES
|
||||
COMPILE_FLAGS ${EXE_CFLAGS}
|
||||
LINK_FLAGS ${EXE_LDFLAGS}
|
||||
)
|
||||
target_link_libraries(zig0 zigcompiler)
|
||||
target_link_libraries(zig0 zigstage1)
|
||||
|
||||
if(MSVC)
|
||||
set(LIBSTAGE2 "${CMAKE_BINARY_DIR}/zigstage2.lib")
|
||||
set(ZIG1_OBJECT "${CMAKE_BINARY_DIR}/zig1.obj")
|
||||
else()
|
||||
set(LIBSTAGE2 "${CMAKE_BINARY_DIR}/libzigstage2.a")
|
||||
set(ZIG1_OBJECT "${CMAKE_BINARY_DIR}/zig1.o")
|
||||
endif()
|
||||
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
|
||||
set(LIBSTAGE2_RELEASE_ARG "")
|
||||
set(ZIG1_RELEASE_ARG "")
|
||||
else()
|
||||
set(LIBSTAGE2_RELEASE_ARG --release-fast --strip)
|
||||
set(ZIG1_RELEASE_ARG -OReleaseFast --strip)
|
||||
endif()
|
||||
|
||||
set(BUILD_LIBSTAGE2_ARGS "build-lib"
|
||||
"src-self-hosted/stage2.zig"
|
||||
set(BUILD_ZIG1_ARGS
|
||||
"src/stage1.zig"
|
||||
-target "${ZIG_TARGET_TRIPLE}"
|
||||
"-mcpu=${ZIG_TARGET_MCPU}"
|
||||
--name zigstage2
|
||||
--name zig1
|
||||
--override-lib-dir "${CMAKE_SOURCE_DIR}/lib"
|
||||
--cache on
|
||||
--output-dir "${CMAKE_BINARY_DIR}"
|
||||
${LIBSTAGE2_RELEASE_ARG}
|
||||
--bundle-compiler-rt
|
||||
-fPIC
|
||||
"-femit-bin=${ZIG1_OBJECT}"
|
||||
"${ZIG1_RELEASE_ARG}"
|
||||
-lc
|
||||
--pkg-begin build_options "${ZIG_CONFIG_ZIG_OUT}"
|
||||
--pkg-end
|
||||
--pkg-begin compiler_rt "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt.zig"
|
||||
--pkg-end
|
||||
)
|
||||
|
||||
if("${ZIG_TARGET_TRIPLE}" STREQUAL "native")
|
||||
add_custom_target(zig_build_libstage2 ALL
|
||||
COMMAND zig0 ${BUILD_LIBSTAGE2_ARGS}
|
||||
add_custom_target(zig_build_zig1 ALL
|
||||
COMMAND zig0 ${BUILD_ZIG1_ARGS}
|
||||
DEPENDS zig0
|
||||
BYPRODUCTS "${LIBSTAGE2}"
|
||||
BYPRODUCTS "${ZIG1_OBJECT}"
|
||||
COMMENT STATUS "Building self-hosted component ${ZIG1_OBJECT}"
|
||||
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
|
||||
)
|
||||
set(ZIG_EXECUTABLE "${zig_BINARY_DIR}/zig")
|
||||
@ -499,26 +486,28 @@ if("${ZIG_TARGET_TRIPLE}" STREQUAL "native")
|
||||
set(ZIG_EXECUTABLE "${ZIG_EXECUTABLE}.exe")
|
||||
endif()
|
||||
else()
|
||||
add_custom_target(zig_build_libstage2 ALL
|
||||
COMMAND "${ZIG_EXECUTABLE}" ${BUILD_LIBSTAGE2_ARGS}
|
||||
BYPRODUCTS "${LIBSTAGE2}"
|
||||
add_custom_target(zig_build_zig1 ALL
|
||||
COMMAND "${ZIG_EXECUTABLE}" ${BUILD_ZIG1_ARGS}
|
||||
BYPRODUCTS "${ZIG1_OBJECT}"
|
||||
COMMENT STATUS "Building self-hosted component ${ZIG1_OBJECT}"
|
||||
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
|
||||
)
|
||||
endif()
|
||||
|
||||
add_executable(zig "${ZIG_MAIN_SRC}")
|
||||
# cmake won't let us configure an executable without C sources.
|
||||
add_executable(zig "${CMAKE_SOURCE_DIR}/src/stage1/empty.cpp")
|
||||
|
||||
set_target_properties(zig PROPERTIES
|
||||
COMPILE_FLAGS ${EXE_CFLAGS}
|
||||
LINK_FLAGS ${EXE_LDFLAGS}
|
||||
)
|
||||
target_link_libraries(zig zigcompiler "${LIBSTAGE2}")
|
||||
target_link_libraries(zig "${ZIG1_OBJECT}" zigstage1)
|
||||
if(MSVC)
|
||||
target_link_libraries(zig ntdll.lib)
|
||||
elseif(MINGW)
|
||||
target_link_libraries(zig ntdll)
|
||||
endif()
|
||||
add_dependencies(zig zig_build_libstage2)
|
||||
add_dependencies(zig zig_build_zig1)
|
||||
|
||||
install(TARGETS zig DESTINATION bin)
|
||||
|
||||
|
||||
37
README.md
37
README.md
@ -22,6 +22,8 @@ Note that you can
|
||||
|
||||
### Stage 1: Build Zig from C++ Source Code
|
||||
|
||||
This step must be repeated when you make changes to any of the C++ source code.
|
||||
|
||||
#### Dependencies
|
||||
|
||||
##### POSIX
|
||||
@ -82,6 +84,41 @@ in which case you can try `-DZIG_WORKAROUND_6087=ON`.
|
||||
|
||||
See https://github.com/ziglang/zig/wiki/Building-Zig-on-Windows
|
||||
|
||||
### Stage 2: Build Self-Hosted Zig from Zig Source Code
|
||||
|
||||
Now we use the stage1 binary:
|
||||
|
||||
```
|
||||
zig build --prefix $(pwd)/stage2 -Denable-llvm
|
||||
```
|
||||
|
||||
This produces `stage2/bin/zig` which can be used for testing and development.
|
||||
Once it is feature complete, it will be used to build stage 3 - the final compiler
|
||||
binary.
|
||||
|
||||
### Stage 3: Rebuild Self-Hosted Zig Using the Self-Hosted Compiler
|
||||
|
||||
*Note: Stage 2 compiler is not yet able to build Stage 3. Building Stage 3 is
|
||||
not yet supported.*
|
||||
|
||||
Once the self-hosted compiler can build itself, this will be the actual
|
||||
compiler binary that we will install to the system. Until then, users should
|
||||
use stage 1.
|
||||
|
||||
#### Debug / Development Build
|
||||
|
||||
```
|
||||
stage2/bin/zig build
|
||||
```
|
||||
|
||||
This produces `zig-cache/bin/zig`.
|
||||
|
||||
#### Release / Install Build
|
||||
|
||||
```
|
||||
stage2/bin/zig build install -Drelease
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
The ultimate goal of the Zig project is to serve users. As a first-order
|
||||
|
||||
210
build.zig
210
build.zig
@ -9,6 +9,7 @@ const ArrayList = std.ArrayList;
|
||||
const io = std.io;
|
||||
const fs = std.fs;
|
||||
const InstallDirectoryOptions = std.build.InstallDirectoryOptions;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const zig_version = std.builtin.Version{ .major = 0, .minor = 6, .patch = 0 };
|
||||
|
||||
@ -37,7 +38,7 @@ pub fn build(b: *Builder) !void {
|
||||
|
||||
const test_step = b.step("test", "Run all the tests");
|
||||
|
||||
var test_stage2 = b.addTest("src-self-hosted/test.zig");
|
||||
var test_stage2 = b.addTest("src/test.zig");
|
||||
test_stage2.setBuildMode(mode);
|
||||
test_stage2.addPackagePath("stage2_tests", "test/stage2/test.zig");
|
||||
|
||||
@ -55,70 +56,6 @@ pub fn build(b: *Builder) !void {
|
||||
const enable_llvm = b.option(bool, "enable-llvm", "Build self-hosted compiler with LLVM backend enabled") orelse false;
|
||||
const config_h_path_option = b.option([]const u8, "config_h", "Path to the generated config.h");
|
||||
|
||||
if (!only_install_lib_files) {
|
||||
var exe = b.addExecutable("zig", "src-self-hosted/main.zig");
|
||||
exe.setBuildMode(mode);
|
||||
exe.setTarget(target);
|
||||
test_step.dependOn(&exe.step);
|
||||
b.default_step.dependOn(&exe.step);
|
||||
|
||||
if (enable_llvm) {
|
||||
const config_h_text = if (config_h_path_option) |config_h_path|
|
||||
try std.fs.cwd().readFileAlloc(b.allocator, toNativePathSep(b, config_h_path), max_config_h_bytes)
|
||||
else
|
||||
try findAndReadConfigH(b);
|
||||
|
||||
var ctx = parseConfigH(b, config_h_text);
|
||||
ctx.llvm = try findLLVM(b, ctx.llvm_config_exe);
|
||||
|
||||
try configureStage2(b, exe, ctx);
|
||||
}
|
||||
if (!only_install_lib_files) {
|
||||
exe.install();
|
||||
}
|
||||
const tracy = b.option([]const u8, "tracy", "Enable Tracy integration. Supply path to Tracy source");
|
||||
const link_libc = b.option(bool, "force-link-libc", "Force self-hosted compiler to link libc") orelse false;
|
||||
if (link_libc) {
|
||||
exe.linkLibC();
|
||||
test_stage2.linkLibC();
|
||||
}
|
||||
|
||||
const log_scopes = b.option([]const []const u8, "log", "Which log scopes to enable") orelse &[0][]const u8{};
|
||||
const zir_dumps = b.option([]const []const u8, "dump-zir", "Which functions to dump ZIR for before codegen") orelse &[0][]const u8{};
|
||||
|
||||
const opt_version_string = b.option([]const u8, "version-string", "Override Zig version string. Default is to find out with git.");
|
||||
const version = if (opt_version_string) |version| version else v: {
|
||||
var code: u8 = undefined;
|
||||
const version_untrimmed = b.execAllowFail(&[_][]const u8{
|
||||
"git", "-C", b.build_root, "name-rev", "HEAD",
|
||||
"--tags", "--name-only", "--no-undefined", "--always",
|
||||
}, &code, .Ignore) catch |err| {
|
||||
std.debug.print(
|
||||
\\Unable to determine zig version string: {}
|
||||
\\Provide the zig version string explicitly using the `version-string` build option.
|
||||
, .{err});
|
||||
std.process.exit(1);
|
||||
};
|
||||
const trimmed = mem.trim(u8, version_untrimmed, " \n\r");
|
||||
break :v b.fmt("{}.{}.{}+{}", .{ zig_version.major, zig_version.minor, zig_version.patch, trimmed });
|
||||
};
|
||||
exe.addBuildOption([]const u8, "version", version);
|
||||
|
||||
exe.addBuildOption([]const []const u8, "log_scopes", log_scopes);
|
||||
exe.addBuildOption([]const []const u8, "zir_dumps", zir_dumps);
|
||||
exe.addBuildOption(bool, "enable_tracy", tracy != null);
|
||||
if (tracy) |tracy_path| {
|
||||
const client_cpp = fs.path.join(
|
||||
b.allocator,
|
||||
&[_][]const u8{ tracy_path, "TracyClient.cpp" },
|
||||
) catch unreachable;
|
||||
exe.addIncludeDir(tracy_path);
|
||||
exe.addCSourceFile(client_cpp, &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" });
|
||||
exe.linkSystemLibraryName("c++");
|
||||
exe.linkLibC();
|
||||
}
|
||||
}
|
||||
|
||||
b.installDirectory(InstallDirectoryOptions{
|
||||
.source_dir = "lib",
|
||||
.install_dir = .Lib,
|
||||
@ -133,6 +70,95 @@ pub fn build(b: *Builder) !void {
|
||||
},
|
||||
});
|
||||
|
||||
if (only_install_lib_files)
|
||||
return;
|
||||
|
||||
const tracy = b.option([]const u8, "tracy", "Enable Tracy integration. Supply path to Tracy source");
|
||||
const link_libc = b.option(bool, "force-link-libc", "Force self-hosted compiler to link libc") orelse enable_llvm;
|
||||
|
||||
var exe = b.addExecutable("zig", "src/main.zig");
|
||||
exe.install();
|
||||
exe.setBuildMode(mode);
|
||||
exe.setTarget(target);
|
||||
test_step.dependOn(&exe.step);
|
||||
b.default_step.dependOn(&exe.step);
|
||||
|
||||
exe.addBuildOption(bool, "have_llvm", enable_llvm);
|
||||
if (enable_llvm) {
|
||||
const config_h_text = if (config_h_path_option) |config_h_path|
|
||||
try std.fs.cwd().readFileAlloc(b.allocator, toNativePathSep(b, config_h_path), max_config_h_bytes)
|
||||
else
|
||||
try findAndReadConfigH(b);
|
||||
|
||||
var ctx = parseConfigH(b, config_h_text);
|
||||
ctx.llvm = try findLLVM(b, ctx.llvm_config_exe);
|
||||
|
||||
try configureStage2(b, exe, ctx, tracy != null);
|
||||
}
|
||||
if (link_libc) {
|
||||
exe.linkLibC();
|
||||
test_stage2.linkLibC();
|
||||
}
|
||||
|
||||
const log_scopes = b.option([]const []const u8, "log", "Which log scopes to enable") orelse &[0][]const u8{};
|
||||
const zir_dumps = b.option([]const []const u8, "dump-zir", "Which functions to dump ZIR for before codegen") orelse &[0][]const u8{};
|
||||
|
||||
const opt_version_string = b.option([]const u8, "version-string", "Override Zig version string. Default is to find out with git.");
|
||||
const version = if (opt_version_string) |version| version else v: {
|
||||
const version_string = b.fmt("{}.{}.{}", .{ zig_version.major, zig_version.minor, zig_version.patch });
|
||||
|
||||
var code: u8 = undefined;
|
||||
const git_sha_untrimmed = b.execAllowFail(&[_][]const u8{
|
||||
"git", "-C", b.build_root, "name-rev", "HEAD",
|
||||
"--tags", "--name-only", "--no-undefined", "--always",
|
||||
}, &code, .Ignore) catch {
|
||||
break :v version_string;
|
||||
};
|
||||
const git_sha_trimmed = mem.trim(u8, git_sha_untrimmed, " \n\r");
|
||||
// Detect dirty changes.
|
||||
const diff_untrimmed = b.execAllowFail(&[_][]const u8{
|
||||
"git", "-C", b.build_root, "diff", "HEAD",
|
||||
}, &code, .Ignore) catch |err| {
|
||||
std.debug.print("Error executing git diff: {}", .{err});
|
||||
std.process.exit(1);
|
||||
};
|
||||
const trimmed_diff = mem.trim(u8, diff_untrimmed, " \n\r");
|
||||
const dirty_suffix = if (trimmed_diff.len == 0) "" else s: {
|
||||
const dirty_hash = std.hash.Wyhash.hash(0, trimmed_diff);
|
||||
break :s b.fmt("dirty{x}", .{@truncate(u32, dirty_hash)});
|
||||
};
|
||||
|
||||
// This will look like e.g. "0.6.0^0" for a tag commit.
|
||||
if (mem.endsWith(u8, git_sha_trimmed, "^0")) {
|
||||
const git_ver_string = git_sha_trimmed[0 .. git_sha_trimmed.len - 2];
|
||||
if (!mem.eql(u8, git_ver_string, version_string)) {
|
||||
std.debug.print("Expected git tag '{}', found '{}'", .{ version_string, git_ver_string });
|
||||
std.process.exit(1);
|
||||
}
|
||||
break :v b.fmt("{}{}", .{ version_string, dirty_suffix });
|
||||
} else {
|
||||
break :v b.fmt("{}+{}{}", .{ version_string, git_sha_trimmed, dirty_suffix });
|
||||
}
|
||||
};
|
||||
exe.addBuildOption([]const u8, "version", version);
|
||||
|
||||
exe.addBuildOption([]const []const u8, "log_scopes", log_scopes);
|
||||
exe.addBuildOption([]const []const u8, "zir_dumps", zir_dumps);
|
||||
exe.addBuildOption(bool, "enable_tracy", tracy != null);
|
||||
exe.addBuildOption(bool, "is_stage1", false);
|
||||
if (tracy) |tracy_path| {
|
||||
const client_cpp = fs.path.join(
|
||||
b.allocator,
|
||||
&[_][]const u8{ tracy_path, "TracyClient.cpp" },
|
||||
) catch unreachable;
|
||||
exe.addIncludeDir(tracy_path);
|
||||
exe.addCSourceFile(client_cpp, &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" });
|
||||
if (!enable_llvm) {
|
||||
exe.linkSystemLibraryName("c++");
|
||||
}
|
||||
exe.linkLibC();
|
||||
}
|
||||
|
||||
const test_filter = b.option([]const u8, "test-filter", "Skip tests that do not match filter");
|
||||
|
||||
const is_wine_enabled = b.option(bool, "enable-wine", "Use Wine to run cross compiled Windows tests") orelse false;
|
||||
@ -140,10 +166,13 @@ pub fn build(b: *Builder) !void {
|
||||
const is_wasmtime_enabled = b.option(bool, "enable-wasmtime", "Use Wasmtime to enable and run WASI libstd tests") orelse false;
|
||||
const glibc_multi_dir = b.option([]const u8, "enable-foreign-glibc", "Provide directory with glibc installations to run cross compiled tests that link glibc");
|
||||
|
||||
test_stage2.addBuildOption(bool, "is_stage1", false);
|
||||
test_stage2.addBuildOption(bool, "have_llvm", enable_llvm);
|
||||
test_stage2.addBuildOption(bool, "enable_qemu", is_qemu_enabled);
|
||||
test_stage2.addBuildOption(bool, "enable_wine", is_wine_enabled);
|
||||
test_stage2.addBuildOption(bool, "enable_wasmtime", is_wasmtime_enabled);
|
||||
test_stage2.addBuildOption(?[]const u8, "glibc_multi_install_dir", glibc_multi_dir);
|
||||
test_stage2.addBuildOption([]const u8, "version", version);
|
||||
|
||||
const test_stage2_step = b.step("test-stage2", "Run the stage2 compiler tests");
|
||||
test_stage2_step.dependOn(&test_stage2.step);
|
||||
@ -182,10 +211,7 @@ pub fn build(b: *Builder) !void {
|
||||
test_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes));
|
||||
test_step.dependOn(tests.addStandaloneTests(b, test_filter, modes));
|
||||
test_step.dependOn(tests.addStackTraceTests(b, test_filter, modes));
|
||||
const test_cli = tests.addCliTests(b, test_filter, modes);
|
||||
const test_cli_step = b.step("test-cli", "Run zig cli tests");
|
||||
test_cli_step.dependOn(test_cli);
|
||||
test_step.dependOn(test_cli);
|
||||
test_step.dependOn(tests.addCliTests(b, test_filter, modes));
|
||||
test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes));
|
||||
test_step.dependOn(tests.addRuntimeSafetyTests(b, test_filter, modes));
|
||||
test_step.dependOn(tests.addTranslateCTests(b, test_filter));
|
||||
@ -241,7 +267,7 @@ fn fileExists(filename: []const u8) !bool {
|
||||
fn addCppLib(b: *Builder, lib_exe_obj: anytype, cmake_binary_dir: []const u8, lib_name: []const u8) void {
|
||||
lib_exe_obj.addObjectFile(fs.path.join(b.allocator, &[_][]const u8{
|
||||
cmake_binary_dir,
|
||||
"zig_cpp",
|
||||
"zigcpp",
|
||||
b.fmt("{}{}{}", .{ lib_exe_obj.target.libPrefix(), lib_name, lib_exe_obj.target.staticLibSuffix() }),
|
||||
}) catch unreachable);
|
||||
}
|
||||
@ -320,21 +346,17 @@ fn findLLVM(b: *Builder, llvm_config_exe: []const u8) !LibraryDep {
|
||||
return result;
|
||||
}
|
||||
|
||||
fn configureStage2(b: *Builder, exe: anytype, ctx: Context) !void {
|
||||
fn configureStage2(b: *Builder, exe: anytype, ctx: Context, need_cpp_includes: bool) !void {
|
||||
exe.addIncludeDir("src");
|
||||
exe.addIncludeDir(ctx.cmake_binary_dir);
|
||||
addCppLib(b, exe, ctx.cmake_binary_dir, "zig_cpp");
|
||||
if (ctx.lld_include_dir.len != 0) {
|
||||
addCppLib(b, exe, ctx.cmake_binary_dir, "zigcpp");
|
||||
assert(ctx.lld_include_dir.len != 0);
|
||||
exe.addIncludeDir(ctx.lld_include_dir);
|
||||
{
|
||||
var it = mem.tokenize(ctx.lld_libraries, ";");
|
||||
while (it.next()) |lib| {
|
||||
exe.addObjectFile(lib);
|
||||
}
|
||||
} else {
|
||||
addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_wasm");
|
||||
addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_elf");
|
||||
addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_coff");
|
||||
addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_lib");
|
||||
}
|
||||
{
|
||||
var it = mem.tokenize(ctx.clang_libraries, ";");
|
||||
@ -344,10 +366,20 @@ fn configureStage2(b: *Builder, exe: anytype, ctx: Context) !void {
|
||||
}
|
||||
dependOnLib(b, exe, ctx.llvm);
|
||||
|
||||
// Boy, it sure would be nice to simply linkSystemLibrary("c++") and rely on zig's
|
||||
// ability to provide libc++ right? Well thanks to C++ not having a stable ABI this
|
||||
// will cause linker errors. It would work in the situation when `zig cc` is used to
|
||||
// build LLVM, Clang, and LLD, however when depending on them as system libraries, system
|
||||
// libc++ must be used.
|
||||
const cross_compile = false; // TODO
|
||||
if (cross_compile) {
|
||||
// In this case we assume that zig cc was used to build the LLVM, Clang, LLD dependencies.
|
||||
exe.linkSystemLibrary("c++");
|
||||
} else {
|
||||
if (exe.target.getOsTag() == .linux) {
|
||||
// First we try to static link against gcc libstdc++. If that doesn't work,
|
||||
// we fall back to -lc++ and cross our fingers.
|
||||
addCxxKnownPath(b, ctx, exe, "libstdc++.a", "") catch |err| switch (err) {
|
||||
addCxxKnownPath(b, ctx, exe, "libstdc++.a", "", need_cpp_includes) catch |err| switch (err) {
|
||||
error.RequiredLibraryNotFound => {
|
||||
exe.linkSystemLibrary("c++");
|
||||
},
|
||||
@ -356,12 +388,12 @@ fn configureStage2(b: *Builder, exe: anytype, ctx: Context) !void {
|
||||
|
||||
exe.linkSystemLibrary("pthread");
|
||||
} else if (exe.target.isFreeBSD()) {
|
||||
try addCxxKnownPath(b, ctx, exe, "libc++.a", null);
|
||||
try addCxxKnownPath(b, ctx, exe, "libc++.a", null, need_cpp_includes);
|
||||
exe.linkSystemLibrary("pthread");
|
||||
} else if (exe.target.isDarwin()) {
|
||||
if (addCxxKnownPath(b, ctx, exe, "libgcc_eh.a", "")) {
|
||||
if (addCxxKnownPath(b, ctx, exe, "libgcc_eh.a", "", need_cpp_includes)) {
|
||||
// Compiler is GCC.
|
||||
try addCxxKnownPath(b, ctx, exe, "libstdc++.a", null);
|
||||
try addCxxKnownPath(b, ctx, exe, "libstdc++.a", null, need_cpp_includes);
|
||||
exe.linkSystemLibrary("pthread");
|
||||
// TODO LLD cannot perform this link.
|
||||
// See https://github.com/ziglang/zig/issues/1535
|
||||
@ -378,8 +410,7 @@ fn configureStage2(b: *Builder, exe: anytype, ctx: Context) !void {
|
||||
if (ctx.dia_guids_lib.len != 0) {
|
||||
exe.addObjectFile(ctx.dia_guids_lib);
|
||||
}
|
||||
|
||||
exe.linkSystemLibrary("c");
|
||||
}
|
||||
}
|
||||
|
||||
fn addCxxKnownPath(
|
||||
@ -388,6 +419,7 @@ fn addCxxKnownPath(
|
||||
exe: anytype,
|
||||
objname: []const u8,
|
||||
errtxt: ?[]const u8,
|
||||
need_cpp_includes: bool,
|
||||
) !void {
|
||||
const path_padded = try b.exec(&[_][]const u8{
|
||||
ctx.cxx_compiler,
|
||||
@ -403,6 +435,16 @@ fn addCxxKnownPath(
|
||||
return error.RequiredLibraryNotFound;
|
||||
}
|
||||
exe.addObjectFile(path_unpadded);
|
||||
|
||||
// TODO a way to integrate with system c++ include files here
|
||||
// cc -E -Wp,-v -xc++ /dev/null
|
||||
if (need_cpp_includes) {
|
||||
// I used these temporarily for testing something but we obviously need a
|
||||
// more general purpose solution here.
|
||||
//exe.addIncludeDir("/nix/store/b3zsk4ihlpiimv3vff86bb5bxghgdzb9-gcc-9.2.0/lib/gcc/x86_64-unknown-linux-gnu/9.2.0/../../../../include/c++/9.2.0");
|
||||
//exe.addIncludeDir("/nix/store/b3zsk4ihlpiimv3vff86bb5bxghgdzb9-gcc-9.2.0/lib/gcc/x86_64-unknown-linux-gnu/9.2.0/../../../../include/c++/9.2.0/x86_64-unknown-linux-gnu");
|
||||
//exe.addIncludeDir("/nix/store/b3zsk4ihlpiimv3vff86bb5bxghgdzb9-gcc-9.2.0/lib/gcc/x86_64-unknown-linux-gnu/9.2.0/../../../../include/c++/9.2.0/backward");
|
||||
}
|
||||
}
|
||||
|
||||
const Context = struct {
|
||||
|
||||
@ -28,22 +28,6 @@ PATH=$PWD/$WASMTIME:$PATH
|
||||
# This will affect the cmake command below.
|
||||
git config core.abbrev 9
|
||||
|
||||
# This patch is a workaround for
|
||||
# https://bugs.llvm.org/show_bug.cgi?id=44870 / https://github.com/llvm/llvm-project/issues/191
|
||||
# It only applies to the apt.llvm.org packages.
|
||||
patch <<'END_PATCH'
|
||||
--- CMakeLists.txt
|
||||
+++ CMakeLists.txt
|
||||
@@ -369,6 +369,7 @@ target_link_libraries(zig_cpp LINK_PUBLIC
|
||||
${CLANG_LIBRARIES}
|
||||
${LLD_LIBRARIES}
|
||||
${LLVM_LIBRARIES}
|
||||
+ "-Wl,/usr/lib/llvm-10/lib/LLVMPolly.so"
|
||||
)
|
||||
|
||||
add_library(opt_c_util STATIC ${OPTIMIZED_C_SOURCES})
|
||||
END_PATCH
|
||||
|
||||
export CC=gcc-7
|
||||
export CXX=g++-7
|
||||
mkdir build
|
||||
|
||||
@ -7,6 +7,13 @@ pacman --noconfirm --needed -S git base-devel mingw-w64-x86_64-toolchain mingw-w
|
||||
|
||||
git config core.abbrev 9
|
||||
|
||||
# Git is wrong for autocrlf being enabled by default on Windows.
|
||||
# git is mangling files on Windows by default.
|
||||
# This is the second bug I've tracked down to being caused by autocrlf.
|
||||
git config core.autocrlf false
|
||||
# Too late; the files are already mangled.
|
||||
git checkout .
|
||||
|
||||
ZIGBUILDDIR="$(pwd)/build"
|
||||
PREFIX="$ZIGBUILDDIR/dist"
|
||||
CMAKEFLAGS="-DCMAKE_COLOR_MAKEFILE=OFF -DCMAKE_INSTALL_PREFIX=$PREFIX -DZIG_STATIC=ON"
|
||||
|
||||
@ -21,6 +21,11 @@ cd $ZIGDIR
|
||||
# This will affect the cmake command below.
|
||||
git config core.abbrev 9
|
||||
|
||||
# SourceHut reports that it is a terminal that supports escape codes, but it
|
||||
# is a filthy liar. Here we tell Zig to not try to send any terminal escape
|
||||
# codes to show progress.
|
||||
export TERM=dumb
|
||||
|
||||
mkdir build
|
||||
cd build
|
||||
cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=$PREFIX "-DCMAKE_INSTALL_PREFIX=$(pwd)/release" -DZIG_STATIC=ON
|
||||
|
||||
247
doc/docgen.zig
247
doc/docgen.zig
@ -4,7 +4,7 @@ const io = std.io;
|
||||
const fs = std.fs;
|
||||
const process = std.process;
|
||||
const ChildProcess = std.ChildProcess;
|
||||
const warn = std.debug.warn;
|
||||
const print = std.debug.print;
|
||||
const mem = std.mem;
|
||||
const testing = std.testing;
|
||||
|
||||
@ -215,23 +215,23 @@ const Tokenizer = struct {
|
||||
fn parseError(tokenizer: *Tokenizer, token: Token, comptime fmt: []const u8, args: anytype) anyerror {
|
||||
const loc = tokenizer.getTokenLocation(token);
|
||||
const args_prefix = .{ tokenizer.source_file_name, loc.line + 1, loc.column + 1 };
|
||||
warn("{}:{}:{}: error: " ++ fmt ++ "\n", args_prefix ++ args);
|
||||
print("{}:{}:{}: error: " ++ fmt ++ "\n", args_prefix ++ args);
|
||||
if (loc.line_start <= loc.line_end) {
|
||||
warn("{}\n", .{tokenizer.buffer[loc.line_start..loc.line_end]});
|
||||
print("{}\n", .{tokenizer.buffer[loc.line_start..loc.line_end]});
|
||||
{
|
||||
var i: usize = 0;
|
||||
while (i < loc.column) : (i += 1) {
|
||||
warn(" ", .{});
|
||||
print(" ", .{});
|
||||
}
|
||||
}
|
||||
{
|
||||
const caret_count = token.end - token.start;
|
||||
var i: usize = 0;
|
||||
while (i < caret_count) : (i += 1) {
|
||||
warn("~", .{});
|
||||
print("~", .{});
|
||||
}
|
||||
}
|
||||
warn("\n", .{});
|
||||
print("\n", .{});
|
||||
}
|
||||
return error.ParseError;
|
||||
}
|
||||
@ -274,6 +274,7 @@ const Code = struct {
|
||||
link_objects: []const []const u8,
|
||||
target_str: ?[]const u8,
|
||||
link_libc: bool,
|
||||
disable_cache: bool,
|
||||
|
||||
const Id = union(enum) {
|
||||
Test,
|
||||
@ -522,6 +523,7 @@ fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
|
||||
defer link_objects.deinit();
|
||||
var target_str: ?[]const u8 = null;
|
||||
var link_libc = false;
|
||||
var disable_cache = false;
|
||||
|
||||
const source_token = while (true) {
|
||||
const content_tok = try eatToken(tokenizer, Token.Id.Content);
|
||||
@ -532,6 +534,8 @@ fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
|
||||
mode = .ReleaseFast;
|
||||
} else if (mem.eql(u8, end_tag_name, "code_release_safe")) {
|
||||
mode = .ReleaseSafe;
|
||||
} else if (mem.eql(u8, end_tag_name, "code_disable_cache")) {
|
||||
disable_cache = true;
|
||||
} else if (mem.eql(u8, end_tag_name, "code_link_object")) {
|
||||
_ = try eatToken(tokenizer, Token.Id.Separator);
|
||||
const obj_tok = try eatToken(tokenizer, Token.Id.TagContent);
|
||||
@ -572,6 +576,7 @@ fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
|
||||
.link_objects = link_objects.toOwnedSlice(),
|
||||
.target_str = target_str,
|
||||
.link_libc = link_libc,
|
||||
.disable_cache = disable_cache,
|
||||
},
|
||||
});
|
||||
tokenizer.code_node_count += 1;
|
||||
@ -1032,7 +1037,7 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
|
||||
},
|
||||
.Code => |code| {
|
||||
code_progress_index += 1;
|
||||
warn("docgen example code {}/{}...", .{ code_progress_index, tokenizer.code_node_count });
|
||||
print("docgen example code {}/{}...", .{ code_progress_index, tokenizer.code_node_count });
|
||||
|
||||
const raw_source = tokenizer.buffer[code.source_token.start..code.source_token.end];
|
||||
const trimmed_raw_source = mem.trim(u8, raw_source, " \n");
|
||||
@ -1055,30 +1060,17 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
|
||||
var build_args = std.ArrayList([]const u8).init(allocator);
|
||||
defer build_args.deinit();
|
||||
try build_args.appendSlice(&[_][]const u8{
|
||||
zig_exe,
|
||||
"build-exe",
|
||||
tmp_source_file_name,
|
||||
"--name",
|
||||
code.name,
|
||||
"--color",
|
||||
"on",
|
||||
"--cache",
|
||||
"on",
|
||||
zig_exe, "build-exe",
|
||||
"--name", code.name,
|
||||
"--color", "on",
|
||||
"--enable-cache", tmp_source_file_name,
|
||||
});
|
||||
try out.print("<pre><code class=\"shell\">$ zig build-exe {}.zig", .{code.name});
|
||||
switch (code.mode) {
|
||||
.Debug => {},
|
||||
.ReleaseSafe => {
|
||||
try build_args.append("--release-safe");
|
||||
try out.print(" --release-safe", .{});
|
||||
},
|
||||
.ReleaseFast => {
|
||||
try build_args.append("--release-fast");
|
||||
try out.print(" --release-fast", .{});
|
||||
},
|
||||
.ReleaseSmall => {
|
||||
try build_args.append("--release-small");
|
||||
try out.print(" --release-small", .{});
|
||||
else => {
|
||||
try build_args.appendSlice(&[_][]const u8{ "-O", @tagName(code.mode) });
|
||||
try out.print(" -O {s}", .{@tagName(code.mode)});
|
||||
},
|
||||
}
|
||||
for (code.link_objects) |link_object| {
|
||||
@ -1087,9 +1079,8 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
|
||||
allocator,
|
||||
&[_][]const u8{ tmp_dir_name, name_with_ext },
|
||||
);
|
||||
try build_args.append("--object");
|
||||
try build_args.append(full_path_object);
|
||||
try out.print(" --object {}", .{name_with_ext});
|
||||
try out.print(" {s}", .{name_with_ext});
|
||||
}
|
||||
if (code.link_libc) {
|
||||
try build_args.append("-lc");
|
||||
@ -1114,20 +1105,14 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
|
||||
switch (result.term) {
|
||||
.Exited => |exit_code| {
|
||||
if (exit_code == 0) {
|
||||
warn("{}\nThe following command incorrectly succeeded:\n", .{result.stderr});
|
||||
for (build_args.items) |arg|
|
||||
warn("{} ", .{arg})
|
||||
else
|
||||
warn("\n", .{});
|
||||
print("{}\nThe following command incorrectly succeeded:\n", .{result.stderr});
|
||||
dumpArgs(build_args.items);
|
||||
return parseError(tokenizer, code.source_token, "example incorrectly compiled", .{});
|
||||
}
|
||||
},
|
||||
else => {
|
||||
warn("{}\nThe following command crashed:\n", .{result.stderr});
|
||||
for (build_args.items) |arg|
|
||||
warn("{} ", .{arg})
|
||||
else
|
||||
warn("\n", .{});
|
||||
print("{}\nThe following command crashed:\n", .{result.stderr});
|
||||
dumpArgs(build_args.items);
|
||||
return parseError(tokenizer, code.source_token, "example compile crashed", .{});
|
||||
},
|
||||
}
|
||||
@ -1174,11 +1159,8 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
|
||||
switch (result.term) {
|
||||
.Exited => |exit_code| {
|
||||
if (exit_code == 0) {
|
||||
warn("{}\nThe following command incorrectly succeeded:\n", .{result.stderr});
|
||||
for (run_args) |arg|
|
||||
warn("{} ", .{arg})
|
||||
else
|
||||
warn("\n", .{});
|
||||
print("{}\nThe following command incorrectly succeeded:\n", .{result.stderr});
|
||||
dumpArgs(run_args);
|
||||
return parseError(tokenizer, code.source_token, "example incorrectly compiled", .{});
|
||||
}
|
||||
},
|
||||
@ -1206,27 +1188,13 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
|
||||
var test_args = std.ArrayList([]const u8).init(allocator);
|
||||
defer test_args.deinit();
|
||||
|
||||
try test_args.appendSlice(&[_][]const u8{
|
||||
zig_exe,
|
||||
"test",
|
||||
tmp_source_file_name,
|
||||
"--cache",
|
||||
"on",
|
||||
});
|
||||
try test_args.appendSlice(&[_][]const u8{ zig_exe, "test", tmp_source_file_name });
|
||||
try out.print("<pre><code class=\"shell\">$ zig test {}.zig", .{code.name});
|
||||
switch (code.mode) {
|
||||
.Debug => {},
|
||||
.ReleaseSafe => {
|
||||
try test_args.append("--release-safe");
|
||||
try out.print(" --release-safe", .{});
|
||||
},
|
||||
.ReleaseFast => {
|
||||
try test_args.append("--release-fast");
|
||||
try out.print(" --release-fast", .{});
|
||||
},
|
||||
.ReleaseSmall => {
|
||||
try test_args.append("--release-small");
|
||||
try out.print(" --release-small", .{});
|
||||
else => {
|
||||
try test_args.appendSlice(&[_][]const u8{ "-O", @tagName(code.mode) });
|
||||
try out.print(" -O {s}", .{@tagName(code.mode)});
|
||||
},
|
||||
}
|
||||
if (code.link_libc) {
|
||||
@ -1252,23 +1220,13 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
|
||||
"--color",
|
||||
"on",
|
||||
tmp_source_file_name,
|
||||
"--output-dir",
|
||||
tmp_dir_name,
|
||||
});
|
||||
try out.print("<pre><code class=\"shell\">$ zig test {}.zig", .{code.name});
|
||||
switch (code.mode) {
|
||||
.Debug => {},
|
||||
.ReleaseSafe => {
|
||||
try test_args.append("--release-safe");
|
||||
try out.print(" --release-safe", .{});
|
||||
},
|
||||
.ReleaseFast => {
|
||||
try test_args.append("--release-fast");
|
||||
try out.print(" --release-fast", .{});
|
||||
},
|
||||
.ReleaseSmall => {
|
||||
try test_args.append("--release-small");
|
||||
try out.print(" --release-small", .{});
|
||||
else => {
|
||||
try test_args.appendSlice(&[_][]const u8{ "-O", @tagName(code.mode) });
|
||||
try out.print(" -O {s}", .{@tagName(code.mode)});
|
||||
},
|
||||
}
|
||||
const result = try ChildProcess.exec(.{
|
||||
@ -1280,25 +1238,19 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
|
||||
switch (result.term) {
|
||||
.Exited => |exit_code| {
|
||||
if (exit_code == 0) {
|
||||
warn("{}\nThe following command incorrectly succeeded:\n", .{result.stderr});
|
||||
for (test_args.items) |arg|
|
||||
warn("{} ", .{arg})
|
||||
else
|
||||
warn("\n", .{});
|
||||
print("{}\nThe following command incorrectly succeeded:\n", .{result.stderr});
|
||||
dumpArgs(test_args.items);
|
||||
return parseError(tokenizer, code.source_token, "example incorrectly compiled", .{});
|
||||
}
|
||||
},
|
||||
else => {
|
||||
warn("{}\nThe following command crashed:\n", .{result.stderr});
|
||||
for (test_args.items) |arg|
|
||||
warn("{} ", .{arg})
|
||||
else
|
||||
warn("\n", .{});
|
||||
print("{}\nThe following command crashed:\n", .{result.stderr});
|
||||
dumpArgs(test_args.items);
|
||||
return parseError(tokenizer, code.source_token, "example compile crashed", .{});
|
||||
},
|
||||
}
|
||||
if (mem.indexOf(u8, result.stderr, error_match) == null) {
|
||||
warn("{}\nExpected to find '{}' in stderr", .{ result.stderr, error_match });
|
||||
print("{}\nExpected to find '{}' in stderr", .{ result.stderr, error_match });
|
||||
return parseError(tokenizer, code.source_token, "example did not have expected compile error", .{});
|
||||
}
|
||||
const escaped_stderr = try escapeHtml(allocator, result.stderr);
|
||||
@ -1314,23 +1266,21 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
|
||||
zig_exe,
|
||||
"test",
|
||||
tmp_source_file_name,
|
||||
"--output-dir",
|
||||
tmp_dir_name,
|
||||
});
|
||||
var mode_arg: []const u8 = "";
|
||||
switch (code.mode) {
|
||||
.Debug => {},
|
||||
.ReleaseSafe => {
|
||||
try test_args.append("--release-safe");
|
||||
mode_arg = " --release-safe";
|
||||
try test_args.append("-OReleaseSafe");
|
||||
mode_arg = "-OReleaseSafe";
|
||||
},
|
||||
.ReleaseFast => {
|
||||
try test_args.append("--release-fast");
|
||||
mode_arg = " --release-fast";
|
||||
try test_args.append("-OReleaseFast");
|
||||
mode_arg = "-OReleaseFast";
|
||||
},
|
||||
.ReleaseSmall => {
|
||||
try test_args.append("--release-small");
|
||||
mode_arg = " --release-small";
|
||||
try test_args.append("-OReleaseSmall");
|
||||
mode_arg = "-OReleaseSmall";
|
||||
},
|
||||
}
|
||||
|
||||
@ -1343,25 +1293,19 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
|
||||
switch (result.term) {
|
||||
.Exited => |exit_code| {
|
||||
if (exit_code == 0) {
|
||||
warn("{}\nThe following command incorrectly succeeded:\n", .{result.stderr});
|
||||
for (test_args.items) |arg|
|
||||
warn("{} ", .{arg})
|
||||
else
|
||||
warn("\n", .{});
|
||||
print("{}\nThe following command incorrectly succeeded:\n", .{result.stderr});
|
||||
dumpArgs(test_args.items);
|
||||
return parseError(tokenizer, code.source_token, "example test incorrectly succeeded", .{});
|
||||
}
|
||||
},
|
||||
else => {
|
||||
warn("{}\nThe following command crashed:\n", .{result.stderr});
|
||||
for (test_args.items) |arg|
|
||||
warn("{} ", .{arg})
|
||||
else
|
||||
warn("\n", .{});
|
||||
print("{}\nThe following command crashed:\n", .{result.stderr});
|
||||
dumpArgs(test_args.items);
|
||||
return parseError(tokenizer, code.source_token, "example compile crashed", .{});
|
||||
},
|
||||
}
|
||||
if (mem.indexOf(u8, result.stderr, error_match) == null) {
|
||||
warn("{}\nExpected to find '{}' in stderr", .{ result.stderr, error_match });
|
||||
print("{}\nExpected to find '{}' in stderr", .{ result.stderr, error_match });
|
||||
return parseError(tokenizer, code.source_token, "example did not have expected runtime safety error message", .{});
|
||||
}
|
||||
const escaped_stderr = try escapeHtml(allocator, result.stderr);
|
||||
@ -1395,32 +1339,20 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
|
||||
"on",
|
||||
"--name",
|
||||
code.name,
|
||||
"--output-dir",
|
||||
tmp_dir_name,
|
||||
try std.fmt.allocPrint(allocator, "-femit-bin={s}{c}{s}", .{
|
||||
tmp_dir_name, fs.path.sep, name_plus_obj_ext,
|
||||
}),
|
||||
});
|
||||
|
||||
if (!code.is_inline) {
|
||||
try out.print("<pre><code class=\"shell\">$ zig build-obj {}.zig", .{code.name});
|
||||
}
|
||||
|
||||
switch (code.mode) {
|
||||
.Debug => {},
|
||||
.ReleaseSafe => {
|
||||
try build_args.append("--release-safe");
|
||||
else => {
|
||||
try build_args.appendSlice(&[_][]const u8{ "-O", @tagName(code.mode) });
|
||||
if (!code.is_inline) {
|
||||
try out.print(" --release-safe", .{});
|
||||
}
|
||||
},
|
||||
.ReleaseFast => {
|
||||
try build_args.append("--release-fast");
|
||||
if (!code.is_inline) {
|
||||
try out.print(" --release-fast", .{});
|
||||
}
|
||||
},
|
||||
.ReleaseSmall => {
|
||||
try build_args.append("--release-small");
|
||||
if (!code.is_inline) {
|
||||
try out.print(" --release-small", .{});
|
||||
try out.print(" -O {s}", .{@tagName(code.mode)});
|
||||
}
|
||||
},
|
||||
}
|
||||
@ -1440,25 +1372,19 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
|
||||
switch (result.term) {
|
||||
.Exited => |exit_code| {
|
||||
if (exit_code == 0) {
|
||||
warn("{}\nThe following command incorrectly succeeded:\n", .{result.stderr});
|
||||
for (build_args.items) |arg|
|
||||
warn("{} ", .{arg})
|
||||
else
|
||||
warn("\n", .{});
|
||||
print("{}\nThe following command incorrectly succeeded:\n", .{result.stderr});
|
||||
dumpArgs(build_args.items);
|
||||
return parseError(tokenizer, code.source_token, "example build incorrectly succeeded", .{});
|
||||
}
|
||||
},
|
||||
else => {
|
||||
warn("{}\nThe following command crashed:\n", .{result.stderr});
|
||||
for (build_args.items) |arg|
|
||||
warn("{} ", .{arg})
|
||||
else
|
||||
warn("\n", .{});
|
||||
print("{}\nThe following command crashed:\n", .{result.stderr});
|
||||
dumpArgs(build_args.items);
|
||||
return parseError(tokenizer, code.source_token, "example compile crashed", .{});
|
||||
},
|
||||
}
|
||||
if (mem.indexOf(u8, result.stderr, error_match) == null) {
|
||||
warn("{}\nExpected to find '{}' in stderr", .{ result.stderr, error_match });
|
||||
print("{}\nExpected to find '{}' in stderr", .{ result.stderr, error_match });
|
||||
return parseError(tokenizer, code.source_token, "example did not have expected compile error message", .{});
|
||||
}
|
||||
const escaped_stderr = try escapeHtml(allocator, result.stderr);
|
||||
@ -1472,6 +1398,12 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
|
||||
}
|
||||
},
|
||||
Code.Id.Lib => {
|
||||
const bin_basename = try std.zig.binNameAlloc(allocator, .{
|
||||
.root_name = code.name,
|
||||
.target = std.Target.current,
|
||||
.output_mode = .Lib,
|
||||
});
|
||||
|
||||
var test_args = std.ArrayList([]const u8).init(allocator);
|
||||
defer test_args.deinit();
|
||||
|
||||
@ -1479,23 +1411,16 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
|
||||
zig_exe,
|
||||
"build-lib",
|
||||
tmp_source_file_name,
|
||||
"--output-dir",
|
||||
tmp_dir_name,
|
||||
try std.fmt.allocPrint(allocator, "-femit-bin={s}{s}{s}", .{
|
||||
tmp_dir_name, fs.path.sep_str, bin_basename,
|
||||
}),
|
||||
});
|
||||
try out.print("<pre><code class=\"shell\">$ zig build-lib {}.zig", .{code.name});
|
||||
switch (code.mode) {
|
||||
.Debug => {},
|
||||
.ReleaseSafe => {
|
||||
try test_args.append("--release-safe");
|
||||
try out.print(" --release-safe", .{});
|
||||
},
|
||||
.ReleaseFast => {
|
||||
try test_args.append("--release-fast");
|
||||
try out.print(" --release-fast", .{});
|
||||
},
|
||||
.ReleaseSmall => {
|
||||
try test_args.append("--release-small");
|
||||
try out.print(" --release-small", .{});
|
||||
else => {
|
||||
try test_args.appendSlice(&[_][]const u8{ "-O", @tagName(code.mode) });
|
||||
try out.print(" -O {s}", .{@tagName(code.mode)});
|
||||
},
|
||||
}
|
||||
if (code.target_str) |triple| {
|
||||
@ -1508,7 +1433,7 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
|
||||
try out.print("\n{}{}</code></pre>\n", .{ escaped_stderr, escaped_stdout });
|
||||
},
|
||||
}
|
||||
warn("OK\n", .{});
|
||||
print("OK\n", .{});
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -1524,20 +1449,14 @@ fn exec(allocator: *mem.Allocator, env_map: *std.BufMap, args: []const []const u
|
||||
switch (result.term) {
|
||||
.Exited => |exit_code| {
|
||||
if (exit_code != 0) {
|
||||
warn("{}\nThe following command exited with code {}:\n", .{ result.stderr, exit_code });
|
||||
for (args) |arg|
|
||||
warn("{} ", .{arg})
|
||||
else
|
||||
warn("\n", .{});
|
||||
print("{}\nThe following command exited with code {}:\n", .{ result.stderr, exit_code });
|
||||
dumpArgs(args);
|
||||
return error.ChildExitError;
|
||||
}
|
||||
},
|
||||
else => {
|
||||
warn("{}\nThe following command crashed:\n", .{result.stderr});
|
||||
for (args) |arg|
|
||||
warn("{} ", .{arg})
|
||||
else
|
||||
warn("\n", .{});
|
||||
print("{}\nThe following command crashed:\n", .{result.stderr});
|
||||
dumpArgs(args);
|
||||
return error.ChildCrashed;
|
||||
},
|
||||
}
|
||||
@ -1545,9 +1464,13 @@ fn exec(allocator: *mem.Allocator, env_map: *std.BufMap, args: []const []const u
|
||||
}
|
||||
|
||||
fn getBuiltinCode(allocator: *mem.Allocator, env_map: *std.BufMap, zig_exe: []const u8) ![]const u8 {
|
||||
const result = try exec(allocator, env_map, &[_][]const u8{
|
||||
zig_exe,
|
||||
"builtin",
|
||||
});
|
||||
const result = try exec(allocator, env_map, &[_][]const u8{ zig_exe, "build-obj", "--show-builtin" });
|
||||
return result.stdout;
|
||||
}
|
||||
|
||||
fn dumpArgs(args: []const []const u8) void {
|
||||
for (args) |arg|
|
||||
print("{} ", .{arg})
|
||||
else
|
||||
print("\n", .{});
|
||||
}
|
||||
|
||||
@ -1078,6 +1078,7 @@ const nan = std.math.nan(f128);
|
||||
but you can switch to {#syntax#}Optimized{#endsyntax#} mode on a per-block basis:</p>
|
||||
{#code_begin|obj|foo#}
|
||||
{#code_release_fast#}
|
||||
{#code_disable_cache#}
|
||||
const std = @import("std");
|
||||
const builtin = std.builtin;
|
||||
const big = @as(f64, 1 << 40);
|
||||
@ -9881,9 +9882,10 @@ The result is 3</code></pre>
|
||||
const std = @import("std");
|
||||
|
||||
pub fn main() !void {
|
||||
// TODO a better default allocator that isn't as wasteful!
|
||||
const args = try std.process.argsAlloc(std.heap.page_allocator);
|
||||
defer std.process.argsFree(std.heap.page_allocator, args);
|
||||
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
|
||||
const gpa = &general_purpose_allocator.allocator;
|
||||
const args = try std.process.argsAlloc(gpa);
|
||||
defer std.process.argsFree(gpa, args);
|
||||
|
||||
for (args) |arg, i| {
|
||||
std.debug.print("{}: {}\n", .{ i, arg });
|
||||
@ -11385,8 +11387,9 @@ keyword <- KEYWORD_align / KEYWORD_and / KEYWORD_anyframe / KEYWORD_anytype
|
||||
<li>Incremental improvements.</li>
|
||||
<li>Avoid local maximums.</li>
|
||||
<li>Reduce the amount one must remember.</li>
|
||||
<li>Minimize energy spent on coding style.</li>
|
||||
<li>Resource deallocation must succeed.</li>
|
||||
<li>Focus on code rather than style.</li>
|
||||
<li>Resource allocation may fail; resource deallocation must succeed.</li>
|
||||
<li>Memory is a resource.</li>
|
||||
<li>Together we serve the users.</li>
|
||||
</ul>
|
||||
{#header_close#}
|
||||
|
||||
@ -112,12 +112,10 @@ pub fn ArrayHashMap(
|
||||
return self.unmanaged.clearAndFree(self.allocator);
|
||||
}
|
||||
|
||||
/// Deprecated. Use `items().len`.
|
||||
pub fn count(self: Self) usize {
|
||||
return self.items().len;
|
||||
return self.unmanaged.count();
|
||||
}
|
||||
|
||||
/// Deprecated. Iterate using `items`.
|
||||
pub fn iterator(self: *const Self) Iterator {
|
||||
return Iterator{
|
||||
.hm = self,
|
||||
@ -332,6 +330,10 @@ pub fn ArrayHashMapUnmanaged(
|
||||
}
|
||||
}
|
||||
|
||||
pub fn count(self: Self) usize {
|
||||
return self.entries.items.len;
|
||||
}
|
||||
|
||||
/// If key exists this function cannot fail.
|
||||
/// If there is an existing item with `key`, then the result
|
||||
/// `Entry` pointer points to it, and found_existing is true.
|
||||
|
||||
@ -1384,6 +1384,7 @@ pub const LibExeObjStep = struct {
|
||||
}
|
||||
|
||||
fn computeOutFileNames(self: *LibExeObjStep) void {
|
||||
// TODO make this call std.zig.binNameAlloc
|
||||
switch (self.kind) {
|
||||
.Obj => {
|
||||
self.out_filename = self.builder.fmt("{}{}", .{ self.name, self.target.oFileExt() });
|
||||
@ -1699,8 +1700,6 @@ pub const LibExeObjStep = struct {
|
||||
self.main_pkg_path = dir_path;
|
||||
}
|
||||
|
||||
pub const setDisableGenH = @compileError("deprecated; set the emit_h field directly");
|
||||
|
||||
pub fn setLibCFile(self: *LibExeObjStep, libc_file: ?[]const u8) void {
|
||||
self.libc_file = libc_file;
|
||||
}
|
||||
@ -1961,10 +1960,10 @@ pub const LibExeObjStep = struct {
|
||||
|
||||
if (self.root_src) |root_src| try zig_args.append(root_src.getPath(builder));
|
||||
|
||||
var prev_has_extra_flags = false;
|
||||
for (self.link_objects.span()) |link_object| {
|
||||
switch (link_object) {
|
||||
.StaticPath => |static_path| {
|
||||
try zig_args.append("--object");
|
||||
try zig_args.append(builder.pathFromRoot(static_path));
|
||||
},
|
||||
|
||||
@ -1972,12 +1971,10 @@ pub const LibExeObjStep = struct {
|
||||
.Exe => unreachable,
|
||||
.Test => unreachable,
|
||||
.Obj => {
|
||||
try zig_args.append("--object");
|
||||
try zig_args.append(other.getOutputPath());
|
||||
},
|
||||
.Lib => {
|
||||
if (!other.is_dynamic or self.target.isWindows()) {
|
||||
try zig_args.append("--object");
|
||||
try zig_args.append(other.getOutputLibPath());
|
||||
} else {
|
||||
const full_path_lib = other.getOutputPath();
|
||||
@ -1996,14 +1993,27 @@ pub const LibExeObjStep = struct {
|
||||
try zig_args.append(name);
|
||||
},
|
||||
.AssemblyFile => |asm_file| {
|
||||
try zig_args.append("--c-source");
|
||||
if (prev_has_extra_flags) {
|
||||
try zig_args.append("-extra-cflags");
|
||||
try zig_args.append("--");
|
||||
prev_has_extra_flags = false;
|
||||
}
|
||||
try zig_args.append(asm_file.getPath(builder));
|
||||
},
|
||||
.CSourceFile => |c_source_file| {
|
||||
try zig_args.append("--c-source");
|
||||
if (c_source_file.args.len == 0) {
|
||||
if (prev_has_extra_flags) {
|
||||
try zig_args.append("-cflags");
|
||||
try zig_args.append("--");
|
||||
prev_has_extra_flags = false;
|
||||
}
|
||||
} else {
|
||||
try zig_args.append("-cflags");
|
||||
for (c_source_file.args) |arg| {
|
||||
try zig_args.append(arg);
|
||||
}
|
||||
try zig_args.append("--");
|
||||
}
|
||||
try zig_args.append(c_source_file.source.getPath(builder));
|
||||
},
|
||||
}
|
||||
@ -2078,10 +2088,8 @@ pub const LibExeObjStep = struct {
|
||||
}
|
||||
|
||||
switch (self.build_mode) {
|
||||
.Debug => {},
|
||||
.ReleaseSafe => zig_args.append("--release-safe") catch unreachable,
|
||||
.ReleaseFast => zig_args.append("--release-fast") catch unreachable,
|
||||
.ReleaseSmall => zig_args.append("--release-small") catch unreachable,
|
||||
.Debug => {}, // Skip since it's the default.
|
||||
else => zig_args.append(builder.fmt("-O{s}", .{@tagName(self.build_mode)})) catch unreachable,
|
||||
}
|
||||
|
||||
try zig_args.append("--cache-dir");
|
||||
@ -2092,14 +2100,8 @@ pub const LibExeObjStep = struct {
|
||||
|
||||
if (self.kind == Kind.Lib and self.is_dynamic) {
|
||||
if (self.version) |version| {
|
||||
zig_args.append("--ver-major") catch unreachable;
|
||||
zig_args.append(builder.fmt("{}", .{version.major})) catch unreachable;
|
||||
|
||||
zig_args.append("--ver-minor") catch unreachable;
|
||||
zig_args.append(builder.fmt("{}", .{version.minor})) catch unreachable;
|
||||
|
||||
zig_args.append("--ver-patch") catch unreachable;
|
||||
zig_args.append(builder.fmt("{}", .{version.patch})) catch unreachable;
|
||||
zig_args.append("--version") catch unreachable;
|
||||
zig_args.append(builder.fmt("{}", .{version})) catch unreachable;
|
||||
}
|
||||
}
|
||||
if (self.is_dynamic) {
|
||||
@ -2316,8 +2318,7 @@ pub const LibExeObjStep = struct {
|
||||
if (self.kind == Kind.Test) {
|
||||
try builder.spawnChild(zig_args.span());
|
||||
} else {
|
||||
try zig_args.append("--cache");
|
||||
try zig_args.append("on");
|
||||
try zig_args.append("--enable-cache");
|
||||
|
||||
const output_dir_nl = try builder.execFromStep(zig_args.span(), &self.step);
|
||||
const build_output_dir = mem.trimRight(u8, output_dir_nl, "\r\n");
|
||||
|
||||
@ -72,8 +72,7 @@ pub const TranslateCStep = struct {
|
||||
try argv_list.append("translate-c");
|
||||
try argv_list.append("-lc");
|
||||
|
||||
try argv_list.append("--cache");
|
||||
try argv_list.append("on");
|
||||
try argv_list.append("--enable-cache");
|
||||
|
||||
if (!self.target.isNative()) {
|
||||
try argv_list.append("-target");
|
||||
|
||||
@ -1,726 +0,0 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2015-2020 Zig Contributors
|
||||
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
|
||||
// The MIT license requires this copyright notice to be included in all copies
|
||||
// and substantial portions of the software.
|
||||
const std = @import("std.zig");
|
||||
const crypto = std.crypto;
|
||||
const Hasher = crypto.auth.siphash.SipHash128(1, 3); // provides enough collision resistance for the CacheHash use cases, while being one of our fastest options right now
|
||||
const fs = std.fs;
|
||||
const base64 = std.base64;
|
||||
const ArrayList = std.ArrayList;
|
||||
const assert = std.debug.assert;
|
||||
const testing = std.testing;
|
||||
const mem = std.mem;
|
||||
const fmt = std.fmt;
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
const base64_encoder = fs.base64_encoder;
|
||||
const base64_decoder = fs.base64_decoder;
|
||||
/// This is 128 bits - Even with 2^54 cache entries, the probably of a collision would be under 10^-6
|
||||
const BIN_DIGEST_LEN = 16;
|
||||
const BASE64_DIGEST_LEN = base64.Base64Encoder.calcSize(BIN_DIGEST_LEN);
|
||||
|
||||
const MANIFEST_FILE_SIZE_MAX = 50 * 1024 * 1024;
|
||||
|
||||
pub const File = struct {
|
||||
path: ?[]const u8,
|
||||
max_file_size: ?usize,
|
||||
stat: fs.File.Stat,
|
||||
bin_digest: [BIN_DIGEST_LEN]u8,
|
||||
contents: ?[]const u8,
|
||||
|
||||
pub fn deinit(self: *File, allocator: *Allocator) void {
|
||||
if (self.path) |owned_slice| {
|
||||
allocator.free(owned_slice);
|
||||
self.path = null;
|
||||
}
|
||||
if (self.contents) |contents| {
|
||||
allocator.free(contents);
|
||||
self.contents = null;
|
||||
}
|
||||
self.* = undefined;
|
||||
}
|
||||
};
|
||||
|
||||
/// CacheHash manages project-local `zig-cache` directories.
|
||||
/// This is not a general-purpose cache.
|
||||
/// It was designed to be fast and simple, not to withstand attacks using specially-crafted input.
|
||||
pub const CacheHash = struct {
|
||||
allocator: *Allocator,
|
||||
hasher_init: Hasher, // initial state, that can be copied
|
||||
hasher: Hasher, // current state for incremental hashing
|
||||
manifest_dir: fs.Dir,
|
||||
manifest_file: ?fs.File,
|
||||
manifest_dirty: bool,
|
||||
files: ArrayList(File),
|
||||
b64_digest: [BASE64_DIGEST_LEN]u8,
|
||||
|
||||
/// Be sure to call release after successful initialization.
|
||||
pub fn init(allocator: *Allocator, dir: fs.Dir, manifest_dir_path: []const u8) !CacheHash {
|
||||
const hasher_init = Hasher.init(&[_]u8{0} ** Hasher.minimum_key_length);
|
||||
return CacheHash{
|
||||
.allocator = allocator,
|
||||
.hasher_init = hasher_init,
|
||||
.hasher = hasher_init,
|
||||
.manifest_dir = try dir.makeOpenPath(manifest_dir_path, .{}),
|
||||
.manifest_file = null,
|
||||
.manifest_dirty = false,
|
||||
.files = ArrayList(File).init(allocator),
|
||||
.b64_digest = undefined,
|
||||
};
|
||||
}
|
||||
|
||||
/// Record a slice of bytes as an dependency of the process being cached
|
||||
pub fn addSlice(self: *CacheHash, val: []const u8) void {
|
||||
assert(self.manifest_file == null);
|
||||
|
||||
self.hasher.update(val);
|
||||
self.hasher.update(&[_]u8{0});
|
||||
}
|
||||
|
||||
/// Convert the input value into bytes and record it as a dependency of the
|
||||
/// process being cached
|
||||
pub fn add(self: *CacheHash, val: anytype) void {
|
||||
assert(self.manifest_file == null);
|
||||
|
||||
const valPtr = switch (@typeInfo(@TypeOf(val))) {
|
||||
.Int => &val,
|
||||
.Pointer => val,
|
||||
else => &val,
|
||||
};
|
||||
|
||||
self.addSlice(mem.asBytes(valPtr));
|
||||
}
|
||||
|
||||
/// Add a file as a dependency of process being cached. When `CacheHash.hit` is
|
||||
/// called, the file's contents will be checked to ensure that it matches
|
||||
/// the contents from previous times.
|
||||
///
|
||||
/// Max file size will be used to determine the amount of space to the file contents
|
||||
/// are allowed to take up in memory. If max_file_size is null, then the contents
|
||||
/// will not be loaded into memory.
|
||||
///
|
||||
/// Returns the index of the entry in the `CacheHash.files` ArrayList. You can use it
|
||||
/// to access the contents of the file after calling `CacheHash.hit()` like so:
|
||||
///
|
||||
/// ```
|
||||
/// var file_contents = cache_hash.files.items[file_index].contents.?;
|
||||
/// ```
|
||||
pub fn addFile(self: *CacheHash, file_path: []const u8, max_file_size: ?usize) !usize {
|
||||
assert(self.manifest_file == null);
|
||||
|
||||
try self.files.ensureCapacity(self.files.items.len + 1);
|
||||
const resolved_path = try fs.path.resolve(self.allocator, &[_][]const u8{file_path});
|
||||
|
||||
const idx = self.files.items.len;
|
||||
self.files.addOneAssumeCapacity().* = .{
|
||||
.path = resolved_path,
|
||||
.contents = null,
|
||||
.max_file_size = max_file_size,
|
||||
.stat = undefined,
|
||||
.bin_digest = undefined,
|
||||
};
|
||||
|
||||
self.addSlice(resolved_path);
|
||||
|
||||
return idx;
|
||||
}
|
||||
|
||||
/// Check the cache to see if the input exists in it. If it exists, a base64 encoding
|
||||
/// of it's hash will be returned; otherwise, null will be returned.
|
||||
///
|
||||
/// This function will also acquire an exclusive lock to the manifest file. This means
|
||||
/// that a process holding a CacheHash will block any other process attempting to
|
||||
/// acquire the lock.
|
||||
///
|
||||
/// The lock on the manifest file is released when `CacheHash.release` is called.
|
||||
pub fn hit(self: *CacheHash) !?[BASE64_DIGEST_LEN]u8 {
|
||||
assert(self.manifest_file == null);
|
||||
|
||||
var bin_digest: [BIN_DIGEST_LEN]u8 = undefined;
|
||||
self.hasher.final(&bin_digest);
|
||||
|
||||
base64_encoder.encode(self.b64_digest[0..], &bin_digest);
|
||||
|
||||
self.hasher = self.hasher_init;
|
||||
self.hasher.update(&bin_digest);
|
||||
|
||||
const manifest_file_path = try fmt.allocPrint(self.allocator, "{}.txt", .{self.b64_digest});
|
||||
defer self.allocator.free(manifest_file_path);
|
||||
|
||||
if (self.files.items.len != 0) {
|
||||
self.manifest_file = try self.manifest_dir.createFile(manifest_file_path, .{
|
||||
.read = true,
|
||||
.truncate = false,
|
||||
.lock = .Exclusive,
|
||||
});
|
||||
} else {
|
||||
// If there are no file inputs, we check if the manifest file exists instead of
|
||||
// comparing the hashes on the files used for the cached item
|
||||
self.manifest_file = self.manifest_dir.openFile(manifest_file_path, .{
|
||||
.read = true,
|
||||
.write = true,
|
||||
.lock = .Exclusive,
|
||||
}) catch |err| switch (err) {
|
||||
error.FileNotFound => {
|
||||
self.manifest_dirty = true;
|
||||
self.manifest_file = try self.manifest_dir.createFile(manifest_file_path, .{
|
||||
.read = true,
|
||||
.truncate = false,
|
||||
.lock = .Exclusive,
|
||||
});
|
||||
return null;
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
}
|
||||
|
||||
const file_contents = try self.manifest_file.?.inStream().readAllAlloc(self.allocator, MANIFEST_FILE_SIZE_MAX);
|
||||
defer self.allocator.free(file_contents);
|
||||
|
||||
const input_file_count = self.files.items.len;
|
||||
var any_file_changed = false;
|
||||
var line_iter = mem.tokenize(file_contents, "\n");
|
||||
var idx: usize = 0;
|
||||
while (line_iter.next()) |line| {
|
||||
defer idx += 1;
|
||||
|
||||
const cache_hash_file = if (idx < input_file_count) &self.files.items[idx] else blk: {
|
||||
const new = try self.files.addOne();
|
||||
new.* = .{
|
||||
.path = null,
|
||||
.contents = null,
|
||||
.max_file_size = null,
|
||||
.stat = undefined,
|
||||
.bin_digest = undefined,
|
||||
};
|
||||
break :blk new;
|
||||
};
|
||||
|
||||
var iter = mem.tokenize(line, " ");
|
||||
const size = iter.next() orelse return error.InvalidFormat;
|
||||
const inode = iter.next() orelse return error.InvalidFormat;
|
||||
const mtime_nsec_str = iter.next() orelse return error.InvalidFormat;
|
||||
const digest_str = iter.next() orelse return error.InvalidFormat;
|
||||
const file_path = iter.rest();
|
||||
|
||||
cache_hash_file.stat.size = fmt.parseInt(u64, size, 10) catch return error.InvalidFormat;
|
||||
cache_hash_file.stat.inode = fmt.parseInt(fs.File.INode, inode, 10) catch return error.InvalidFormat;
|
||||
cache_hash_file.stat.mtime = fmt.parseInt(i64, mtime_nsec_str, 10) catch return error.InvalidFormat;
|
||||
base64_decoder.decode(&cache_hash_file.bin_digest, digest_str) catch return error.InvalidFormat;
|
||||
|
||||
if (file_path.len == 0) {
|
||||
return error.InvalidFormat;
|
||||
}
|
||||
if (cache_hash_file.path) |p| {
|
||||
if (!mem.eql(u8, file_path, p)) {
|
||||
return error.InvalidFormat;
|
||||
}
|
||||
}
|
||||
|
||||
if (cache_hash_file.path == null) {
|
||||
cache_hash_file.path = try self.allocator.dupe(u8, file_path);
|
||||
}
|
||||
|
||||
const this_file = fs.cwd().openFile(cache_hash_file.path.?, .{ .read = true }) catch {
|
||||
return error.CacheUnavailable;
|
||||
};
|
||||
defer this_file.close();
|
||||
|
||||
const actual_stat = try this_file.stat();
|
||||
const size_match = actual_stat.size == cache_hash_file.stat.size;
|
||||
const mtime_match = actual_stat.mtime == cache_hash_file.stat.mtime;
|
||||
const inode_match = actual_stat.inode == cache_hash_file.stat.inode;
|
||||
|
||||
if (!size_match or !mtime_match or !inode_match) {
|
||||
self.manifest_dirty = true;
|
||||
|
||||
cache_hash_file.stat = actual_stat;
|
||||
|
||||
if (isProblematicTimestamp(cache_hash_file.stat.mtime)) {
|
||||
cache_hash_file.stat.mtime = 0;
|
||||
cache_hash_file.stat.inode = 0;
|
||||
}
|
||||
|
||||
var actual_digest: [BIN_DIGEST_LEN]u8 = undefined;
|
||||
try hashFile(this_file, &actual_digest, self.hasher_init);
|
||||
|
||||
if (!mem.eql(u8, &cache_hash_file.bin_digest, &actual_digest)) {
|
||||
cache_hash_file.bin_digest = actual_digest;
|
||||
// keep going until we have the input file digests
|
||||
any_file_changed = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (!any_file_changed) {
|
||||
self.hasher.update(&cache_hash_file.bin_digest);
|
||||
}
|
||||
}
|
||||
|
||||
if (any_file_changed) {
|
||||
// cache miss
|
||||
// keep the manifest file open
|
||||
// reset the hash
|
||||
self.hasher = self.hasher_init;
|
||||
self.hasher.update(&bin_digest);
|
||||
|
||||
// Remove files not in the initial hash
|
||||
for (self.files.items[input_file_count..]) |*file| {
|
||||
file.deinit(self.allocator);
|
||||
}
|
||||
self.files.shrink(input_file_count);
|
||||
|
||||
for (self.files.items) |file| {
|
||||
self.hasher.update(&file.bin_digest);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
if (idx < input_file_count) {
|
||||
self.manifest_dirty = true;
|
||||
while (idx < input_file_count) : (idx += 1) {
|
||||
const ch_file = &self.files.items[idx];
|
||||
try self.populateFileHash(ch_file);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
return self.final();
|
||||
}
|
||||
|
||||
fn populateFileHash(self: *CacheHash, ch_file: *File) !void {
|
||||
const file = try fs.cwd().openFile(ch_file.path.?, .{});
|
||||
defer file.close();
|
||||
|
||||
ch_file.stat = try file.stat();
|
||||
|
||||
if (isProblematicTimestamp(ch_file.stat.mtime)) {
|
||||
ch_file.stat.mtime = 0;
|
||||
ch_file.stat.inode = 0;
|
||||
}
|
||||
|
||||
if (ch_file.max_file_size) |max_file_size| {
|
||||
if (ch_file.stat.size > max_file_size) {
|
||||
return error.FileTooBig;
|
||||
}
|
||||
|
||||
const contents = try self.allocator.alloc(u8, @intCast(usize, ch_file.stat.size));
|
||||
errdefer self.allocator.free(contents);
|
||||
|
||||
// Hash while reading from disk, to keep the contents in the cpu cache while
|
||||
// doing hashing.
|
||||
var hasher = self.hasher_init;
|
||||
var off: usize = 0;
|
||||
while (true) {
|
||||
// give me everything you've got, captain
|
||||
const bytes_read = try file.read(contents[off..]);
|
||||
if (bytes_read == 0) break;
|
||||
hasher.update(contents[off..][0..bytes_read]);
|
||||
off += bytes_read;
|
||||
}
|
||||
hasher.final(&ch_file.bin_digest);
|
||||
|
||||
ch_file.contents = contents;
|
||||
} else {
|
||||
try hashFile(file, &ch_file.bin_digest, self.hasher_init);
|
||||
}
|
||||
|
||||
self.hasher.update(&ch_file.bin_digest);
|
||||
}
|
||||
|
||||
/// Add a file as a dependency of process being cached, after the initial hash has been
|
||||
/// calculated. This is useful for processes that don't know the all the files that
|
||||
/// are depended on ahead of time. For example, a source file that can import other files
|
||||
/// will need to be recompiled if the imported file is changed.
|
||||
pub fn addFilePostFetch(self: *CacheHash, file_path: []const u8, max_file_size: usize) ![]u8 {
|
||||
assert(self.manifest_file != null);
|
||||
|
||||
const resolved_path = try fs.path.resolve(self.allocator, &[_][]const u8{file_path});
|
||||
errdefer self.allocator.free(resolved_path);
|
||||
|
||||
const new_ch_file = try self.files.addOne();
|
||||
new_ch_file.* = .{
|
||||
.path = resolved_path,
|
||||
.max_file_size = max_file_size,
|
||||
.stat = undefined,
|
||||
.bin_digest = undefined,
|
||||
.contents = null,
|
||||
};
|
||||
errdefer self.files.shrink(self.files.items.len - 1);
|
||||
|
||||
try self.populateFileHash(new_ch_file);
|
||||
|
||||
return new_ch_file.contents.?;
|
||||
}
|
||||
|
||||
/// Add a file as a dependency of process being cached, after the initial hash has been
|
||||
/// calculated. This is useful for processes that don't know the all the files that
|
||||
/// are depended on ahead of time. For example, a source file that can import other files
|
||||
/// will need to be recompiled if the imported file is changed.
|
||||
pub fn addFilePost(self: *CacheHash, file_path: []const u8) !void {
|
||||
assert(self.manifest_file != null);
|
||||
|
||||
const resolved_path = try fs.path.resolve(self.allocator, &[_][]const u8{file_path});
|
||||
errdefer self.allocator.free(resolved_path);
|
||||
|
||||
const new_ch_file = try self.files.addOne();
|
||||
new_ch_file.* = .{
|
||||
.path = resolved_path,
|
||||
.max_file_size = null,
|
||||
.stat = undefined,
|
||||
.bin_digest = undefined,
|
||||
.contents = null,
|
||||
};
|
||||
errdefer self.files.shrink(self.files.items.len - 1);
|
||||
|
||||
try self.populateFileHash(new_ch_file);
|
||||
}
|
||||
|
||||
/// Returns a base64 encoded hash of the inputs.
|
||||
pub fn final(self: *CacheHash) [BASE64_DIGEST_LEN]u8 {
|
||||
assert(self.manifest_file != null);
|
||||
|
||||
// We don't close the manifest file yet, because we want to
|
||||
// keep it locked until the API user is done using it.
|
||||
// We also don't write out the manifest yet, because until
|
||||
// cache_release is called we still might be working on creating
|
||||
// the artifacts to cache.
|
||||
|
||||
var bin_digest: [BIN_DIGEST_LEN]u8 = undefined;
|
||||
self.hasher.final(&bin_digest);
|
||||
|
||||
var out_digest: [BASE64_DIGEST_LEN]u8 = undefined;
|
||||
base64_encoder.encode(&out_digest, &bin_digest);
|
||||
|
||||
return out_digest;
|
||||
}
|
||||
|
||||
pub fn writeManifest(self: *CacheHash) !void {
|
||||
assert(self.manifest_file != null);
|
||||
|
||||
var encoded_digest: [BASE64_DIGEST_LEN]u8 = undefined;
|
||||
var contents = ArrayList(u8).init(self.allocator);
|
||||
var outStream = contents.outStream();
|
||||
defer contents.deinit();
|
||||
|
||||
for (self.files.items) |file| {
|
||||
base64_encoder.encode(encoded_digest[0..], &file.bin_digest);
|
||||
try outStream.print("{} {} {} {} {}\n", .{ file.stat.size, file.stat.inode, file.stat.mtime, encoded_digest[0..], file.path });
|
||||
}
|
||||
|
||||
try self.manifest_file.?.pwriteAll(contents.items, 0);
|
||||
self.manifest_dirty = false;
|
||||
}
|
||||
|
||||
/// Releases the manifest file and frees any memory the CacheHash was using.
|
||||
/// `CacheHash.hit` must be called first.
|
||||
///
|
||||
/// Will also attempt to write to the manifest file if the manifest is dirty.
|
||||
/// Writing to the manifest file can fail, but this function ignores those errors.
|
||||
/// To detect failures from writing the manifest, one may explicitly call
|
||||
/// `writeManifest` before `release`.
|
||||
pub fn release(self: *CacheHash) void {
|
||||
if (self.manifest_file) |file| {
|
||||
if (self.manifest_dirty) {
|
||||
// To handle these errors, API users should call
|
||||
// writeManifest before release().
|
||||
self.writeManifest() catch {};
|
||||
}
|
||||
|
||||
file.close();
|
||||
}
|
||||
|
||||
for (self.files.items) |*file| {
|
||||
file.deinit(self.allocator);
|
||||
}
|
||||
self.files.deinit();
|
||||
self.manifest_dir.close();
|
||||
}
|
||||
};
|
||||
|
||||
fn hashFile(file: fs.File, bin_digest: []u8, hasher_init: anytype) !void {
|
||||
var buf: [1024]u8 = undefined;
|
||||
|
||||
var hasher = hasher_init;
|
||||
while (true) {
|
||||
const bytes_read = try file.read(&buf);
|
||||
if (bytes_read == 0) break;
|
||||
hasher.update(buf[0..bytes_read]);
|
||||
}
|
||||
|
||||
hasher.final(bin_digest);
|
||||
}
|
||||
|
||||
/// If the wall clock time, rounded to the same precision as the
|
||||
/// mtime, is equal to the mtime, then we cannot rely on this mtime
|
||||
/// yet. We will instead save an mtime value that indicates the hash
|
||||
/// must be unconditionally computed.
|
||||
/// This function recognizes the precision of mtime by looking at trailing
|
||||
/// zero bits of the seconds and nanoseconds.
|
||||
fn isProblematicTimestamp(fs_clock: i128) bool {
|
||||
const wall_clock = std.time.nanoTimestamp();
|
||||
|
||||
// We have to break the nanoseconds into seconds and remainder nanoseconds
|
||||
// to detect precision of seconds, because looking at the zero bits in base
|
||||
// 2 would not detect precision of the seconds value.
|
||||
const fs_sec = @intCast(i64, @divFloor(fs_clock, std.time.ns_per_s));
|
||||
const fs_nsec = @intCast(i64, @mod(fs_clock, std.time.ns_per_s));
|
||||
var wall_sec = @intCast(i64, @divFloor(wall_clock, std.time.ns_per_s));
|
||||
var wall_nsec = @intCast(i64, @mod(wall_clock, std.time.ns_per_s));
|
||||
|
||||
// First make all the least significant zero bits in the fs_clock, also zero bits in the wall clock.
|
||||
if (fs_nsec == 0) {
|
||||
wall_nsec = 0;
|
||||
if (fs_sec == 0) {
|
||||
wall_sec = 0;
|
||||
} else {
|
||||
wall_sec &= @as(i64, -1) << @intCast(u6, @ctz(i64, fs_sec));
|
||||
}
|
||||
} else {
|
||||
wall_nsec &= @as(i64, -1) << @intCast(u6, @ctz(i64, fs_nsec));
|
||||
}
|
||||
return wall_nsec == fs_nsec and wall_sec == fs_sec;
|
||||
}
|
||||
|
||||
test "cache file and then recall it" {
|
||||
if (std.Target.current.os.tag == .wasi) {
|
||||
// https://github.com/ziglang/zig/issues/5437
|
||||
return error.SkipZigTest;
|
||||
}
|
||||
const cwd = fs.cwd();
|
||||
|
||||
const temp_file = "test.txt";
|
||||
const temp_manifest_dir = "temp_manifest_dir";
|
||||
|
||||
const ts = std.time.nanoTimestamp();
|
||||
try cwd.writeFile(temp_file, "Hello, world!\n");
|
||||
|
||||
while (isProblematicTimestamp(ts)) {
|
||||
std.time.sleep(1);
|
||||
}
|
||||
|
||||
var digest1: [BASE64_DIGEST_LEN]u8 = undefined;
|
||||
var digest2: [BASE64_DIGEST_LEN]u8 = undefined;
|
||||
|
||||
{
|
||||
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
|
||||
defer ch.release();
|
||||
|
||||
ch.add(true);
|
||||
ch.add(@as(u16, 1234));
|
||||
ch.add("1234");
|
||||
_ = try ch.addFile(temp_file, null);
|
||||
|
||||
// There should be nothing in the cache
|
||||
testing.expectEqual(@as(?[BASE64_DIGEST_LEN]u8, null), try ch.hit());
|
||||
|
||||
digest1 = ch.final();
|
||||
}
|
||||
{
|
||||
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
|
||||
defer ch.release();
|
||||
|
||||
ch.add(true);
|
||||
ch.add(@as(u16, 1234));
|
||||
ch.add("1234");
|
||||
_ = try ch.addFile(temp_file, null);
|
||||
|
||||
// Cache hit! We just "built" the same file
|
||||
digest2 = (try ch.hit()).?;
|
||||
}
|
||||
|
||||
testing.expectEqual(digest1, digest2);
|
||||
|
||||
try cwd.deleteTree(temp_manifest_dir);
|
||||
try cwd.deleteFile(temp_file);
|
||||
}
|
||||
|
||||
test "give problematic timestamp" {
|
||||
var fs_clock = std.time.nanoTimestamp();
|
||||
// to make it problematic, we make it only accurate to the second
|
||||
fs_clock = @divTrunc(fs_clock, std.time.ns_per_s);
|
||||
fs_clock *= std.time.ns_per_s;
|
||||
testing.expect(isProblematicTimestamp(fs_clock));
|
||||
}
|
||||
|
||||
test "give nonproblematic timestamp" {
|
||||
testing.expect(!isProblematicTimestamp(std.time.nanoTimestamp() - std.time.ns_per_s));
|
||||
}
|
||||
|
||||
test "check that changing a file makes cache fail" {
|
||||
if (std.Target.current.os.tag == .wasi) {
|
||||
// https://github.com/ziglang/zig/issues/5437
|
||||
return error.SkipZigTest;
|
||||
}
|
||||
const cwd = fs.cwd();
|
||||
|
||||
const temp_file = "cache_hash_change_file_test.txt";
|
||||
const temp_manifest_dir = "cache_hash_change_file_manifest_dir";
|
||||
const original_temp_file_contents = "Hello, world!\n";
|
||||
const updated_temp_file_contents = "Hello, world; but updated!\n";
|
||||
|
||||
try cwd.deleteTree(temp_manifest_dir);
|
||||
try cwd.deleteTree(temp_file);
|
||||
|
||||
const ts = std.time.nanoTimestamp();
|
||||
try cwd.writeFile(temp_file, original_temp_file_contents);
|
||||
|
||||
while (isProblematicTimestamp(ts)) {
|
||||
std.time.sleep(1);
|
||||
}
|
||||
|
||||
var digest1: [BASE64_DIGEST_LEN]u8 = undefined;
|
||||
var digest2: [BASE64_DIGEST_LEN]u8 = undefined;
|
||||
|
||||
{
|
||||
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
|
||||
defer ch.release();
|
||||
|
||||
ch.add("1234");
|
||||
const temp_file_idx = try ch.addFile(temp_file, 100);
|
||||
|
||||
// There should be nothing in the cache
|
||||
testing.expectEqual(@as(?[BASE64_DIGEST_LEN]u8, null), try ch.hit());
|
||||
|
||||
testing.expect(mem.eql(u8, original_temp_file_contents, ch.files.items[temp_file_idx].contents.?));
|
||||
|
||||
digest1 = ch.final();
|
||||
}
|
||||
|
||||
try cwd.writeFile(temp_file, updated_temp_file_contents);
|
||||
|
||||
{
|
||||
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
|
||||
defer ch.release();
|
||||
|
||||
ch.add("1234");
|
||||
const temp_file_idx = try ch.addFile(temp_file, 100);
|
||||
|
||||
// A file that we depend on has been updated, so the cache should not contain an entry for it
|
||||
testing.expectEqual(@as(?[BASE64_DIGEST_LEN]u8, null), try ch.hit());
|
||||
|
||||
// The cache system does not keep the contents of re-hashed input files.
|
||||
testing.expect(ch.files.items[temp_file_idx].contents == null);
|
||||
|
||||
digest2 = ch.final();
|
||||
}
|
||||
|
||||
testing.expect(!mem.eql(u8, digest1[0..], digest2[0..]));
|
||||
|
||||
try cwd.deleteTree(temp_manifest_dir);
|
||||
try cwd.deleteTree(temp_file);
|
||||
}
|
||||
|
||||
test "no file inputs" {
|
||||
if (std.Target.current.os.tag == .wasi) {
|
||||
// https://github.com/ziglang/zig/issues/5437
|
||||
return error.SkipZigTest;
|
||||
}
|
||||
const cwd = fs.cwd();
|
||||
const temp_manifest_dir = "no_file_inputs_manifest_dir";
|
||||
defer cwd.deleteTree(temp_manifest_dir) catch unreachable;
|
||||
|
||||
var digest1: [BASE64_DIGEST_LEN]u8 = undefined;
|
||||
var digest2: [BASE64_DIGEST_LEN]u8 = undefined;
|
||||
|
||||
{
|
||||
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
|
||||
defer ch.release();
|
||||
|
||||
ch.add("1234");
|
||||
|
||||
// There should be nothing in the cache
|
||||
testing.expectEqual(@as(?[BASE64_DIGEST_LEN]u8, null), try ch.hit());
|
||||
|
||||
digest1 = ch.final();
|
||||
}
|
||||
{
|
||||
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
|
||||
defer ch.release();
|
||||
|
||||
ch.add("1234");
|
||||
|
||||
digest2 = (try ch.hit()).?;
|
||||
}
|
||||
|
||||
testing.expectEqual(digest1, digest2);
|
||||
}
|
||||
|
||||
test "CacheHashes with files added after initial hash work" {
|
||||
if (std.Target.current.os.tag == .wasi) {
|
||||
// https://github.com/ziglang/zig/issues/5437
|
||||
return error.SkipZigTest;
|
||||
}
|
||||
const cwd = fs.cwd();
|
||||
|
||||
const temp_file1 = "cache_hash_post_file_test1.txt";
|
||||
const temp_file2 = "cache_hash_post_file_test2.txt";
|
||||
const temp_manifest_dir = "cache_hash_post_file_manifest_dir";
|
||||
|
||||
const ts1 = std.time.nanoTimestamp();
|
||||
try cwd.writeFile(temp_file1, "Hello, world!\n");
|
||||
try cwd.writeFile(temp_file2, "Hello world the second!\n");
|
||||
|
||||
while (isProblematicTimestamp(ts1)) {
|
||||
std.time.sleep(1);
|
||||
}
|
||||
|
||||
var digest1: [BASE64_DIGEST_LEN]u8 = undefined;
|
||||
var digest2: [BASE64_DIGEST_LEN]u8 = undefined;
|
||||
var digest3: [BASE64_DIGEST_LEN]u8 = undefined;
|
||||
|
||||
{
|
||||
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
|
||||
defer ch.release();
|
||||
|
||||
ch.add("1234");
|
||||
_ = try ch.addFile(temp_file1, null);
|
||||
|
||||
// There should be nothing in the cache
|
||||
testing.expectEqual(@as(?[BASE64_DIGEST_LEN]u8, null), try ch.hit());
|
||||
|
||||
_ = try ch.addFilePost(temp_file2);
|
||||
|
||||
digest1 = ch.final();
|
||||
}
|
||||
{
|
||||
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
|
||||
defer ch.release();
|
||||
|
||||
ch.add("1234");
|
||||
_ = try ch.addFile(temp_file1, null);
|
||||
|
||||
digest2 = (try ch.hit()).?;
|
||||
}
|
||||
testing.expect(mem.eql(u8, &digest1, &digest2));
|
||||
|
||||
// Modify the file added after initial hash
|
||||
const ts2 = std.time.nanoTimestamp();
|
||||
try cwd.writeFile(temp_file2, "Hello world the second, updated\n");
|
||||
|
||||
while (isProblematicTimestamp(ts2)) {
|
||||
std.time.sleep(1);
|
||||
}
|
||||
|
||||
{
|
||||
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
|
||||
defer ch.release();
|
||||
|
||||
ch.add("1234");
|
||||
_ = try ch.addFile(temp_file1, null);
|
||||
|
||||
// A file that we depend on has been updated, so the cache should not contain an entry for it
|
||||
testing.expectEqual(@as(?[BASE64_DIGEST_LEN]u8, null), try ch.hit());
|
||||
|
||||
_ = try ch.addFilePost(temp_file2);
|
||||
|
||||
digest3 = ch.final();
|
||||
}
|
||||
|
||||
testing.expect(!mem.eql(u8, &digest1, &digest3));
|
||||
|
||||
try cwd.deleteTree(temp_manifest_dir);
|
||||
try cwd.deleteFile(temp_file1);
|
||||
try cwd.deleteFile(temp_file2);
|
||||
}
|
||||
@ -213,7 +213,7 @@ pub const ChildProcess = struct {
|
||||
const stdout_in = child.stdout.?.inStream();
|
||||
const stderr_in = child.stderr.?.inStream();
|
||||
|
||||
// TODO need to poll to read these streams to prevent a deadlock (or rely on evented I/O).
|
||||
// TODO https://github.com/ziglang/zig/issues/6343
|
||||
const stdout = try stdout_in.readAllAlloc(args.allocator, args.max_output_bytes);
|
||||
errdefer args.allocator.free(stdout);
|
||||
const stderr = try stderr_in.readAllAlloc(args.allocator, args.max_output_bytes);
|
||||
@ -816,6 +816,13 @@ fn destroyPipe(pipe: [2]os.fd_t) void {
|
||||
// Then the child exits.
|
||||
fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
|
||||
writeIntFd(fd, @as(ErrInt, @errorToInt(err))) catch {};
|
||||
// If we're linking libc, some naughty applications may have registered atexit handlers
|
||||
// which we really do not want to run in the fork child. I caught LLVM doing this and
|
||||
// it caused a deadlock instead of doing an exit syscall. In the words of Avril Lavigne,
|
||||
// "Why'd you have to go and make things so complicated?"
|
||||
if (std.Target.current.os.tag == .linux) {
|
||||
std.os.linux.exit(1); // By-pass libc regardless of whether it is linked.
|
||||
}
|
||||
os.exit(1);
|
||||
}
|
||||
|
||||
|
||||
@ -101,14 +101,12 @@ pub const Level = enum {
|
||||
debug,
|
||||
};
|
||||
|
||||
/// The default log level is based on build mode. Note that in ReleaseSmall
|
||||
/// builds the default level is emerg but no messages will be stored/logged
|
||||
/// by the default logger to save space.
|
||||
/// The default log level is based on build mode.
|
||||
pub const default_level: Level = switch (builtin.mode) {
|
||||
.Debug => .debug,
|
||||
.ReleaseSafe => .notice,
|
||||
.ReleaseFast => .err,
|
||||
.ReleaseSmall => .emerg,
|
||||
.ReleaseSmall => .err,
|
||||
};
|
||||
|
||||
/// The current log level. This is set to root.log_level if present, otherwise
|
||||
@ -131,11 +129,22 @@ fn log(
|
||||
// On freestanding one must provide a log function; we do not have
|
||||
// any I/O configured.
|
||||
return;
|
||||
} else if (builtin.mode != .ReleaseSmall) {
|
||||
} else {
|
||||
const level_txt = switch (message_level) {
|
||||
.emerg => "emergency",
|
||||
.alert => "alert",
|
||||
.crit => "critical",
|
||||
.err => "error",
|
||||
.warn => "warning",
|
||||
.notice => "notice",
|
||||
.info => "info",
|
||||
.debug => "debug",
|
||||
};
|
||||
const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
|
||||
const stderr = std.io.getStdErr().writer();
|
||||
const held = std.debug.getStderrMutex().acquire();
|
||||
defer held.release();
|
||||
const stderr = std.io.getStdErr().writer();
|
||||
nosuspend stderr.print(format ++ "\n", args) catch return;
|
||||
nosuspend stderr.print(level_txt ++ prefix2 ++ format ++ "\n", args) catch return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -231,8 +231,6 @@ fn AllocWithOptionsPayload(comptime Elem: type, comptime alignment: ?u29, compti
|
||||
/// call `free` when done.
|
||||
///
|
||||
/// For allocating a single item, see `create`.
|
||||
///
|
||||
/// Deprecated; use `allocWithOptions`.
|
||||
pub fn allocSentinel(
|
||||
self: *Allocator,
|
||||
comptime Elem: type,
|
||||
|
||||
@ -761,6 +761,7 @@ pub const DeleteFileError = error{
|
||||
FileNotFound,
|
||||
AccessDenied,
|
||||
NameTooLong,
|
||||
/// Also known as sharing violation.
|
||||
FileBusy,
|
||||
Unexpected,
|
||||
NotDir,
|
||||
@ -825,6 +826,7 @@ pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFil
|
||||
.INVALID_PARAMETER => unreachable,
|
||||
.FILE_IS_A_DIRECTORY => return error.IsDir,
|
||||
.NOT_A_DIRECTORY => return error.NotDir,
|
||||
.SHARING_VIOLATION => return error.FileBusy,
|
||||
else => return unexpectedStatus(rc),
|
||||
}
|
||||
}
|
||||
|
||||
@ -161,7 +161,7 @@ fn usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void
|
||||
try fmt.allocPrint(allocator, "{} (default)", .{top_level_step.step.name})
|
||||
else
|
||||
top_level_step.step.name;
|
||||
try out_stream.print(" {s:22} {}\n", .{ name, top_level_step.description });
|
||||
try out_stream.print(" {s:<27} {}\n", .{ name, top_level_step.description });
|
||||
}
|
||||
|
||||
try out_stream.writeAll(
|
||||
@ -185,7 +185,7 @@ fn usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void
|
||||
Builder.typeIdName(option.type_id),
|
||||
});
|
||||
defer allocator.free(name);
|
||||
try out_stream.print("{s:24} {}\n", .{ name, option.description });
|
||||
try out_stream.print("{s:<29} {}\n", .{ name, option.description });
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -103,6 +103,6 @@ pub fn log(
|
||||
log_err_count += 1;
|
||||
}
|
||||
if (@enumToInt(message_level) <= @enumToInt(std.testing.log_level)) {
|
||||
std.debug.print("[{}] ({}): " ++ format, .{ @tagName(scope), @tagName(message_level) } ++ args);
|
||||
std.debug.print("[{}] ({}): " ++ format ++ "\n", .{ @tagName(scope), @tagName(message_level) } ++ args);
|
||||
}
|
||||
}
|
||||
|
||||
@ -224,7 +224,7 @@ inline fn initEventLoopAndCallMain() u8 {
|
||||
if (std.event.Loop.instance) |loop| {
|
||||
if (!@hasDecl(root, "event_loop")) {
|
||||
loop.init() catch |err| {
|
||||
std.debug.warn("error: {}\n", .{@errorName(err)});
|
||||
std.log.err("{}", .{@errorName(err)});
|
||||
if (@errorReturnTrace()) |trace| {
|
||||
std.debug.dumpStackTrace(trace.*);
|
||||
}
|
||||
@ -270,7 +270,7 @@ pub fn callMain() u8 {
|
||||
},
|
||||
.ErrorUnion => {
|
||||
const result = root.main() catch |err| {
|
||||
std.debug.warn("error: {}\n", .{@errorName(err)});
|
||||
std.log.err("{}", .{@errorName(err)});
|
||||
if (@errorReturnTrace()) |trace| {
|
||||
std.debug.dumpStackTrace(trace.*);
|
||||
}
|
||||
|
||||
@ -47,7 +47,6 @@ pub const base64 = @import("base64.zig");
|
||||
pub const build = @import("build.zig");
|
||||
pub const builtin = @import("builtin.zig");
|
||||
pub const c = @import("c.zig");
|
||||
pub const cache_hash = @import("cache_hash.zig");
|
||||
pub const coff = @import("coff.zig");
|
||||
pub const compress = @import("compress.zig");
|
||||
pub const crypto = @import("crypto.zig");
|
||||
|
||||
@ -75,6 +75,13 @@ pub const Target = struct {
|
||||
else => return ".so",
|
||||
}
|
||||
}
|
||||
|
||||
pub fn defaultVersionRange(tag: Tag) Os {
|
||||
return .{
|
||||
.tag = tag,
|
||||
.version_range = VersionRange.default(tag),
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
/// Based on NTDDI version constants from
|
||||
@ -290,11 +297,32 @@ pub const Target = struct {
|
||||
}
|
||||
};
|
||||
|
||||
pub fn defaultVersionRange(tag: Tag) Os {
|
||||
return .{
|
||||
.tag = tag,
|
||||
.version_range = VersionRange.default(tag),
|
||||
pub const TaggedVersionRange = union(enum) {
|
||||
none: void,
|
||||
semver: Version.Range,
|
||||
linux: LinuxVersionRange,
|
||||
windows: WindowsVersion.Range,
|
||||
};
|
||||
|
||||
/// Provides a tagged union. `Target` does not store the tag because it is
|
||||
/// redundant with the OS tag; this function abstracts that part away.
|
||||
pub fn getVersionRange(self: Os) TaggedVersionRange {
|
||||
switch (self.tag) {
|
||||
.linux => return TaggedVersionRange{ .linux = self.version_range.linux },
|
||||
.windows => return TaggedVersionRange{ .windows = self.version_range.windows },
|
||||
|
||||
.freebsd,
|
||||
.macosx,
|
||||
.ios,
|
||||
.tvos,
|
||||
.watchos,
|
||||
.netbsd,
|
||||
.openbsd,
|
||||
.dragonfly,
|
||||
=> return TaggedVersionRange{ .semver = self.version_range.semver },
|
||||
|
||||
else => return .none,
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks if system is guaranteed to be at least `version` or older than `version`.
|
||||
@ -455,18 +483,9 @@ pub const Target = struct {
|
||||
else => false,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn oFileExt(abi: Abi) [:0]const u8 {
|
||||
return switch (abi) {
|
||||
.msvc => ".obj",
|
||||
else => ".o",
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
pub const ObjectFormat = enum {
|
||||
/// TODO Get rid of this one.
|
||||
unknown,
|
||||
coff,
|
||||
pe,
|
||||
elf,
|
||||
@ -1116,8 +1135,18 @@ pub const Target = struct {
|
||||
return linuxTripleSimple(allocator, self.cpu.arch, self.os.tag, self.abi);
|
||||
}
|
||||
|
||||
pub fn oFileExt_cpu_arch_abi(cpu_arch: Cpu.Arch, abi: Abi) [:0]const u8 {
|
||||
if (cpu_arch.isWasm()) {
|
||||
return ".o.wasm";
|
||||
}
|
||||
switch (abi) {
|
||||
.msvc => return ".obj",
|
||||
else => return ".o",
|
||||
}
|
||||
}
|
||||
|
||||
pub fn oFileExt(self: Target) [:0]const u8 {
|
||||
return self.abi.oFileExt();
|
||||
return oFileExt_cpu_arch_abi(self.cpu.arch, self.abi);
|
||||
}
|
||||
|
||||
pub fn exeFileExtSimple(cpu_arch: Cpu.Arch, os_tag: Os.Tag) [:0]const u8 {
|
||||
@ -1457,6 +1486,27 @@ pub const Target = struct {
|
||||
=> return result,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return whether or not the given host target is capable of executing natively executables
|
||||
/// of the other target.
|
||||
pub fn canExecBinariesOf(host_target: Target, binary_target: Target) bool {
|
||||
if (host_target.os.tag != binary_target.os.tag)
|
||||
return false;
|
||||
|
||||
if (host_target.cpu.arch == binary_target.cpu.arch)
|
||||
return true;
|
||||
|
||||
if (host_target.cpu.arch == .x86_64 and binary_target.cpu.arch == .i386)
|
||||
return true;
|
||||
|
||||
if (host_target.cpu.arch == .aarch64 and binary_target.cpu.arch == .arm)
|
||||
return true;
|
||||
|
||||
if (host_target.cpu.arch == .aarch64_be and binary_target.cpu.arch == .armeb)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
test "" {
|
||||
|
||||
@ -38,7 +38,7 @@ pub fn expectError(expected_error: anyerror, actual_error_union: anytype) void {
|
||||
/// This function is intended to be used only in tests. When the two values are not
|
||||
/// equal, prints diagnostics to stderr to show exactly how they are not equal,
|
||||
/// then aborts.
|
||||
/// The types must match exactly.
|
||||
/// `actual` is casted to the type of `expected`.
|
||||
pub fn expectEqual(expected: anytype, actual: @TypeOf(expected)) void {
|
||||
switch (@typeInfo(@TypeOf(actual))) {
|
||||
.NoReturn,
|
||||
|
||||
@ -64,24 +64,84 @@ pub fn lineDelta(source: []const u8, start: usize, end: usize) isize {
|
||||
return line;
|
||||
}
|
||||
|
||||
/// Returns the standard file system basename of a binary generated by the Zig compiler.
|
||||
pub fn binNameAlloc(
|
||||
allocator: *std.mem.Allocator,
|
||||
pub const BinNameOptions = struct {
|
||||
root_name: []const u8,
|
||||
target: std.Target,
|
||||
output_mode: std.builtin.OutputMode,
|
||||
link_mode: ?std.builtin.LinkMode,
|
||||
) error{OutOfMemory}![]u8 {
|
||||
switch (output_mode) {
|
||||
.Exe => return std.fmt.allocPrint(allocator, "{}{}", .{ root_name, target.exeFileExt() }),
|
||||
.Lib => {
|
||||
const suffix = switch (link_mode orelse .Static) {
|
||||
.Static => target.staticLibSuffix(),
|
||||
.Dynamic => target.dynamicLibSuffix(),
|
||||
link_mode: ?std.builtin.LinkMode = null,
|
||||
object_format: ?std.Target.ObjectFormat = null,
|
||||
version: ?std.builtin.Version = null,
|
||||
};
|
||||
return std.fmt.allocPrint(allocator, "{}{}{}", .{ target.libPrefix(), root_name, suffix });
|
||||
|
||||
/// Returns the standard file system basename of a binary generated by the Zig compiler.
|
||||
pub fn binNameAlloc(allocator: *std.mem.Allocator, options: BinNameOptions) error{OutOfMemory}![]u8 {
|
||||
const root_name = options.root_name;
|
||||
const target = options.target;
|
||||
switch (options.object_format orelse target.getObjectFormat()) {
|
||||
.coff, .pe => switch (options.output_mode) {
|
||||
.Exe => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.exeFileExt() }),
|
||||
.Lib => {
|
||||
const suffix = switch (options.link_mode orelse .Static) {
|
||||
.Static => ".lib",
|
||||
.Dynamic => ".dll",
|
||||
};
|
||||
return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, suffix });
|
||||
},
|
||||
.Obj => return std.fmt.allocPrint(allocator, "{}{}", .{ root_name, target.oFileExt() }),
|
||||
.Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.oFileExt() }),
|
||||
},
|
||||
.elf => switch (options.output_mode) {
|
||||
.Exe => return allocator.dupe(u8, root_name),
|
||||
.Lib => {
|
||||
switch (options.link_mode orelse .Static) {
|
||||
.Static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{
|
||||
target.libPrefix(), root_name,
|
||||
}),
|
||||
.Dynamic => {
|
||||
if (options.version) |ver| {
|
||||
return std.fmt.allocPrint(allocator, "{s}{s}.so.{d}.{d}.{d}", .{
|
||||
target.libPrefix(), root_name, ver.major, ver.minor, ver.patch,
|
||||
});
|
||||
} else {
|
||||
return std.fmt.allocPrint(allocator, "{s}{s}.so", .{
|
||||
target.libPrefix(), root_name,
|
||||
});
|
||||
}
|
||||
},
|
||||
}
|
||||
},
|
||||
.Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.oFileExt() }),
|
||||
},
|
||||
.macho => switch (options.output_mode) {
|
||||
.Exe => return allocator.dupe(u8, root_name),
|
||||
.Lib => {
|
||||
switch (options.link_mode orelse .Static) {
|
||||
.Static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{
|
||||
target.libPrefix(), root_name,
|
||||
}),
|
||||
.Dynamic => {
|
||||
if (options.version) |ver| {
|
||||
return std.fmt.allocPrint(allocator, "{s}{s}.{d}.{d}.{d}.dylib", .{
|
||||
target.libPrefix(), root_name, ver.major, ver.minor, ver.patch,
|
||||
});
|
||||
} else {
|
||||
return std.fmt.allocPrint(allocator, "{s}{s}.dylib", .{
|
||||
target.libPrefix(), root_name,
|
||||
});
|
||||
}
|
||||
},
|
||||
}
|
||||
return std.fmt.allocPrint(allocator, "{s}{s}{s}", .{ target.libPrefix(), root_name, suffix });
|
||||
},
|
||||
.Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.oFileExt() }),
|
||||
},
|
||||
.wasm => switch (options.output_mode) {
|
||||
.Exe => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.exeFileExt() }),
|
||||
.Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.oFileExt() }),
|
||||
.Lib => return std.fmt.allocPrint(allocator, "{s}.wasm", .{root_name}),
|
||||
},
|
||||
.c => return std.fmt.allocPrint(allocator, "{s}.c", .{root_name}),
|
||||
.hex => return std.fmt.allocPrint(allocator, "{s}.ihex", .{root_name}),
|
||||
.raw => return std.fmt.allocPrint(allocator, "{s}.bin", .{root_name}),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -375,7 +375,7 @@ pub const CrossTarget = struct {
|
||||
// `Target.current.os` works when doing `zig build` because Zig generates a build executable using
|
||||
// native OS version range. However this will not be accurate otherwise, and
|
||||
// will need to be integrated with `std.zig.system.NativeTargetInfo.detect`.
|
||||
var adjusted_os = if (self.os_tag) |os_tag| Target.Os.defaultVersionRange(os_tag) else Target.current.os;
|
||||
var adjusted_os = if (self.os_tag) |os_tag| os_tag.defaultVersionRange() else Target.current.os;
|
||||
|
||||
if (self.os_version_min) |min| switch (min) {
|
||||
.none => {},
|
||||
@ -466,7 +466,7 @@ pub const CrossTarget = struct {
|
||||
}
|
||||
|
||||
pub fn oFileExt(self: CrossTarget) [:0]const u8 {
|
||||
return self.getAbi().oFileExt();
|
||||
return Target.oFileExt_cpu_arch_abi(self.getCpuArch(), self.getAbi());
|
||||
}
|
||||
|
||||
pub fn exeFileExt(self: CrossTarget) [:0]const u8 {
|
||||
|
||||
@ -203,7 +203,7 @@ pub const NativeTargetInfo = struct {
|
||||
/// deinitialization method.
|
||||
/// TODO Remove the Allocator requirement from this function.
|
||||
pub fn detect(allocator: *Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
|
||||
var os = Target.Os.defaultVersionRange(cross_target.getOsTag());
|
||||
var os = cross_target.getOsTag().defaultVersionRange();
|
||||
if (cross_target.os_tag == null) {
|
||||
switch (Target.current.os.tag) {
|
||||
.linux => {
|
||||
@ -393,6 +393,12 @@ pub const NativeTargetInfo = struct {
|
||||
if (!native_target_has_ld or have_all_info or os_is_non_native) {
|
||||
return defaultAbiAndDynamicLinker(cpu, os, cross_target);
|
||||
}
|
||||
if (cross_target.abi) |abi| {
|
||||
if (abi.isMusl()) {
|
||||
// musl implies static linking.
|
||||
return defaultAbiAndDynamicLinker(cpu, os, cross_target);
|
||||
}
|
||||
}
|
||||
// The current target's ABI cannot be relied on for this. For example, we may build the zig
|
||||
// compiler for target riscv64-linux-musl and provide a tarball for users to download.
|
||||
// A user could then run that zig compiler on riscv64-linux-gnu. This use case is well-defined
|
||||
|
||||
@ -1,59 +0,0 @@
|
||||
pub const Table = std.StringHashMap(*Package);
|
||||
|
||||
/// This should be used for file operations.
|
||||
root_src_dir: std.fs.Dir,
|
||||
/// This is for metadata purposes, for example putting into debug information.
|
||||
root_src_dir_path: []u8,
|
||||
/// Relative to `root_src_dir` and `root_src_dir_path`.
|
||||
root_src_path: []u8,
|
||||
table: Table,
|
||||
|
||||
/// No references to `root_src_dir` and `root_src_path` are kept.
|
||||
pub fn create(
|
||||
allocator: *mem.Allocator,
|
||||
base_dir: std.fs.Dir,
|
||||
/// Relative to `base_dir`.
|
||||
root_src_dir: []const u8,
|
||||
/// Relative to `root_src_dir`.
|
||||
root_src_path: []const u8,
|
||||
) !*Package {
|
||||
const ptr = try allocator.create(Package);
|
||||
errdefer allocator.destroy(ptr);
|
||||
const root_src_path_dupe = try mem.dupe(allocator, u8, root_src_path);
|
||||
errdefer allocator.free(root_src_path_dupe);
|
||||
const root_src_dir_path = try mem.dupe(allocator, u8, root_src_dir);
|
||||
errdefer allocator.free(root_src_dir_path);
|
||||
ptr.* = .{
|
||||
.root_src_dir = try base_dir.openDir(root_src_dir, .{}),
|
||||
.root_src_dir_path = root_src_dir_path,
|
||||
.root_src_path = root_src_path_dupe,
|
||||
.table = Table.init(allocator),
|
||||
};
|
||||
return ptr;
|
||||
}
|
||||
|
||||
pub fn destroy(self: *Package) void {
|
||||
const allocator = self.table.allocator;
|
||||
self.root_src_dir.close();
|
||||
allocator.free(self.root_src_path);
|
||||
allocator.free(self.root_src_dir_path);
|
||||
{
|
||||
var it = self.table.iterator();
|
||||
while (it.next()) |kv| {
|
||||
allocator.free(kv.key);
|
||||
}
|
||||
}
|
||||
self.table.deinit();
|
||||
allocator.destroy(self);
|
||||
}
|
||||
|
||||
pub fn add(self: *Package, name: []const u8, package: *Package) !void {
|
||||
try self.table.ensureCapacity(self.table.items().len + 1);
|
||||
const name_dupe = try mem.dupe(self.table.allocator, u8, name);
|
||||
self.table.putAssumeCapacityNoClobber(name_dupe, package);
|
||||
}
|
||||
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
const assert = std.debug.assert;
|
||||
const Package = @This();
|
||||
@ -1,138 +0,0 @@
|
||||
//! Introspection and determination of system libraries needed by zig.
|
||||
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
const fs = std.fs;
|
||||
const CacheHash = std.cache_hash.CacheHash;
|
||||
|
||||
/// Caller must free result
|
||||
pub fn testZigInstallPrefix(allocator: *mem.Allocator, test_path: []const u8) ![]u8 {
|
||||
{
|
||||
const test_zig_dir = try fs.path.join(allocator, &[_][]const u8{ test_path, "lib", "zig" });
|
||||
errdefer allocator.free(test_zig_dir);
|
||||
|
||||
const test_index_file = try fs.path.join(allocator, &[_][]const u8{ test_zig_dir, "std", "std.zig" });
|
||||
defer allocator.free(test_index_file);
|
||||
|
||||
if (fs.cwd().openFile(test_index_file, .{})) |file| {
|
||||
file.close();
|
||||
return test_zig_dir;
|
||||
} else |err| switch (err) {
|
||||
error.FileNotFound => {
|
||||
allocator.free(test_zig_dir);
|
||||
},
|
||||
else => |e| return e,
|
||||
}
|
||||
}
|
||||
|
||||
// Also try without "zig"
|
||||
const test_zig_dir = try fs.path.join(allocator, &[_][]const u8{ test_path, "lib" });
|
||||
errdefer allocator.free(test_zig_dir);
|
||||
|
||||
const test_index_file = try fs.path.join(allocator, &[_][]const u8{ test_zig_dir, "std", "std.zig" });
|
||||
defer allocator.free(test_index_file);
|
||||
|
||||
const file = try fs.cwd().openFile(test_index_file, .{});
|
||||
file.close();
|
||||
|
||||
return test_zig_dir;
|
||||
}
|
||||
|
||||
/// Caller must free result
|
||||
pub fn findZigLibDir(allocator: *mem.Allocator) ![]u8 {
|
||||
const self_exe_path = try fs.selfExePathAlloc(allocator);
|
||||
defer allocator.free(self_exe_path);
|
||||
|
||||
var cur_path: []const u8 = self_exe_path;
|
||||
while (true) {
|
||||
const test_dir = fs.path.dirname(cur_path) orelse ".";
|
||||
|
||||
if (mem.eql(u8, test_dir, cur_path)) {
|
||||
break;
|
||||
}
|
||||
|
||||
return testZigInstallPrefix(allocator, test_dir) catch |err| {
|
||||
cur_path = test_dir;
|
||||
continue;
|
||||
};
|
||||
}
|
||||
|
||||
return error.FileNotFound;
|
||||
}
|
||||
|
||||
pub fn resolveZigLibDir(allocator: *mem.Allocator) ![]u8 {
|
||||
return findZigLibDir(allocator) catch |err| {
|
||||
std.debug.print(
|
||||
\\Unable to find zig lib directory: {}.
|
||||
\\Reinstall Zig or use --zig-install-prefix.
|
||||
\\
|
||||
, .{@errorName(err)});
|
||||
|
||||
return error.ZigLibDirNotFound;
|
||||
};
|
||||
}
|
||||
|
||||
/// Caller owns returned memory.
|
||||
pub fn resolveGlobalCacheDir(allocator: *mem.Allocator) ![]u8 {
|
||||
const appname = "zig";
|
||||
|
||||
if (std.Target.current.os.tag != .windows) {
|
||||
if (std.os.getenv("XDG_CACHE_HOME")) |cache_root| {
|
||||
return fs.path.join(allocator, &[_][]const u8{ cache_root, appname });
|
||||
} else if (std.os.getenv("HOME")) |home| {
|
||||
return fs.path.join(allocator, &[_][]const u8{ home, ".cache", appname });
|
||||
}
|
||||
}
|
||||
|
||||
return fs.getAppDataDir(allocator, appname);
|
||||
}
|
||||
|
||||
pub fn openGlobalCacheDir() !fs.Dir {
|
||||
var buf: [fs.MAX_PATH_BYTES]u8 = undefined;
|
||||
var fba = std.heap.FixedBufferAllocator.init(&buf);
|
||||
const path_name = try resolveGlobalCacheDir(&fba.allocator);
|
||||
return fs.cwd().makeOpenPath(path_name, .{});
|
||||
}
|
||||
|
||||
var compiler_id_mutex = std.Mutex{};
|
||||
var compiler_id: [16]u8 = undefined;
|
||||
var compiler_id_computed = false;
|
||||
|
||||
pub fn resolveCompilerId(gpa: *mem.Allocator) ![16]u8 {
|
||||
const held = compiler_id_mutex.acquire();
|
||||
defer held.release();
|
||||
|
||||
if (compiler_id_computed)
|
||||
return compiler_id;
|
||||
compiler_id_computed = true;
|
||||
|
||||
var cache_dir = try openGlobalCacheDir();
|
||||
defer cache_dir.close();
|
||||
|
||||
var ch = try CacheHash.init(gpa, cache_dir, "exe");
|
||||
defer ch.release();
|
||||
|
||||
const self_exe_path = try fs.selfExePathAlloc(gpa);
|
||||
defer gpa.free(self_exe_path);
|
||||
|
||||
_ = try ch.addFile(self_exe_path, null);
|
||||
|
||||
if (try ch.hit()) |digest| {
|
||||
compiler_id = digest[0..16].*;
|
||||
return compiler_id;
|
||||
}
|
||||
|
||||
const libs = try std.process.getSelfExeSharedLibPaths(gpa);
|
||||
defer {
|
||||
for (libs) |lib| gpa.free(lib);
|
||||
gpa.free(libs);
|
||||
}
|
||||
|
||||
for (libs) |lib| {
|
||||
try ch.addFilePost(lib);
|
||||
}
|
||||
|
||||
const digest = ch.final();
|
||||
compiler_id = digest[0..16].*;
|
||||
return compiler_id;
|
||||
}
|
||||
@ -1,279 +0,0 @@
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const Module = @import("Module.zig");
|
||||
const fs = std.fs;
|
||||
const trace = @import("tracy.zig").trace;
|
||||
const Package = @import("Package.zig");
|
||||
const Type = @import("type.zig").Type;
|
||||
const build_options = @import("build_options");
|
||||
|
||||
pub const producer_string = if (std.builtin.is_test) "zig test" else "zig " ++ build_options.version;
|
||||
|
||||
pub const Options = struct {
|
||||
target: std.Target,
|
||||
output_mode: std.builtin.OutputMode,
|
||||
link_mode: std.builtin.LinkMode,
|
||||
object_format: std.builtin.ObjectFormat,
|
||||
optimize_mode: std.builtin.Mode,
|
||||
root_name: []const u8,
|
||||
root_pkg: *const Package,
|
||||
/// Used for calculating how much space to reserve for symbols in case the binary file
|
||||
/// does not already have a symbol table.
|
||||
symbol_count_hint: u64 = 32,
|
||||
/// Used for calculating how much space to reserve for executable program code in case
|
||||
/// the binary file deos not already have such a section.
|
||||
program_code_size_hint: u64 = 256 * 1024,
|
||||
entry_addr: ?u64 = null,
|
||||
};
|
||||
|
||||
pub const File = struct {
|
||||
tag: Tag,
|
||||
options: Options,
|
||||
file: ?fs.File,
|
||||
allocator: *Allocator,
|
||||
|
||||
pub const LinkBlock = union {
|
||||
elf: Elf.TextBlock,
|
||||
coff: Coff.TextBlock,
|
||||
macho: MachO.TextBlock,
|
||||
c: void,
|
||||
wasm: void,
|
||||
};
|
||||
|
||||
pub const LinkFn = union {
|
||||
elf: Elf.SrcFn,
|
||||
coff: Coff.SrcFn,
|
||||
macho: MachO.SrcFn,
|
||||
c: void,
|
||||
wasm: ?Wasm.FnData,
|
||||
};
|
||||
|
||||
/// For DWARF .debug_info.
|
||||
pub const DbgInfoTypeRelocsTable = std.HashMapUnmanaged(Type, DbgInfoTypeReloc, Type.hash, Type.eql, std.hash_map.DefaultMaxLoadPercentage);
|
||||
|
||||
/// For DWARF .debug_info.
|
||||
pub const DbgInfoTypeReloc = struct {
|
||||
/// Offset from `TextBlock.dbg_info_off` (the buffer that is local to a Decl).
|
||||
/// This is where the .debug_info tag for the type is.
|
||||
off: u32,
|
||||
/// Offset from `TextBlock.dbg_info_off` (the buffer that is local to a Decl).
|
||||
/// List of DW.AT_type / DW.FORM_ref4 that points to the type.
|
||||
relocs: std.ArrayListUnmanaged(u32),
|
||||
};
|
||||
|
||||
/// Attempts incremental linking, if the file already exists. If
|
||||
/// incremental linking fails, falls back to truncating the file and
|
||||
/// rewriting it. A malicious file is detected as incremental link failure
|
||||
/// and does not cause Illegal Behavior. This operation is not atomic.
|
||||
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: Options) !*File {
|
||||
switch (options.object_format) {
|
||||
.unknown => unreachable,
|
||||
.coff, .pe => return Coff.openPath(allocator, dir, sub_path, options),
|
||||
.elf => return Elf.openPath(allocator, dir, sub_path, options),
|
||||
.macho => return MachO.openPath(allocator, dir, sub_path, options),
|
||||
.wasm => return Wasm.openPath(allocator, dir, sub_path, options),
|
||||
.c => return C.openPath(allocator, dir, sub_path, options),
|
||||
.hex => return error.TODOImplementHex,
|
||||
.raw => return error.TODOImplementRaw,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn cast(base: *File, comptime T: type) ?*T {
|
||||
if (base.tag != T.base_tag)
|
||||
return null;
|
||||
|
||||
return @fieldParentPtr(T, "base", base);
|
||||
}
|
||||
|
||||
pub fn makeWritable(base: *File, dir: fs.Dir, sub_path: []const u8) !void {
|
||||
switch (base.tag) {
|
||||
.coff, .elf, .macho => {
|
||||
if (base.file != null) return;
|
||||
base.file = try dir.createFile(sub_path, .{
|
||||
.truncate = false,
|
||||
.read = true,
|
||||
.mode = determineMode(base.options),
|
||||
});
|
||||
},
|
||||
.c, .wasm => {},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn makeExecutable(base: *File) !void {
|
||||
switch (base.tag) {
|
||||
.c => unreachable,
|
||||
.wasm => {},
|
||||
else => if (base.file) |f| {
|
||||
f.close();
|
||||
base.file = null;
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// May be called before or after updateDeclExports but must be called
|
||||
/// after allocateDeclIndexes for any given Decl.
|
||||
pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) !void {
|
||||
switch (base.tag) {
|
||||
.coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl),
|
||||
.elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl),
|
||||
.macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl),
|
||||
.c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl),
|
||||
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) !void {
|
||||
switch (base.tag) {
|
||||
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl),
|
||||
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
|
||||
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
|
||||
.c, .wasm => {},
|
||||
}
|
||||
}
|
||||
|
||||
/// Must be called before any call to updateDecl or updateDeclExports for
|
||||
/// any given Decl.
|
||||
pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) !void {
|
||||
switch (base.tag) {
|
||||
.coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl),
|
||||
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl),
|
||||
.macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl),
|
||||
.c, .wasm => {},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn deinit(base: *File) void {
|
||||
if (base.file) |f| f.close();
|
||||
switch (base.tag) {
|
||||
.coff => @fieldParentPtr(Coff, "base", base).deinit(),
|
||||
.elf => @fieldParentPtr(Elf, "base", base).deinit(),
|
||||
.macho => @fieldParentPtr(MachO, "base", base).deinit(),
|
||||
.c => @fieldParentPtr(C, "base", base).deinit(),
|
||||
.wasm => @fieldParentPtr(Wasm, "base", base).deinit(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn destroy(base: *File) void {
|
||||
switch (base.tag) {
|
||||
.coff => {
|
||||
const parent = @fieldParentPtr(Coff, "base", base);
|
||||
parent.deinit();
|
||||
base.allocator.destroy(parent);
|
||||
},
|
||||
.elf => {
|
||||
const parent = @fieldParentPtr(Elf, "base", base);
|
||||
parent.deinit();
|
||||
base.allocator.destroy(parent);
|
||||
},
|
||||
.macho => {
|
||||
const parent = @fieldParentPtr(MachO, "base", base);
|
||||
parent.deinit();
|
||||
base.allocator.destroy(parent);
|
||||
},
|
||||
.c => {
|
||||
const parent = @fieldParentPtr(C, "base", base);
|
||||
parent.deinit();
|
||||
base.allocator.destroy(parent);
|
||||
},
|
||||
.wasm => {
|
||||
const parent = @fieldParentPtr(Wasm, "base", base);
|
||||
parent.deinit();
|
||||
base.allocator.destroy(parent);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn flush(base: *File, module: *Module) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
try switch (base.tag) {
|
||||
.coff => @fieldParentPtr(Coff, "base", base).flush(module),
|
||||
.elf => @fieldParentPtr(Elf, "base", base).flush(module),
|
||||
.macho => @fieldParentPtr(MachO, "base", base).flush(module),
|
||||
.c => @fieldParentPtr(C, "base", base).flush(module),
|
||||
.wasm => @fieldParentPtr(Wasm, "base", base).flush(module),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn freeDecl(base: *File, decl: *Module.Decl) void {
|
||||
switch (base.tag) {
|
||||
.coff => @fieldParentPtr(Coff, "base", base).freeDecl(decl),
|
||||
.elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl),
|
||||
.macho => @fieldParentPtr(MachO, "base", base).freeDecl(decl),
|
||||
.c => unreachable,
|
||||
.wasm => @fieldParentPtr(Wasm, "base", base).freeDecl(decl),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn errorFlags(base: *File) ErrorFlags {
|
||||
return switch (base.tag) {
|
||||
.coff => @fieldParentPtr(Coff, "base", base).error_flags,
|
||||
.elf => @fieldParentPtr(Elf, "base", base).error_flags,
|
||||
.macho => @fieldParentPtr(MachO, "base", base).error_flags,
|
||||
.c => return .{ .no_entry_point_found = false },
|
||||
.wasm => return ErrorFlags{},
|
||||
};
|
||||
}
|
||||
|
||||
/// May be called before or after updateDecl, but must be called after
|
||||
/// allocateDeclIndexes for any given Decl.
|
||||
pub fn updateDeclExports(
|
||||
base: *File,
|
||||
module: *Module,
|
||||
decl: *const Module.Decl,
|
||||
exports: []const *Module.Export,
|
||||
) !void {
|
||||
switch (base.tag) {
|
||||
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclExports(module, decl, exports),
|
||||
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclExports(module, decl, exports),
|
||||
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclExports(module, decl, exports),
|
||||
.c => return {},
|
||||
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclExports(module, decl, exports),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn getDeclVAddr(base: *File, decl: *const Module.Decl) u64 {
|
||||
switch (base.tag) {
|
||||
.coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl),
|
||||
.elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl),
|
||||
.macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl),
|
||||
.c => unreachable,
|
||||
.wasm => unreachable,
|
||||
}
|
||||
}
|
||||
|
||||
pub const Tag = enum {
|
||||
coff,
|
||||
elf,
|
||||
macho,
|
||||
c,
|
||||
wasm,
|
||||
};
|
||||
|
||||
pub const ErrorFlags = struct {
|
||||
no_entry_point_found: bool = false,
|
||||
};
|
||||
|
||||
pub const C = @import("link/C.zig");
|
||||
pub const Coff = @import("link/Coff.zig");
|
||||
pub const Elf = @import("link/Elf.zig");
|
||||
pub const MachO = @import("link/MachO.zig");
|
||||
pub const Wasm = @import("link/Wasm.zig");
|
||||
};
|
||||
|
||||
pub fn determineMode(options: Options) fs.File.Mode {
|
||||
// On common systems with a 0o022 umask, 0o777 will still result in a file created
|
||||
// with 0o755 permissions, but it works appropriately if the system is configured
|
||||
// more leniently. As another data point, C's fopen seems to open files with the
|
||||
// 666 mode.
|
||||
const executable_mode = if (std.Target.current.os.tag == .windows) 0 else 0o777;
|
||||
switch (options.output_mode) {
|
||||
.Lib => return switch (options.link_mode) {
|
||||
.Dynamic => executable_mode,
|
||||
.Static => fs.File.default_mode,
|
||||
},
|
||||
.Exe => return executable_mode,
|
||||
.Obj => return fs.File.default_mode,
|
||||
}
|
||||
}
|
||||
@ -1,792 +0,0 @@
|
||||
const Coff = @This();
|
||||
|
||||
const std = @import("std");
|
||||
const log = std.log.scoped(.link);
|
||||
const Allocator = std.mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
const fs = std.fs;
|
||||
|
||||
const trace = @import("../tracy.zig").trace;
|
||||
const Module = @import("../Module.zig");
|
||||
const codegen = @import("../codegen.zig");
|
||||
const link = @import("../link.zig");
|
||||
|
||||
const allocation_padding = 4 / 3;
|
||||
const minimum_text_block_size = 64 * allocation_padding;
|
||||
|
||||
const section_alignment = 4096;
|
||||
const file_alignment = 512;
|
||||
const image_base = 0x400_000;
|
||||
const section_table_size = 2 * 40;
|
||||
comptime {
|
||||
std.debug.assert(std.mem.isAligned(image_base, section_alignment));
|
||||
}
|
||||
|
||||
pub const base_tag: link.File.Tag = .coff;
|
||||
|
||||
const msdos_stub = @embedFile("msdos-stub.bin");
|
||||
|
||||
base: link.File,
|
||||
ptr_width: enum { p32, p64 },
|
||||
error_flags: link.File.ErrorFlags = .{},
|
||||
|
||||
text_block_free_list: std.ArrayListUnmanaged(*TextBlock) = .{},
|
||||
last_text_block: ?*TextBlock = null,
|
||||
|
||||
/// Section table file pointer.
|
||||
section_table_offset: u32 = 0,
|
||||
/// Section data file pointer.
|
||||
section_data_offset: u32 = 0,
|
||||
/// Optiona header file pointer.
|
||||
optional_header_offset: u32 = 0,
|
||||
|
||||
/// Absolute virtual address of the offset table when the executable is loaded in memory.
|
||||
offset_table_virtual_address: u32 = 0,
|
||||
/// Current size of the offset table on disk, must be a multiple of `file_alignment`
|
||||
offset_table_size: u32 = 0,
|
||||
/// Contains absolute virtual addresses
|
||||
offset_table: std.ArrayListUnmanaged(u64) = .{},
|
||||
/// Free list of offset table indices
|
||||
offset_table_free_list: std.ArrayListUnmanaged(u32) = .{},
|
||||
|
||||
/// Virtual address of the entry point procedure relative to `image_base`
|
||||
entry_addr: ?u32 = null,
|
||||
|
||||
/// Absolute virtual address of the text section when the executable is loaded in memory.
|
||||
text_section_virtual_address: u32 = 0,
|
||||
/// Current size of the `.text` section on disk, must be a multiple of `file_alignment`
|
||||
text_section_size: u32 = 0,
|
||||
|
||||
offset_table_size_dirty: bool = false,
|
||||
text_section_size_dirty: bool = false,
|
||||
/// This flag is set when the virtual size of the whole image file when loaded in memory has changed
|
||||
/// and needs to be updated in the optional header.
|
||||
size_of_image_dirty: bool = false,
|
||||
|
||||
pub const TextBlock = struct {
|
||||
/// Offset of the code relative to the start of the text section
|
||||
text_offset: u32,
|
||||
/// Used size of the text block
|
||||
size: u32,
|
||||
/// This field is undefined for symbols with size = 0.
|
||||
offset_table_index: u32,
|
||||
/// Points to the previous and next neighbors, based on the `text_offset`.
|
||||
/// This can be used to find, for example, the capacity of this `TextBlock`.
|
||||
prev: ?*TextBlock,
|
||||
next: ?*TextBlock,
|
||||
|
||||
pub const empty = TextBlock{
|
||||
.text_offset = 0,
|
||||
.size = 0,
|
||||
.offset_table_index = undefined,
|
||||
.prev = null,
|
||||
.next = null,
|
||||
};
|
||||
|
||||
/// Returns how much room there is to grow in virtual address space.
|
||||
fn capacity(self: TextBlock) u64 {
|
||||
if (self.next) |next| {
|
||||
return next.text_offset - self.text_offset;
|
||||
}
|
||||
// This is the last block, the capacity is only limited by the address space.
|
||||
return std.math.maxInt(u32) - self.text_offset;
|
||||
}
|
||||
|
||||
fn freeListEligible(self: TextBlock) bool {
|
||||
// No need to keep a free list node for the last block.
|
||||
const next = self.next orelse return false;
|
||||
const cap = next.text_offset - self.text_offset;
|
||||
const ideal_cap = self.size * allocation_padding;
|
||||
if (cap <= ideal_cap) return false;
|
||||
const surplus = cap - ideal_cap;
|
||||
return surplus >= minimum_text_block_size;
|
||||
}
|
||||
|
||||
/// Absolute virtual address of the text block when the file is loaded in memory.
|
||||
fn getVAddr(self: TextBlock, coff: Coff) u32 {
|
||||
return coff.text_section_virtual_address + self.text_offset;
|
||||
}
|
||||
};
|
||||
|
||||
pub const SrcFn = void;
|
||||
|
||||
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*link.File {
|
||||
assert(options.object_format == .coff);
|
||||
|
||||
const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = link.determineMode(options) });
|
||||
errdefer file.close();
|
||||
|
||||
var coff_file = try allocator.create(Coff);
|
||||
errdefer allocator.destroy(coff_file);
|
||||
|
||||
coff_file.* = openFile(allocator, file, options) catch |err| switch (err) {
|
||||
error.IncrFailed => try createFile(allocator, file, options),
|
||||
else => |e| return e,
|
||||
};
|
||||
|
||||
return &coff_file.base;
|
||||
}
|
||||
|
||||
/// Returns error.IncrFailed if incremental update could not be performed.
|
||||
fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff {
|
||||
switch (options.output_mode) {
|
||||
.Exe => {},
|
||||
.Obj => return error.IncrFailed,
|
||||
.Lib => return error.IncrFailed,
|
||||
}
|
||||
var self: Coff = .{
|
||||
.base = .{
|
||||
.file = file,
|
||||
.tag = .coff,
|
||||
.options = options,
|
||||
.allocator = allocator,
|
||||
},
|
||||
.ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
|
||||
32 => .p32,
|
||||
64 => .p64,
|
||||
else => return error.UnsupportedELFArchitecture,
|
||||
},
|
||||
};
|
||||
errdefer self.deinit();
|
||||
|
||||
// TODO implement reading the PE/COFF file
|
||||
return error.IncrFailed;
|
||||
}
|
||||
|
||||
/// Truncates the existing file contents and overwrites the contents.
|
||||
/// Returns an error if `file` is not already open with +read +write +seek abilities.
|
||||
fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff {
|
||||
// TODO Write object specific relocations, COFF symbol table, then enable object file output.
|
||||
switch (options.output_mode) {
|
||||
.Exe => {},
|
||||
.Obj => return error.TODOImplementWritingObjFiles,
|
||||
.Lib => return error.TODOImplementWritingLibFiles,
|
||||
}
|
||||
var self: Coff = .{
|
||||
.base = .{
|
||||
.tag = .coff,
|
||||
.options = options,
|
||||
.allocator = allocator,
|
||||
.file = file,
|
||||
},
|
||||
.ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
|
||||
32 => .p32,
|
||||
64 => .p64,
|
||||
else => return error.UnsupportedCOFFArchitecture,
|
||||
},
|
||||
};
|
||||
errdefer self.deinit();
|
||||
|
||||
var coff_file_header_offset: u32 = 0;
|
||||
if (options.output_mode == .Exe) {
|
||||
// Write the MS-DOS stub and the PE signature
|
||||
try self.base.file.?.pwriteAll(msdos_stub ++ "PE\x00\x00", 0);
|
||||
coff_file_header_offset = msdos_stub.len + 4;
|
||||
}
|
||||
|
||||
// COFF file header
|
||||
const data_directory_count = 0;
|
||||
var hdr_data: [112 + data_directory_count * 8 + section_table_size]u8 = undefined;
|
||||
var index: usize = 0;
|
||||
|
||||
const machine = self.base.options.target.cpu.arch.toCoffMachine();
|
||||
if (machine == .Unknown) {
|
||||
return error.UnsupportedCOFFArchitecture;
|
||||
}
|
||||
std.mem.writeIntLittle(u16, hdr_data[0..2], @enumToInt(machine));
|
||||
index += 2;
|
||||
|
||||
// Number of sections (we only use .got, .text)
|
||||
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 2);
|
||||
index += 2;
|
||||
// TimeDateStamp (u32), PointerToSymbolTable (u32), NumberOfSymbols (u32)
|
||||
std.mem.set(u8, hdr_data[index..][0..12], 0);
|
||||
index += 12;
|
||||
|
||||
const optional_header_size = switch (options.output_mode) {
|
||||
.Exe => data_directory_count * 8 + switch (self.ptr_width) {
|
||||
.p32 => @as(u16, 96),
|
||||
.p64 => 112,
|
||||
},
|
||||
else => 0,
|
||||
};
|
||||
|
||||
const section_table_offset = coff_file_header_offset + 20 + optional_header_size;
|
||||
const default_offset_table_size = file_alignment;
|
||||
const default_size_of_code = 0;
|
||||
|
||||
self.section_data_offset = std.mem.alignForwardGeneric(u32, self.section_table_offset + section_table_size, file_alignment);
|
||||
const section_data_relative_virtual_address = std.mem.alignForwardGeneric(u32, self.section_table_offset + section_table_size, section_alignment);
|
||||
self.offset_table_virtual_address = image_base + section_data_relative_virtual_address;
|
||||
self.offset_table_size = default_offset_table_size;
|
||||
self.section_table_offset = section_table_offset;
|
||||
self.text_section_virtual_address = image_base + section_data_relative_virtual_address + section_alignment;
|
||||
self.text_section_size = default_size_of_code;
|
||||
|
||||
// Size of file when loaded in memory
|
||||
const size_of_image = std.mem.alignForwardGeneric(u32, self.text_section_virtual_address - image_base + default_size_of_code, section_alignment);
|
||||
|
||||
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], optional_header_size);
|
||||
index += 2;
|
||||
|
||||
// Characteristics
|
||||
var characteristics: u16 = std.coff.IMAGE_FILE_DEBUG_STRIPPED | std.coff.IMAGE_FILE_RELOCS_STRIPPED; // TODO Remove debug info stripped flag when necessary
|
||||
if (options.output_mode == .Exe) {
|
||||
characteristics |= std.coff.IMAGE_FILE_EXECUTABLE_IMAGE;
|
||||
}
|
||||
switch (self.ptr_width) {
|
||||
.p32 => characteristics |= std.coff.IMAGE_FILE_32BIT_MACHINE,
|
||||
.p64 => characteristics |= std.coff.IMAGE_FILE_LARGE_ADDRESS_AWARE,
|
||||
}
|
||||
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], characteristics);
|
||||
index += 2;
|
||||
|
||||
assert(index == 20);
|
||||
try self.base.file.?.pwriteAll(hdr_data[0..index], coff_file_header_offset);
|
||||
|
||||
if (options.output_mode == .Exe) {
|
||||
self.optional_header_offset = coff_file_header_offset + 20;
|
||||
// Optional header
|
||||
index = 0;
|
||||
std.mem.writeIntLittle(u16, hdr_data[0..2], switch (self.ptr_width) {
|
||||
.p32 => @as(u16, 0x10b),
|
||||
.p64 => 0x20b,
|
||||
});
|
||||
index += 2;
|
||||
|
||||
// Linker version (u8 + u8)
|
||||
std.mem.set(u8, hdr_data[index..][0..2], 0);
|
||||
index += 2;
|
||||
|
||||
// SizeOfCode (UNUSED, u32), SizeOfInitializedData (u32), SizeOfUninitializedData (u32), AddressOfEntryPoint (u32), BaseOfCode (UNUSED, u32)
|
||||
std.mem.set(u8, hdr_data[index..][0..20], 0);
|
||||
index += 20;
|
||||
|
||||
if (self.ptr_width == .p32) {
|
||||
// Base of data relative to the image base (UNUSED)
|
||||
std.mem.set(u8, hdr_data[index..][0..4], 0);
|
||||
index += 4;
|
||||
|
||||
// Image base address
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], image_base);
|
||||
index += 4;
|
||||
} else {
|
||||
// Image base address
|
||||
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], image_base);
|
||||
index += 8;
|
||||
}
|
||||
|
||||
// Section alignment
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], section_alignment);
|
||||
index += 4;
|
||||
// File alignment
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], file_alignment);
|
||||
index += 4;
|
||||
// Required OS version, 6.0 is vista
|
||||
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 6);
|
||||
index += 2;
|
||||
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0);
|
||||
index += 2;
|
||||
// Image version
|
||||
std.mem.set(u8, hdr_data[index..][0..4], 0);
|
||||
index += 4;
|
||||
// Required subsystem version, same as OS version
|
||||
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 6);
|
||||
index += 2;
|
||||
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0);
|
||||
index += 2;
|
||||
// Reserved zeroes (u32)
|
||||
std.mem.set(u8, hdr_data[index..][0..4], 0);
|
||||
index += 4;
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], size_of_image);
|
||||
index += 4;
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset);
|
||||
index += 4;
|
||||
// CheckSum (u32)
|
||||
std.mem.set(u8, hdr_data[index..][0..4], 0);
|
||||
index += 4;
|
||||
// Subsystem, TODO: Let users specify the subsystem, always CUI for now
|
||||
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 3);
|
||||
index += 2;
|
||||
// DLL characteristics
|
||||
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0x0);
|
||||
index += 2;
|
||||
|
||||
switch (self.ptr_width) {
|
||||
.p32 => {
|
||||
// Size of stack reserve + commit
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000_000);
|
||||
index += 4;
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000);
|
||||
index += 4;
|
||||
// Size of heap reserve + commit
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x100_000);
|
||||
index += 4;
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000);
|
||||
index += 4;
|
||||
},
|
||||
.p64 => {
|
||||
// Size of stack reserve + commit
|
||||
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000_000);
|
||||
index += 8;
|
||||
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000);
|
||||
index += 8;
|
||||
// Size of heap reserve + commit
|
||||
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x100_000);
|
||||
index += 8;
|
||||
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000);
|
||||
index += 8;
|
||||
},
|
||||
}
|
||||
|
||||
// Reserved zeroes
|
||||
std.mem.set(u8, hdr_data[index..][0..4], 0);
|
||||
index += 4;
|
||||
|
||||
// Number of data directories
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], data_directory_count);
|
||||
index += 4;
|
||||
// Initialize data directories to zero
|
||||
std.mem.set(u8, hdr_data[index..][0 .. data_directory_count * 8], 0);
|
||||
index += data_directory_count * 8;
|
||||
|
||||
assert(index == optional_header_size);
|
||||
}
|
||||
|
||||
// Write section table.
|
||||
// First, the .got section
|
||||
hdr_data[index..][0..8].* = ".got\x00\x00\x00\x00".*;
|
||||
index += 8;
|
||||
if (options.output_mode == .Exe) {
|
||||
// Virtual size (u32)
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_offset_table_size);
|
||||
index += 4;
|
||||
// Virtual address (u32)
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.offset_table_virtual_address - image_base);
|
||||
index += 4;
|
||||
} else {
|
||||
std.mem.set(u8, hdr_data[index..][0..8], 0);
|
||||
index += 8;
|
||||
}
|
||||
// Size of raw data (u32)
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_offset_table_size);
|
||||
index += 4;
|
||||
// File pointer to the start of the section
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset);
|
||||
index += 4;
|
||||
// Pointer to relocations (u32), PointerToLinenumbers (u32), NumberOfRelocations (u16), NumberOfLinenumbers (u16)
|
||||
std.mem.set(u8, hdr_data[index..][0..12], 0);
|
||||
index += 12;
|
||||
// Section flags
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], std.coff.IMAGE_SCN_CNT_INITIALIZED_DATA | std.coff.IMAGE_SCN_MEM_READ);
|
||||
index += 4;
|
||||
// Then, the .text section
|
||||
hdr_data[index..][0..8].* = ".text\x00\x00\x00".*;
|
||||
index += 8;
|
||||
if (options.output_mode == .Exe) {
|
||||
// Virtual size (u32)
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_size_of_code);
|
||||
index += 4;
|
||||
// Virtual address (u32)
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.text_section_virtual_address - image_base);
|
||||
index += 4;
|
||||
} else {
|
||||
std.mem.set(u8, hdr_data[index..][0..8], 0);
|
||||
index += 8;
|
||||
}
|
||||
// Size of raw data (u32)
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_size_of_code);
|
||||
index += 4;
|
||||
// File pointer to the start of the section
|
||||
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset + default_offset_table_size);
|
||||
index += 4;
|
||||
// Pointer to relocations (u32), PointerToLinenumbers (u32), NumberOfRelocations (u16), NumberOfLinenumbers (u16)
|
||||
std.mem.set(u8, hdr_data[index..][0..12], 0);
|
||||
index += 12;
|
||||
// Section flags
|
||||
std.mem.writeIntLittle(
|
||||
u32,
|
||||
hdr_data[index..][0..4],
|
||||
std.coff.IMAGE_SCN_CNT_CODE | std.coff.IMAGE_SCN_MEM_EXECUTE | std.coff.IMAGE_SCN_MEM_READ | std.coff.IMAGE_SCN_MEM_WRITE,
|
||||
);
|
||||
index += 4;
|
||||
|
||||
assert(index == optional_header_size + section_table_size);
|
||||
try self.base.file.?.pwriteAll(hdr_data[0..index], self.optional_header_offset);
|
||||
try self.base.file.?.setEndPos(self.section_data_offset + default_offset_table_size + default_size_of_code);
|
||||
|
||||
return self;
|
||||
}
|
||||
|
||||
/// Reserve an offset-table (.got) slot for `decl` ahead of codegen.
/// Reuses a slot from the free list when one is available; otherwise
/// appends a fresh entry and flags the on-disk table as stale if it can
/// no longer hold every entry.
pub fn allocateDeclIndexes(self: *Coff, decl: *Module.Decl) !void {
    // Reserve space up front so the append below cannot fail.
    try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);

    if (self.offset_table_free_list.popOrNull()) |reused_slot| {
        decl.link.coff.offset_table_index = reused_slot;
    } else {
        const new_slot = @intCast(u32, self.offset_table.items.len);
        decl.link.coff.offset_table_index = new_slot;
        _ = self.offset_table.addOneAssumeCapacity();

        // One entry is pointer-sized; if the in-memory table now exceeds
        // what the on-disk table can hold, it must be regrown on write.
        const entry_size = self.base.options.target.cpu.arch.ptrBitWidth() / 8;
        if (self.offset_table.items.len > self.offset_table_size / entry_size) {
            self.offset_table_size_dirty = true;
        }
    }

    // The real virtual address is filled in by updateDecl; 0 means "unset".
    self.offset_table.items[decl.link.coff.offset_table_index] = 0;
}
|
||||
|
||||
/// Place `text_block` (needing `new_block_size` bytes at `alignment`) within
/// the .text section, preferring a gap from the free list, else appending
/// after the last block. Grows the section's on-disk/virtual sizes when
/// needed and relinks the block into the ordered doubly-linked block list.
/// Returns the block's new virtual address.
fn allocateTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
    // Over-allocate by the padding factor so small future growth is in-place.
    const new_block_min_capacity = new_block_size * allocation_padding;

    // We use these to indicate our intention to update metadata, placing the new block,
    // and possibly removing a free list node.
    // It would be simpler to do it inside the for loop below, but that would cause a
    // problem if an error was returned later in the function. So this action
    // is actually carried out at the end of the function, when errors are no longer possible.
    var block_placement: ?*TextBlock = null;
    var free_list_removal: ?usize = null;

    const vaddr = blk: {
        var i: usize = 0;
        while (i < self.text_block_free_list.items.len) {
            const free_block = self.text_block_free_list.items[i];

            // End of the gap owned by this free-list block, as a .text offset.
            const next_block_text_offset = free_block.text_offset + free_block.capacity();
            // Aligned candidate position for the new block right after free_block.
            const new_block_text_offset = std.mem.alignForwardGeneric(u64, free_block.getVAddr(self.*) + free_block.size, alignment) - self.text_section_virtual_address;
            if (new_block_text_offset < next_block_text_offset and next_block_text_offset - new_block_text_offset >= new_block_min_capacity) {
                block_placement = free_block;

                // If the leftover gap is too small to ever be reused, drop the
                // free-list node (deferred until the end; see comment above).
                const remaining_capacity = next_block_text_offset - new_block_text_offset - new_block_min_capacity;
                if (remaining_capacity < minimum_text_block_size) {
                    free_list_removal = i;
                }

                break :blk new_block_text_offset + self.text_section_virtual_address;
            } else {
                // Prune nodes that are no longer worth keeping; swapRemove
                // replaces items[i] so the index is only advanced on keep.
                if (!free_block.freeListEligible()) {
                    _ = self.text_block_free_list.swapRemove(i);
                } else {
                    i += 1;
                }
                continue;
            }
        } else if (self.last_text_block) |last| {
            // No suitable gap: append after the current last block.
            const new_block_vaddr = std.mem.alignForwardGeneric(u64, last.getVAddr(self.*) + last.size, alignment);
            block_placement = last;
            break :blk new_block_vaddr;
        } else {
            // First block ever: start of the .text section.
            break :blk self.text_section_virtual_address;
        }
    };

    // Placing at (or past) the end of the chain may require growing .text.
    const expand_text_section = block_placement == null or block_placement.?.next == null;
    if (expand_text_section) {
        const needed_size = @intCast(u32, std.mem.alignForwardGeneric(u64, vaddr + new_block_size - self.text_section_virtual_address, file_alignment));
        if (needed_size > self.text_section_size) {
            const current_text_section_virtual_size = std.mem.alignForwardGeneric(u32, self.text_section_size, section_alignment);
            const new_text_section_virtual_size = std.mem.alignForwardGeneric(u32, needed_size, section_alignment);
            if (current_text_section_virtual_size != new_text_section_virtual_size) {
                self.size_of_image_dirty = true;
                // Write new virtual size
                var buf: [4]u8 = undefined;
                std.mem.writeIntLittle(u32, &buf, new_text_section_virtual_size);
                try self.base.file.?.pwriteAll(&buf, self.section_table_offset + 40 + 8);
            }

            self.text_section_size = needed_size;
            self.text_section_size_dirty = true;
        }
        self.last_text_block = text_block;
    }
    text_block.text_offset = @intCast(u32, vaddr - self.text_section_virtual_address);
    text_block.size = @intCast(u32, new_block_size);

    // This function can also reallocate a text block.
    // In this case we need to "unplug" it from its previous location before
    // plugging it in to its new location.
    if (text_block.prev) |prev| {
        prev.next = text_block.next;
    }
    if (text_block.next) |next| {
        next.prev = text_block.prev;
    }

    if (block_placement) |big_block| {
        text_block.prev = big_block;
        text_block.next = big_block.next;
        big_block.next = text_block;
    } else {
        text_block.prev = null;
        text_block.next = null;
    }
    if (free_list_removal) |i| {
        _ = self.text_block_free_list.swapRemove(i);
    }
    return vaddr;
}
|
||||
|
||||
/// Grow `text_block` to `new_block_size` in place when its current
/// placement still satisfies both size and alignment; otherwise relocate
/// it via allocateTextBlock. Returns the (possibly new) virtual address.
fn growTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
    const vaddr = text_block.getVAddr(self.*);
    const is_aligned = std.mem.alignBackwardGeneric(u64, vaddr, alignment) == vaddr;
    if (is_aligned and new_block_size <= text_block.capacity()) {
        // The existing placement is still good; no file traffic required.
        return vaddr;
    }
    // Must move: allocateTextBlock also unlinks the old placement.
    return self.allocateTextBlock(text_block, new_block_size, alignment);
}
|
||||
|
||||
/// Shrink `text_block` to `new_block_size`, putting it on the free list
/// when the resulting slack is large enough to be worth reusing.
fn shrinkTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64) void {
    text_block.size = @intCast(u32, new_block_size);
    const slack = text_block.capacity() - text_block.size;
    if (slack >= minimum_text_block_size) {
        // The free list is a best-effort heuristic; OOM is deliberately ignored.
        self.text_block_free_list.append(self.base.allocator, text_block) catch {};
    }
}
|
||||
|
||||
/// Unlink `text_block` from the block chain and drop any free-list node
/// that refers to it. Its predecessor may inherit the freed space and be
/// added to the free list if the resulting slack is reusable.
fn freeTextBlock(self: *Coff, text_block: *TextBlock) void {
    var already_have_free_list_node = false;
    {
        var i: usize = 0;
        // TODO turn text_block_free_list into a hash map
        while (i < self.text_block_free_list.items.len) {
            if (self.text_block_free_list.items[i] == text_block) {
                // Remove the node for the block being freed; swapRemove moved
                // a new item into slot i, so do not advance the index.
                _ = self.text_block_free_list.swapRemove(i);
                continue;
            }
            if (self.text_block_free_list.items[i] == text_block.prev) {
                // prev is already tracked; avoid a duplicate append below.
                already_have_free_list_node = true;
            }
            i += 1;
        }
    }
    if (self.last_text_block == text_block) {
        // The freed block was the tail; its predecessor becomes the new tail.
        self.last_text_block = text_block.prev;
    }
    if (text_block.prev) |prev| {
        prev.next = text_block.next;

        if (!already_have_free_list_node and prev.freeListEligible()) {
            // The free list is heuristics, it doesn't have to be perfect, so we can
            // ignore the OOM here.
            self.text_block_free_list.append(self.base.allocator, prev) catch {};
        }
    }

    if (text_block.next) |next| {
        next.prev = text_block.prev;
    }
}
|
||||
|
||||
/// Write offset-table (.got) entry `index` to the output file. If the
/// table has outgrown its on-disk space, first double it: shift the .text
/// section's file contents to make room, patch the .got/.text section
/// headers, and — when the virtual layout moved — rebase every recorded
/// virtual address.
fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
    // Entries are pointer-sized and written in the target's endianness.
    const entry_size = self.base.options.target.cpu.arch.ptrBitWidth() / 8;
    const endian = self.base.options.target.cpu.arch.endian();

    // The offset table is the first thing in the section data.
    const offset_table_start = self.section_data_offset;
    if (self.offset_table_size_dirty) {
        const current_raw_size = self.offset_table_size;
        const new_raw_size = self.offset_table_size * 2;
        log.debug("growing offset table from raw size {} to {}\n", .{ current_raw_size, new_raw_size });

        // Move the text section to a new place in the executable
        const current_text_section_start = self.section_data_offset + current_raw_size;
        const new_text_section_start = self.section_data_offset + new_raw_size;

        const amt = try self.base.file.?.copyRangeAll(current_text_section_start, self.base.file.?, new_text_section_start, self.text_section_size);
        if (amt != self.text_section_size) return error.InputOutput;

        // Write the new raw size in the .got header
        var buf: [8]u8 = undefined;
        std.mem.writeIntLittle(u32, buf[0..4], new_raw_size);
        try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 16);
        // Write the new .text section file offset in the .text section header
        // (each section header is 40 bytes; PointerToRawData is at +20).
        std.mem.writeIntLittle(u32, buf[0..4], new_text_section_start);
        try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 40 + 20);

        const current_virtual_size = std.mem.alignForwardGeneric(u32, self.offset_table_size, section_alignment);
        const new_virtual_size = std.mem.alignForwardGeneric(u32, new_raw_size, section_alignment);
        // If we had to move in the virtual address space, we need to fix the VAs in the offset table, as well as the virtual address of the `.text` section
        // and the virtual size of the `.got` section

        if (new_virtual_size != current_virtual_size) {
            log.debug("growing offset table from virtual size {} to {}\n", .{ current_virtual_size, new_virtual_size });
            self.size_of_image_dirty = true;
            const va_offset = new_virtual_size - current_virtual_size;

            // Write .got virtual size
            std.mem.writeIntLittle(u32, buf[0..4], new_virtual_size);
            try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 8);

            // Write .text new virtual address
            self.text_section_virtual_address = self.text_section_virtual_address + va_offset;
            std.mem.writeIntLittle(u32, buf[0..4], self.text_section_virtual_address - image_base);
            try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 40 + 12);

            // Fix the VAs in the offset table
            for (self.offset_table.items) |*va, idx| {
                // 0 marks a slot that has not been assigned an address yet.
                if (va.* != 0) {
                    va.* += va_offset;

                    switch (entry_size) {
                        4 => {
                            std.mem.writeInt(u32, buf[0..4], @intCast(u32, va.*), endian);
                            try self.base.file.?.pwriteAll(buf[0..4], offset_table_start + idx * entry_size);
                        },
                        8 => {
                            std.mem.writeInt(u64, &buf, va.*, endian);
                            try self.base.file.?.pwriteAll(&buf, offset_table_start + idx * entry_size);
                        },
                        else => unreachable,
                    }
                }
            }
        }
        self.offset_table_size = new_raw_size;
        self.offset_table_size_dirty = false;
    }
    // Write the new entry
    switch (entry_size) {
        4 => {
            var buf: [4]u8 = undefined;
            std.mem.writeInt(u32, &buf, @intCast(u32, self.offset_table.items[index]), endian);
            try self.base.file.?.pwriteAll(&buf, offset_table_start + index * entry_size);
        },
        8 => {
            var buf: [8]u8 = undefined;
            std.mem.writeInt(u64, &buf, self.offset_table.items[index], endian);
            try self.base.file.?.pwriteAll(&buf, offset_table_start + index * entry_size);
        },
        else => unreachable,
    }
}
|
||||
|
||||
/// Regenerate machine code for `decl`, (re)allocate its text block to fit,
/// write the code into the output file, and refresh its offset-table entry
/// and export symbols.
/// Fix: the "growing" debug log formatted `decl.name` (a null-terminated
/// many-pointer) raw with `{}`; it now uses `std.mem.spanZ` like the
/// allocation log below.
pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
    // TODO COFF/PE debug information
    // TODO Implement exports
    const tracy = trace(@src());
    defer tracy.end();

    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
    defer code_buffer.deinit();

    const typed_value = decl.typed_value.most_recent.typed_value;
    const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .none);
    const code = switch (res) {
        .externally_managed => |x| x,
        .appended => code_buffer.items,
        .fail => |em| {
            // Codegen failure is reported through the module's error list,
            // not propagated as an error from the linker.
            decl.analysis = .codegen_failure;
            try module.failed_decls.put(module.gpa, decl, em);
            return;
        },
    };

    const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
    const curr_size = decl.link.coff.size;
    if (curr_size != 0) {
        // The decl already owns a text block: grow/move or shrink it.
        const capacity = decl.link.coff.capacity();
        const need_realloc = code.len > capacity or
            !std.mem.isAlignedGeneric(u32, decl.link.coff.text_offset, required_alignment);
        if (need_realloc) {
            const curr_vaddr = self.getDeclVAddr(decl);
            const vaddr = try self.growTextBlock(&decl.link.coff, code.len, required_alignment);
            log.debug("growing {} from 0x{x} to 0x{x}\n", .{ std.mem.spanZ(decl.name), curr_vaddr, vaddr });
            if (vaddr != curr_vaddr) {
                // The block moved: the .got entry must point at the new address.
                log.debug(" (writing new offset table entry)\n", .{});
                self.offset_table.items[decl.link.coff.offset_table_index] = vaddr;
                try self.writeOffsetTableEntry(decl.link.coff.offset_table_index);
            }
        } else if (code.len < curr_size) {
            self.shrinkTextBlock(&decl.link.coff, code.len);
        }
    } else {
        // First code for this decl: allocate a block and publish its address.
        const vaddr = try self.allocateTextBlock(&decl.link.coff, code.len, required_alignment);
        log.debug("allocated text block for {} at 0x{x} (size: {Bi})\n", .{ std.mem.spanZ(decl.name), vaddr, code.len });
        errdefer self.freeTextBlock(&decl.link.coff);
        self.offset_table.items[decl.link.coff.offset_table_index] = vaddr;
        try self.writeOffsetTableEntry(decl.link.coff.offset_table_index);
    }

    // Write the code into the file
    try self.base.file.?.pwriteAll(code, self.section_data_offset + self.offset_table_size + decl.link.coff.text_offset);

    // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
    const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
    return self.updateDeclExports(module, decl, decl_exports);
}
|
||||
|
||||
/// Release `decl`'s linker resources: free its text block and return its
/// offset-table slot to the free list for reuse.
pub fn freeDecl(self: *Coff, decl: *Module.Decl) void {
    // Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
    self.freeTextBlock(&decl.link.coff);
    self.offset_table_free_list.append(self.base.allocator, decl.link.coff.offset_table_index) catch {};
}
|
||||
|
||||
/// Record exports for `decl`. Currently only a "_start" export in the
/// .text section is supported (it sets the image entry point); every other
/// export is reported as an unimplemented-feature compile error.
pub fn updateDeclExports(self: *Coff, module: *Module, decl: *const Module.Decl, exports: []const *Module.Export) !void {
    for (exports) |exp| {
        if (exp.options.section) |section_name| {
            // Custom export sections other than .text are not implemented.
            if (!std.mem.eql(u8, section_name, ".text")) {
                try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
                module.failed_exports.putAssumeCapacityNoClobber(
                    exp,
                    try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
                );
                continue;
            }
        }
        if (std.mem.eql(u8, exp.options.name, "_start")) {
            // Entry point is stored relative to the image base (an RVA).
            self.entry_addr = decl.link.coff.getVAddr(self.*) - image_base;
        } else {
            try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
            module.failed_exports.putAssumeCapacityNoClobber(
                exp,
                try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: Exports other than '_start'", .{}),
            );
            continue;
        }
    }
}
|
||||
|
||||
/// Finish the link: patch any headers made stale during incremental
/// updates (.text raw size, SizeOfImage, AddressOfEntryPoint) and record
/// whether an entry point was found for executables.
pub fn flush(self: *Coff, module: *Module) !void {
    if (self.text_section_size_dirty) {
        // Write the new raw size in the .text header
        // (section headers are 40 bytes; SizeOfRawData is at +16).
        var buf: [4]u8 = undefined;
        std.mem.writeIntLittle(u32, &buf, self.text_section_size);
        try self.base.file.?.pwriteAll(&buf, self.section_table_offset + 40 + 16);
        try self.base.file.?.setEndPos(self.section_data_offset + self.offset_table_size + self.text_section_size);
        self.text_section_size_dirty = false;
    }

    if (self.base.options.output_mode == .Exe and self.size_of_image_dirty) {
        // SizeOfImage must cover everything up to the end of .text, rounded
        // up to the section alignment; it lives at optional header +56.
        const new_size_of_image = std.mem.alignForwardGeneric(u32, self.text_section_virtual_address - image_base + self.text_section_size, section_alignment);
        var buf: [4]u8 = undefined;
        std.mem.writeIntLittle(u32, &buf, new_size_of_image);
        try self.base.file.?.pwriteAll(&buf, self.optional_header_offset + 56);
        self.size_of_image_dirty = false;
    }

    if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
        log.debug("flushing. no_entry_point_found = true\n", .{});
        self.error_flags.no_entry_point_found = true;
    } else {
        log.debug("flushing. no_entry_point_found = false\n", .{});
        self.error_flags.no_entry_point_found = false;

        if (self.base.options.output_mode == .Exe) {
            // Write AddressOfEntryPoint
            // (optional header +16; entry_addr is non-null in this branch).
            var buf: [4]u8 = undefined;
            std.mem.writeIntLittle(u32, &buf, self.entry_addr.?);
            try self.base.file.?.pwriteAll(&buf, self.optional_header_offset + 16);
        }
    }
}
|
||||
|
||||
/// Virtual address of `decl`'s code inside the loaded image.
pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl) u64 {
    const text_offset = decl.link.coff.text_offset;
    return self.text_section_virtual_address + text_offset;
}
|
||||
|
||||
/// Update debug line information for `decl`. COFF/PE debug info is not
/// implemented yet, so this is currently a no-op.
pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void {
    // TODO Implement this
}
|
||||
|
||||
/// Free the in-memory linker data structures owned by this Coff instance.
pub fn deinit(self: *Coff) void {
    self.text_block_free_list.deinit(self.base.allocator);
    self.offset_table.deinit(self.base.allocator);
    self.offset_table_free_list.deinit(self.base.allocator);
}
|
||||
@ -1,251 +0,0 @@
|
||||
const Wasm = @This();
|
||||
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
const fs = std.fs;
|
||||
const leb = std.debug.leb;
|
||||
|
||||
const Module = @import("../Module.zig");
|
||||
const codegen = @import("../codegen/wasm.zig");
|
||||
const link = @import("../link.zig");
|
||||
|
||||
/// Various magic numbers defined by the wasm spec
/// (binary format: magic/version preamble followed by numbered sections).
const spec = struct {
    const magic = [_]u8{ 0x00, 0x61, 0x73, 0x6D }; // \0asm
    const version = [_]u8{ 0x01, 0x00, 0x00, 0x00 }; // version 1

    // Section ids, in the order the sections must appear in a module.
    const custom_id = 0;
    const types_id = 1;
    const imports_id = 2;
    const funcs_id = 3;
    const tables_id = 4;
    const memories_id = 5;
    const globals_id = 6;
    const exports_id = 7;
    const start_id = 8;
    const elements_id = 9;
    const code_id = 10;
    const data_id = 11;
};
|
||||
|
||||
/// Tag identifying this backend within link.File.
pub const base_tag = link.File.Tag.wasm;
|
||||
|
||||
/// Per-function linker state, stored on each function Decl via fn_link.wasm.
pub const FnData = struct {
    /// Generated code for the type of the function
    functype: std.ArrayListUnmanaged(u8) = .{},
    /// Generated code for the body of the function
    code: std.ArrayListUnmanaged(u8) = .{},
    /// Locations in the generated code where function indexes must be filled in.
    /// This must be kept ordered by offset.
    idx_refs: std.ArrayListUnmanaged(struct { offset: u32, decl: *Module.Decl }) = .{},
};
|
||||
|
||||
/// Common linker state shared with the frontend (see link.File).
base: link.File,

/// List of all function Decls to be written to the output file. The index of
/// each Decl in this list at the time of writing the binary is used as the
/// function index.
/// TODO: can/should we access some data structure in Module directly?
funcs: std.ArrayListUnmanaged(*Module.Decl) = .{},
|
||||
|
||||
/// Create (truncating) the output file at `sub_path`, write the wasm
/// module preamble, and return a newly allocated Wasm linker instance.
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*link.File {
    assert(options.object_format == .wasm);

    // TODO: read the file and keep valid parts instead of truncating
    const file = try dir.createFile(sub_path, .{ .truncate = true, .read = true });
    errdefer file.close();

    const self = try allocator.create(Wasm);
    errdefer allocator.destroy(self);

    // Every wasm module begins with the magic bytes and the version.
    try file.writeAll(&(spec.magic ++ spec.version));

    self.* = .{
        .base = .{
            .tag = .wasm,
            .allocator = allocator,
            .options = options,
            .file = file,
        },
    };
    return &self.base;
}
|
||||
|
||||
/// Free every per-function buffer, then the function list itself.
pub fn deinit(self: *Wasm) void {
    const gpa = self.base.allocator;
    for (self.funcs.items) |decl| {
        const fn_data = &decl.fn_link.wasm.?;
        fn_data.functype.deinit(gpa);
        fn_data.code.deinit(gpa);
        fn_data.idx_refs.deinit(gpa);
    }
    self.funcs.deinit(gpa);
}
|
||||
|
||||
// Generate code for the Decl, storing it in memory to be later written to
// the file on flush().
pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
    // Only function Decls are supported so far.
    if (decl.typed_value.most_recent.typed_value.ty.zigTypeTag() != .Fn)
        return error.TODOImplementNonFnDeclsForWasm;

    if (decl.fn_link.wasm) |*fn_data| {
        // Seen before: reuse the existing buffers, discarding old contents.
        fn_data.functype.items.len = 0;
        fn_data.code.items.len = 0;
        fn_data.idx_refs.items.len = 0;
    } else {
        // First time: initialize fresh state and register as a function.
        decl.fn_link.wasm = .{};
        try self.funcs.append(self.base.allocator, decl);
    }
    const fn_data = &decl.fn_link.wasm.?;

    // Temporarily wrap the unmanaged lists so codegen can append freely,
    // then store the (possibly reallocated) buffers back.
    var managed_functype = fn_data.functype.toManaged(self.base.allocator);
    var managed_code = fn_data.code.toManaged(self.base.allocator);
    try codegen.genFunctype(&managed_functype, decl);
    try codegen.genCode(&managed_code, decl);
    fn_data.functype = managed_functype.toUnmanaged();
    fn_data.code = managed_code.toUnmanaged();
}
|
||||
|
||||
/// No-op: the export section is rebuilt from module.decl_exports in flush(),
/// so there is nothing to track per-decl here.
pub fn updateDeclExports(
    self: *Wasm,
    module: *Module,
    decl: *const Module.Decl,
    exports: []const *Module.Export,
) !void {}
|
||||
|
||||
/// Remove `decl` from the function list and free its codegen buffers.
pub fn freeDecl(self: *Wasm, decl: *Module.Decl) void {
    // TODO: remove this assert when non-function Decls are implemented
    assert(decl.typed_value.most_recent.typed_value.ty.zigTypeTag() == .Fn);
    // swapRemove keeps the list dense; function indexes are recomputed from
    // list position at flush time, so reordering here is fine.
    _ = self.funcs.swapRemove(self.getFuncidx(decl).?);
    decl.fn_link.wasm.?.functype.deinit(self.base.allocator);
    decl.fn_link.wasm.?.code.deinit(self.base.allocator);
    decl.fn_link.wasm.?.idx_refs.deinit(self.base.allocator);
    decl.fn_link.wasm = null;
}
|
||||
|
||||
/// Rewrite every section of the output module after the magic/version
/// preamble: type, function, export, and code sections. Each section header
/// is reserved up front and back-patched once its contents' size is known.
pub fn flush(self: *Wasm, module: *Module) !void {
    const file = self.base.file.?;
    // A section's leb "size" field covers the 5-byte fixed-leb item count
    // plus the contents, but not the 1-byte id nor the 5-byte size field
    // itself; hence contents size = pos - header_offset - (5 + 1).
    const header_size = 5 + 1;

    // No need to rewrite the magic/version header
    try file.setEndPos(@sizeOf(@TypeOf(spec.magic ++ spec.version)));
    try file.seekTo(@sizeOf(@TypeOf(spec.magic ++ spec.version)));

    // Type section
    {
        const header_offset = try reserveVecSectionHeader(file);
        for (self.funcs.items) |decl| {
            try file.writeAll(decl.fn_link.wasm.?.functype.items);
        }
        try writeVecSectionHeader(
            file,
            header_offset,
            spec.types_id,
            @intCast(u32, (try file.getPos()) - header_offset - header_size),
            @intCast(u32, self.funcs.items.len),
        );
    }

    // Function section
    {
        const header_offset = try reserveVecSectionHeader(file);
        const writer = file.writer();
        // Each function i uses type i: types were emitted one per function.
        for (self.funcs.items) |_, typeidx| try leb.writeULEB128(writer, @intCast(u32, typeidx));
        try writeVecSectionHeader(
            file,
            header_offset,
            spec.funcs_id,
            @intCast(u32, (try file.getPos()) - header_offset - header_size),
            @intCast(u32, self.funcs.items.len),
        );
    }

    // Export section
    {
        const header_offset = try reserveVecSectionHeader(file);
        const writer = file.writer();
        var count: u32 = 0;
        for (module.decl_exports.entries.items) |entry| {
            for (entry.value) |exprt| {
                // Export name length + name
                try leb.writeULEB128(writer, @intCast(u32, exprt.options.name.len));
                try writer.writeAll(exprt.options.name);

                switch (exprt.exported_decl.typed_value.most_recent.typed_value.ty.zigTypeTag()) {
                    .Fn => {
                        // Type of the export
                        try writer.writeByte(0x00);
                        // Exported function index
                        try leb.writeULEB128(writer, self.getFuncidx(exprt.exported_decl).?);
                    },
                    else => return error.TODOImplementNonFnDeclsForWasm,
                }

                count += 1;
            }
        }
        try writeVecSectionHeader(
            file,
            header_offset,
            spec.exports_id,
            @intCast(u32, (try file.getPos()) - header_offset - header_size),
            count,
        );
    }

    // Code section
    {
        const header_offset = try reserveVecSectionHeader(file);
        const writer = file.writer();
        for (self.funcs.items) |decl| {
            const fn_data = &decl.fn_link.wasm.?;

            // Write the already generated code to the file, inserting
            // function indexes where required.
            var current: u32 = 0;
            for (fn_data.idx_refs.items) |idx_ref| {
                try writer.writeAll(fn_data.code.items[current..idx_ref.offset]);
                current = idx_ref.offset;
                // Use a fixed width here to make calculating the code size
                // in codegen.wasm.genCode() simpler.
                var buf: [5]u8 = undefined;
                leb.writeUnsignedFixed(5, &buf, self.getFuncidx(idx_ref.decl).?);
                try writer.writeAll(&buf);
            }

            try writer.writeAll(fn_data.code.items[current..]);
        }
        try writeVecSectionHeader(
            file,
            header_offset,
            spec.code_id,
            @intCast(u32, (try file.getPos()) - header_offset - header_size),
            @intCast(u32, self.funcs.items.len),
        );
    }
}
|
||||
|
||||
/// Linear search for `decl` in the function list, returning its current
/// index or null when absent.
/// TODO: a hash map would make this lookup O(1).
fn getFuncidx(self: Wasm, decl: *Module.Decl) ?u32 {
    for (self.funcs.items) |entry, i| {
        if (entry == decl) return @intCast(u32, i);
    }
    return null;
}
|
||||
|
||||
fn reserveVecSectionHeader(file: fs.File) !u64 {
|
||||
// section id + fixed leb contents size + fixed leb vector length
|
||||
const header_size = 1 + 5 + 5;
|
||||
// TODO: this should be a single lseek(2) call, but fs.File does not
|
||||
// currently provide a way to do this.
|
||||
try file.seekBy(header_size);
|
||||
return (try file.getPos()) - header_size;
|
||||
}
|
||||
|
||||
fn writeVecSectionHeader(file: fs.File, offset: u64, section: u8, size: u32, items: u32) !void {
|
||||
var buf: [1 + 5 + 5]u8 = undefined;
|
||||
buf[0] = section;
|
||||
leb.writeUnsignedFixed(5, buf[1..6], size);
|
||||
leb.writeUnsignedFixed(5, buf[6..], items);
|
||||
try file.pwriteAll(&buf, offset);
|
||||
}
|
||||
@ -1,293 +0,0 @@
|
||||
const c = @import("c.zig");
|
||||
const assert = @import("std").debug.assert;
|
||||
|
||||
// we wrap the c module for 3 reasons:
|
||||
// 1. to avoid accidentally calling the non-thread-safe functions
|
||||
// 2. patch up some of the types to remove nullability
|
||||
// 3. some functions have been augmented by zig_llvm.cpp to be more powerful,
|
||||
// such as ZigLLVMTargetMachineEmitToFile
|
||||
|
||||
pub const AttributeIndex = c_uint;
|
||||
pub const Bool = c_int;
|
||||
|
||||
pub const Builder = c.LLVMBuilderRef.Child.Child;
|
||||
pub const Context = c.LLVMContextRef.Child.Child;
|
||||
pub const Module = c.LLVMModuleRef.Child.Child;
|
||||
pub const Value = c.LLVMValueRef.Child.Child;
|
||||
pub const Type = c.LLVMTypeRef.Child.Child;
|
||||
pub const BasicBlock = c.LLVMBasicBlockRef.Child.Child;
|
||||
pub const Attribute = c.LLVMAttributeRef.Child.Child;
|
||||
pub const Target = c.LLVMTargetRef.Child.Child;
|
||||
pub const TargetMachine = c.LLVMTargetMachineRef.Child.Child;
|
||||
pub const TargetData = c.LLVMTargetDataRef.Child.Child;
|
||||
pub const DIBuilder = c.ZigLLVMDIBuilder;
|
||||
pub const DIFile = c.ZigLLVMDIFile;
|
||||
pub const DICompileUnit = c.ZigLLVMDICompileUnit;
|
||||
|
||||
pub const ABIAlignmentOfType = c.LLVMABIAlignmentOfType;
|
||||
pub const AddAttributeAtIndex = c.LLVMAddAttributeAtIndex;
|
||||
pub const AddModuleCodeViewFlag = c.ZigLLVMAddModuleCodeViewFlag;
|
||||
pub const AddModuleDebugInfoFlag = c.ZigLLVMAddModuleDebugInfoFlag;
|
||||
pub const ClearCurrentDebugLocation = c.ZigLLVMClearCurrentDebugLocation;
|
||||
pub const ConstAllOnes = c.LLVMConstAllOnes;
|
||||
pub const ConstArray = c.LLVMConstArray;
|
||||
pub const ConstBitCast = c.LLVMConstBitCast;
|
||||
pub const ConstIntOfArbitraryPrecision = c.LLVMConstIntOfArbitraryPrecision;
|
||||
pub const ConstNeg = c.LLVMConstNeg;
|
||||
pub const ConstStructInContext = c.LLVMConstStructInContext;
|
||||
pub const DIBuilderFinalize = c.ZigLLVMDIBuilderFinalize;
|
||||
pub const DisposeBuilder = c.LLVMDisposeBuilder;
|
||||
pub const DisposeDIBuilder = c.ZigLLVMDisposeDIBuilder;
|
||||
pub const DisposeMessage = c.LLVMDisposeMessage;
|
||||
pub const DisposeModule = c.LLVMDisposeModule;
|
||||
pub const DisposeTargetData = c.LLVMDisposeTargetData;
|
||||
pub const DisposeTargetMachine = c.LLVMDisposeTargetMachine;
|
||||
pub const DoubleTypeInContext = c.LLVMDoubleTypeInContext;
|
||||
pub const DumpModule = c.LLVMDumpModule;
|
||||
pub const FP128TypeInContext = c.LLVMFP128TypeInContext;
|
||||
pub const FloatTypeInContext = c.LLVMFloatTypeInContext;
|
||||
pub const GetEnumAttributeKindForName = c.LLVMGetEnumAttributeKindForName;
|
||||
pub const GetMDKindIDInContext = c.LLVMGetMDKindIDInContext;
|
||||
pub const GetUndef = c.LLVMGetUndef;
|
||||
pub const HalfTypeInContext = c.LLVMHalfTypeInContext;
|
||||
pub const InitializeAllAsmParsers = c.LLVMInitializeAllAsmParsers;
|
||||
pub const InitializeAllAsmPrinters = c.LLVMInitializeAllAsmPrinters;
|
||||
pub const InitializeAllTargetInfos = c.LLVMInitializeAllTargetInfos;
|
||||
pub const InitializeAllTargetMCs = c.LLVMInitializeAllTargetMCs;
|
||||
pub const InitializeAllTargets = c.LLVMInitializeAllTargets;
|
||||
pub const InsertBasicBlockInContext = c.LLVMInsertBasicBlockInContext;
|
||||
pub const Int128TypeInContext = c.LLVMInt128TypeInContext;
|
||||
pub const Int16TypeInContext = c.LLVMInt16TypeInContext;
|
||||
pub const Int1TypeInContext = c.LLVMInt1TypeInContext;
|
||||
pub const Int32TypeInContext = c.LLVMInt32TypeInContext;
|
||||
pub const Int64TypeInContext = c.LLVMInt64TypeInContext;
|
||||
pub const Int8TypeInContext = c.LLVMInt8TypeInContext;
|
||||
pub const IntPtrTypeForASInContext = c.LLVMIntPtrTypeForASInContext;
|
||||
pub const IntPtrTypeInContext = c.LLVMIntPtrTypeInContext;
|
||||
pub const LabelTypeInContext = c.LLVMLabelTypeInContext;
|
||||
pub const MDNodeInContext = c.LLVMMDNodeInContext;
|
||||
pub const MDStringInContext = c.LLVMMDStringInContext;
|
||||
pub const MetadataTypeInContext = c.LLVMMetadataTypeInContext;
|
||||
pub const PPCFP128TypeInContext = c.LLVMPPCFP128TypeInContext;
|
||||
pub const SetAlignment = c.LLVMSetAlignment;
|
||||
pub const SetDataLayout = c.LLVMSetDataLayout;
|
||||
pub const SetGlobalConstant = c.LLVMSetGlobalConstant;
|
||||
pub const SetInitializer = c.LLVMSetInitializer;
|
||||
pub const SetLinkage = c.LLVMSetLinkage;
|
||||
pub const SetTarget = c.LLVMSetTarget;
|
||||
pub const SetUnnamedAddr = c.LLVMSetUnnamedAddr;
|
||||
pub const SetVolatile = c.LLVMSetVolatile;
|
||||
pub const StructTypeInContext = c.LLVMStructTypeInContext;
|
||||
pub const TokenTypeInContext = c.LLVMTokenTypeInContext;
|
||||
pub const X86FP80TypeInContext = c.LLVMX86FP80TypeInContext;
|
||||
pub const X86MMXTypeInContext = c.LLVMX86MMXTypeInContext;
|
||||
|
||||
pub const AddGlobal = LLVMAddGlobal;
|
||||
extern fn LLVMAddGlobal(M: *Module, Ty: *Type, Name: [*:0]const u8) ?*Value;
|
||||
|
||||
pub const ConstStringInContext = LLVMConstStringInContext;
|
||||
extern fn LLVMConstStringInContext(C: *Context, Str: [*]const u8, Length: c_uint, DontNullTerminate: Bool) ?*Value;
|
||||
|
||||
pub const ConstInt = LLVMConstInt;
|
||||
extern fn LLVMConstInt(IntTy: *Type, N: c_ulonglong, SignExtend: Bool) ?*Value;
|
||||
|
||||
pub const BuildLoad = LLVMBuildLoad;
|
||||
extern fn LLVMBuildLoad(arg0: *Builder, PointerVal: *Value, Name: [*:0]const u8) ?*Value;
|
||||
|
||||
pub const ConstNull = LLVMConstNull;
|
||||
extern fn LLVMConstNull(Ty: *Type) ?*Value;
|
||||
|
||||
pub const CreateStringAttribute = LLVMCreateStringAttribute;
|
||||
extern fn LLVMCreateStringAttribute(
|
||||
C: *Context,
|
||||
K: [*]const u8,
|
||||
KLength: c_uint,
|
||||
V: [*]const u8,
|
||||
VLength: c_uint,
|
||||
) ?*Attribute;
|
||||
|
||||
pub const CreateEnumAttribute = LLVMCreateEnumAttribute;
|
||||
extern fn LLVMCreateEnumAttribute(C: *Context, KindID: c_uint, Val: u64) ?*Attribute;
|
||||
|
||||
pub const AddFunction = LLVMAddFunction;
|
||||
extern fn LLVMAddFunction(M: *Module, Name: [*:0]const u8, FunctionTy: *Type) ?*Value;
|
||||
|
||||
pub const CreateCompileUnit = ZigLLVMCreateCompileUnit;
|
||||
extern fn ZigLLVMCreateCompileUnit(
|
||||
dibuilder: *DIBuilder,
|
||||
lang: c_uint,
|
||||
difile: *DIFile,
|
||||
producer: [*:0]const u8,
|
||||
is_optimized: bool,
|
||||
flags: [*:0]const u8,
|
||||
runtime_version: c_uint,
|
||||
split_name: [*:0]const u8,
|
||||
dwo_id: u64,
|
||||
emit_debug_info: bool,
|
||||
) ?*DICompileUnit;
|
||||
|
||||
pub const CreateFile = ZigLLVMCreateFile;
|
||||
extern fn ZigLLVMCreateFile(dibuilder: *DIBuilder, filename: [*:0]const u8, directory: [*:0]const u8) ?*DIFile;
|
||||
|
||||
pub const ArrayType = LLVMArrayType;
|
||||
extern fn LLVMArrayType(ElementType: *Type, ElementCount: c_uint) ?*Type;
|
||||
|
||||
pub const CreateDIBuilder = ZigLLVMCreateDIBuilder;
|
||||
extern fn ZigLLVMCreateDIBuilder(module: *Module, allow_unresolved: bool) ?*DIBuilder;
|
||||
|
||||
pub const PointerType = LLVMPointerType;
|
||||
extern fn LLVMPointerType(ElementType: *Type, AddressSpace: c_uint) ?*Type;
|
||||
|
||||
pub const CreateBuilderInContext = LLVMCreateBuilderInContext;
|
||||
extern fn LLVMCreateBuilderInContext(C: *Context) ?*Builder;
|
||||
|
||||
pub const IntTypeInContext = LLVMIntTypeInContext;
|
||||
extern fn LLVMIntTypeInContext(C: *Context, NumBits: c_uint) ?*Type;
|
||||
|
||||
pub const ModuleCreateWithNameInContext = LLVMModuleCreateWithNameInContext;
|
||||
extern fn LLVMModuleCreateWithNameInContext(ModuleID: [*:0]const u8, C: *Context) ?*Module;
|
||||
|
||||
pub const VoidTypeInContext = LLVMVoidTypeInContext;
|
||||
extern fn LLVMVoidTypeInContext(C: *Context) ?*Type;
|
||||
|
||||
pub const ContextCreate = LLVMContextCreate;
|
||||
extern fn LLVMContextCreate() ?*Context;
|
||||
|
||||
pub const ContextDispose = LLVMContextDispose;
|
||||
extern fn LLVMContextDispose(C: *Context) void;
|
||||
|
||||
pub const CopyStringRepOfTargetData = LLVMCopyStringRepOfTargetData;
|
||||
extern fn LLVMCopyStringRepOfTargetData(TD: *TargetData) ?[*:0]u8;
|
||||
|
||||
pub const CreateTargetDataLayout = LLVMCreateTargetDataLayout;
|
||||
extern fn LLVMCreateTargetDataLayout(T: *TargetMachine) ?*TargetData;
|
||||
|
||||
pub const CreateTargetMachine = ZigLLVMCreateTargetMachine;
|
||||
extern fn ZigLLVMCreateTargetMachine(
|
||||
T: *Target,
|
||||
Triple: [*:0]const u8,
|
||||
CPU: [*:0]const u8,
|
||||
Features: [*:0]const u8,
|
||||
Level: CodeGenOptLevel,
|
||||
Reloc: RelocMode,
|
||||
CodeModel: CodeModel,
|
||||
function_sections: bool,
|
||||
) ?*TargetMachine;
|
||||
|
||||
pub const GetHostCPUName = LLVMGetHostCPUName;
|
||||
extern fn LLVMGetHostCPUName() ?[*:0]u8;
|
||||
|
||||
pub const GetNativeFeatures = ZigLLVMGetNativeFeatures;
|
||||
extern fn ZigLLVMGetNativeFeatures() ?[*:0]u8;
|
||||
|
||||
pub const GetElementType = LLVMGetElementType;
|
||||
extern fn LLVMGetElementType(Ty: *Type) *Type;
|
||||
|
||||
pub const TypeOf = LLVMTypeOf;
|
||||
extern fn LLVMTypeOf(Val: *Value) *Type;
|
||||
|
||||
pub const BuildStore = LLVMBuildStore;
|
||||
extern fn LLVMBuildStore(arg0: *Builder, Val: *Value, Ptr: *Value) ?*Value;
|
||||
|
||||
pub const BuildAlloca = LLVMBuildAlloca;
|
||||
extern fn LLVMBuildAlloca(arg0: *Builder, Ty: *Type, Name: ?[*:0]const u8) ?*Value;
|
||||
|
||||
pub const ConstInBoundsGEP = LLVMConstInBoundsGEP;
|
||||
pub extern fn LLVMConstInBoundsGEP(ConstantVal: *Value, ConstantIndices: [*]*Value, NumIndices: c_uint) ?*Value;
|
||||
|
||||
pub const GetTargetFromTriple = LLVMGetTargetFromTriple;
|
||||
extern fn LLVMGetTargetFromTriple(Triple: [*:0]const u8, T: **Target, ErrorMessage: ?*[*:0]u8) Bool;
|
||||
|
||||
pub const VerifyModule = LLVMVerifyModule;
|
||||
extern fn LLVMVerifyModule(M: *Module, Action: VerifierFailureAction, OutMessage: *?[*:0]u8) Bool;
|
||||
|
||||
pub const GetInsertBlock = LLVMGetInsertBlock;
|
||||
extern fn LLVMGetInsertBlock(Builder: *Builder) *BasicBlock;
|
||||
|
||||
pub const FunctionType = LLVMFunctionType;
|
||||
extern fn LLVMFunctionType(
|
||||
ReturnType: *Type,
|
||||
ParamTypes: [*]*Type,
|
||||
ParamCount: c_uint,
|
||||
IsVarArg: Bool,
|
||||
) ?*Type;
|
||||
|
||||
pub const GetParam = LLVMGetParam;
|
||||
extern fn LLVMGetParam(Fn: *Value, Index: c_uint) *Value;
|
||||
|
||||
pub const AppendBasicBlockInContext = LLVMAppendBasicBlockInContext;
|
||||
extern fn LLVMAppendBasicBlockInContext(C: *Context, Fn: *Value, Name: [*:0]const u8) ?*BasicBlock;
|
||||
|
||||
pub const PositionBuilderAtEnd = LLVMPositionBuilderAtEnd;
|
||||
extern fn LLVMPositionBuilderAtEnd(Builder: *Builder, Block: *BasicBlock) void;
|
||||
|
||||
pub const AbortProcessAction = VerifierFailureAction.LLVMAbortProcessAction;
|
||||
pub const PrintMessageAction = VerifierFailureAction.LLVMPrintMessageAction;
|
||||
pub const ReturnStatusAction = VerifierFailureAction.LLVMReturnStatusAction;
|
||||
pub const VerifierFailureAction = c.LLVMVerifierFailureAction;
|
||||
|
||||
pub const CodeGenLevelNone = CodeGenOptLevel.LLVMCodeGenLevelNone;
|
||||
pub const CodeGenLevelLess = CodeGenOptLevel.LLVMCodeGenLevelLess;
|
||||
pub const CodeGenLevelDefault = CodeGenOptLevel.LLVMCodeGenLevelDefault;
|
||||
pub const CodeGenLevelAggressive = CodeGenOptLevel.LLVMCodeGenLevelAggressive;
|
||||
pub const CodeGenOptLevel = c.LLVMCodeGenOptLevel;
|
||||
|
||||
pub const RelocDefault = RelocMode.LLVMRelocDefault;
|
||||
pub const RelocStatic = RelocMode.LLVMRelocStatic;
|
||||
pub const RelocPIC = RelocMode.LLVMRelocPIC;
|
||||
pub const RelocDynamicNoPic = RelocMode.LLVMRelocDynamicNoPic;
|
||||
pub const RelocMode = c.LLVMRelocMode;
|
||||
|
||||
pub const CodeModelDefault = CodeModel.LLVMCodeModelDefault;
|
||||
pub const CodeModelJITDefault = CodeModel.LLVMCodeModelJITDefault;
|
||||
pub const CodeModelSmall = CodeModel.LLVMCodeModelSmall;
|
||||
pub const CodeModelKernel = CodeModel.LLVMCodeModelKernel;
|
||||
pub const CodeModelMedium = CodeModel.LLVMCodeModelMedium;
|
||||
pub const CodeModelLarge = CodeModel.LLVMCodeModelLarge;
|
||||
pub const CodeModel = c.LLVMCodeModel;
|
||||
|
||||
pub const EmitAssembly = EmitOutputType.ZigLLVM_EmitAssembly;
|
||||
pub const EmitBinary = EmitOutputType.ZigLLVM_EmitBinary;
|
||||
pub const EmitLLVMIr = EmitOutputType.ZigLLVM_EmitLLVMIr;
|
||||
pub const EmitOutputType = c.ZigLLVM_EmitOutputType;
|
||||
|
||||
pub const CCallConv = CallConv.LLVMCCallConv;
|
||||
pub const FastCallConv = CallConv.LLVMFastCallConv;
|
||||
pub const ColdCallConv = CallConv.LLVMColdCallConv;
|
||||
pub const WebKitJSCallConv = CallConv.LLVMWebKitJSCallConv;
|
||||
pub const AnyRegCallConv = CallConv.LLVMAnyRegCallConv;
|
||||
pub const X86StdcallCallConv = CallConv.LLVMX86StdcallCallConv;
|
||||
pub const X86FastcallCallConv = CallConv.LLVMX86FastcallCallConv;
|
||||
pub const CallConv = c.LLVMCallConv;
|
||||
|
||||
pub const CallAttr = extern enum {
|
||||
Auto,
|
||||
NeverTail,
|
||||
NeverInline,
|
||||
AlwaysTail,
|
||||
AlwaysInline,
|
||||
};
|
||||
|
||||
fn removeNullability(comptime T: type) type {
|
||||
comptime assert(@typeInfo(T).Pointer.size == .C);
|
||||
return *T.Child;
|
||||
}
|
||||
|
||||
pub const BuildRet = LLVMBuildRet;
|
||||
extern fn LLVMBuildRet(arg0: *Builder, V: ?*Value) ?*Value;
|
||||
|
||||
pub const TargetMachineEmitToFile = ZigLLVMTargetMachineEmitToFile;
|
||||
extern fn ZigLLVMTargetMachineEmitToFile(
|
||||
targ_machine_ref: *TargetMachine,
|
||||
module_ref: *Module,
|
||||
filename: [*:0]const u8,
|
||||
output_type: EmitOutputType,
|
||||
error_message: *[*:0]u8,
|
||||
is_debug: bool,
|
||||
is_small: bool,
|
||||
) bool;
|
||||
|
||||
pub const BuildCall = ZigLLVMBuildCall;
|
||||
extern fn ZigLLVMBuildCall(B: *Builder, Fn: *Value, Args: [*]*Value, NumArgs: c_uint, CC: CallConv, fn_inline: CallAttr, Name: [*:0]const u8) ?*Value;
|
||||
|
||||
pub const PrivateLinkage = c.LLVMLinkage.LLVMPrivateLinkage;
|
||||
@ -1,927 +0,0 @@
|
||||
const std = @import("std");
|
||||
const io = std.io;
|
||||
const fs = std.fs;
|
||||
const mem = std.mem;
|
||||
const process = std.process;
|
||||
const Allocator = mem.Allocator;
|
||||
const ArrayList = std.ArrayList;
|
||||
const ast = std.zig.ast;
|
||||
const Module = @import("Module.zig");
|
||||
const link = @import("link.zig");
|
||||
const Package = @import("Package.zig");
|
||||
const zir = @import("zir.zig");
|
||||
const build_options = @import("build_options");
|
||||
|
||||
pub const max_src_size = 2 * 1024 * 1024 * 1024; // 2 GiB
|
||||
|
||||
pub const Color = enum {
|
||||
Auto,
|
||||
Off,
|
||||
On,
|
||||
};
|
||||
|
||||
const usage =
|
||||
\\Usage: zig [command] [options]
|
||||
\\
|
||||
\\Commands:
|
||||
\\
|
||||
\\ build-exe [source] Create executable from source or object files
|
||||
\\ build-lib [source] Create library from source or object files
|
||||
\\ build-obj [source] Create object from source or assembly
|
||||
\\ fmt [source] Parse file and render in canonical zig format
|
||||
\\ targets List available compilation targets
|
||||
\\ env Print lib path, std path, compiler id and version
|
||||
\\ version Print version number and exit
|
||||
\\ zen Print zen of zig and exit
|
||||
\\
|
||||
\\
|
||||
;
|
||||
|
||||
pub fn log(
|
||||
comptime level: std.log.Level,
|
||||
comptime scope: @TypeOf(.EnumLiteral),
|
||||
comptime format: []const u8,
|
||||
args: anytype,
|
||||
) void {
|
||||
// Hide anything more verbose than warn unless it was added with `-Dlog=foo`.
|
||||
if (@enumToInt(level) > @enumToInt(std.log.level) or
|
||||
@enumToInt(level) > @enumToInt(std.log.Level.warn))
|
||||
{
|
||||
const scope_name = @tagName(scope);
|
||||
const ok = comptime for (build_options.log_scopes) |log_scope| {
|
||||
if (mem.eql(u8, log_scope, scope_name))
|
||||
break true;
|
||||
} else false;
|
||||
|
||||
if (!ok)
|
||||
return;
|
||||
}
|
||||
|
||||
const prefix = "[" ++ @tagName(level) ++ "] " ++ "(" ++ @tagName(scope) ++ "): ";
|
||||
|
||||
// Print the message to stderr, silently ignoring any errors
|
||||
std.debug.print(prefix ++ format ++ "\n", args);
|
||||
}
|
||||
|
||||
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
|
||||
|
||||
pub fn main() !void {
|
||||
const gpa = if (std.builtin.link_libc) std.heap.c_allocator else &general_purpose_allocator.allocator;
|
||||
defer if (!std.builtin.link_libc) {
|
||||
_ = general_purpose_allocator.deinit();
|
||||
};
|
||||
var arena_instance = std.heap.ArenaAllocator.init(gpa);
|
||||
defer arena_instance.deinit();
|
||||
const arena = &arena_instance.allocator;
|
||||
|
||||
const args = try process.argsAlloc(arena);
|
||||
|
||||
if (args.len <= 1) {
|
||||
std.debug.print("expected command argument\n\n{}", .{usage});
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const cmd = args[1];
|
||||
const cmd_args = args[2..];
|
||||
if (mem.eql(u8, cmd, "build-exe")) {
|
||||
return buildOutputType(gpa, arena, cmd_args, .Exe);
|
||||
} else if (mem.eql(u8, cmd, "build-lib")) {
|
||||
return buildOutputType(gpa, arena, cmd_args, .Lib);
|
||||
} else if (mem.eql(u8, cmd, "build-obj")) {
|
||||
return buildOutputType(gpa, arena, cmd_args, .Obj);
|
||||
} else if (mem.eql(u8, cmd, "fmt")) {
|
||||
return cmdFmt(gpa, cmd_args);
|
||||
} else if (mem.eql(u8, cmd, "targets")) {
|
||||
const info = try std.zig.system.NativeTargetInfo.detect(arena, .{});
|
||||
const stdout = io.getStdOut().outStream();
|
||||
return @import("print_targets.zig").cmdTargets(arena, cmd_args, stdout, info.target);
|
||||
} else if (mem.eql(u8, cmd, "version")) {
|
||||
try std.io.getStdOut().writeAll(build_options.version ++ "\n");
|
||||
} else if (mem.eql(u8, cmd, "env")) {
|
||||
try @import("print_env.zig").cmdEnv(arena, cmd_args, io.getStdOut().outStream());
|
||||
} else if (mem.eql(u8, cmd, "zen")) {
|
||||
try io.getStdOut().writeAll(info_zen);
|
||||
} else if (mem.eql(u8, cmd, "help")) {
|
||||
try io.getStdOut().writeAll(usage);
|
||||
} else {
|
||||
std.debug.print("unknown command: {}\n\n{}", .{ args[1], usage });
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
const usage_build_generic =
|
||||
\\Usage: zig build-exe <options> [files]
|
||||
\\ zig build-lib <options> [files]
|
||||
\\ zig build-obj <options> [files]
|
||||
\\
|
||||
\\Supported file types:
|
||||
\\ .zig Zig source code
|
||||
\\ .zir Zig Intermediate Representation code
|
||||
\\ (planned) .o ELF object file
|
||||
\\ (planned) .o MACH-O (macOS) object file
|
||||
\\ (planned) .obj COFF (Windows) object file
|
||||
\\ (planned) .lib COFF (Windows) static library
|
||||
\\ (planned) .a ELF static library
|
||||
\\ (planned) .so ELF shared object (dynamic link)
|
||||
\\ (planned) .dll Windows Dynamic Link Library
|
||||
\\ (planned) .dylib MACH-O (macOS) dynamic library
|
||||
\\ (planned) .s Target-specific assembly source code
|
||||
\\ (planned) .S Assembly with C preprocessor (requires LLVM extensions)
|
||||
\\ (planned) .c C source code (requires LLVM extensions)
|
||||
\\ (planned) .cpp C++ source code (requires LLVM extensions)
|
||||
\\ Other C++ extensions: .C .cc .cxx
|
||||
\\
|
||||
\\General Options:
|
||||
\\ -h, --help Print this help and exit
|
||||
\\ --watch Enable compiler REPL
|
||||
\\ --color [auto|off|on] Enable or disable colored error messages
|
||||
\\ -femit-bin[=path] (default) output machine code
|
||||
\\ -fno-emit-bin Do not output machine code
|
||||
\\
|
||||
\\Compile Options:
|
||||
\\ -target [name] <arch><sub>-<os>-<abi> see the targets command
|
||||
\\ -mcpu [cpu] Specify target CPU and feature set
|
||||
\\ --name [name] Override output name
|
||||
\\ --mode [mode] Set the build mode
|
||||
\\ Debug (default) optimizations off, safety on
|
||||
\\ ReleaseFast Optimizations on, safety off
|
||||
\\ ReleaseSafe Optimizations on, safety on
|
||||
\\ ReleaseSmall Optimize for small binary, safety off
|
||||
\\ --dynamic Force output to be dynamically linked
|
||||
\\ --strip Exclude debug symbols
|
||||
\\ -ofmt=[mode] Override target object format
|
||||
\\ elf Executable and Linking Format
|
||||
\\ c Compile to C source code
|
||||
\\ wasm WebAssembly
|
||||
\\ pe Portable Executable (Windows)
|
||||
\\ coff (planned) Common Object File Format (Windows)
|
||||
\\ macho (planned) macOS relocatables
|
||||
\\ hex (planned) Intel IHEX
|
||||
\\ raw (planned) Dump machine code directly
|
||||
\\
|
||||
\\Link Options:
|
||||
\\ -l[lib], --library [lib] Link against system library
|
||||
\\ --dynamic-linker [path] Set the dynamic interpreter path (usually ld.so)
|
||||
\\ --version [ver] Dynamic library semver
|
||||
\\
|
||||
\\Debug Options (Zig Compiler Development):
|
||||
\\ -ftime-report Print timing diagnostics
|
||||
\\ --debug-tokenize verbose tokenization
|
||||
\\ --debug-ast-tree verbose parsing into an AST (tree view)
|
||||
\\ --debug-ast-fmt verbose parsing into an AST (render source)
|
||||
\\ --debug-ir verbose Zig IR
|
||||
\\ --debug-link verbose linking
|
||||
\\ --debug-codegen verbose machine code generation
|
||||
\\
|
||||
;
|
||||
|
||||
const Emit = union(enum) {
|
||||
no,
|
||||
yes_default_path,
|
||||
yes: []const u8,
|
||||
};
|
||||
|
||||
fn buildOutputType(
|
||||
gpa: *Allocator,
|
||||
arena: *Allocator,
|
||||
args: []const []const u8,
|
||||
output_mode: std.builtin.OutputMode,
|
||||
) !void {
|
||||
var color: Color = .Auto;
|
||||
var build_mode: std.builtin.Mode = .Debug;
|
||||
var provided_name: ?[]const u8 = null;
|
||||
var link_mode: ?std.builtin.LinkMode = null;
|
||||
var root_src_file: ?[]const u8 = null;
|
||||
var version: std.builtin.Version = .{ .major = 0, .minor = 0, .patch = 0 };
|
||||
var strip = false;
|
||||
var watch = false;
|
||||
var debug_tokenize = false;
|
||||
var debug_ast_tree = false;
|
||||
var debug_ast_fmt = false;
|
||||
var debug_link = false;
|
||||
var debug_ir = false;
|
||||
var debug_codegen = false;
|
||||
var time_report = false;
|
||||
var emit_bin: Emit = .yes_default_path;
|
||||
var emit_zir: Emit = .no;
|
||||
var target_arch_os_abi: []const u8 = "native";
|
||||
var target_mcpu: ?[]const u8 = null;
|
||||
var target_dynamic_linker: ?[]const u8 = null;
|
||||
var target_ofmt: ?[]const u8 = null;
|
||||
|
||||
var system_libs = std.ArrayList([]const u8).init(gpa);
|
||||
defer system_libs.deinit();
|
||||
|
||||
{
|
||||
var i: usize = 0;
|
||||
while (i < args.len) : (i += 1) {
|
||||
const arg = args[i];
|
||||
if (mem.startsWith(u8, arg, "-")) {
|
||||
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
|
||||
try io.getStdOut().writeAll(usage_build_generic);
|
||||
process.exit(0);
|
||||
} else if (mem.eql(u8, arg, "--color")) {
|
||||
if (i + 1 >= args.len) {
|
||||
std.debug.print("expected [auto|on|off] after --color\n", .{});
|
||||
process.exit(1);
|
||||
}
|
||||
i += 1;
|
||||
const next_arg = args[i];
|
||||
if (mem.eql(u8, next_arg, "auto")) {
|
||||
color = .Auto;
|
||||
} else if (mem.eql(u8, next_arg, "on")) {
|
||||
color = .On;
|
||||
} else if (mem.eql(u8, next_arg, "off")) {
|
||||
color = .Off;
|
||||
} else {
|
||||
std.debug.print("expected [auto|on|off] after --color, found '{}'\n", .{next_arg});
|
||||
process.exit(1);
|
||||
}
|
||||
} else if (mem.eql(u8, arg, "--mode")) {
|
||||
if (i + 1 >= args.len) {
|
||||
std.debug.print("expected [Debug|ReleaseSafe|ReleaseFast|ReleaseSmall] after --mode\n", .{});
|
||||
process.exit(1);
|
||||
}
|
||||
i += 1;
|
||||
const next_arg = args[i];
|
||||
if (mem.eql(u8, next_arg, "Debug")) {
|
||||
build_mode = .Debug;
|
||||
} else if (mem.eql(u8, next_arg, "ReleaseSafe")) {
|
||||
build_mode = .ReleaseSafe;
|
||||
} else if (mem.eql(u8, next_arg, "ReleaseFast")) {
|
||||
build_mode = .ReleaseFast;
|
||||
} else if (mem.eql(u8, next_arg, "ReleaseSmall")) {
|
||||
build_mode = .ReleaseSmall;
|
||||
} else {
|
||||
std.debug.print("expected [Debug|ReleaseSafe|ReleaseFast|ReleaseSmall] after --mode, found '{}'\n", .{next_arg});
|
||||
process.exit(1);
|
||||
}
|
||||
} else if (mem.eql(u8, arg, "--name")) {
|
||||
if (i + 1 >= args.len) {
|
||||
std.debug.print("expected parameter after --name\n", .{});
|
||||
process.exit(1);
|
||||
}
|
||||
i += 1;
|
||||
provided_name = args[i];
|
||||
} else if (mem.eql(u8, arg, "--library")) {
|
||||
if (i + 1 >= args.len) {
|
||||
std.debug.print("expected parameter after --library\n", .{});
|
||||
process.exit(1);
|
||||
}
|
||||
i += 1;
|
||||
try system_libs.append(args[i]);
|
||||
} else if (mem.eql(u8, arg, "--version")) {
|
||||
if (i + 1 >= args.len) {
|
||||
std.debug.print("expected parameter after --version\n", .{});
|
||||
process.exit(1);
|
||||
}
|
||||
i += 1;
|
||||
version = std.builtin.Version.parse(args[i]) catch |err| {
|
||||
std.debug.print("unable to parse --version '{}': {}\n", .{ args[i], @errorName(err) });
|
||||
process.exit(1);
|
||||
};
|
||||
} else if (mem.eql(u8, arg, "-target")) {
|
||||
if (i + 1 >= args.len) {
|
||||
std.debug.print("expected parameter after -target\n", .{});
|
||||
process.exit(1);
|
||||
}
|
||||
i += 1;
|
||||
target_arch_os_abi = args[i];
|
||||
} else if (mem.eql(u8, arg, "-mcpu")) {
|
||||
if (i + 1 >= args.len) {
|
||||
std.debug.print("expected parameter after -mcpu\n", .{});
|
||||
process.exit(1);
|
||||
}
|
||||
i += 1;
|
||||
target_mcpu = args[i];
|
||||
} else if (mem.startsWith(u8, arg, "-ofmt=")) {
|
||||
target_ofmt = arg["-ofmt=".len..];
|
||||
} else if (mem.startsWith(u8, arg, "-mcpu=")) {
|
||||
target_mcpu = arg["-mcpu=".len..];
|
||||
} else if (mem.eql(u8, arg, "--dynamic-linker")) {
|
||||
if (i + 1 >= args.len) {
|
||||
std.debug.print("expected parameter after --dynamic-linker\n", .{});
|
||||
process.exit(1);
|
||||
}
|
||||
i += 1;
|
||||
target_dynamic_linker = args[i];
|
||||
} else if (mem.eql(u8, arg, "--watch")) {
|
||||
watch = true;
|
||||
} else if (mem.eql(u8, arg, "-ftime-report")) {
|
||||
time_report = true;
|
||||
} else if (mem.eql(u8, arg, "-femit-bin")) {
|
||||
emit_bin = .yes_default_path;
|
||||
} else if (mem.startsWith(u8, arg, "-femit-bin=")) {
|
||||
emit_bin = .{ .yes = arg["-femit-bin=".len..] };
|
||||
} else if (mem.eql(u8, arg, "-fno-emit-bin")) {
|
||||
emit_bin = .no;
|
||||
} else if (mem.eql(u8, arg, "-femit-zir")) {
|
||||
emit_zir = .yes_default_path;
|
||||
} else if (mem.startsWith(u8, arg, "-femit-zir=")) {
|
||||
emit_zir = .{ .yes = arg["-femit-zir=".len..] };
|
||||
} else if (mem.eql(u8, arg, "-fno-emit-zir")) {
|
||||
emit_zir = .no;
|
||||
} else if (mem.eql(u8, arg, "-dynamic")) {
|
||||
link_mode = .Dynamic;
|
||||
} else if (mem.eql(u8, arg, "-static")) {
|
||||
link_mode = .Static;
|
||||
} else if (mem.eql(u8, arg, "--strip")) {
|
||||
strip = true;
|
||||
} else if (mem.eql(u8, arg, "--debug-tokenize")) {
|
||||
debug_tokenize = true;
|
||||
} else if (mem.eql(u8, arg, "--debug-ast-tree")) {
|
||||
debug_ast_tree = true;
|
||||
} else if (mem.eql(u8, arg, "--debug-ast-fmt")) {
|
||||
debug_ast_fmt = true;
|
||||
} else if (mem.eql(u8, arg, "--debug-link")) {
|
||||
debug_link = true;
|
||||
} else if (mem.eql(u8, arg, "--debug-ir")) {
|
||||
debug_ir = true;
|
||||
} else if (mem.eql(u8, arg, "--debug-codegen")) {
|
||||
debug_codegen = true;
|
||||
} else if (mem.startsWith(u8, arg, "-l")) {
|
||||
try system_libs.append(arg[2..]);
|
||||
} else {
|
||||
std.debug.print("unrecognized parameter: '{}'\n", .{arg});
|
||||
process.exit(1);
|
||||
}
|
||||
} else if (mem.endsWith(u8, arg, ".s") or mem.endsWith(u8, arg, ".S")) {
|
||||
std.debug.print("assembly files not supported yet\n", .{});
|
||||
process.exit(1);
|
||||
} else if (mem.endsWith(u8, arg, ".o") or
|
||||
mem.endsWith(u8, arg, ".obj") or
|
||||
mem.endsWith(u8, arg, ".a") or
|
||||
mem.endsWith(u8, arg, ".lib"))
|
||||
{
|
||||
std.debug.print("object files and static libraries not supported yet\n", .{});
|
||||
process.exit(1);
|
||||
} else if (mem.endsWith(u8, arg, ".c") or
|
||||
mem.endsWith(u8, arg, ".cpp"))
|
||||
{
|
||||
std.debug.print("compilation of C and C++ source code requires LLVM extensions which are not implemented yet\n", .{});
|
||||
process.exit(1);
|
||||
} else if (mem.endsWith(u8, arg, ".so") or
|
||||
mem.endsWith(u8, arg, ".dylib") or
|
||||
mem.endsWith(u8, arg, ".dll"))
|
||||
{
|
||||
std.debug.print("linking against dynamic libraries not yet supported\n", .{});
|
||||
process.exit(1);
|
||||
} else if (mem.endsWith(u8, arg, ".zig") or mem.endsWith(u8, arg, ".zir")) {
|
||||
if (root_src_file) |other| {
|
||||
std.debug.print("found another zig file '{}' after root source file '{}'\n", .{ arg, other });
|
||||
process.exit(1);
|
||||
} else {
|
||||
root_src_file = arg;
|
||||
}
|
||||
} else {
|
||||
std.debug.print("unrecognized file extension of parameter '{}'\n", .{arg});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const root_name = if (provided_name) |n| n else blk: {
|
||||
if (root_src_file) |file| {
|
||||
const basename = fs.path.basename(file);
|
||||
var it = mem.split(basename, ".");
|
||||
break :blk it.next() orelse basename;
|
||||
} else {
|
||||
std.debug.print("--name [name] not provided and unable to infer\n", .{});
|
||||
process.exit(1);
|
||||
}
|
||||
};
|
||||
|
||||
if (system_libs.items.len != 0) {
|
||||
std.debug.print("linking against system libraries not yet supported\n", .{});
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
var diags: std.zig.CrossTarget.ParseOptions.Diagnostics = .{};
|
||||
const cross_target = std.zig.CrossTarget.parse(.{
|
||||
.arch_os_abi = target_arch_os_abi,
|
||||
.cpu_features = target_mcpu,
|
||||
.dynamic_linker = target_dynamic_linker,
|
||||
.diagnostics = &diags,
|
||||
}) catch |err| switch (err) {
|
||||
error.UnknownCpuModel => {
|
||||
std.debug.print("Unknown CPU: '{}'\nAvailable CPUs for architecture '{}':\n", .{
|
||||
diags.cpu_name.?,
|
||||
@tagName(diags.arch.?),
|
||||
});
|
||||
for (diags.arch.?.allCpuModels()) |cpu| {
|
||||
std.debug.print(" {}\n", .{cpu.name});
|
||||
}
|
||||
process.exit(1);
|
||||
},
|
||||
error.UnknownCpuFeature => {
|
||||
std.debug.print(
|
||||
\\Unknown CPU feature: '{}'
|
||||
\\Available CPU features for architecture '{}':
|
||||
\\
|
||||
, .{
|
||||
diags.unknown_feature_name,
|
||||
@tagName(diags.arch.?),
|
||||
});
|
||||
for (diags.arch.?.allFeaturesList()) |feature| {
|
||||
std.debug.print(" {}: {}\n", .{ feature.name, feature.description });
|
||||
}
|
||||
process.exit(1);
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
|
||||
var target_info = try std.zig.system.NativeTargetInfo.detect(gpa, cross_target);
|
||||
if (target_info.cpu_detection_unimplemented) {
|
||||
// TODO We want to just use detected_info.target but implementing
|
||||
// CPU model & feature detection is todo so here we rely on LLVM.
|
||||
std.debug.print("CPU features detection is not yet available for this system without LLVM extensions\n", .{});
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const src_path = root_src_file orelse {
|
||||
std.debug.print("expected at least one file argument", .{});
|
||||
process.exit(1);
|
||||
};
|
||||
|
||||
const object_format: ?std.Target.ObjectFormat = blk: {
|
||||
const ofmt = target_ofmt orelse break :blk null;
|
||||
if (mem.eql(u8, ofmt, "elf")) {
|
||||
break :blk .elf;
|
||||
} else if (mem.eql(u8, ofmt, "c")) {
|
||||
break :blk .c;
|
||||
} else if (mem.eql(u8, ofmt, "coff")) {
|
||||
break :blk .coff;
|
||||
} else if (mem.eql(u8, ofmt, "pe")) {
|
||||
break :blk .pe;
|
||||
} else if (mem.eql(u8, ofmt, "macho")) {
|
||||
break :blk .macho;
|
||||
} else if (mem.eql(u8, ofmt, "wasm")) {
|
||||
break :blk .wasm;
|
||||
} else if (mem.eql(u8, ofmt, "hex")) {
|
||||
break :blk .hex;
|
||||
} else if (mem.eql(u8, ofmt, "raw")) {
|
||||
break :blk .raw;
|
||||
} else {
|
||||
std.debug.print("unsupported object format: {}", .{ofmt});
|
||||
process.exit(1);
|
||||
}
|
||||
};
|
||||
|
||||
const bin_path = switch (emit_bin) {
|
||||
.no => {
|
||||
std.debug.print("-fno-emit-bin not supported yet", .{});
|
||||
process.exit(1);
|
||||
},
|
||||
.yes_default_path => if (object_format != null and object_format.? == .c)
|
||||
try std.fmt.allocPrint(arena, "{}.c", .{root_name})
|
||||
else
|
||||
try std.zig.binNameAlloc(arena, root_name, target_info.target, output_mode, link_mode),
|
||||
|
||||
.yes => |p| p,
|
||||
};
|
||||
|
||||
const zir_out_path: ?[]const u8 = switch (emit_zir) {
|
||||
.no => null,
|
||||
.yes_default_path => blk: {
|
||||
if (root_src_file) |rsf| {
|
||||
if (mem.endsWith(u8, rsf, ".zir")) {
|
||||
break :blk try std.fmt.allocPrint(arena, "{}.out.zir", .{root_name});
|
||||
}
|
||||
}
|
||||
break :blk try std.fmt.allocPrint(arena, "{}.zir", .{root_name});
|
||||
},
|
||||
.yes => |p| p,
|
||||
};
|
||||
|
||||
const root_pkg = try Package.create(gpa, fs.cwd(), ".", src_path);
|
||||
defer root_pkg.destroy();
|
||||
|
||||
var module = try Module.init(gpa, .{
|
||||
.root_name = root_name,
|
||||
.target = target_info.target,
|
||||
.output_mode = output_mode,
|
||||
.root_pkg = root_pkg,
|
||||
.bin_file_dir = fs.cwd(),
|
||||
.bin_file_path = bin_path,
|
||||
.link_mode = link_mode,
|
||||
.object_format = object_format,
|
||||
.optimize_mode = build_mode,
|
||||
.keep_source_files_loaded = zir_out_path != null,
|
||||
});
|
||||
defer module.deinit();
|
||||
|
||||
const stdin = std.io.getStdIn().inStream();
|
||||
const stderr = std.io.getStdErr().outStream();
|
||||
var repl_buf: [1024]u8 = undefined;
|
||||
|
||||
try updateModule(gpa, &module, zir_out_path);
|
||||
|
||||
while (watch) {
|
||||
try stderr.print("🦎 ", .{});
|
||||
if (output_mode == .Exe) {
|
||||
try module.makeBinFileExecutable();
|
||||
}
|
||||
if (stdin.readUntilDelimiterOrEof(&repl_buf, '\n') catch |err| {
|
||||
try stderr.print("\nUnable to parse command: {}\n", .{@errorName(err)});
|
||||
continue;
|
||||
}) |line| {
|
||||
const actual_line = mem.trimRight(u8, line, "\r\n ");
|
||||
|
||||
if (mem.eql(u8, actual_line, "update")) {
|
||||
if (output_mode == .Exe) {
|
||||
try module.makeBinFileWritable();
|
||||
}
|
||||
try updateModule(gpa, &module, zir_out_path);
|
||||
} else if (mem.eql(u8, actual_line, "exit")) {
|
||||
break;
|
||||
} else if (mem.eql(u8, actual_line, "help")) {
|
||||
try stderr.writeAll(repl_help);
|
||||
} else {
|
||||
try stderr.print("unknown command: {}\n", .{actual_line});
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Run one incremental update of `module`: recompile, report all compile
/// errors to stderr, and (optionally) emit the textual ZIR to `zir_out_path`.
/// Returns normally even when there are compile errors; only I/O or OOM fail.
fn updateModule(gpa: *Allocator, module: *Module, zir_out_path: ?[]const u8) !void {
    var timer = try std.time.Timer.start();
    try module.update();
    const update_nanos = timer.read();

    var errors = try module.getAllErrorsAlloc();
    defer errors.deinit(module.gpa);

    if (errors.list.len != 0) {
        for (errors.list) |full_err_msg| {
            // 1-based line/column for human-readable diagnostics.
            std.debug.print("{}:{}:{}: error: {}\n", .{
                full_err_msg.src_path,
                full_err_msg.line + 1,
                full_err_msg.column + 1,
                full_err_msg.msg,
            });
        }
    } else {
        // Fix: no trailing "\n" — the std.log default implementation appends
        // its own newline, so the old format string produced a blank line.
        std.log.scoped(.compiler).info("Update completed in {} ms", .{update_nanos / std.time.ns_per_ms});
    }

    if (zir_out_path) |zop| {
        var new_zir_module = try zir.emit(gpa, module.*);
        defer new_zir_module.deinit(gpa);

        // Atomic write: the output file only appears once finish() succeeds.
        const baf = try io.BufferedAtomicFile.create(gpa, fs.cwd(), zop, .{});
        defer baf.destroy();

        try new_zir_module.writeToStream(gpa, baf.stream());

        try baf.finish();
    }
}
|
||||
|
||||
/// Help text printed by the watch-mode REPL when the user types "help"
/// (see the `watch` loop above, which also handles "update" and "exit").
const repl_help =
    \\Commands:
    \\  update  Detect changes to source files and update output files.
    \\    help  Print this text
    \\    exit  Quit this repl
    \\
;
|
||||
|
||||
/// Usage/help text for `zig fmt`; written to stdout by `cmdFmt` when
/// `--help` is passed.
pub const usage_fmt =
    \\usage: zig fmt [file]...
    \\
    \\   Formats the input files and modifies them in-place.
    \\   Arguments can be files or directories, which are searched
    \\   recursively.
    \\
    \\Options:
    \\   --help                 Print this help and exit
    \\   --color [auto|off|on]  Enable or disable colored error messages
    \\   --stdin                Format code from stdin; output to stdout
    \\   --check                List non-conforming files and exit with an error
    \\                          if the list is non-empty
    \\
    \\
;
|
||||
|
||||
/// Shared state for one `zig fmt` invocation, threaded through
/// `fmtPath`/`fmtPathDir`/`fmtPathFile`.
const Fmt = struct {
    /// Inodes already visited, so the same file/directory (e.g. reached via
    /// a symlink or a duplicate argument) is only processed once.
    seen: SeenMap,
    /// Set when any file failed to parse or format; `cmdFmt` exits 1 if set.
    any_error: bool,
    /// Color preference for rendering parse errors.
    color: Color,
    gpa: *Allocator,
    /// Reusable buffer holding rendered output before the atomic file write.
    out_buffer: std.ArrayList(u8),

    const SeenMap = std.AutoHashMap(fs.File.INode, void);
};
|
||||
|
||||
/// Entry point for `zig fmt`.
///
/// Parses `args` (everything after the subcommand), then either formats
/// stdin to stdout (`--stdin`) or formats the given files/directories
/// in place. Exits the process directly on usage errors, and with code 1
/// when `--check` finds non-conforming input or any file fails.
pub fn cmdFmt(gpa: *Allocator, args: []const []const u8) !void {
    const stderr_file = io.getStdErr();
    var color: Color = .Auto;
    var stdin_flag: bool = false;
    var check_flag: bool = false;
    var input_files = ArrayList([]const u8).init(gpa);
    // Fix: the list storage was previously leaked on the normal return path.
    // The elements are slices of `args` (not owned), so deinit is safe.
    defer input_files.deinit();

    {
        var i: usize = 0;
        while (i < args.len) : (i += 1) {
            const arg = args[i];
            if (mem.startsWith(u8, arg, "-")) {
                if (mem.eql(u8, arg, "--help")) {
                    const stdout = io.getStdOut().outStream();
                    try stdout.writeAll(usage_fmt);
                    process.exit(0);
                } else if (mem.eql(u8, arg, "--color")) {
                    // --color consumes the following argument.
                    if (i + 1 >= args.len) {
                        std.debug.print("expected [auto|on|off] after --color\n", .{});
                        process.exit(1);
                    }
                    i += 1;
                    const next_arg = args[i];
                    if (mem.eql(u8, next_arg, "auto")) {
                        color = .Auto;
                    } else if (mem.eql(u8, next_arg, "on")) {
                        color = .On;
                    } else if (mem.eql(u8, next_arg, "off")) {
                        color = .Off;
                    } else {
                        std.debug.print("expected [auto|on|off] after --color, found '{}'\n", .{next_arg});
                        process.exit(1);
                    }
                } else if (mem.eql(u8, arg, "--stdin")) {
                    stdin_flag = true;
                } else if (mem.eql(u8, arg, "--check")) {
                    check_flag = true;
                } else {
                    std.debug.print("unrecognized parameter: '{}'", .{arg});
                    process.exit(1);
                }
            } else {
                // Non-flag arguments are input paths.
                try input_files.append(arg);
            }
        }
    }

    if (stdin_flag) {
        if (input_files.items.len != 0) {
            std.debug.print("cannot use --stdin with positional arguments\n", .{});
            process.exit(1);
        }

        const stdin = io.getStdIn().inStream();

        const source_code = try stdin.readAllAlloc(gpa, max_src_size);
        defer gpa.free(source_code);

        const tree = std.zig.parse(gpa, source_code) catch |err| {
            std.debug.print("error parsing stdin: {}\n", .{err});
            process.exit(1);
        };
        defer tree.deinit();

        for (tree.errors) |parse_error| {
            try printErrMsgToFile(gpa, parse_error, tree, "<stdin>", stderr_file, color);
        }
        if (tree.errors.len != 0) {
            process.exit(1);
        }
        if (check_flag) {
            // --check: render to a null sink just to learn whether anything
            // would change; exit code communicates the result.
            const anything_changed = try std.zig.render(gpa, io.null_out_stream, tree);
            const code = if (anything_changed) @as(u8, 1) else @as(u8, 0);
            process.exit(code);
        }

        const stdout = io.getStdOut().outStream();
        _ = try std.zig.render(gpa, stdout, tree);
        return;
    }

    if (input_files.items.len == 0) {
        std.debug.print("expected at least one source file argument\n", .{});
        process.exit(1);
    }

    var fmt = Fmt{
        .gpa = gpa,
        .seen = Fmt.SeenMap.init(gpa),
        .any_error = false,
        .color = color,
        .out_buffer = std.ArrayList(u8).init(gpa),
    };
    defer fmt.seen.deinit();
    defer fmt.out_buffer.deinit();

    for (input_files.span()) |file_path| {
        // Get the real path here to avoid Windows failing on relative file paths with . or .. in them.
        const real_path = fs.realpathAlloc(gpa, file_path) catch |err| {
            std.debug.print("unable to open '{}': {}\n", .{ file_path, err });
            process.exit(1);
        };
        defer gpa.free(real_path);

        try fmtPath(&fmt, file_path, check_flag, fs.cwd(), real_path);
    }
    if (fmt.any_error) {
        process.exit(1);
    }
}
|
||||
|
||||
/// Error set for the `zig fmt` path walkers (`fmtPath`, `fmtPathDir`,
/// `fmtPathFile`): the union of I/O, allocation, and rename failures they
/// can hit, plus `fs.File.OpenError` for opening inputs.
const FmtError = error{
    SystemResources,
    OperationAborted,
    IoPending,
    BrokenPipe,
    Unexpected,
    WouldBlock,
    FileClosed,
    DestinationAddressRequired,
    DiskQuota,
    FileTooBig,
    InputOutput,
    NoSpaceLeft,
    AccessDenied,
    OutOfMemory,
    RenameAcrossMountPoints,
    ReadOnlyFileSystem,
    LinkQuotaExceeded,
    FileBusy,
    EndOfStream,
    Unseekable,
    NotOpenForWriting,
} || fs.File.OpenError;
|
||||
|
||||
/// Format a single path, which may be a file or a directory.
/// First tries to treat it as a file; on IsDir/AccessDenied, falls back to
/// recursing into it as a directory. Other failures are reported and
/// recorded in `fmt.any_error` instead of propagating.
fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: fs.Dir, sub_path: []const u8) FmtError!void {
    fmtPathFile(fmt, file_path, check_mode, dir, sub_path) catch |err| {
        switch (err) {
            // Directories (and platforms that report AccessDenied for them)
            // are handled by the recursive directory walker instead.
            error.IsDir, error.AccessDenied => return fmtPathDir(fmt, file_path, check_mode, dir, sub_path),
            else => {
                std.debug.print("unable to format '{}': {}\n", .{ file_path, err });
                fmt.any_error = true;
                return;
            },
        }
    };
}
|
||||
|
||||
/// Recursively format a directory: every subdirectory is descended into and
/// every `*.zig` file inside is formatted. `file_path` is the user-facing
/// path used in messages; `parent_dir`/`parent_sub_path` locate the
/// directory on disk.
fn fmtPathDir(
    fmt: *Fmt,
    file_path: []const u8,
    check_mode: bool,
    parent_dir: fs.Dir,
    parent_sub_path: []const u8,
) FmtError!void {
    var dir = try parent_dir.openDir(parent_sub_path, .{ .iterate = true });
    defer dir.close();

    // Dedup by inode so a directory reached twice (symlinks, repeated
    // arguments) is only walked once.
    const stat = try dir.stat();
    if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;

    var dir_it = dir.iterate();
    while (try dir_it.next()) |entry| {
        const is_dir = entry.kind == .Directory;
        if (is_dir or mem.endsWith(u8, entry.name, ".zig")) {
            // Joined path is only for display; the fs calls use dir + name.
            const full_path = try fs.path.join(fmt.gpa, &[_][]const u8{ file_path, entry.name });
            defer fmt.gpa.free(full_path);

            if (is_dir) {
                try fmtPathDir(fmt, full_path, check_mode, dir, entry.name);
            } else {
                // NOTE(review): a failing file aborts the rest of this
                // directory (early return) rather than continuing — confirm
                // this is intended.
                fmtPathFile(fmt, full_path, check_mode, dir, entry.name) catch |err| {
                    std.debug.print("unable to format '{}': {}\n", .{ full_path, err });
                    fmt.any_error = true;
                    return;
                };
            }
        }
    }
}
|
||||
|
||||
/// Format a single file in place (or just report it in check mode).
/// Returns error.IsDir if `sub_path` is actually a directory, so the caller
/// can fall back to directory handling.
fn fmtPathFile(
    fmt: *Fmt,
    file_path: []const u8,
    check_mode: bool,
    dir: fs.Dir,
    sub_path: []const u8,
) FmtError!void {
    const source_file = try dir.openFile(sub_path, .{});
    // The file is closed manually right after reading (to release the
    // handle before the potentially slow parse/render); the errdefer only
    // covers the early-error paths before that point.
    var file_closed = false;
    errdefer if (!file_closed) source_file.close();

    const stat = try source_file.stat();

    if (stat.kind == .Directory)
        return error.IsDir;

    const source_code = source_file.readToEndAllocOptions(
        fmt.gpa,
        max_src_size,
        stat.size,
        @alignOf(u8),
        null,
    ) catch |err| switch (err) {
        // These errors cannot occur for a regular opened file.
        error.ConnectionResetByPeer => unreachable,
        error.ConnectionTimedOut => unreachable,
        error.NotOpenForReading => unreachable,
        else => |e| return e,
    };
    source_file.close();
    file_closed = true;
    defer fmt.gpa.free(source_code);

    // Add to set after no longer possible to get error.IsDir.
    if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;

    const tree = try std.zig.parse(fmt.gpa, source_code);
    defer tree.deinit();

    for (tree.errors) |parse_error| {
        try printErrMsgToFile(fmt.gpa, parse_error, tree, file_path, std.io.getStdErr(), fmt.color);
    }
    if (tree.errors.len != 0) {
        fmt.any_error = true;
        return;
    }

    if (check_mode) {
        // Check mode: render to a null sink; only report the path.
        const anything_changed = try std.zig.render(fmt.gpa, io.null_out_stream, tree);
        if (anything_changed) {
            std.debug.print("{}\n", .{file_path});
            fmt.any_error = true;
        }
    } else {
        // As a heuristic, we make enough capacity for the same as the input source.
        try fmt.out_buffer.ensureCapacity(source_code.len);
        fmt.out_buffer.items.len = 0;
        const writer = fmt.out_buffer.writer();
        const anything_changed = try std.zig.render(fmt.gpa, writer, tree);
        if (!anything_changed)
            return; // Good thing we didn't waste any file system access on this.

        // Atomic replace, preserving the original file mode.
        var af = try dir.atomicFile(sub_path, .{ .mode = stat.mode });
        defer af.deinit();

        try af.file.writeAll(fmt.out_buffer.items);
        try af.finish();
        std.debug.print("{}\n", .{file_path});
    }
}
|
||||
|
||||
/// Render one parse error to `file` as "path:line:col: error: msg",
/// followed (only when color is enabled) by the offending source line and a
/// `~` underline.
fn printErrMsgToFile(
    gpa: *mem.Allocator,
    parse_error: ast.Error,
    tree: *ast.Tree,
    path: []const u8,
    file: fs.File,
    color: Color,
) !void {
    const color_on = switch (color) {
        .Auto => file.isTty(),
        .On => true,
        .Off => false,
    };
    // Errors currently span a single token, so first == last.
    const lok_token = parse_error.loc();
    const span_first = lok_token;
    const span_last = lok_token;

    const first_token = tree.token_locs[span_first];
    const last_token = tree.token_locs[span_last];
    const start_loc = tree.tokenLocationLoc(0, first_token);
    // NOTE(review): end_loc is computed but never used below — candidate
    // for removal, assuming tokenLocationLoc is side-effect free.
    const end_loc = tree.tokenLocationLoc(first_token.end, last_token);

    // Render the error message into a temporary buffer.
    var text_buf = std.ArrayList(u8).init(gpa);
    defer text_buf.deinit();
    const out_stream = text_buf.outStream();
    try parse_error.render(tree.token_ids, out_stream);
    const text = text_buf.span();

    const stream = file.outStream();
    try stream.print("{}:{}:{}: error: {}\n", .{ path, start_loc.line + 1, start_loc.column + 1, text });

    // The source excerpt + underline is only shown in color mode.
    if (!color_on) return;

    // Print \r and \t as one space each so that column counts line up
    for (tree.source[start_loc.line_start..start_loc.line_end]) |byte| {
        try stream.writeByte(switch (byte) {
            '\r', '\t' => ' ',
            else => byte,
        });
    }
    try stream.writeByte('\n');
    try stream.writeByteNTimes(' ', start_loc.column);
    try stream.writeByteNTimes('~', last_token.end - first_token.start);
    try stream.writeByte('\n');
}
|
||||
|
||||
/// The Zen of Zig — presumably printed by the `zig zen` subcommand
/// (the caller is outside this file section; confirm).
pub const info_zen =
    \\
    \\ * Communicate intent precisely.
    \\ * Edge cases matter.
    \\ * Favor reading code over writing code.
    \\ * Only one obvious way to do things.
    \\ * Runtime crashes are better than bugs.
    \\ * Compile errors are better than runtime crashes.
    \\ * Incremental improvements.
    \\ * Avoid local maximums.
    \\ * Reduce the amount one must remember.
    \\ * Minimize energy spent on coding style.
    \\ * Resource deallocation must succeed.
    \\ * Together we serve the users.
    \\
    \\
;
|
||||
File diff suppressed because it is too large
Load Diff
911
src/Cache.zig
Normal file
911
src/Cache.zig
Normal file
@ -0,0 +1,911 @@
|
||||
// This file is itself a struct (`@This()` below): the declarations here are
// the Cache's fields.
gpa: *Allocator,
/// Directory that holds the manifest (`<hex digest>.txt`) files.
manifest_dir: fs.Dir,
/// Base hash state shared by all manifests; copied into each one by `obtain`.
hash: HashHelper = .{},

const Cache = @This();
const std = @import("std");
const crypto = std.crypto;
const fs = std.fs;
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const fmt = std.fmt;
const Allocator = std.mem.Allocator;
|
||||
|
||||
/// Begin a new cache transaction by snapshotting the cache's base hash
/// state into a fresh `Manifest`.
/// Be sure to call `Manifest.deinit` after successful initialization.
pub fn obtain(cache: *const Cache) Manifest {
    const manifest = Manifest{
        .cache = cache,
        .hash = cache.hash,
        .manifest_file = null,
        .manifest_dirty = false,
        .hex_digest = undefined,
    };
    return manifest;
}
|
||||
|
||||
/// This is 128 bits - Even with 2^54 cache entries, the probability of a collision would be under 10^-6
pub const bin_digest_len = 16;
pub const hex_digest_len = bin_digest_len * 2;

/// Upper bound on the size of a manifest file we are willing to read (50 MiB).
const manifest_file_size_max = 50 * 1024 * 1024;

/// The type used for hashing file contents. Currently, this is SipHash128(1, 3), because it
/// provides enough collision resistance for the Manifest use cases, while being one of our
/// fastest options right now.
pub const Hasher = crypto.auth.siphash.SipHash128(1, 3);

/// Initial state, that can be copied. Keyed with all zeroes — the hash is
/// used for change detection, not against adversarial input.
pub const hasher_init: Hasher = Hasher.init(&[_]u8{0} ** Hasher.minimum_key_length);
|
||||
|
||||
/// One file input tracked by a `Manifest`.
pub const File = struct {
    /// Resolved path; owned by the allocator passed to `deinit`. Null until
    /// populated (entries parsed from an existing manifest start null).
    path: ?[]const u8,
    /// If set, the file contents (up to this size) are loaded into `contents`.
    max_file_size: ?usize,
    /// size/inode/mtime snapshot used for cheap change detection.
    stat: fs.File.Stat,
    /// Content hash of the file.
    bin_digest: [bin_digest_len]u8,
    /// Loaded file contents, only when `max_file_size` was provided.
    contents: ?[]const u8,

    /// Free owned memory and poison the struct.
    pub fn deinit(self: *File, allocator: *Allocator) void {
        if (self.path) |owned_slice| {
            allocator.free(owned_slice);
            self.path = null;
        }
        if (self.contents) |contents| {
            allocator.free(contents);
            self.contents = null;
        }
        self.* = undefined;
    }
};
|
||||
|
||||
/// Incremental hasher wrapper: feeds typed values into the manifest hash in
/// a canonical, length-prefixed form.
pub const HashHelper = struct {
    hasher: Hasher = hasher_init,

    /// Record a slice of bytes as a dependency of the process being cached
    pub fn addBytes(hh: *HashHelper, bytes: []const u8) void {
        // Hash the length first so concatenations can't collide
        // (e.g. "ab"+"c" vs "a"+"bc").
        hh.hasher.update(mem.asBytes(&bytes.len));
        hh.hasher.update(bytes);
    }

    pub fn addOptionalBytes(hh: *HashHelper, optional_bytes: ?[]const u8) void {
        // The presence bit is always hashed, so null and "" differ.
        hh.add(optional_bytes != null);
        hh.addBytes(optional_bytes orelse return);
    }

    pub fn addListOfBytes(hh: *HashHelper, list_of_bytes: []const []const u8) void {
        hh.add(list_of_bytes.len);
        for (list_of_bytes) |bytes| hh.addBytes(bytes);
    }

    pub fn addStringSet(hh: *HashHelper, hm: std.StringArrayHashMapUnmanaged(void)) void {
        // Array hash map iteration order is insertion order, so the hash is
        // order-sensitive in that sense.
        const entries = hm.items();
        hh.add(entries.len);
        for (entries) |entry| {
            hh.addBytes(entry.key);
        }
    }

    /// Convert the input value into bytes and record it as a dependency of the process being cached.
    pub fn add(hh: *HashHelper, x: anytype) void {
        switch (@TypeOf(x)) {
            std.builtin.Version => {
                hh.add(x.major);
                hh.add(x.minor);
                hh.add(x.patch);
            },
            std.Target.Os.TaggedVersionRange => {
                switch (x) {
                    .linux => |linux| {
                        hh.add(linux.range.min);
                        hh.add(linux.range.max);
                        hh.add(linux.glibc);
                    },
                    .windows => |windows| {
                        hh.add(windows.min);
                        hh.add(windows.max);
                    },
                    .semver => |semver| {
                        hh.add(semver.min);
                        hh.add(semver.max);
                    },
                    .none => {},
                }
            },
            // Only fixed-layout scalar-ish types may be hashed via raw bytes;
            // anything else is a compile error to force explicit handling.
            else => switch (@typeInfo(@TypeOf(x))) {
                .Bool, .Int, .Enum, .Array => hh.addBytes(mem.asBytes(&x)),
                else => @compileError("unable to hash type " ++ @typeName(@TypeOf(x))),
            },
        }
    }

    pub fn addOptional(hh: *HashHelper, optional: anytype) void {
        hh.add(optional != null);
        hh.add(optional orelse return);
    }

    /// Returns a hex encoded hash of the inputs, without modifying state.
    pub fn peek(hh: HashHelper) [hex_digest_len]u8 {
        var copy = hh;
        return copy.final();
    }

    /// Like `peek` but returns the raw binary digest.
    pub fn peekBin(hh: HashHelper) [bin_digest_len]u8 {
        var copy = hh;
        var bin_digest: [bin_digest_len]u8 = undefined;
        copy.hasher.final(&bin_digest);
        return bin_digest;
    }

    /// Returns a hex encoded hash of the inputs, mutating the state of the hasher.
    pub fn final(hh: *HashHelper) [hex_digest_len]u8 {
        var bin_digest: [bin_digest_len]u8 = undefined;
        hh.hasher.final(&bin_digest);

        var out_digest: [hex_digest_len]u8 = undefined;
        // Cannot fail: the output buffer is exactly 2 chars per byte.
        _ = std.fmt.bufPrint(&out_digest, "{x}", .{bin_digest}) catch unreachable;
        return out_digest;
    }
};
|
||||
|
||||
/// Holds the open manifest file; its existence represents the exclusive
/// lock obtained via `Manifest.toOwnedLock`-style ownership transfer.
pub const Lock = struct {
    manifest_file: fs.File,

    /// Close the manifest file (releasing the file lock) and poison `lock`.
    pub fn release(lock: *Lock) void {
        const manifest_file = lock.manifest_file;
        manifest_file.close();
        lock.* = undefined;
    }
};
|
||||
|
||||
/// Manifest manages project-local `zig-cache` directories.
/// This is not a general-purpose cache.
/// It is designed to be fast and simple, not to withstand attacks using specially-crafted input.
pub const Manifest = struct {
    cache: *const Cache,
    /// Current state for incremental hashing.
    hash: HashHelper,
    /// Open (and exclusively locked) manifest file; null before `hit()`.
    manifest_file: ?fs.File,
    /// True when the manifest file needs rewriting before release.
    manifest_dirty: bool,
    /// Tracked file inputs, in the order they were added.
    files: std.ArrayListUnmanaged(File) = .{},
    /// Hex digest of the initial inputs; valid after `hit()`.
    hex_digest: [hex_digest_len]u8,
|
||||
|
||||
    /// Add a file as a dependency of process being cached. When `hit` is
    /// called, the file's contents will be checked to ensure that it matches
    /// the contents from previous times.
    ///
    /// Max file size will be used to determine the amount of space to the file contents
    /// are allowed to take up in memory. If max_file_size is null, then the contents
    /// will not be loaded into memory.
    ///
    /// Returns the index of the entry in the `files` array list. You can use it
    /// to access the contents of the file after calling `hit()` like so:
    ///
    /// ```
    /// var file_contents = cache_hash.files.items[file_index].contents.?;
    /// ```
    pub fn addFile(self: *Manifest, file_path: []const u8, max_file_size: ?usize) !usize {
        // Only valid before `hit()` — the initial input set must be complete.
        assert(self.manifest_file == null);

        // Reserve first so the append below cannot fail after we allocate
        // resolved_path.
        try self.files.ensureCapacity(self.cache.gpa, self.files.items.len + 1);
        const resolved_path = try fs.path.resolve(self.cache.gpa, &[_][]const u8{file_path});

        const idx = self.files.items.len;
        self.files.addOneAssumeCapacity().* = .{
            .path = resolved_path,
            .contents = null,
            .max_file_size = max_file_size,
            .stat = undefined,
            .bin_digest = undefined,
        };

        // Only the path goes into the initial hash; contents are hashed in hit().
        self.hash.addBytes(resolved_path);

        return idx;
    }
|
||||
|
||||
pub fn addOptionalFile(self: *Manifest, optional_file_path: ?[]const u8) !void {
|
||||
self.hash.add(optional_file_path != null);
|
||||
const file_path = optional_file_path orelse return;
|
||||
_ = try self.addFile(file_path, null);
|
||||
}
|
||||
|
||||
pub fn addListOfFiles(self: *Manifest, list_of_files: []const []const u8) !void {
|
||||
self.hash.add(list_of_files.len);
|
||||
for (list_of_files) |file_path| {
|
||||
_ = try self.addFile(file_path, null);
|
||||
}
|
||||
}
|
||||
|
||||
    /// Check the cache to see if the input exists in it. If it exists, returns `true`.
    /// A hex encoding of its hash is available by calling `final`.
    ///
    /// This function will also acquire an exclusive lock to the manifest file. This means
    /// that a process holding a Manifest will block any other process attempting to
    /// acquire the lock.
    ///
    /// The lock on the manifest file is released when `deinit` is called. As another
    /// option, one may call `toOwnedLock` to obtain a smaller object which can represent
    /// the lock. `deinit` is safe to call whether or not `toOwnedLock` has been called.
    pub fn hit(self: *Manifest) !bool {
        assert(self.manifest_file == null);

        const ext = ".txt";
        var manifest_file_path: [self.hex_digest.len + ext.len]u8 = undefined;

        // Finalize the hash of the *initial* inputs; this names the manifest file.
        var bin_digest: [bin_digest_len]u8 = undefined;
        self.hash.hasher.final(&bin_digest);

        _ = std.fmt.bufPrint(&self.hex_digest, "{x}", .{bin_digest}) catch unreachable;

        // Restart the hasher seeded with the initial digest; file content
        // digests are folded in below.
        self.hash.hasher = hasher_init;
        self.hash.hasher.update(&bin_digest);

        mem.copy(u8, &manifest_file_path, &self.hex_digest);
        manifest_file_path[self.hex_digest.len..][0..ext.len].* = ext.*;

        if (self.files.items.len != 0) {
            // createFile with truncate=false: open-or-create, keep contents,
            // and take the exclusive lock.
            self.manifest_file = try self.cache.manifest_dir.createFile(&manifest_file_path, .{
                .read = true,
                .truncate = false,
                .lock = .Exclusive,
            });
        } else {
            // If there are no file inputs, we check if the manifest file exists instead of
            // comparing the hashes on the files used for the cached item
            self.manifest_file = self.cache.manifest_dir.openFile(&manifest_file_path, .{
                .read = true,
                .write = true,
                .lock = .Exclusive,
            }) catch |err| switch (err) {
                error.FileNotFound => {
                    self.manifest_dirty = true;
                    self.manifest_file = try self.cache.manifest_dir.createFile(&manifest_file_path, .{
                        .read = true,
                        .truncate = false,
                        .lock = .Exclusive,
                    });
                    return false;
                },
                else => |e| return e,
            };
        }

        const file_contents = try self.manifest_file.?.inStream().readAllAlloc(self.cache.gpa, manifest_file_size_max);
        defer self.cache.gpa.free(file_contents);

        const input_file_count = self.files.items.len;
        var any_file_changed = false;
        var line_iter = mem.tokenize(file_contents, "\n");
        var idx: usize = 0;
        // Each manifest line: "<size> <inode> <mtime_ns> <hex digest> <path>".
        while (line_iter.next()) |line| {
            defer idx += 1;

            // Lines beyond the initial inputs are post-fetch files recorded
            // by a previous run; append fresh entries for them.
            const cache_hash_file = if (idx < input_file_count) &self.files.items[idx] else blk: {
                const new = try self.files.addOne(self.cache.gpa);
                new.* = .{
                    .path = null,
                    .contents = null,
                    .max_file_size = null,
                    .stat = undefined,
                    .bin_digest = undefined,
                };
                break :blk new;
            };

            var iter = mem.tokenize(line, " ");
            const size = iter.next() orelse return error.InvalidFormat;
            const inode = iter.next() orelse return error.InvalidFormat;
            const mtime_nsec_str = iter.next() orelse return error.InvalidFormat;
            const digest_str = iter.next() orelse return error.InvalidFormat;
            const file_path = iter.rest();

            cache_hash_file.stat.size = fmt.parseInt(u64, size, 10) catch return error.InvalidFormat;
            cache_hash_file.stat.inode = fmt.parseInt(fs.File.INode, inode, 10) catch return error.InvalidFormat;
            cache_hash_file.stat.mtime = fmt.parseInt(i64, mtime_nsec_str, 10) catch return error.InvalidFormat;
            std.fmt.hexToBytes(&cache_hash_file.bin_digest, digest_str) catch return error.InvalidFormat;

            if (file_path.len == 0) {
                return error.InvalidFormat;
            }
            // An initial input's recorded path must match the manifest line.
            if (cache_hash_file.path) |p| {
                if (!mem.eql(u8, file_path, p)) {
                    return error.InvalidFormat;
                }
            }

            if (cache_hash_file.path == null) {
                cache_hash_file.path = try self.cache.gpa.dupe(u8, file_path);
            }

            const this_file = fs.cwd().openFile(cache_hash_file.path.?, .{ .read = true }) catch {
                return error.CacheUnavailable;
            };
            defer this_file.close();

            // Fast path: unchanged size+mtime+inode means we trust the
            // recorded digest without re-reading the file.
            const actual_stat = try this_file.stat();
            const size_match = actual_stat.size == cache_hash_file.stat.size;
            const mtime_match = actual_stat.mtime == cache_hash_file.stat.mtime;
            const inode_match = actual_stat.inode == cache_hash_file.stat.inode;

            if (!size_match or !mtime_match or !inode_match) {
                self.manifest_dirty = true;

                cache_hash_file.stat = actual_stat;

                // An mtime too close to "now" can't be trusted (the file may
                // still be changing within the clock granularity); zero it so
                // the slow path re-hashes next time too.
                if (isProblematicTimestamp(cache_hash_file.stat.mtime)) {
                    cache_hash_file.stat.mtime = 0;
                    cache_hash_file.stat.inode = 0;
                }

                var actual_digest: [bin_digest_len]u8 = undefined;
                try hashFile(this_file, &actual_digest);

                if (!mem.eql(u8, &cache_hash_file.bin_digest, &actual_digest)) {
                    cache_hash_file.bin_digest = actual_digest;
                    // keep going until we have the input file digests
                    any_file_changed = true;
                }
            }

            if (!any_file_changed) {
                self.hash.hasher.update(&cache_hash_file.bin_digest);
            }
        }

        if (any_file_changed) {
            // cache miss
            // keep the manifest file open
            self.unhit(bin_digest, input_file_count);
            return false;
        }

        // Manifest had fewer lines than we have inputs: hash the rest from disk.
        if (idx < input_file_count) {
            self.manifest_dirty = true;
            while (idx < input_file_count) : (idx += 1) {
                const ch_file = &self.files.items[idx];
                try self.populateFileHash(ch_file);
            }
            return false;
        }

        return true;
    }
|
||||
|
||||
    /// Roll back to the post-initial-hash state after a cache miss detected
    /// inside `hit()`: drop any entries read from the stale manifest and
    /// re-seed the hasher with the digests of the remaining (initial) files.
    pub fn unhit(self: *Manifest, bin_digest: [bin_digest_len]u8, input_file_count: usize) void {
        // Reset the hash.
        self.hash.hasher = hasher_init;
        self.hash.hasher.update(&bin_digest);

        // Remove files not in the initial hash.
        for (self.files.items[input_file_count..]) |*file| {
            file.deinit(self.cache.gpa);
        }
        self.files.shrinkRetainingCapacity(input_file_count);

        for (self.files.items) |file| {
            self.hash.hasher.update(&file.bin_digest);
        }
    }
|
||||
|
||||
    /// Stat and hash `ch_file` from disk, filling in `stat`, `bin_digest`
    /// and (when `max_file_size` is set) `contents`, then fold the digest
    /// into the manifest hash.
    fn populateFileHash(self: *Manifest, ch_file: *File) !void {
        const file = try fs.cwd().openFile(ch_file.path.?, .{});
        defer file.close();

        ch_file.stat = try file.stat();

        // See the note in hit(): a too-recent mtime cannot be trusted for
        // the fast path, so force a re-hash next time.
        if (isProblematicTimestamp(ch_file.stat.mtime)) {
            ch_file.stat.mtime = 0;
            ch_file.stat.inode = 0;
        }

        if (ch_file.max_file_size) |max_file_size| {
            if (ch_file.stat.size > max_file_size) {
                return error.FileTooBig;
            }

            const contents = try self.cache.gpa.alloc(u8, @intCast(usize, ch_file.stat.size));
            errdefer self.cache.gpa.free(contents);

            // Hash while reading from disk, to keep the contents in the cpu cache while
            // doing hashing.
            var hasher = hasher_init;
            var off: usize = 0;
            while (true) {
                // give me everything you've got, captain
                const bytes_read = try file.read(contents[off..]);
                if (bytes_read == 0) break;
                hasher.update(contents[off..][0..bytes_read]);
                off += bytes_read;
            }
            hasher.final(&ch_file.bin_digest);

            ch_file.contents = contents;
        } else {
            // Contents not wanted in memory: stream-hash only.
            try hashFile(file, &ch_file.bin_digest);
        }

        self.hash.hasher.update(&ch_file.bin_digest);
    }
|
||||
|
||||
    /// Add a file as a dependency of process being cached, after the initial hash has been
    /// calculated. This is useful for processes that don't know the all the files that
    /// are depended on ahead of time. For example, a source file that can import other files
    /// will need to be recompiled if the imported file is changed.
    ///
    /// Unlike `addFilePost`, this loads the file contents (up to
    /// `max_file_size`) and returns them.
    pub fn addFilePostFetch(self: *Manifest, file_path: []const u8, max_file_size: usize) ![]const u8 {
        // Only valid after `hit()` has opened the manifest file.
        assert(self.manifest_file != null);

        const resolved_path = try fs.path.resolve(self.cache.gpa, &[_][]const u8{file_path});
        errdefer self.cache.gpa.free(resolved_path);

        const new_ch_file = try self.files.addOne(self.cache.gpa);
        new_ch_file.* = .{
            .path = resolved_path,
            .max_file_size = max_file_size,
            .stat = undefined,
            .bin_digest = undefined,
            .contents = null,
        };
        // Undo the append if hashing fails so `files` stays consistent.
        errdefer self.files.shrinkRetainingCapacity(self.files.items.len - 1);

        try self.populateFileHash(new_ch_file);

        // Non-null because max_file_size was provided to populateFileHash.
        return new_ch_file.contents.?;
    }
|
||||
|
||||
/// Add a file as a dependency of process being cached, after the initial hash has been
|
||||
/// calculated. This is useful for processes that don't know the all the files that
|
||||
/// are depended on ahead of time. For example, a source file that can import other files
|
||||
/// will need to be recompiled if the imported file is changed.
|
||||
pub fn addFilePost(self: *Manifest, file_path: []const u8) !void {
|
||||
assert(self.manifest_file != null);
|
||||
|
||||
const resolved_path = try fs.path.resolve(self.cache.gpa, &[_][]const u8{file_path});
|
||||
errdefer self.cache.gpa.free(resolved_path);
|
||||
|
||||
const new_ch_file = try self.files.addOne(self.cache.gpa);
|
||||
new_ch_file.* = .{
|
||||
.path = resolved_path,
|
||||
.max_file_size = null,
|
||||
.stat = undefined,
|
||||
.bin_digest = undefined,
|
||||
.contents = null,
|
||||
};
|
||||
errdefer self.files.shrinkRetainingCapacity(self.files.items.len - 1);
|
||||
|
||||
try self.populateFileHash(new_ch_file);
|
||||
}
|
||||
|
||||
pub fn addDepFilePost(self: *Manifest, dir: fs.Dir, dep_file_basename: []const u8) !void {
|
||||
assert(self.manifest_file != null);
|
||||
|
||||
const dep_file_contents = try dir.readFileAlloc(self.cache.gpa, dep_file_basename, manifest_file_size_max);
|
||||
defer self.cache.gpa.free(dep_file_contents);
|
||||
|
||||
var error_buf = std.ArrayList(u8).init(self.cache.gpa);
|
||||
defer error_buf.deinit();
|
||||
|
||||
var it: @import("DepTokenizer.zig") = .{ .bytes = dep_file_contents };
|
||||
|
||||
// Skip first token: target.
|
||||
switch (it.next() orelse return) { // Empty dep file OK.
|
||||
.target, .target_must_resolve, .prereq => {},
|
||||
else => |err| {
|
||||
try err.printError(error_buf.writer());
|
||||
std.log.err("failed parsing {}: {}", .{ dep_file_basename, error_buf.items });
|
||||
return error.InvalidDepFile;
|
||||
},
|
||||
}
|
||||
// Process 0+ preqreqs.
|
||||
// Clang is invoked in single-source mode so we never get more targets.
|
||||
while (true) {
|
||||
switch (it.next() orelse return) {
|
||||
.target, .target_must_resolve => return,
|
||||
.prereq => |bytes| try self.addFilePost(bytes),
|
||||
else => |err| {
|
||||
try err.printError(error_buf.writer());
|
||||
std.log.err("failed parsing {}: {}", .{ dep_file_basename, error_buf.items });
|
||||
return error.InvalidDepFile;
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a hex encoded hash of the inputs.
|
||||
pub fn final(self: *Manifest) [hex_digest_len]u8 {
|
||||
assert(self.manifest_file != null);
|
||||
|
||||
// We don't close the manifest file yet, because we want to
|
||||
// keep it locked until the API user is done using it.
|
||||
// We also don't write out the manifest yet, because until
|
||||
// cache_release is called we still might be working on creating
|
||||
// the artifacts to cache.
|
||||
|
||||
var bin_digest: [bin_digest_len]u8 = undefined;
|
||||
self.hash.hasher.final(&bin_digest);
|
||||
|
||||
var out_digest: [hex_digest_len]u8 = undefined;
|
||||
_ = std.fmt.bufPrint(&out_digest, "{x}", .{bin_digest}) catch unreachable;
|
||||
|
||||
return out_digest;
|
||||
}
|
||||
|
||||
pub fn writeManifest(self: *Manifest) !void {
|
||||
const manifest_file = self.manifest_file.?;
|
||||
if (!self.manifest_dirty) return;
|
||||
|
||||
var contents = std.ArrayList(u8).init(self.cache.gpa);
|
||||
defer contents.deinit();
|
||||
|
||||
const writer = contents.writer();
|
||||
var encoded_digest: [hex_digest_len]u8 = undefined;
|
||||
|
||||
for (self.files.items) |file| {
|
||||
_ = std.fmt.bufPrint(&encoded_digest, "{x}", .{file.bin_digest}) catch unreachable;
|
||||
try writer.print("{d} {d} {d} {s} {s}\n", .{
|
||||
file.stat.size,
|
||||
file.stat.inode,
|
||||
file.stat.mtime,
|
||||
&encoded_digest,
|
||||
file.path,
|
||||
});
|
||||
}
|
||||
|
||||
try manifest_file.setEndPos(contents.items.len);
|
||||
try manifest_file.pwriteAll(contents.items, 0);
|
||||
self.manifest_dirty = false;
|
||||
}
|
||||
|
||||
/// Obtain only the data needed to maintain a lock on the manifest file.
|
||||
/// The `Manifest` remains safe to deinit.
|
||||
/// Don't forget to call `writeManifest` before this!
|
||||
pub fn toOwnedLock(self: *Manifest) Lock {
|
||||
const manifest_file = self.manifest_file.?;
|
||||
self.manifest_file = null;
|
||||
return Lock{ .manifest_file = manifest_file };
|
||||
}
|
||||
|
||||
/// Releases the manifest file and frees any memory the Manifest was using.
|
||||
/// `Manifest.hit` must be called first.
|
||||
/// Don't forget to call `writeManifest` before this!
|
||||
pub fn deinit(self: *Manifest) void {
|
||||
if (self.manifest_file) |file| {
|
||||
file.close();
|
||||
}
|
||||
for (self.files.items) |*file| {
|
||||
file.deinit(self.cache.gpa);
|
||||
}
|
||||
self.files.deinit(self.cache.gpa);
|
||||
}
|
||||
};
|
||||
|
||||
fn hashFile(file: fs.File, bin_digest: []u8) !void {
|
||||
var buf: [1024]u8 = undefined;
|
||||
|
||||
var hasher = hasher_init;
|
||||
while (true) {
|
||||
const bytes_read = try file.read(&buf);
|
||||
if (bytes_read == 0) break;
|
||||
hasher.update(buf[0..bytes_read]);
|
||||
}
|
||||
|
||||
hasher.final(bin_digest);
|
||||
}
|
||||
|
||||
/// If the wall clock time, rounded to the same precision as the
|
||||
/// mtime, is equal to the mtime, then we cannot rely on this mtime
|
||||
/// yet. We will instead save an mtime value that indicates the hash
|
||||
/// must be unconditionally computed.
|
||||
/// This function recognizes the precision of mtime by looking at trailing
|
||||
/// zero bits of the seconds and nanoseconds.
|
||||
fn isProblematicTimestamp(fs_clock: i128) bool {
|
||||
const wall_clock = std.time.nanoTimestamp();
|
||||
|
||||
// We have to break the nanoseconds into seconds and remainder nanoseconds
|
||||
// to detect precision of seconds, because looking at the zero bits in base
|
||||
// 2 would not detect precision of the seconds value.
|
||||
const fs_sec = @intCast(i64, @divFloor(fs_clock, std.time.ns_per_s));
|
||||
const fs_nsec = @intCast(i64, @mod(fs_clock, std.time.ns_per_s));
|
||||
var wall_sec = @intCast(i64, @divFloor(wall_clock, std.time.ns_per_s));
|
||||
var wall_nsec = @intCast(i64, @mod(wall_clock, std.time.ns_per_s));
|
||||
|
||||
// First make all the least significant zero bits in the fs_clock, also zero bits in the wall clock.
|
||||
if (fs_nsec == 0) {
|
||||
wall_nsec = 0;
|
||||
if (fs_sec == 0) {
|
||||
wall_sec = 0;
|
||||
} else {
|
||||
wall_sec &= @as(i64, -1) << @intCast(u6, @ctz(i64, fs_sec));
|
||||
}
|
||||
} else {
|
||||
wall_nsec &= @as(i64, -1) << @intCast(u6, @ctz(i64, fs_nsec));
|
||||
}
|
||||
return wall_nsec == fs_nsec and wall_sec == fs_sec;
|
||||
}
|
||||
|
||||
test "cache file and then recall it" {
|
||||
if (std.Target.current.os.tag == .wasi) {
|
||||
// https://github.com/ziglang/zig/issues/5437
|
||||
return error.SkipZigTest;
|
||||
}
|
||||
const cwd = fs.cwd();
|
||||
|
||||
const temp_file = "test.txt";
|
||||
const temp_manifest_dir = "temp_manifest_dir";
|
||||
|
||||
const ts = std.time.nanoTimestamp();
|
||||
try cwd.writeFile(temp_file, "Hello, world!\n");
|
||||
|
||||
while (isProblematicTimestamp(ts)) {
|
||||
std.time.sleep(1);
|
||||
}
|
||||
|
||||
var digest1: [hex_digest_len]u8 = undefined;
|
||||
var digest2: [hex_digest_len]u8 = undefined;
|
||||
|
||||
{
|
||||
var cache = Cache{
|
||||
.gpa = testing.allocator,
|
||||
.manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
|
||||
};
|
||||
defer cache.manifest_dir.close();
|
||||
|
||||
{
|
||||
var ch = cache.obtain();
|
||||
defer ch.deinit();
|
||||
|
||||
ch.hash.add(true);
|
||||
ch.hash.add(@as(u16, 1234));
|
||||
ch.hash.addBytes("1234");
|
||||
_ = try ch.addFile(temp_file, null);
|
||||
|
||||
// There should be nothing in the cache
|
||||
testing.expectEqual(false, try ch.hit());
|
||||
|
||||
digest1 = ch.final();
|
||||
try ch.writeManifest();
|
||||
}
|
||||
{
|
||||
var ch = cache.obtain();
|
||||
defer ch.deinit();
|
||||
|
||||
ch.hash.add(true);
|
||||
ch.hash.add(@as(u16, 1234));
|
||||
ch.hash.addBytes("1234");
|
||||
_ = try ch.addFile(temp_file, null);
|
||||
|
||||
// Cache hit! We just "built" the same file
|
||||
testing.expect(try ch.hit());
|
||||
digest2 = ch.final();
|
||||
|
||||
try ch.writeManifest();
|
||||
}
|
||||
|
||||
testing.expectEqual(digest1, digest2);
|
||||
}
|
||||
|
||||
try cwd.deleteTree(temp_manifest_dir);
|
||||
try cwd.deleteFile(temp_file);
|
||||
}
|
||||
|
||||
test "give problematic timestamp" {
|
||||
var fs_clock = std.time.nanoTimestamp();
|
||||
// to make it problematic, we make it only accurate to the second
|
||||
fs_clock = @divTrunc(fs_clock, std.time.ns_per_s);
|
||||
fs_clock *= std.time.ns_per_s;
|
||||
testing.expect(isProblematicTimestamp(fs_clock));
|
||||
}
|
||||
|
||||
test "give nonproblematic timestamp" {
|
||||
testing.expect(!isProblematicTimestamp(std.time.nanoTimestamp() - std.time.ns_per_s));
|
||||
}
|
||||
|
||||
test "check that changing a file makes cache fail" {
|
||||
if (std.Target.current.os.tag == .wasi) {
|
||||
// https://github.com/ziglang/zig/issues/5437
|
||||
return error.SkipZigTest;
|
||||
}
|
||||
const cwd = fs.cwd();
|
||||
|
||||
const temp_file = "cache_hash_change_file_test.txt";
|
||||
const temp_manifest_dir = "cache_hash_change_file_manifest_dir";
|
||||
const original_temp_file_contents = "Hello, world!\n";
|
||||
const updated_temp_file_contents = "Hello, world; but updated!\n";
|
||||
|
||||
try cwd.deleteTree(temp_manifest_dir);
|
||||
try cwd.deleteTree(temp_file);
|
||||
|
||||
const ts = std.time.nanoTimestamp();
|
||||
try cwd.writeFile(temp_file, original_temp_file_contents);
|
||||
|
||||
while (isProblematicTimestamp(ts)) {
|
||||
std.time.sleep(1);
|
||||
}
|
||||
|
||||
var digest1: [hex_digest_len]u8 = undefined;
|
||||
var digest2: [hex_digest_len]u8 = undefined;
|
||||
|
||||
{
|
||||
var cache = Cache{
|
||||
.gpa = testing.allocator,
|
||||
.manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
|
||||
};
|
||||
defer cache.manifest_dir.close();
|
||||
|
||||
{
|
||||
var ch = cache.obtain();
|
||||
defer ch.deinit();
|
||||
|
||||
ch.hash.addBytes("1234");
|
||||
const temp_file_idx = try ch.addFile(temp_file, 100);
|
||||
|
||||
// There should be nothing in the cache
|
||||
testing.expectEqual(false, try ch.hit());
|
||||
|
||||
testing.expect(mem.eql(u8, original_temp_file_contents, ch.files.items[temp_file_idx].contents.?));
|
||||
|
||||
digest1 = ch.final();
|
||||
|
||||
try ch.writeManifest();
|
||||
}
|
||||
|
||||
try cwd.writeFile(temp_file, updated_temp_file_contents);
|
||||
|
||||
{
|
||||
var ch = cache.obtain();
|
||||
defer ch.deinit();
|
||||
|
||||
ch.hash.addBytes("1234");
|
||||
const temp_file_idx = try ch.addFile(temp_file, 100);
|
||||
|
||||
// A file that we depend on has been updated, so the cache should not contain an entry for it
|
||||
testing.expectEqual(false, try ch.hit());
|
||||
|
||||
// The cache system does not keep the contents of re-hashed input files.
|
||||
testing.expect(ch.files.items[temp_file_idx].contents == null);
|
||||
|
||||
digest2 = ch.final();
|
||||
|
||||
try ch.writeManifest();
|
||||
}
|
||||
|
||||
testing.expect(!mem.eql(u8, digest1[0..], digest2[0..]));
|
||||
}
|
||||
|
||||
try cwd.deleteTree(temp_manifest_dir);
|
||||
try cwd.deleteTree(temp_file);
|
||||
}
|
||||
|
||||
test "no file inputs" {
|
||||
if (std.Target.current.os.tag == .wasi) {
|
||||
// https://github.com/ziglang/zig/issues/5437
|
||||
return error.SkipZigTest;
|
||||
}
|
||||
const cwd = fs.cwd();
|
||||
const temp_manifest_dir = "no_file_inputs_manifest_dir";
|
||||
defer cwd.deleteTree(temp_manifest_dir) catch {};
|
||||
|
||||
var digest1: [hex_digest_len]u8 = undefined;
|
||||
var digest2: [hex_digest_len]u8 = undefined;
|
||||
|
||||
var cache = Cache{
|
||||
.gpa = testing.allocator,
|
||||
.manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
|
||||
};
|
||||
defer cache.manifest_dir.close();
|
||||
|
||||
{
|
||||
var ch = cache.obtain();
|
||||
defer ch.deinit();
|
||||
|
||||
ch.hash.addBytes("1234");
|
||||
|
||||
// There should be nothing in the cache
|
||||
testing.expectEqual(false, try ch.hit());
|
||||
|
||||
digest1 = ch.final();
|
||||
|
||||
try ch.writeManifest();
|
||||
}
|
||||
{
|
||||
var ch = cache.obtain();
|
||||
defer ch.deinit();
|
||||
|
||||
ch.hash.addBytes("1234");
|
||||
|
||||
testing.expect(try ch.hit());
|
||||
digest2 = ch.final();
|
||||
try ch.writeManifest();
|
||||
}
|
||||
|
||||
testing.expectEqual(digest1, digest2);
|
||||
}
|
||||
|
||||
test "Manifest with files added after initial hash work" {
|
||||
if (std.Target.current.os.tag == .wasi) {
|
||||
// https://github.com/ziglang/zig/issues/5437
|
||||
return error.SkipZigTest;
|
||||
}
|
||||
const cwd = fs.cwd();
|
||||
|
||||
const temp_file1 = "cache_hash_post_file_test1.txt";
|
||||
const temp_file2 = "cache_hash_post_file_test2.txt";
|
||||
const temp_manifest_dir = "cache_hash_post_file_manifest_dir";
|
||||
|
||||
const ts1 = std.time.nanoTimestamp();
|
||||
try cwd.writeFile(temp_file1, "Hello, world!\n");
|
||||
try cwd.writeFile(temp_file2, "Hello world the second!\n");
|
||||
|
||||
while (isProblematicTimestamp(ts1)) {
|
||||
std.time.sleep(1);
|
||||
}
|
||||
|
||||
var digest1: [hex_digest_len]u8 = undefined;
|
||||
var digest2: [hex_digest_len]u8 = undefined;
|
||||
var digest3: [hex_digest_len]u8 = undefined;
|
||||
|
||||
{
|
||||
var cache = Cache{
|
||||
.gpa = testing.allocator,
|
||||
.manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
|
||||
};
|
||||
defer cache.manifest_dir.close();
|
||||
|
||||
{
|
||||
var ch = cache.obtain();
|
||||
defer ch.deinit();
|
||||
|
||||
ch.hash.addBytes("1234");
|
||||
_ = try ch.addFile(temp_file1, null);
|
||||
|
||||
// There should be nothing in the cache
|
||||
testing.expectEqual(false, try ch.hit());
|
||||
|
||||
_ = try ch.addFilePost(temp_file2);
|
||||
|
||||
digest1 = ch.final();
|
||||
try ch.writeManifest();
|
||||
}
|
||||
{
|
||||
var ch = cache.obtain();
|
||||
defer ch.deinit();
|
||||
|
||||
ch.hash.addBytes("1234");
|
||||
_ = try ch.addFile(temp_file1, null);
|
||||
|
||||
testing.expect(try ch.hit());
|
||||
digest2 = ch.final();
|
||||
|
||||
try ch.writeManifest();
|
||||
}
|
||||
testing.expect(mem.eql(u8, &digest1, &digest2));
|
||||
|
||||
// Modify the file added after initial hash
|
||||
const ts2 = std.time.nanoTimestamp();
|
||||
try cwd.writeFile(temp_file2, "Hello world the second, updated\n");
|
||||
|
||||
while (isProblematicTimestamp(ts2)) {
|
||||
std.time.sleep(1);
|
||||
}
|
||||
|
||||
{
|
||||
var ch = cache.obtain();
|
||||
defer ch.deinit();
|
||||
|
||||
ch.hash.addBytes("1234");
|
||||
_ = try ch.addFile(temp_file1, null);
|
||||
|
||||
// A file that we depend on has been updated, so the cache should not contain an entry for it
|
||||
testing.expectEqual(false, try ch.hit());
|
||||
|
||||
_ = try ch.addFilePost(temp_file2);
|
||||
|
||||
digest3 = ch.final();
|
||||
|
||||
try ch.writeManifest();
|
||||
}
|
||||
|
||||
testing.expect(!mem.eql(u8, &digest1, &digest3));
|
||||
}
|
||||
|
||||
try cwd.deleteTree(temp_manifest_dir);
|
||||
try cwd.deleteFile(temp_file1);
|
||||
try cwd.deleteFile(temp_file2);
|
||||
}
|
||||
2882
src/Compilation.zig
Normal file
2882
src/Compilation.zig
Normal file
File diff suppressed because it is too large
Load Diff
@ -1,361 +1,405 @@
|
||||
const Tokenizer = @This();
|
||||
|
||||
index: usize = 0,
|
||||
bytes: []const u8,
|
||||
state: State = .lhs,
|
||||
|
||||
const std = @import("std");
|
||||
const testing = std.testing;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
pub const Tokenizer = struct {
|
||||
arena: std.heap.ArenaAllocator,
|
||||
index: usize,
|
||||
bytes: []const u8,
|
||||
error_text: []const u8,
|
||||
state: State,
|
||||
|
||||
pub fn init(allocator: *std.mem.Allocator, bytes: []const u8) Tokenizer {
|
||||
return Tokenizer{
|
||||
.arena = std.heap.ArenaAllocator.init(allocator),
|
||||
.index = 0,
|
||||
.bytes = bytes,
|
||||
.error_text = "",
|
||||
.state = State{ .lhs = {} },
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Tokenizer) void {
|
||||
self.arena.deinit();
|
||||
}
|
||||
|
||||
pub fn next(self: *Tokenizer) Error!?Token {
|
||||
pub fn next(self: *Tokenizer) ?Token {
|
||||
var start = self.index;
|
||||
var must_resolve = false;
|
||||
while (self.index < self.bytes.len) {
|
||||
const char = self.bytes[self.index];
|
||||
while (true) {
|
||||
switch (self.state) {
|
||||
.lhs => switch (char) {
|
||||
'\t', '\n', '\r', ' ' => {
|
||||
// silently ignore whitespace
|
||||
break; // advance
|
||||
self.index += 1;
|
||||
},
|
||||
else => {
|
||||
self.state = State{ .target = try std.ArrayListSentineled(u8, 0).initSize(&self.arena.allocator, 0) };
|
||||
start = self.index;
|
||||
self.state = .target;
|
||||
},
|
||||
},
|
||||
.target => |*target| switch (char) {
|
||||
.target => switch (char) {
|
||||
'\t', '\n', '\r', ' ' => {
|
||||
return self.errorIllegalChar(self.index, char, "invalid target", .{});
|
||||
return errorIllegalChar(.invalid_target, self.index, char);
|
||||
},
|
||||
'$' => {
|
||||
self.state = State{ .target_dollar_sign = target.* };
|
||||
break; // advance
|
||||
self.state = .target_dollar_sign;
|
||||
self.index += 1;
|
||||
},
|
||||
'\\' => {
|
||||
self.state = State{ .target_reverse_solidus = target.* };
|
||||
break; // advance
|
||||
self.state = .target_reverse_solidus;
|
||||
self.index += 1;
|
||||
},
|
||||
':' => {
|
||||
self.state = State{ .target_colon = target.* };
|
||||
break; // advance
|
||||
self.state = .target_colon;
|
||||
self.index += 1;
|
||||
},
|
||||
else => {
|
||||
try target.append(char);
|
||||
break; // advance
|
||||
self.index += 1;
|
||||
},
|
||||
},
|
||||
.target_reverse_solidus => |*target| switch (char) {
|
||||
.target_reverse_solidus => switch (char) {
|
||||
'\t', '\n', '\r' => {
|
||||
return self.errorIllegalChar(self.index, char, "bad target escape", .{});
|
||||
return errorIllegalChar(.bad_target_escape, self.index, char);
|
||||
},
|
||||
' ', '#', '\\' => {
|
||||
try target.append(char);
|
||||
self.state = State{ .target = target.* };
|
||||
break; // advance
|
||||
must_resolve = true;
|
||||
self.state = .target;
|
||||
self.index += 1;
|
||||
},
|
||||
'$' => {
|
||||
try target.appendSlice(self.bytes[self.index - 1 .. self.index]);
|
||||
self.state = State{ .target_dollar_sign = target.* };
|
||||
break; // advance
|
||||
self.state = .target_dollar_sign;
|
||||
self.index += 1;
|
||||
},
|
||||
else => {
|
||||
try target.appendSlice(self.bytes[self.index - 1 .. self.index + 1]);
|
||||
self.state = State{ .target = target.* };
|
||||
break; // advance
|
||||
self.state = .target;
|
||||
self.index += 1;
|
||||
},
|
||||
},
|
||||
.target_dollar_sign => |*target| switch (char) {
|
||||
.target_dollar_sign => switch (char) {
|
||||
'$' => {
|
||||
try target.append(char);
|
||||
self.state = State{ .target = target.* };
|
||||
break; // advance
|
||||
must_resolve = true;
|
||||
self.state = .target;
|
||||
self.index += 1;
|
||||
},
|
||||
else => {
|
||||
return self.errorIllegalChar(self.index, char, "expecting '$'", .{});
|
||||
return errorIllegalChar(.expected_dollar_sign, self.index, char);
|
||||
},
|
||||
},
|
||||
.target_colon => |*target| switch (char) {
|
||||
.target_colon => switch (char) {
|
||||
'\n', '\r' => {
|
||||
const bytes = target.span();
|
||||
const bytes = self.bytes[start .. self.index - 1];
|
||||
if (bytes.len != 0) {
|
||||
self.state = State{ .lhs = {} };
|
||||
return Token{ .id = .target, .bytes = bytes };
|
||||
self.state = .lhs;
|
||||
return finishTarget(must_resolve, bytes);
|
||||
}
|
||||
// silently ignore null target
|
||||
self.state = State{ .lhs = {} };
|
||||
continue;
|
||||
self.state = .lhs;
|
||||
},
|
||||
'\\' => {
|
||||
self.state = State{ .target_colon_reverse_solidus = target.* };
|
||||
break; // advance
|
||||
self.state = .target_colon_reverse_solidus;
|
||||
self.index += 1;
|
||||
},
|
||||
else => {
|
||||
const bytes = target.span();
|
||||
const bytes = self.bytes[start .. self.index - 1];
|
||||
if (bytes.len != 0) {
|
||||
self.state = State{ .rhs = {} };
|
||||
return Token{ .id = .target, .bytes = bytes };
|
||||
self.state = .rhs;
|
||||
return finishTarget(must_resolve, bytes);
|
||||
}
|
||||
// silently ignore null target
|
||||
self.state = State{ .lhs = {} };
|
||||
continue;
|
||||
self.state = .lhs;
|
||||
},
|
||||
},
|
||||
.target_colon_reverse_solidus => |*target| switch (char) {
|
||||
.target_colon_reverse_solidus => switch (char) {
|
||||
'\n', '\r' => {
|
||||
const bytes = target.span();
|
||||
const bytes = self.bytes[start .. self.index - 2];
|
||||
if (bytes.len != 0) {
|
||||
self.state = State{ .lhs = {} };
|
||||
return Token{ .id = .target, .bytes = bytes };
|
||||
self.state = .lhs;
|
||||
return finishTarget(must_resolve, bytes);
|
||||
}
|
||||
// silently ignore null target
|
||||
self.state = State{ .lhs = {} };
|
||||
continue;
|
||||
self.state = .lhs;
|
||||
},
|
||||
else => {
|
||||
try target.appendSlice(self.bytes[self.index - 2 .. self.index + 1]);
|
||||
self.state = State{ .target = target.* };
|
||||
break;
|
||||
self.state = .target;
|
||||
},
|
||||
},
|
||||
.rhs => switch (char) {
|
||||
'\t', ' ' => {
|
||||
// silently ignore horizontal whitespace
|
||||
break; // advance
|
||||
self.index += 1;
|
||||
},
|
||||
'\n', '\r' => {
|
||||
self.state = State{ .lhs = {} };
|
||||
continue;
|
||||
self.state = .lhs;
|
||||
},
|
||||
'\\' => {
|
||||
self.state = State{ .rhs_continuation = {} };
|
||||
break; // advance
|
||||
self.state = .rhs_continuation;
|
||||
self.index += 1;
|
||||
},
|
||||
'"' => {
|
||||
self.state = State{ .prereq_quote = try std.ArrayListSentineled(u8, 0).initSize(&self.arena.allocator, 0) };
|
||||
break; // advance
|
||||
self.state = .prereq_quote;
|
||||
self.index += 1;
|
||||
start = self.index;
|
||||
},
|
||||
else => {
|
||||
self.state = State{ .prereq = try std.ArrayListSentineled(u8, 0).initSize(&self.arena.allocator, 0) };
|
||||
start = self.index;
|
||||
self.state = .prereq;
|
||||
},
|
||||
},
|
||||
.rhs_continuation => switch (char) {
|
||||
'\n' => {
|
||||
self.state = State{ .rhs = {} };
|
||||
break; // advance
|
||||
self.state = .rhs;
|
||||
self.index += 1;
|
||||
},
|
||||
'\r' => {
|
||||
self.state = State{ .rhs_continuation_linefeed = {} };
|
||||
break; // advance
|
||||
self.state = .rhs_continuation_linefeed;
|
||||
self.index += 1;
|
||||
},
|
||||
else => {
|
||||
return self.errorIllegalChar(self.index, char, "continuation expecting end-of-line", .{});
|
||||
return errorIllegalChar(.continuation_eol, self.index, char);
|
||||
},
|
||||
},
|
||||
.rhs_continuation_linefeed => switch (char) {
|
||||
'\n' => {
|
||||
self.state = State{ .rhs = {} };
|
||||
break; // advance
|
||||
},
|
||||
else => {
|
||||
return self.errorIllegalChar(self.index, char, "continuation expecting end-of-line", .{});
|
||||
},
|
||||
},
|
||||
.prereq_quote => |*prereq| switch (char) {
|
||||
'"' => {
|
||||
const bytes = prereq.span();
|
||||
self.state = .rhs;
|
||||
self.index += 1;
|
||||
self.state = State{ .rhs = {} };
|
||||
return Token{ .id = .prereq, .bytes = bytes };
|
||||
},
|
||||
else => {
|
||||
try prereq.append(char);
|
||||
break; // advance
|
||||
return errorIllegalChar(.continuation_eol, self.index, char);
|
||||
},
|
||||
},
|
||||
.prereq => |*prereq| switch (char) {
|
||||
.prereq_quote => switch (char) {
|
||||
'"' => {
|
||||
self.index += 1;
|
||||
self.state = .rhs;
|
||||
return Token{ .prereq = self.bytes[start .. self.index - 1] };
|
||||
},
|
||||
else => {
|
||||
self.index += 1;
|
||||
},
|
||||
},
|
||||
.prereq => switch (char) {
|
||||
'\t', ' ' => {
|
||||
const bytes = prereq.span();
|
||||
self.state = State{ .rhs = {} };
|
||||
return Token{ .id = .prereq, .bytes = bytes };
|
||||
self.state = .rhs;
|
||||
return Token{ .prereq = self.bytes[start..self.index] };
|
||||
},
|
||||
'\n', '\r' => {
|
||||
const bytes = prereq.span();
|
||||
self.state = State{ .lhs = {} };
|
||||
return Token{ .id = .prereq, .bytes = bytes };
|
||||
self.state = .lhs;
|
||||
return Token{ .prereq = self.bytes[start..self.index] };
|
||||
},
|
||||
'\\' => {
|
||||
self.state = State{ .prereq_continuation = prereq.* };
|
||||
break; // advance
|
||||
self.state = .prereq_continuation;
|
||||
self.index += 1;
|
||||
},
|
||||
else => {
|
||||
try prereq.append(char);
|
||||
break; // advance
|
||||
},
|
||||
},
|
||||
.prereq_continuation => |*prereq| switch (char) {
|
||||
'\n' => {
|
||||
const bytes = prereq.span();
|
||||
self.index += 1;
|
||||
self.state = State{ .rhs = {} };
|
||||
return Token{ .id = .prereq, .bytes = bytes };
|
||||
},
|
||||
},
|
||||
.prereq_continuation => switch (char) {
|
||||
'\n' => {
|
||||
self.index += 1;
|
||||
self.state = .rhs;
|
||||
return Token{ .prereq = self.bytes[start .. self.index - 2] };
|
||||
},
|
||||
'\r' => {
|
||||
self.state = State{ .prereq_continuation_linefeed = prereq.* };
|
||||
break; // advance
|
||||
self.state = .prereq_continuation_linefeed;
|
||||
self.index += 1;
|
||||
},
|
||||
else => {
|
||||
// not continuation
|
||||
try prereq.appendSlice(self.bytes[self.index - 1 .. self.index + 1]);
|
||||
self.state = State{ .prereq = prereq.* };
|
||||
break; // advance
|
||||
},
|
||||
},
|
||||
.prereq_continuation_linefeed => |prereq| switch (char) {
|
||||
'\n' => {
|
||||
const bytes = prereq.span();
|
||||
self.state = .prereq;
|
||||
self.index += 1;
|
||||
self.state = State{ .rhs = {} };
|
||||
return Token{ .id = .prereq, .bytes = bytes };
|
||||
},
|
||||
},
|
||||
.prereq_continuation_linefeed => switch (char) {
|
||||
'\n' => {
|
||||
self.index += 1;
|
||||
self.state = .rhs;
|
||||
return Token{ .prereq = self.bytes[start .. self.index - 1] };
|
||||
},
|
||||
else => {
|
||||
return self.errorIllegalChar(self.index, char, "continuation expecting end-of-line", .{});
|
||||
return errorIllegalChar(.continuation_eol, self.index, char);
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
self.index += 1;
|
||||
}
|
||||
|
||||
// eof, handle maybe incomplete token
|
||||
if (self.index == 0) return null;
|
||||
const idx = self.index - 1;
|
||||
} else {
|
||||
switch (self.state) {
|
||||
.lhs,
|
||||
.rhs,
|
||||
.rhs_continuation,
|
||||
.rhs_continuation_linefeed,
|
||||
=> {},
|
||||
.target => |target| {
|
||||
return self.errorPosition(idx, target.span(), "incomplete target", .{});
|
||||
=> return null,
|
||||
.target => {
|
||||
return errorPosition(.incomplete_target, start, self.bytes[start..]);
|
||||
},
|
||||
.target_reverse_solidus,
|
||||
.target_dollar_sign,
|
||||
=> {
|
||||
const index = self.index - 1;
|
||||
return self.errorIllegalChar(idx, self.bytes[idx], "incomplete escape", .{});
|
||||
const idx = self.index - 1;
|
||||
return errorIllegalChar(.incomplete_escape, idx, self.bytes[idx]);
|
||||
},
|
||||
.target_colon => |target| {
|
||||
const bytes = target.span();
|
||||
.target_colon => {
|
||||
const bytes = self.bytes[start .. self.index - 1];
|
||||
if (bytes.len != 0) {
|
||||
self.index += 1;
|
||||
self.state = State{ .rhs = {} };
|
||||
return Token{ .id = .target, .bytes = bytes };
|
||||
self.state = .rhs;
|
||||
return finishTarget(must_resolve, bytes);
|
||||
}
|
||||
// silently ignore null target
|
||||
self.state = State{ .lhs = {} };
|
||||
},
|
||||
.target_colon_reverse_solidus => |target| {
|
||||
const bytes = target.span();
|
||||
if (bytes.len != 0) {
|
||||
self.index += 1;
|
||||
self.state = State{ .rhs = {} };
|
||||
return Token{ .id = .target, .bytes = bytes };
|
||||
}
|
||||
// silently ignore null target
|
||||
self.state = State{ .lhs = {} };
|
||||
},
|
||||
.prereq_quote => |prereq| {
|
||||
return self.errorPosition(idx, prereq.span(), "incomplete quoted prerequisite", .{});
|
||||
},
|
||||
.prereq => |prereq| {
|
||||
const bytes = prereq.span();
|
||||
self.state = State{ .lhs = {} };
|
||||
return Token{ .id = .prereq, .bytes = bytes };
|
||||
},
|
||||
.prereq_continuation => |prereq| {
|
||||
const bytes = prereq.span();
|
||||
self.state = State{ .lhs = {} };
|
||||
return Token{ .id = .prereq, .bytes = bytes };
|
||||
},
|
||||
.prereq_continuation_linefeed => |prereq| {
|
||||
const bytes = prereq.span();
|
||||
self.state = State{ .lhs = {} };
|
||||
return Token{ .id = .prereq, .bytes = bytes };
|
||||
},
|
||||
}
|
||||
self.state = .lhs;
|
||||
return null;
|
||||
},
|
||||
.target_colon_reverse_solidus => {
|
||||
const bytes = self.bytes[start .. self.index - 2];
|
||||
if (bytes.len != 0) {
|
||||
self.index += 1;
|
||||
self.state = .rhs;
|
||||
return finishTarget(must_resolve, bytes);
|
||||
}
|
||||
// silently ignore null target
|
||||
self.state = .lhs;
|
||||
return null;
|
||||
},
|
||||
.prereq_quote => {
|
||||
return errorPosition(.incomplete_quoted_prerequisite, start, self.bytes[start..]);
|
||||
},
|
||||
.prereq => {
|
||||
self.state = .lhs;
|
||||
return Token{ .prereq = self.bytes[start..] };
|
||||
},
|
||||
.prereq_continuation => {
|
||||
self.state = .lhs;
|
||||
return Token{ .prereq = self.bytes[start .. self.index - 1] };
|
||||
},
|
||||
.prereq_continuation_linefeed => {
|
||||
self.state = .lhs;
|
||||
return Token{ .prereq = self.bytes[start .. self.index - 2] };
|
||||
},
|
||||
}
|
||||
}
|
||||
unreachable;
|
||||
}
|
||||
|
||||
fn errorf(self: *Tokenizer, comptime fmt: []const u8, args: anytype) Error {
|
||||
self.error_text = try std.fmt.allocPrintZ(&self.arena.allocator, fmt, args);
|
||||
return Error.InvalidInput;
|
||||
fn errorPosition(comptime id: @TagType(Token), index: usize, bytes: []const u8) Token {
|
||||
return @unionInit(Token, @tagName(id), .{ .index = index, .bytes = bytes });
|
||||
}
|
||||
|
||||
fn errorPosition(self: *Tokenizer, position: usize, bytes: []const u8, comptime fmt: []const u8, args: anytype) Error {
|
||||
var buffer = try std.ArrayListSentineled(u8, 0).initSize(&self.arena.allocator, 0);
|
||||
try buffer.outStream().print(fmt, args);
|
||||
try buffer.appendSlice(" '");
|
||||
var out = makeOutput(std.ArrayListSentineled(u8, 0).appendSlice, &buffer);
|
||||
try printCharValues(&out, bytes);
|
||||
try buffer.appendSlice("'");
|
||||
try buffer.outStream().print(" at position {}", .{position - (bytes.len - 1)});
|
||||
self.error_text = buffer.span();
|
||||
return Error.InvalidInput;
|
||||
fn errorIllegalChar(comptime id: @TagType(Token), index: usize, char: u8) Token {
|
||||
return @unionInit(Token, @tagName(id), .{ .index = index, .char = char });
|
||||
}
|
||||
|
||||
fn errorIllegalChar(self: *Tokenizer, position: usize, char: u8, comptime fmt: []const u8, args: anytype) Error {
|
||||
var buffer = try std.ArrayListSentineled(u8, 0).initSize(&self.arena.allocator, 0);
|
||||
try buffer.appendSlice("illegal char ");
|
||||
try printUnderstandableChar(&buffer, char);
|
||||
try buffer.outStream().print(" at position {}", .{position});
|
||||
if (fmt.len != 0) try buffer.outStream().print(": " ++ fmt, args);
|
||||
self.error_text = buffer.span();
|
||||
return Error.InvalidInput;
|
||||
fn finishTarget(must_resolve: bool, bytes: []const u8) Token {
|
||||
return if (must_resolve)
|
||||
.{ .target_must_resolve = bytes }
|
||||
else
|
||||
.{ .target = bytes };
|
||||
}
|
||||
|
||||
const Error = error{
|
||||
OutOfMemory,
|
||||
InvalidInput,
|
||||
};
|
||||
|
||||
const State = union(enum) {
|
||||
lhs: void,
|
||||
target: std.ArrayListSentineled(u8, 0),
|
||||
target_reverse_solidus: std.ArrayListSentineled(u8, 0),
|
||||
target_dollar_sign: std.ArrayListSentineled(u8, 0),
|
||||
target_colon: std.ArrayListSentineled(u8, 0),
|
||||
target_colon_reverse_solidus: std.ArrayListSentineled(u8, 0),
|
||||
rhs: void,
|
||||
rhs_continuation: void,
|
||||
rhs_continuation_linefeed: void,
|
||||
prereq_quote: std.ArrayListSentineled(u8, 0),
|
||||
prereq: std.ArrayListSentineled(u8, 0),
|
||||
prereq_continuation: std.ArrayListSentineled(u8, 0),
|
||||
prereq_continuation_linefeed: std.ArrayListSentineled(u8, 0),
|
||||
};
|
||||
|
||||
const Token = struct {
|
||||
id: ID,
|
||||
bytes: []const u8,
|
||||
|
||||
const ID = enum {
|
||||
const State = enum {
|
||||
lhs,
|
||||
target,
|
||||
target_reverse_solidus,
|
||||
target_dollar_sign,
|
||||
target_colon,
|
||||
target_colon_reverse_solidus,
|
||||
rhs,
|
||||
rhs_continuation,
|
||||
rhs_continuation_linefeed,
|
||||
prereq_quote,
|
||||
prereq,
|
||||
prereq_continuation,
|
||||
prereq_continuation_linefeed,
|
||||
};
|
||||
|
||||
pub const Token = union(enum) {
|
||||
target: []const u8,
|
||||
target_must_resolve: []const u8,
|
||||
prereq: []const u8,
|
||||
|
||||
incomplete_quoted_prerequisite: IndexAndBytes,
|
||||
incomplete_target: IndexAndBytes,
|
||||
|
||||
invalid_target: IndexAndChar,
|
||||
bad_target_escape: IndexAndChar,
|
||||
expected_dollar_sign: IndexAndChar,
|
||||
continuation_eol: IndexAndChar,
|
||||
incomplete_escape: IndexAndChar,
|
||||
|
||||
pub const IndexAndChar = struct {
|
||||
index: usize,
|
||||
char: u8,
|
||||
};
|
||||
|
||||
pub const IndexAndBytes = struct {
|
||||
index: usize,
|
||||
bytes: []const u8,
|
||||
};
|
||||
|
||||
/// Resolve escapes in target. Only valid with .target_must_resolve.
|
||||
pub fn resolve(self: Token, writer: anytype) @TypeOf(writer).Error!void {
|
||||
const bytes = self.target_must_resolve; // resolve called on incorrect token
|
||||
|
||||
var state: enum { start, escape, dollar } = .start;
|
||||
for (bytes) |c| {
|
||||
switch (state) {
|
||||
.start => {
|
||||
switch (c) {
|
||||
'\\' => state = .escape,
|
||||
'$' => state = .dollar,
|
||||
else => try writer.writeByte(c),
|
||||
}
|
||||
},
|
||||
.escape => {
|
||||
switch (c) {
|
||||
' ', '#', '\\' => {},
|
||||
'$' => {
|
||||
try writer.writeByte('\\');
|
||||
state = .dollar;
|
||||
continue;
|
||||
},
|
||||
else => try writer.writeByte('\\'),
|
||||
}
|
||||
try writer.writeByte(c);
|
||||
state = .start;
|
||||
},
|
||||
.dollar => {
|
||||
try writer.writeByte('$');
|
||||
switch (c) {
|
||||
'$' => {},
|
||||
else => try writer.writeByte(c),
|
||||
}
|
||||
state = .start;
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn printError(self: Token, writer: anytype) @TypeOf(writer).Error!void {
|
||||
switch (self) {
|
||||
.target, .target_must_resolve, .prereq => unreachable, // not an error
|
||||
.incomplete_quoted_prerequisite,
|
||||
.incomplete_target,
|
||||
=> |index_and_bytes| {
|
||||
try writer.print("{} '", .{self.errStr()});
|
||||
if (self == .incomplete_target) {
|
||||
const tmp = Token{ .target_must_resolve = index_and_bytes.bytes };
|
||||
try tmp.resolve(writer);
|
||||
} else {
|
||||
try printCharValues(writer, index_and_bytes.bytes);
|
||||
}
|
||||
try writer.print("' at position {}", .{index_and_bytes.index});
|
||||
},
|
||||
.invalid_target,
|
||||
.bad_target_escape,
|
||||
.expected_dollar_sign,
|
||||
.continuation_eol,
|
||||
.incomplete_escape,
|
||||
=> |index_and_char| {
|
||||
try writer.writeAll("illegal char ");
|
||||
try printUnderstandableChar(writer, index_and_char.char);
|
||||
try writer.print(" at position {}: {}", .{ index_and_char.index, self.errStr() });
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn errStr(self: Token) []const u8 {
|
||||
return switch (self) {
|
||||
.target, .target_must_resolve, .prereq => unreachable, // not an error
|
||||
.incomplete_quoted_prerequisite => "incomplete quoted prerequisite",
|
||||
.incomplete_target => "incomplete target",
|
||||
.invalid_target => "invalid target",
|
||||
.bad_target_escape => "bad target escape",
|
||||
.expected_dollar_sign => "expecting '$'",
|
||||
.continuation_eol => "continuation expecting end-of-line",
|
||||
.incomplete_escape => "incomplete escape",
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
test "empty file" {
|
||||
@ -750,16 +794,16 @@ test "error incomplete target" {
|
||||
);
|
||||
|
||||
try depTokenizer("\\ foo.o",
|
||||
\\ERROR: incomplete target ' foo.o' at position 1
|
||||
\\ERROR: incomplete target ' foo.o' at position 0
|
||||
);
|
||||
try depTokenizer("\\#foo.o",
|
||||
\\ERROR: incomplete target '#foo.o' at position 1
|
||||
\\ERROR: incomplete target '#foo.o' at position 0
|
||||
);
|
||||
try depTokenizer("\\\\foo.o",
|
||||
\\ERROR: incomplete target '\foo.o' at position 1
|
||||
\\ERROR: incomplete target '\foo.o' at position 0
|
||||
);
|
||||
try depTokenizer("$$foo.o",
|
||||
\\ERROR: incomplete target '$foo.o' at position 1
|
||||
\\ERROR: incomplete target '$foo.o' at position 0
|
||||
);
|
||||
}
|
||||
|
||||
@ -836,33 +880,40 @@ test "error prereq - continuation expecting end-of-line" {
|
||||
|
||||
// - tokenize input, emit textual representation, and compare to expect
|
||||
fn depTokenizer(input: []const u8, expect: []const u8) !void {
|
||||
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
|
||||
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
|
||||
const arena = &arena_allocator.allocator;
|
||||
defer arena_allocator.deinit();
|
||||
|
||||
var it = Tokenizer.init(arena, input);
|
||||
var it: Tokenizer = .{ .bytes = input };
|
||||
var buffer = try std.ArrayListSentineled(u8, 0).initSize(arena, 0);
|
||||
var resolve_buf = std.ArrayList(u8).init(arena);
|
||||
var i: usize = 0;
|
||||
while (true) {
|
||||
const r = it.next() catch |err| {
|
||||
switch (err) {
|
||||
Tokenizer.Error.InvalidInput => {
|
||||
while (it.next()) |token| {
|
||||
if (i != 0) try buffer.appendSlice("\n");
|
||||
try buffer.appendSlice("ERROR: ");
|
||||
try buffer.appendSlice(it.error_text);
|
||||
},
|
||||
else => return err,
|
||||
}
|
||||
break;
|
||||
};
|
||||
const token = r orelse break;
|
||||
if (i != 0) try buffer.appendSlice("\n");
|
||||
try buffer.appendSlice(@tagName(token.id));
|
||||
switch (token) {
|
||||
.target, .prereq => |bytes| {
|
||||
try buffer.appendSlice(@tagName(token));
|
||||
try buffer.appendSlice(" = {");
|
||||
for (token.bytes) |b| {
|
||||
for (bytes) |b| {
|
||||
try buffer.append(printable_char_tab[b]);
|
||||
}
|
||||
try buffer.appendSlice("}");
|
||||
},
|
||||
.target_must_resolve => {
|
||||
try buffer.appendSlice("target = {");
|
||||
try token.resolve(resolve_buf.writer());
|
||||
for (resolve_buf.items) |b| {
|
||||
try buffer.append(printable_char_tab[b]);
|
||||
}
|
||||
resolve_buf.items.len = 0;
|
||||
try buffer.appendSlice("}");
|
||||
},
|
||||
else => {
|
||||
try buffer.appendSlice("ERROR: ");
|
||||
try token.printError(buffer.outStream());
|
||||
break;
|
||||
},
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
const got: []const u8 = buffer.span();
|
||||
@ -872,13 +923,13 @@ fn depTokenizer(input: []const u8, expect: []const u8) !void {
|
||||
return;
|
||||
}
|
||||
|
||||
var out = makeOutput(std.fs.File.write, try std.io.getStdErr());
|
||||
const out = std.io.getStdErr().writer();
|
||||
|
||||
try out.write("\n");
|
||||
try printSection(&out, "<<<< input", input);
|
||||
try printSection(&out, "==== expect", expect);
|
||||
try printSection(&out, ">>>> got", got);
|
||||
try printRuler(&out);
|
||||
try out.writeAll("\n");
|
||||
try printSection(out, "<<<< input", input);
|
||||
try printSection(out, "==== expect", expect);
|
||||
try printSection(out, ">>>> got", got);
|
||||
try printRuler(out);
|
||||
|
||||
testing.expect(false);
|
||||
}
|
||||
@ -887,29 +938,29 @@ fn printSection(out: anytype, label: []const u8, bytes: []const u8) !void {
|
||||
try printLabel(out, label, bytes);
|
||||
try hexDump(out, bytes);
|
||||
try printRuler(out);
|
||||
try out.write(bytes);
|
||||
try out.write("\n");
|
||||
try out.writeAll(bytes);
|
||||
try out.writeAll("\n");
|
||||
}
|
||||
|
||||
fn printLabel(out: anytype, label: []const u8, bytes: []const u8) !void {
|
||||
var buf: [80]u8 = undefined;
|
||||
var text = try std.fmt.bufPrint(buf[0..], "{} {} bytes ", .{ label, bytes.len });
|
||||
try out.write(text);
|
||||
try out.writeAll(text);
|
||||
var i: usize = text.len;
|
||||
const end = 79;
|
||||
while (i < 79) : (i += 1) {
|
||||
try out.write([_]u8{label[0]});
|
||||
try out.writeAll(&[_]u8{label[0]});
|
||||
}
|
||||
try out.write("\n");
|
||||
try out.writeAll("\n");
|
||||
}
|
||||
|
||||
fn printRuler(out: anytype) !void {
|
||||
var i: usize = 0;
|
||||
const end = 79;
|
||||
while (i < 79) : (i += 1) {
|
||||
try out.write("-");
|
||||
try out.writeAll("-");
|
||||
}
|
||||
try out.write("\n");
|
||||
try out.writeAll("\n");
|
||||
}
|
||||
|
||||
fn hexDump(out: anytype, bytes: []const u8) !void {
|
||||
@ -924,116 +975,90 @@ fn hexDump(out: anytype, bytes: []const u8) !void {
|
||||
const n = bytes.len & 0x0f;
|
||||
if (n > 0) {
|
||||
try printDecValue(out, offset, 8);
|
||||
try out.write(":");
|
||||
try out.write(" ");
|
||||
try out.writeAll(":");
|
||||
try out.writeAll(" ");
|
||||
var end1 = std.math.min(offset + n, offset + 8);
|
||||
for (bytes[offset..end1]) |b| {
|
||||
try out.write(" ");
|
||||
try out.writeAll(" ");
|
||||
try printHexValue(out, b, 2);
|
||||
}
|
||||
var end2 = offset + n;
|
||||
if (end2 > end1) {
|
||||
try out.write(" ");
|
||||
try out.writeAll(" ");
|
||||
for (bytes[end1..end2]) |b| {
|
||||
try out.write(" ");
|
||||
try out.writeAll(" ");
|
||||
try printHexValue(out, b, 2);
|
||||
}
|
||||
}
|
||||
const short = 16 - n;
|
||||
var i: usize = 0;
|
||||
while (i < short) : (i += 1) {
|
||||
try out.write(" ");
|
||||
try out.writeAll(" ");
|
||||
}
|
||||
if (end2 > end1) {
|
||||
try out.write(" |");
|
||||
try out.writeAll(" |");
|
||||
} else {
|
||||
try out.write(" |");
|
||||
try out.writeAll(" |");
|
||||
}
|
||||
try printCharValues(out, bytes[offset..end2]);
|
||||
try out.write("|\n");
|
||||
try out.writeAll("|\n");
|
||||
offset += n;
|
||||
}
|
||||
|
||||
try printDecValue(out, offset, 8);
|
||||
try out.write(":");
|
||||
try out.write("\n");
|
||||
try out.writeAll(":");
|
||||
try out.writeAll("\n");
|
||||
}
|
||||
|
||||
fn hexDump16(out: anytype, offset: usize, bytes: []const u8) !void {
|
||||
try printDecValue(out, offset, 8);
|
||||
try out.write(":");
|
||||
try out.write(" ");
|
||||
try out.writeAll(":");
|
||||
try out.writeAll(" ");
|
||||
for (bytes[0..8]) |b| {
|
||||
try out.write(" ");
|
||||
try out.writeAll(" ");
|
||||
try printHexValue(out, b, 2);
|
||||
}
|
||||
try out.write(" ");
|
||||
try out.writeAll(" ");
|
||||
for (bytes[8..16]) |b| {
|
||||
try out.write(" ");
|
||||
try out.writeAll(" ");
|
||||
try printHexValue(out, b, 2);
|
||||
}
|
||||
try out.write(" |");
|
||||
try out.writeAll(" |");
|
||||
try printCharValues(out, bytes);
|
||||
try out.write("|\n");
|
||||
try out.writeAll("|\n");
|
||||
}
|
||||
|
||||
fn printDecValue(out: anytype, value: u64, width: u8) !void {
|
||||
var buffer: [20]u8 = undefined;
|
||||
const len = std.fmt.formatIntBuf(buffer[0..], value, 10, false, width);
|
||||
try out.write(buffer[0..len]);
|
||||
const len = std.fmt.formatIntBuf(buffer[0..], value, 10, false, .{ .width = width, .fill = '0' });
|
||||
try out.writeAll(buffer[0..len]);
|
||||
}
|
||||
|
||||
fn printHexValue(out: anytype, value: u64, width: u8) !void {
|
||||
var buffer: [16]u8 = undefined;
|
||||
const len = std.fmt.formatIntBuf(buffer[0..], value, 16, false, width);
|
||||
try out.write(buffer[0..len]);
|
||||
const len = std.fmt.formatIntBuf(buffer[0..], value, 16, false, .{ .width = width, .fill = '0' });
|
||||
try out.writeAll(buffer[0..len]);
|
||||
}
|
||||
|
||||
fn printCharValues(out: anytype, bytes: []const u8) !void {
|
||||
for (bytes) |b| {
|
||||
try out.write(&[_]u8{printable_char_tab[b]});
|
||||
try out.writeAll(&[_]u8{printable_char_tab[b]});
|
||||
}
|
||||
}
|
||||
|
||||
fn printUnderstandableChar(buffer: *std.ArrayListSentineled(u8, 0), char: u8) !void {
|
||||
fn printUnderstandableChar(out: anytype, char: u8) !void {
|
||||
if (!std.ascii.isPrint(char) or char == ' ') {
|
||||
try buffer.outStream().print("\\x{X:2}", .{char});
|
||||
try out.print("\\x{X:0>2}", .{char});
|
||||
} else {
|
||||
try buffer.appendSlice("'");
|
||||
try buffer.append(printable_char_tab[char]);
|
||||
try buffer.appendSlice("'");
|
||||
try out.print("'{c}'", .{printable_char_tab[char]});
|
||||
}
|
||||
}
|
||||
|
||||
// zig fmt: off
|
||||
const printable_char_tab: []const u8 =
|
||||
const printable_char_tab: [256]u8 = (
|
||||
"................................ !\"#$%&'()*+,-./0123456789:;<=>?" ++
|
||||
"@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~." ++
|
||||
"................................................................" ++
|
||||
"................................................................";
|
||||
// zig fmt: on
|
||||
comptime {
|
||||
std.debug.assert(printable_char_tab.len == 256);
|
||||
}
|
||||
"................................................................"
|
||||
).*;
|
||||
|
||||
// Make an output var that wraps a context and output function.
|
||||
// output: must be a function that takes a `self` idiom parameter
|
||||
// and a bytes parameter
|
||||
// context: must be that self
|
||||
fn makeOutput(comptime output: anytype, context: anytype) Output(output, @TypeOf(context)) {
|
||||
return Output(output, @TypeOf(context)){
|
||||
.context = context,
|
||||
};
|
||||
}
|
||||
|
||||
fn Output(comptime output_func: anytype, comptime Context: type) type {
|
||||
return struct {
|
||||
context: Context,
|
||||
|
||||
pub const output = output_func;
|
||||
|
||||
fn write(self: @This(), bytes: []const u8) !void {
|
||||
try output_func(self.context, bytes);
|
||||
}
|
||||
};
|
||||
}
|
||||
@ -1,4 +1,6 @@
|
||||
const Module = @This();
|
||||
const std = @import("std");
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const mem = std.mem;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const ArrayListUnmanaged = std.ArrayListUnmanaged;
|
||||
@ -14,25 +16,24 @@ const Package = @import("Package.zig");
|
||||
const link = @import("link.zig");
|
||||
const ir = @import("ir.zig");
|
||||
const zir = @import("zir.zig");
|
||||
const Module = @This();
|
||||
const Inst = ir.Inst;
|
||||
const Body = ir.Body;
|
||||
const ast = std.zig.ast;
|
||||
const trace = @import("tracy.zig").trace;
|
||||
const liveness = @import("liveness.zig");
|
||||
const astgen = @import("astgen.zig");
|
||||
const zir_sema = @import("zir_sema.zig");
|
||||
|
||||
/// General-purpose allocator. Used for both temporary and long-term storage.
|
||||
gpa: *Allocator,
|
||||
/// Pointer to externally managed resource.
|
||||
comp: *Compilation,
|
||||
|
||||
/// Where our incremental compilation metadata serialization will go.
|
||||
zig_cache_artifact_directory: Compilation.Directory,
|
||||
/// Pointer to externally managed resource. `null` if there is no zig file being compiled.
|
||||
root_pkg: *Package,
|
||||
/// Module owns this resource.
|
||||
/// The `Scope` is either a `Scope.ZIRModule` or `Scope.File`.
|
||||
root_scope: *Scope,
|
||||
bin_file: *link.File,
|
||||
bin_file_dir: std.fs.Dir,
|
||||
bin_file_path: []const u8,
|
||||
/// It's rare for a decl to be exported, so we save memory by having a sparse map of
|
||||
/// Decl pointers to details about them being exported.
|
||||
/// The Export memory is owned by the `export_owners` table; the slice itself is owned by this table.
|
||||
@ -47,55 +48,42 @@ symbol_exports: std.StringArrayHashMapUnmanaged(*Export) = .{},
|
||||
export_owners: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
|
||||
/// Maps fully qualified namespaced names to the Decl struct for them.
|
||||
decl_table: std.ArrayHashMapUnmanaged(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql, false) = .{},
|
||||
|
||||
link_error_flags: link.File.ErrorFlags = .{},
|
||||
|
||||
work_queue: std.fifo.LinearFifo(WorkItem, .Dynamic),
|
||||
|
||||
/// We optimize memory usage for a compilation with no compile errors by storing the
|
||||
/// error messages and mapping outside of `Decl`.
|
||||
/// The ErrorMsg memory is owned by the decl, using Module's allocator.
|
||||
/// The ErrorMsg memory is owned by the decl, using Module's general purpose allocator.
|
||||
/// Note that a Decl can succeed but the Fn it represents can fail. In this case,
|
||||
/// a Decl can have a failed_decls entry but have analysis status of success.
|
||||
failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{},
|
||||
failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *Compilation.ErrorMsg) = .{},
|
||||
/// Using a map here for consistency with the other fields here.
|
||||
/// The ErrorMsg memory is owned by the `Scope`, using Module's allocator.
|
||||
failed_files: std.AutoArrayHashMapUnmanaged(*Scope, *ErrorMsg) = .{},
|
||||
/// The ErrorMsg memory is owned by the `Scope`, using Module's general purpose allocator.
|
||||
failed_files: std.AutoArrayHashMapUnmanaged(*Scope, *Compilation.ErrorMsg) = .{},
|
||||
/// Using a map here for consistency with the other fields here.
|
||||
/// The ErrorMsg memory is owned by the `Export`, using Module's allocator.
|
||||
failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *ErrorMsg) = .{},
|
||||
/// The ErrorMsg memory is owned by the `Export`, using Module's general purpose allocator.
|
||||
failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *Compilation.ErrorMsg) = .{},
|
||||
|
||||
next_anon_name_index: usize = 0,
|
||||
|
||||
/// Candidates for deletion. After a semantic analysis update completes, this list
|
||||
/// contains Decls that need to be deleted if they end up having no references to them.
|
||||
deletion_set: ArrayListUnmanaged(*Decl) = .{},
|
||||
|
||||
/// Error tags and their values, tag names are duped with mod.gpa.
|
||||
global_error_set: std.StringHashMapUnmanaged(u16) = .{},
|
||||
|
||||
/// Incrementing integer used to compare against the corresponding Decl
|
||||
/// field to determine whether a Decl's status applies to an ongoing update, or a
|
||||
/// previous analysis.
|
||||
generation: u32 = 0,
|
||||
|
||||
next_anon_name_index: usize = 0,
|
||||
|
||||
/// Candidates for deletion. After a semantic analysis update completes, this list
|
||||
/// contains Decls that need to be deleted if they end up having no references to them.
|
||||
deletion_set: std.ArrayListUnmanaged(*Decl) = .{},
|
||||
|
||||
/// Owned by Module.
|
||||
root_name: []u8,
|
||||
keep_source_files_loaded: bool,
|
||||
|
||||
/// Error tags and their values, tag names are duped with mod.gpa.
|
||||
global_error_set: std.StringHashMapUnmanaged(u16) = .{},
|
||||
|
||||
pub const InnerError = error{ OutOfMemory, AnalysisFail };
|
||||
|
||||
const WorkItem = union(enum) {
|
||||
/// Write the machine code for a Decl to the output file.
|
||||
codegen_decl: *Decl,
|
||||
/// The Decl needs to be analyzed and possibly export itself.
|
||||
/// It may have already be analyzed, or it may have been determined
|
||||
/// to be outdated; in this case perform semantic analysis again.
|
||||
analyze_decl: *Decl,
|
||||
/// The source file containing the Decl has been updated, and so the
|
||||
/// Decl may need its line number information updated in the debug info.
|
||||
update_line_number: *Decl,
|
||||
};
|
||||
stage1_flags: packed struct {
|
||||
have_winmain: bool = false,
|
||||
have_wwinmain: bool = false,
|
||||
have_winmain_crt_startup: bool = false,
|
||||
have_wwinmain_crt_startup: bool = false,
|
||||
have_dllmain_crt_startup: bool = false,
|
||||
have_c_main: bool = false,
|
||||
reserved: u2 = 0,
|
||||
} = .{},
|
||||
|
||||
pub const Export = struct {
|
||||
options: std.builtin.ExportOptions,
|
||||
@ -622,7 +610,7 @@ pub const Scope = struct {
|
||||
pub fn getSource(self: *File, module: *Module) ![:0]const u8 {
|
||||
switch (self.source) {
|
||||
.unloaded => {
|
||||
const source = try module.root_pkg.root_src_dir.readFileAllocOptions(
|
||||
const source = try module.root_pkg.root_src_directory.handle.readFileAllocOptions(
|
||||
module.gpa,
|
||||
self.sub_file_path,
|
||||
std.math.maxInt(u32),
|
||||
@ -720,7 +708,7 @@ pub const Scope = struct {
|
||||
pub fn getSource(self: *ZIRModule, module: *Module) ![:0]const u8 {
|
||||
switch (self.source) {
|
||||
.unloaded => {
|
||||
const source = try module.root_pkg.root_src_dir.readFileAllocOptions(
|
||||
const source = try module.root_pkg.root_src_directory.handle.readFileAllocOptions(
|
||||
module.gpa,
|
||||
self.sub_file_path,
|
||||
std.math.maxInt(u32),
|
||||
@ -818,117 +806,14 @@ pub const Scope = struct {
|
||||
};
|
||||
};
|
||||
|
||||
pub const AllErrors = struct {
|
||||
arena: std.heap.ArenaAllocator.State,
|
||||
list: []const Message,
|
||||
|
||||
pub const Message = struct {
|
||||
src_path: []const u8,
|
||||
line: usize,
|
||||
column: usize,
|
||||
byte_offset: usize,
|
||||
msg: []const u8,
|
||||
};
|
||||
|
||||
pub fn deinit(self: *AllErrors, gpa: *Allocator) void {
|
||||
self.arena.promote(gpa).deinit();
|
||||
}
|
||||
|
||||
fn add(
|
||||
arena: *std.heap.ArenaAllocator,
|
||||
errors: *std.ArrayList(Message),
|
||||
sub_file_path: []const u8,
|
||||
source: []const u8,
|
||||
simple_err_msg: ErrorMsg,
|
||||
) !void {
|
||||
const loc = std.zig.findLineColumn(source, simple_err_msg.byte_offset);
|
||||
try errors.append(.{
|
||||
.src_path = try arena.allocator.dupe(u8, sub_file_path),
|
||||
.msg = try arena.allocator.dupe(u8, simple_err_msg.msg),
|
||||
.byte_offset = simple_err_msg.byte_offset,
|
||||
.line = loc.line,
|
||||
.column = loc.column,
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
pub const InitOptions = struct {
|
||||
target: std.Target,
|
||||
root_name: []const u8,
|
||||
root_pkg: *Package,
|
||||
output_mode: std.builtin.OutputMode,
|
||||
bin_file_dir: ?std.fs.Dir = null,
|
||||
bin_file_path: []const u8,
|
||||
link_mode: ?std.builtin.LinkMode = null,
|
||||
object_format: ?std.builtin.ObjectFormat = null,
|
||||
optimize_mode: std.builtin.Mode = .Debug,
|
||||
keep_source_files_loaded: bool = false,
|
||||
};
|
||||
|
||||
pub fn init(gpa: *Allocator, options: InitOptions) !Module {
|
||||
const root_name = try gpa.dupe(u8, options.root_name);
|
||||
errdefer gpa.free(root_name);
|
||||
|
||||
const bin_file_dir = options.bin_file_dir orelse std.fs.cwd();
|
||||
const bin_file = try link.File.openPath(gpa, bin_file_dir, options.bin_file_path, .{
|
||||
.root_name = root_name,
|
||||
.root_pkg = options.root_pkg,
|
||||
.target = options.target,
|
||||
.output_mode = options.output_mode,
|
||||
.link_mode = options.link_mode orelse .Static,
|
||||
.object_format = options.object_format orelse options.target.getObjectFormat(),
|
||||
.optimize_mode = options.optimize_mode,
|
||||
});
|
||||
errdefer bin_file.destroy();
|
||||
|
||||
const root_scope = blk: {
|
||||
if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zig")) {
|
||||
const root_scope = try gpa.create(Scope.File);
|
||||
root_scope.* = .{
|
||||
.sub_file_path = options.root_pkg.root_src_path,
|
||||
.source = .{ .unloaded = {} },
|
||||
.contents = .{ .not_available = {} },
|
||||
.status = .never_loaded,
|
||||
.root_container = .{
|
||||
.file_scope = root_scope,
|
||||
.decls = .{},
|
||||
},
|
||||
};
|
||||
break :blk &root_scope.base;
|
||||
} else if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zir")) {
|
||||
const root_scope = try gpa.create(Scope.ZIRModule);
|
||||
root_scope.* = .{
|
||||
.sub_file_path = options.root_pkg.root_src_path,
|
||||
.source = .{ .unloaded = {} },
|
||||
.contents = .{ .not_available = {} },
|
||||
.status = .never_loaded,
|
||||
.decls = .{},
|
||||
};
|
||||
break :blk &root_scope.base;
|
||||
} else {
|
||||
unreachable;
|
||||
}
|
||||
};
|
||||
|
||||
return Module{
|
||||
.gpa = gpa,
|
||||
.root_name = root_name,
|
||||
.root_pkg = options.root_pkg,
|
||||
.root_scope = root_scope,
|
||||
.bin_file_dir = bin_file_dir,
|
||||
.bin_file_path = options.bin_file_path,
|
||||
.bin_file = bin_file,
|
||||
.work_queue = std.fifo.LinearFifo(WorkItem, .Dynamic).init(gpa),
|
||||
.keep_source_files_loaded = options.keep_source_files_loaded,
|
||||
};
|
||||
}
|
||||
pub const InnerError = error{ OutOfMemory, AnalysisFail };
|
||||
|
||||
pub fn deinit(self: *Module) void {
|
||||
self.bin_file.destroy();
|
||||
const gpa = self.gpa;
|
||||
self.gpa.free(self.root_name);
|
||||
|
||||
self.zig_cache_artifact_directory.handle.close();
|
||||
|
||||
self.deletion_set.deinit(gpa);
|
||||
self.work_queue.deinit();
|
||||
|
||||
for (self.decl_table.items()) |entry| {
|
||||
entry.value.destroy(gpa);
|
||||
@ -969,7 +854,6 @@ pub fn deinit(self: *Module) void {
|
||||
gpa.free(entry.key);
|
||||
}
|
||||
self.global_error_set.deinit(gpa);
|
||||
self.* = undefined;
|
||||
}
|
||||
|
||||
fn freeExportList(gpa: *Allocator, export_list: []*Export) void {
|
||||
@ -980,204 +864,6 @@ fn freeExportList(gpa: *Allocator, export_list: []*Export) void {
|
||||
gpa.free(export_list);
|
||||
}
|
||||
|
||||
pub fn target(self: Module) std.Target {
|
||||
return self.bin_file.options.target;
|
||||
}
|
||||
|
||||
pub fn optimizeMode(self: Module) std.builtin.Mode {
|
||||
return self.bin_file.options.optimize_mode;
|
||||
}
|
||||
|
||||
/// Detect changes to source files, perform semantic analysis, and update the output files.
|
||||
pub fn update(self: *Module) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
self.generation += 1;
|
||||
|
||||
// TODO Use the cache hash file system to detect which source files changed.
|
||||
// Until then we simulate a full cache miss. Source files could have been loaded for any reason;
|
||||
// to force a refresh we unload now.
|
||||
if (self.root_scope.cast(Scope.File)) |zig_file| {
|
||||
zig_file.unload(self.gpa);
|
||||
self.analyzeContainer(&zig_file.root_container) catch |err| switch (err) {
|
||||
error.AnalysisFail => {
|
||||
assert(self.totalErrorCount() != 0);
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
} else if (self.root_scope.cast(Scope.ZIRModule)) |zir_module| {
|
||||
zir_module.unload(self.gpa);
|
||||
self.analyzeRootZIRModule(zir_module) catch |err| switch (err) {
|
||||
error.AnalysisFail => {
|
||||
assert(self.totalErrorCount() != 0);
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
}
|
||||
|
||||
try self.performAllTheWork();
|
||||
|
||||
// Process the deletion set.
|
||||
while (self.deletion_set.popOrNull()) |decl| {
|
||||
if (decl.dependants.items().len != 0) {
|
||||
decl.deletion_flag = false;
|
||||
continue;
|
||||
}
|
||||
try self.deleteDecl(decl);
|
||||
}
|
||||
|
||||
// This is needed before reading the error flags.
|
||||
try self.bin_file.flush(self);
|
||||
|
||||
self.link_error_flags = self.bin_file.errorFlags();
|
||||
|
||||
// If there are any errors, we anticipate the source files being loaded
|
||||
// to report error messages. Otherwise we unload all source files to save memory.
|
||||
if (self.totalErrorCount() == 0 and !self.keep_source_files_loaded) {
|
||||
self.root_scope.unload(self.gpa);
|
||||
}
|
||||
}
|
||||
|
||||
/// Having the file open for writing is problematic as far as executing the
|
||||
/// binary is concerned. This will remove the write flag, or close the file,
|
||||
/// or whatever is needed so that it can be executed.
|
||||
/// After this, one must call` makeFileWritable` before calling `update`.
|
||||
pub fn makeBinFileExecutable(self: *Module) !void {
|
||||
return self.bin_file.makeExecutable();
|
||||
}
|
||||
|
||||
pub fn makeBinFileWritable(self: *Module) !void {
|
||||
return self.bin_file.makeWritable(self.bin_file_dir, self.bin_file_path);
|
||||
}
|
||||
|
||||
pub fn totalErrorCount(self: *Module) usize {
|
||||
const total = self.failed_decls.items().len +
|
||||
self.failed_files.items().len +
|
||||
self.failed_exports.items().len;
|
||||
return if (total == 0) @boolToInt(self.link_error_flags.no_entry_point_found) else total;
|
||||
}
|
||||
|
||||
pub fn getAllErrorsAlloc(self: *Module) !AllErrors {
|
||||
var arena = std.heap.ArenaAllocator.init(self.gpa);
|
||||
errdefer arena.deinit();
|
||||
|
||||
var errors = std.ArrayList(AllErrors.Message).init(self.gpa);
|
||||
defer errors.deinit();
|
||||
|
||||
for (self.failed_files.items()) |entry| {
|
||||
const scope = entry.key;
|
||||
const err_msg = entry.value;
|
||||
const source = try scope.getSource(self);
|
||||
try AllErrors.add(&arena, &errors, scope.subFilePath(), source, err_msg.*);
|
||||
}
|
||||
for (self.failed_decls.items()) |entry| {
|
||||
const decl = entry.key;
|
||||
const err_msg = entry.value;
|
||||
const source = try decl.scope.getSource(self);
|
||||
try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*);
|
||||
}
|
||||
for (self.failed_exports.items()) |entry| {
|
||||
const decl = entry.key.owner_decl;
|
||||
const err_msg = entry.value;
|
||||
const source = try decl.scope.getSource(self);
|
||||
try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*);
|
||||
}
|
||||
|
||||
if (errors.items.len == 0 and self.link_error_flags.no_entry_point_found) {
|
||||
try errors.append(.{
|
||||
.src_path = self.root_pkg.root_src_path,
|
||||
.line = 0,
|
||||
.column = 0,
|
||||
.byte_offset = 0,
|
||||
.msg = try std.fmt.allocPrint(&arena.allocator, "no entry point found", .{}),
|
||||
});
|
||||
}
|
||||
|
||||
assert(errors.items.len == self.totalErrorCount());
|
||||
|
||||
return AllErrors{
|
||||
.list = try arena.allocator.dupe(AllErrors.Message, errors.items),
|
||||
.arena = arena.state,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
|
||||
while (self.work_queue.readItem()) |work_item| switch (work_item) {
|
||||
.codegen_decl => |decl| switch (decl.analysis) {
|
||||
.unreferenced => unreachable,
|
||||
.in_progress => unreachable,
|
||||
.outdated => unreachable,
|
||||
|
||||
.sema_failure,
|
||||
.codegen_failure,
|
||||
.dependency_failure,
|
||||
.sema_failure_retryable,
|
||||
=> continue,
|
||||
|
||||
.complete, .codegen_failure_retryable => {
|
||||
if (decl.typed_value.most_recent.typed_value.val.cast(Value.Payload.Function)) |payload| {
|
||||
switch (payload.func.analysis) {
|
||||
.queued => self.analyzeFnBody(decl, payload.func) catch |err| switch (err) {
|
||||
error.AnalysisFail => {
|
||||
assert(payload.func.analysis != .in_progress);
|
||||
continue;
|
||||
},
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
},
|
||||
.in_progress => unreachable,
|
||||
.sema_failure, .dependency_failure => continue,
|
||||
.success => {},
|
||||
}
|
||||
// Here we tack on additional allocations to the Decl's arena. The allocations are
|
||||
// lifetime annotations in the ZIR.
|
||||
var decl_arena = decl.typed_value.most_recent.arena.?.promote(self.gpa);
|
||||
defer decl.typed_value.most_recent.arena.?.* = decl_arena.state;
|
||||
log.debug("analyze liveness of {}\n", .{decl.name});
|
||||
try liveness.analyze(self.gpa, &decl_arena.allocator, payload.func.analysis.success);
|
||||
}
|
||||
|
||||
assert(decl.typed_value.most_recent.typed_value.ty.hasCodeGenBits());
|
||||
|
||||
self.bin_file.updateDecl(self, decl) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.AnalysisFail => {
|
||||
decl.analysis = .dependency_failure;
|
||||
},
|
||||
else => {
|
||||
try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
|
||||
self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
|
||||
self.gpa,
|
||||
decl.src(),
|
||||
"unable to codegen: {}",
|
||||
.{@errorName(err)},
|
||||
));
|
||||
decl.analysis = .codegen_failure_retryable;
|
||||
},
|
||||
};
|
||||
},
|
||||
},
|
||||
.analyze_decl => |decl| {
|
||||
self.ensureDeclAnalyzed(decl) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.AnalysisFail => continue,
|
||||
};
|
||||
},
|
||||
.update_line_number => |decl| {
|
||||
self.bin_file.updateDeclLineNumber(self, decl) catch |err| {
|
||||
try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
|
||||
self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
|
||||
self.gpa,
|
||||
decl.src(),
|
||||
"unable to update line number: {}",
|
||||
.{@errorName(err)},
|
||||
));
|
||||
decl.analysis = .codegen_failure_retryable;
|
||||
};
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
pub fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
@ -1227,7 +913,7 @@ pub fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void {
|
||||
error.AnalysisFail => return error.AnalysisFail,
|
||||
else => {
|
||||
try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
|
||||
self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
|
||||
self.failed_decls.putAssumeCapacityNoClobber(decl, try Compilation.ErrorMsg.create(
|
||||
self.gpa,
|
||||
decl.src(),
|
||||
"unable to analyze: {}",
|
||||
@ -1457,10 +1143,10 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
|
||||
// We don't fully codegen the decl until later, but we do need to reserve a global
|
||||
// offset table index for it. This allows us to codegen decls out of dependency order,
|
||||
// increasing how many computations can be done in parallel.
|
||||
try self.bin_file.allocateDeclIndexes(decl);
|
||||
try self.work_queue.writeItem(.{ .codegen_decl = decl });
|
||||
try self.comp.bin_file.allocateDeclIndexes(decl);
|
||||
try self.comp.work_queue.writeItem(.{ .codegen_decl = decl });
|
||||
} else if (prev_type_has_bits) {
|
||||
self.bin_file.freeDecl(decl);
|
||||
self.comp.bin_file.freeDecl(decl);
|
||||
}
|
||||
|
||||
if (fn_proto.getExternExportInlineToken()) |maybe_export_token| {
|
||||
@ -1708,7 +1394,7 @@ fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module {
|
||||
if (zir_module.error_msg) |src_err_msg| {
|
||||
self.failed_files.putAssumeCapacityNoClobber(
|
||||
&root_scope.base,
|
||||
try ErrorMsg.create(self.gpa, src_err_msg.byte_offset, "{}", .{src_err_msg.msg}),
|
||||
try Compilation.ErrorMsg.create(self.gpa, src_err_msg.byte_offset, "{}", .{src_err_msg.msg}),
|
||||
);
|
||||
root_scope.status = .unloaded_parse_failure;
|
||||
return error.AnalysisFail;
|
||||
@ -1752,7 +1438,7 @@ fn getAstTree(self: *Module, container_scope: *Scope.Container) !*ast.Tree {
|
||||
defer msg.deinit();
|
||||
|
||||
try parse_err.render(tree.token_ids, msg.outStream());
|
||||
const err_msg = try self.gpa.create(ErrorMsg);
|
||||
const err_msg = try self.gpa.create(Compilation.ErrorMsg);
|
||||
err_msg.* = .{
|
||||
.msg = msg.toOwnedSlice(),
|
||||
.byte_offset = tree.token_locs[parse_err.loc()].start,
|
||||
@ -1776,7 +1462,7 @@ fn getAstTree(self: *Module, container_scope: *Scope.Container) !*ast.Tree {
|
||||
}
|
||||
}
|
||||
|
||||
fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
|
||||
pub fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
@ -1785,7 +1471,7 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
|
||||
const tree = try self.getAstTree(container_scope);
|
||||
const decls = tree.root_node.decls();
|
||||
|
||||
try self.work_queue.ensureUnusedCapacity(decls.len);
|
||||
try self.comp.work_queue.ensureUnusedCapacity(decls.len);
|
||||
try container_scope.decls.ensureCapacity(self.gpa, decls.len);
|
||||
|
||||
// Keep track of the decls that we expect to see in this file so that
|
||||
@ -1814,21 +1500,21 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
|
||||
decl.src_index = decl_i;
|
||||
if (deleted_decls.remove(decl) == null) {
|
||||
decl.analysis = .sema_failure;
|
||||
const err_msg = try ErrorMsg.create(self.gpa, tree.token_locs[name_tok].start, "redefinition of '{}'", .{decl.name});
|
||||
const err_msg = try Compilation.ErrorMsg.create(self.gpa, tree.token_locs[name_tok].start, "redefinition of '{}'", .{decl.name});
|
||||
errdefer err_msg.destroy(self.gpa);
|
||||
try self.failed_decls.putNoClobber(self.gpa, decl, err_msg);
|
||||
} else {
|
||||
if (!srcHashEql(decl.contents_hash, contents_hash)) {
|
||||
try self.markOutdatedDecl(decl);
|
||||
decl.contents_hash = contents_hash;
|
||||
} else switch (self.bin_file.tag) {
|
||||
} else switch (self.comp.bin_file.tag) {
|
||||
.coff => {
|
||||
// TODO Implement for COFF
|
||||
},
|
||||
.elf => if (decl.fn_link.elf.len != 0) {
|
||||
// TODO Look into detecting when this would be unnecessary by storing enough state
|
||||
// in `Decl` to notice that the line number did not change.
|
||||
self.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
|
||||
self.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
|
||||
},
|
||||
.macho => {
|
||||
// TODO Implement for MachO
|
||||
@ -1841,7 +1527,7 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
|
||||
container_scope.decls.putAssumeCapacity(new_decl, {});
|
||||
if (fn_proto.getExternExportInlineToken()) |maybe_export_token| {
|
||||
if (tree.token_ids[maybe_export_token] == .Keyword_export) {
|
||||
self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
|
||||
self.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1856,7 +1542,7 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
|
||||
decl.src_index = decl_i;
|
||||
if (deleted_decls.remove(decl) == null) {
|
||||
decl.analysis = .sema_failure;
|
||||
const err_msg = try ErrorMsg.create(self.gpa, name_loc.start, "redefinition of '{}'", .{decl.name});
|
||||
const err_msg = try Compilation.ErrorMsg.create(self.gpa, name_loc.start, "redefinition of '{}'", .{decl.name});
|
||||
errdefer err_msg.destroy(self.gpa);
|
||||
try self.failed_decls.putNoClobber(self.gpa, decl, err_msg);
|
||||
} else if (!srcHashEql(decl.contents_hash, contents_hash)) {
|
||||
@ -1868,7 +1554,7 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
|
||||
container_scope.decls.putAssumeCapacity(new_decl, {});
|
||||
if (var_decl.getExternExportToken()) |maybe_export_token| {
|
||||
if (tree.token_ids[maybe_export_token] == .Keyword_export) {
|
||||
self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
|
||||
self.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1882,7 +1568,7 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
|
||||
|
||||
const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
|
||||
container_scope.decls.putAssumeCapacity(new_decl, {});
|
||||
self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
|
||||
self.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
|
||||
} else if (src_decl.castTag(.ContainerField)) |container_field| {
|
||||
log.err("TODO: analyze container field", .{});
|
||||
} else if (src_decl.castTag(.TestDecl)) |test_decl| {
|
||||
@ -1901,12 +1587,12 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
|
||||
}
|
||||
}
|
||||
|
||||
fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void {
|
||||
pub fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void {
|
||||
// We may be analyzing it for the first time, or this may be
|
||||
// an incremental update. This code handles both cases.
|
||||
const src_module = try self.getSrcModule(root_scope);
|
||||
|
||||
try self.work_queue.ensureUnusedCapacity(src_module.decls.len);
|
||||
try self.comp.work_queue.ensureUnusedCapacity(src_module.decls.len);
|
||||
try root_scope.decls.ensureCapacity(self.gpa, src_module.decls.len);
|
||||
|
||||
var exports_to_resolve = std.ArrayList(*zir.Decl).init(self.gpa);
|
||||
@ -1954,7 +1640,7 @@ fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void {
|
||||
}
|
||||
}
|
||||
|
||||
fn deleteDecl(self: *Module, decl: *Decl) !void {
|
||||
pub fn deleteDecl(self: *Module, decl: *Decl) !void {
|
||||
try self.deletion_set.ensureCapacity(self.gpa, self.deletion_set.items.len + decl.dependencies.items().len);
|
||||
|
||||
// Remove from the namespace it resides in. In the case of an anonymous Decl it will
|
||||
@ -1988,7 +1674,7 @@ fn deleteDecl(self: *Module, decl: *Decl) !void {
|
||||
entry.value.destroy(self.gpa);
|
||||
}
|
||||
self.deleteDeclExports(decl);
|
||||
self.bin_file.freeDecl(decl);
|
||||
self.comp.bin_file.freeDecl(decl);
|
||||
decl.destroy(self.gpa);
|
||||
}
|
||||
|
||||
@ -2016,7 +1702,7 @@ fn deleteDeclExports(self: *Module, decl: *Decl) void {
|
||||
self.decl_exports.removeAssertDiscard(exp.exported_decl);
|
||||
}
|
||||
}
|
||||
if (self.bin_file.cast(link.File.Elf)) |elf| {
|
||||
if (self.comp.bin_file.cast(link.File.Elf)) |elf| {
|
||||
elf.deleteExport(exp.link);
|
||||
}
|
||||
if (self.failed_exports.remove(exp)) |entry| {
|
||||
@ -2029,7 +1715,7 @@ fn deleteDeclExports(self: *Module, decl: *Decl) void {
|
||||
self.gpa.free(kv.value);
|
||||
}
|
||||
|
||||
fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
|
||||
pub fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
@ -2060,7 +1746,7 @@ fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
|
||||
|
||||
fn markOutdatedDecl(self: *Module, decl: *Decl) !void {
|
||||
log.debug("mark {} outdated\n", .{decl.name});
|
||||
try self.work_queue.writeItem(.{ .analyze_decl = decl });
|
||||
try self.comp.work_queue.writeItem(.{ .analyze_decl = decl });
|
||||
if (self.failed_decls.remove(decl)) |entry| {
|
||||
entry.value.destroy(self.gpa);
|
||||
}
|
||||
@ -2082,14 +1768,14 @@ fn allocateNewDecl(
|
||||
.analysis = .unreferenced,
|
||||
.deletion_flag = false,
|
||||
.contents_hash = contents_hash,
|
||||
.link = switch (self.bin_file.tag) {
|
||||
.link = switch (self.comp.bin_file.tag) {
|
||||
.coff => .{ .coff = link.File.Coff.TextBlock.empty },
|
||||
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
|
||||
.macho => .{ .macho = link.File.MachO.TextBlock.empty },
|
||||
.c => .{ .c = {} },
|
||||
.wasm => .{ .wasm = {} },
|
||||
},
|
||||
.fn_link = switch (self.bin_file.tag) {
|
||||
.fn_link = switch (self.comp.bin_file.tag) {
|
||||
.coff => .{ .coff = {} },
|
||||
.elf => .{ .elf = link.File.Elf.SrcFn.empty },
|
||||
.macho => .{ .macho = link.File.MachO.SrcFn.empty },
|
||||
@ -2206,7 +1892,7 @@ pub fn analyzeExport(self: *Module, scope: *Scope, src: usize, borrowed_symbol_n
|
||||
|
||||
if (self.symbol_exports.get(symbol_name)) |_| {
|
||||
try self.failed_exports.ensureCapacity(self.gpa, self.failed_exports.items().len + 1);
|
||||
self.failed_exports.putAssumeCapacityNoClobber(new_export, try ErrorMsg.create(
|
||||
self.failed_exports.putAssumeCapacityNoClobber(new_export, try Compilation.ErrorMsg.create(
|
||||
self.gpa,
|
||||
src,
|
||||
"exported symbol collision: {}",
|
||||
@ -2218,11 +1904,11 @@ pub fn analyzeExport(self: *Module, scope: *Scope, src: usize, borrowed_symbol_n
|
||||
}
|
||||
|
||||
try self.symbol_exports.putNoClobber(self.gpa, symbol_name, new_export);
|
||||
self.bin_file.updateDeclExports(self, exported_decl, de_gop.entry.value) catch |err| switch (err) {
|
||||
self.comp.bin_file.updateDeclExports(self, exported_decl, de_gop.entry.value) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
else => {
|
||||
try self.failed_exports.ensureCapacity(self.gpa, self.failed_exports.items().len + 1);
|
||||
self.failed_exports.putAssumeCapacityNoClobber(new_export, try ErrorMsg.create(
|
||||
self.failed_exports.putAssumeCapacityNoClobber(new_export, try Compilation.ErrorMsg.create(
|
||||
self.gpa,
|
||||
src,
|
||||
"unable to export: {}",
|
||||
@ -2502,8 +2188,8 @@ pub fn createAnonymousDecl(
|
||||
// We should be able to further improve the compiler to not omit Decls which are only referenced at
|
||||
// compile-time and not runtime.
|
||||
if (typed_value.ty.hasCodeGenBits()) {
|
||||
try self.bin_file.allocateDeclIndexes(new_decl);
|
||||
try self.work_queue.writeItem(.{ .codegen_decl = new_decl });
|
||||
try self.comp.bin_file.allocateDeclIndexes(new_decl);
|
||||
try self.comp.work_queue.writeItem(.{ .codegen_decl = new_decl });
|
||||
}
|
||||
|
||||
return new_decl;
|
||||
@ -2756,7 +2442,7 @@ pub fn cmpNumeric(
|
||||
} else if (rhs_ty_tag == .ComptimeFloat) {
|
||||
break :x lhs.ty;
|
||||
}
|
||||
if (lhs.ty.floatBits(self.target()) >= rhs.ty.floatBits(self.target())) {
|
||||
if (lhs.ty.floatBits(self.getTarget()) >= rhs.ty.floatBits(self.getTarget())) {
|
||||
break :x lhs.ty;
|
||||
} else {
|
||||
break :x rhs.ty;
|
||||
@ -2815,7 +2501,7 @@ pub fn cmpNumeric(
|
||||
} else if (lhs_is_float) {
|
||||
dest_float_type = lhs.ty;
|
||||
} else {
|
||||
const int_info = lhs.ty.intInfo(self.target());
|
||||
const int_info = lhs.ty.intInfo(self.getTarget());
|
||||
lhs_bits = int_info.bits + @boolToInt(!int_info.signed and dest_int_is_signed);
|
||||
}
|
||||
|
||||
@ -2850,7 +2536,7 @@ pub fn cmpNumeric(
|
||||
} else if (rhs_is_float) {
|
||||
dest_float_type = rhs.ty;
|
||||
} else {
|
||||
const int_info = rhs.ty.intInfo(self.target());
|
||||
const int_info = rhs.ty.intInfo(self.getTarget());
|
||||
rhs_bits = int_info.bits + @boolToInt(!int_info.signed and dest_int_is_signed);
|
||||
}
|
||||
|
||||
@ -2915,13 +2601,13 @@ pub fn resolvePeerTypes(self: *Module, scope: *Scope, instructions: []*Inst) !Ty
|
||||
next_inst.ty.isInt() and
|
||||
prev_inst.ty.isSignedInt() == next_inst.ty.isSignedInt())
|
||||
{
|
||||
if (prev_inst.ty.intInfo(self.target()).bits < next_inst.ty.intInfo(self.target()).bits) {
|
||||
if (prev_inst.ty.intInfo(self.getTarget()).bits < next_inst.ty.intInfo(self.getTarget()).bits) {
|
||||
prev_inst = next_inst;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if (prev_inst.ty.isFloat() and next_inst.ty.isFloat()) {
|
||||
if (prev_inst.ty.floatBits(self.target()) < next_inst.ty.floatBits(self.target())) {
|
||||
if (prev_inst.ty.floatBits(self.getTarget()) < next_inst.ty.floatBits(self.getTarget())) {
|
||||
prev_inst = next_inst;
|
||||
}
|
||||
continue;
|
||||
@ -2989,8 +2675,8 @@ pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst
|
||||
if (inst.ty.zigTypeTag() == .Int and dest_type.zigTypeTag() == .Int) {
|
||||
assert(inst.value() == null); // handled above
|
||||
|
||||
const src_info = inst.ty.intInfo(self.target());
|
||||
const dst_info = dest_type.intInfo(self.target());
|
||||
const src_info = inst.ty.intInfo(self.getTarget());
|
||||
const dst_info = dest_type.intInfo(self.getTarget());
|
||||
if ((src_info.signed == dst_info.signed and dst_info.bits >= src_info.bits) or
|
||||
// small enough unsigned ints can get casted to large enough signed ints
|
||||
(src_info.signed and !dst_info.signed and dst_info.bits > src_info.bits))
|
||||
@ -3004,8 +2690,8 @@ pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst
|
||||
if (inst.ty.zigTypeTag() == .Float and dest_type.zigTypeTag() == .Float) {
|
||||
assert(inst.value() == null); // handled above
|
||||
|
||||
const src_bits = inst.ty.floatBits(self.target());
|
||||
const dst_bits = dest_type.floatBits(self.target());
|
||||
const src_bits = inst.ty.floatBits(self.getTarget());
|
||||
const dst_bits = dest_type.floatBits(self.getTarget());
|
||||
if (dst_bits >= src_bits) {
|
||||
const b = try self.requireRuntimeBlock(scope, inst.src);
|
||||
return self.addUnOp(b, inst.src, dest_type, .floatcast, inst);
|
||||
@ -3027,14 +2713,14 @@ pub fn coerceNum(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !?*
|
||||
}
|
||||
return self.fail(scope, inst.src, "TODO float to int", .{});
|
||||
} else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
|
||||
if (!val.intFitsInType(dest_type, self.target())) {
|
||||
if (!val.intFitsInType(dest_type, self.getTarget())) {
|
||||
return self.fail(scope, inst.src, "type {} cannot represent integer value {}", .{ inst.ty, val });
|
||||
}
|
||||
return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
|
||||
}
|
||||
} else if (dst_zig_tag == .ComptimeFloat or dst_zig_tag == .Float) {
|
||||
if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
|
||||
const res = val.floatCast(scope.arena(), dest_type, self.target()) catch |err| switch (err) {
|
||||
const res = val.floatCast(scope.arena(), dest_type, self.getTarget()) catch |err| switch (err) {
|
||||
error.Overflow => return self.fail(
|
||||
scope,
|
||||
inst.src,
|
||||
@ -3087,7 +2773,7 @@ fn coerceArrayPtrToSlice(self: *Module, scope: *Scope, dest_type: Type, inst: *I
|
||||
|
||||
pub fn fail(self: *Module, scope: *Scope, src: usize, comptime format: []const u8, args: anytype) InnerError {
|
||||
@setCold(true);
|
||||
const err_msg = try ErrorMsg.create(self.gpa, src, format, args);
|
||||
const err_msg = try Compilation.ErrorMsg.create(self.gpa, src, format, args);
|
||||
return self.failWithOwnedErrorMsg(scope, src, err_msg);
|
||||
}
|
||||
|
||||
@ -3115,7 +2801,7 @@ pub fn failNode(
|
||||
return self.fail(scope, src, format, args);
|
||||
}
|
||||
|
||||
fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *ErrorMsg) InnerError {
|
||||
fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Compilation.ErrorMsg) InnerError {
|
||||
{
|
||||
errdefer err_msg.destroy(self.gpa);
|
||||
try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
|
||||
@ -3181,36 +2867,6 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult
|
||||
return .no_match;
|
||||
}
|
||||
|
||||
pub const ErrorMsg = struct {
|
||||
byte_offset: usize,
|
||||
msg: []const u8,
|
||||
|
||||
pub fn create(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: anytype) !*ErrorMsg {
|
||||
const self = try gpa.create(ErrorMsg);
|
||||
errdefer gpa.destroy(self);
|
||||
self.* = try init(gpa, byte_offset, format, args);
|
||||
return self;
|
||||
}
|
||||
|
||||
/// Assumes the ErrorMsg struct and msg were both allocated with allocator.
|
||||
pub fn destroy(self: *ErrorMsg, gpa: *Allocator) void {
|
||||
self.deinit(gpa);
|
||||
gpa.destroy(self);
|
||||
}
|
||||
|
||||
pub fn init(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: anytype) !ErrorMsg {
|
||||
return ErrorMsg{
|
||||
.byte_offset = byte_offset,
|
||||
.msg = try std.fmt.allocPrint(gpa, format, args),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(self: *ErrorMsg, gpa: *Allocator) void {
|
||||
gpa.free(self.msg);
|
||||
self.* = undefined;
|
||||
}
|
||||
};
|
||||
|
||||
fn srcHashEql(a: std.zig.SrcHash, b: std.zig.SrcHash) bool {
|
||||
return @bitCast(u128, a) == @bitCast(u128, b);
|
||||
}
|
||||
@ -3274,7 +2930,7 @@ pub fn intSub(allocator: *Allocator, lhs: Value, rhs: Value) !Value {
|
||||
pub fn floatAdd(self: *Module, scope: *Scope, float_type: Type, src: usize, lhs: Value, rhs: Value) !Value {
|
||||
var bit_count = switch (float_type.tag()) {
|
||||
.comptime_float => 128,
|
||||
else => float_type.floatBits(self.target()),
|
||||
else => float_type.floatBits(self.getTarget()),
|
||||
};
|
||||
|
||||
const allocator = scope.arena();
|
||||
@ -3308,7 +2964,7 @@ pub fn floatAdd(self: *Module, scope: *Scope, float_type: Type, src: usize, lhs:
|
||||
pub fn floatSub(self: *Module, scope: *Scope, float_type: Type, src: usize, lhs: Value, rhs: Value) !Value {
|
||||
var bit_count = switch (float_type.tag()) {
|
||||
.comptime_float => 128,
|
||||
else => float_type.floatBits(self.target()),
|
||||
else => float_type.floatBits(self.getTarget()),
|
||||
};
|
||||
|
||||
const allocator = scope.arena();
|
||||
@ -3579,3 +3235,11 @@ pub fn safetyPanic(mod: *Module, block: *Scope.Block, src: usize, panic_id: Pani
|
||||
_ = try mod.addNoOp(block, src, Type.initTag(.void), .breakpoint);
|
||||
return mod.addNoOp(block, src, Type.initTag(.noreturn), .unreach);
|
||||
}
|
||||
|
||||
pub fn getTarget(self: Module) Target {
|
||||
return self.comp.bin_file.options.target;
|
||||
}
|
||||
|
||||
pub fn optimizeMode(self: Module) std.builtin.Mode {
|
||||
return self.comp.bin_file.options.optimize_mode;
|
||||
}
|
||||
62
src/Package.zig
Normal file
62
src/Package.zig
Normal file
@ -0,0 +1,62 @@
|
||||
pub const Table = std.StringHashMapUnmanaged(*Package);
|
||||
|
||||
root_src_directory: Compilation.Directory,
|
||||
/// Relative to `root_src_directory`. May contain path separators.
|
||||
root_src_path: []const u8,
|
||||
table: Table = .{},
|
||||
parent: ?*Package = null,
|
||||
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
const Package = @This();
|
||||
const Compilation = @import("Compilation.zig");
|
||||
|
||||
/// No references to `root_src_dir` and `root_src_path` are kept.
|
||||
pub fn create(
|
||||
gpa: *Allocator,
|
||||
base_directory: Compilation.Directory,
|
||||
/// Relative to `base_directory`.
|
||||
root_src_dir: []const u8,
|
||||
/// Relative to `root_src_dir`.
|
||||
root_src_path: []const u8,
|
||||
) !*Package {
|
||||
const ptr = try gpa.create(Package);
|
||||
errdefer gpa.destroy(ptr);
|
||||
|
||||
const root_src_dir_path = try base_directory.join(gpa, &[_][]const u8{root_src_dir});
|
||||
errdefer gpa.free(root_src_dir_path);
|
||||
|
||||
const root_src_path_dupe = try mem.dupe(gpa, u8, root_src_path);
|
||||
errdefer gpa.free(root_src_path_dupe);
|
||||
|
||||
ptr.* = .{
|
||||
.root_src_directory = .{
|
||||
.path = root_src_dir_path,
|
||||
.handle = try base_directory.handle.openDir(root_src_dir, .{}),
|
||||
},
|
||||
.root_src_path = root_src_path_dupe,
|
||||
};
|
||||
return ptr;
|
||||
}
|
||||
|
||||
pub fn destroy(pkg: *Package, gpa: *Allocator) void {
|
||||
pkg.root_src_directory.handle.close();
|
||||
gpa.free(pkg.root_src_path);
|
||||
if (pkg.root_src_directory.path) |p| gpa.free(p);
|
||||
{
|
||||
var it = pkg.table.iterator();
|
||||
while (it.next()) |kv| {
|
||||
gpa.free(kv.key);
|
||||
}
|
||||
}
|
||||
pkg.table.deinit(gpa);
|
||||
gpa.destroy(pkg);
|
||||
}
|
||||
|
||||
pub fn add(pkg: *Package, gpa: *Allocator, name: []const u8, package: *Package) !void {
|
||||
try pkg.table.ensureCapacity(gpa, pkg.table.items().len + 1);
|
||||
const name_dupe = try mem.dupe(gpa, u8, name);
|
||||
pkg.table.putAssumeCapacityNoClobber(name_dupe, package);
|
||||
}
|
||||
196
src/blake2.h
196
src/blake2.h
@ -1,196 +0,0 @@
|
||||
/*
|
||||
BLAKE2 reference source code package - reference C implementations
|
||||
|
||||
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
|
||||
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
|
||||
your option. The terms of these licenses can be found at:
|
||||
|
||||
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
|
||||
- OpenSSL license : https://www.openssl.org/source/license.html
|
||||
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
More information about the BLAKE2 hash function can be found at
|
||||
https://blake2.net.
|
||||
*/
|
||||
#ifndef BLAKE2_H
|
||||
#define BLAKE2_H
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
#define BLAKE2_PACKED(x) __pragma(pack(push, 1)) x __pragma(pack(pop))
|
||||
#else
|
||||
#define BLAKE2_PACKED(x) x __attribute__((packed))
|
||||
#endif
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
enum blake2s_constant
|
||||
{
|
||||
BLAKE2S_BLOCKBYTES = 64,
|
||||
BLAKE2S_OUTBYTES = 32,
|
||||
BLAKE2S_KEYBYTES = 32,
|
||||
BLAKE2S_SALTBYTES = 8,
|
||||
BLAKE2S_PERSONALBYTES = 8
|
||||
};
|
||||
|
||||
enum blake2b_constant
|
||||
{
|
||||
BLAKE2B_BLOCKBYTES = 128,
|
||||
BLAKE2B_OUTBYTES = 64,
|
||||
BLAKE2B_KEYBYTES = 64,
|
||||
BLAKE2B_SALTBYTES = 16,
|
||||
BLAKE2B_PERSONALBYTES = 16
|
||||
};
|
||||
|
||||
typedef struct blake2s_state__
|
||||
{
|
||||
uint32_t h[8];
|
||||
uint32_t t[2];
|
||||
uint32_t f[2];
|
||||
uint8_t buf[BLAKE2S_BLOCKBYTES];
|
||||
size_t buflen;
|
||||
size_t outlen;
|
||||
uint8_t last_node;
|
||||
} blake2s_state;
|
||||
|
||||
typedef struct blake2b_state__
|
||||
{
|
||||
uint64_t h[8];
|
||||
uint64_t t[2];
|
||||
uint64_t f[2];
|
||||
uint8_t buf[BLAKE2B_BLOCKBYTES];
|
||||
size_t buflen;
|
||||
size_t outlen;
|
||||
uint8_t last_node;
|
||||
} blake2b_state;
|
||||
|
||||
typedef struct blake2sp_state__
|
||||
{
|
||||
blake2s_state S[8][1];
|
||||
blake2s_state R[1];
|
||||
uint8_t buf[8 * BLAKE2S_BLOCKBYTES];
|
||||
size_t buflen;
|
||||
size_t outlen;
|
||||
} blake2sp_state;
|
||||
|
||||
typedef struct blake2bp_state__
|
||||
{
|
||||
blake2b_state S[4][1];
|
||||
blake2b_state R[1];
|
||||
uint8_t buf[4 * BLAKE2B_BLOCKBYTES];
|
||||
size_t buflen;
|
||||
size_t outlen;
|
||||
} blake2bp_state;
|
||||
|
||||
|
||||
BLAKE2_PACKED(struct blake2s_param__
|
||||
{
|
||||
uint8_t digest_length; /* 1 */
|
||||
uint8_t key_length; /* 2 */
|
||||
uint8_t fanout; /* 3 */
|
||||
uint8_t depth; /* 4 */
|
||||
uint32_t leaf_length; /* 8 */
|
||||
uint32_t node_offset; /* 12 */
|
||||
uint16_t xof_length; /* 14 */
|
||||
uint8_t node_depth; /* 15 */
|
||||
uint8_t inner_length; /* 16 */
|
||||
/* uint8_t reserved[0]; */
|
||||
uint8_t salt[BLAKE2S_SALTBYTES]; /* 24 */
|
||||
uint8_t personal[BLAKE2S_PERSONALBYTES]; /* 32 */
|
||||
});
|
||||
|
||||
typedef struct blake2s_param__ blake2s_param;
|
||||
|
||||
BLAKE2_PACKED(struct blake2b_param__
|
||||
{
|
||||
uint8_t digest_length; /* 1 */
|
||||
uint8_t key_length; /* 2 */
|
||||
uint8_t fanout; /* 3 */
|
||||
uint8_t depth; /* 4 */
|
||||
uint32_t leaf_length; /* 8 */
|
||||
uint32_t node_offset; /* 12 */
|
||||
uint32_t xof_length; /* 16 */
|
||||
uint8_t node_depth; /* 17 */
|
||||
uint8_t inner_length; /* 18 */
|
||||
uint8_t reserved[14]; /* 32 */
|
||||
uint8_t salt[BLAKE2B_SALTBYTES]; /* 48 */
|
||||
uint8_t personal[BLAKE2B_PERSONALBYTES]; /* 64 */
|
||||
});
|
||||
|
||||
typedef struct blake2b_param__ blake2b_param;
|
||||
|
||||
typedef struct blake2xs_state__
|
||||
{
|
||||
blake2s_state S[1];
|
||||
blake2s_param P[1];
|
||||
} blake2xs_state;
|
||||
|
||||
typedef struct blake2xb_state__
|
||||
{
|
||||
blake2b_state S[1];
|
||||
blake2b_param P[1];
|
||||
} blake2xb_state;
|
||||
|
||||
/* Padded structs result in a compile-time error */
|
||||
enum {
|
||||
BLAKE2_DUMMY_1 = 1/(sizeof(blake2s_param) == BLAKE2S_OUTBYTES),
|
||||
BLAKE2_DUMMY_2 = 1/(sizeof(blake2b_param) == BLAKE2B_OUTBYTES)
|
||||
};
|
||||
|
||||
/* Streaming API */
|
||||
int blake2s_init( blake2s_state *S, size_t outlen );
|
||||
int blake2s_init_key( blake2s_state *S, size_t outlen, const void *key, size_t keylen );
|
||||
int blake2s_init_param( blake2s_state *S, const blake2s_param *P );
|
||||
int blake2s_update( blake2s_state *S, const void *in, size_t inlen );
|
||||
int blake2s_final( blake2s_state *S, void *out, size_t outlen );
|
||||
|
||||
int blake2b_init( blake2b_state *S, size_t outlen );
|
||||
int blake2b_init_key( blake2b_state *S, size_t outlen, const void *key, size_t keylen );
|
||||
int blake2b_init_param( blake2b_state *S, const blake2b_param *P );
|
||||
int blake2b_update( blake2b_state *S, const void *in, size_t inlen );
|
||||
int blake2b_final( blake2b_state *S, void *out, size_t outlen );
|
||||
|
||||
int blake2sp_init( blake2sp_state *S, size_t outlen );
|
||||
int blake2sp_init_key( blake2sp_state *S, size_t outlen, const void *key, size_t keylen );
|
||||
int blake2sp_update( blake2sp_state *S, const void *in, size_t inlen );
|
||||
int blake2sp_final( blake2sp_state *S, void *out, size_t outlen );
|
||||
|
||||
int blake2bp_init( blake2bp_state *S, size_t outlen );
|
||||
int blake2bp_init_key( blake2bp_state *S, size_t outlen, const void *key, size_t keylen );
|
||||
int blake2bp_update( blake2bp_state *S, const void *in, size_t inlen );
|
||||
int blake2bp_final( blake2bp_state *S, void *out, size_t outlen );
|
||||
|
||||
/* Variable output length API */
|
||||
int blake2xs_init( blake2xs_state *S, const size_t outlen );
|
||||
int blake2xs_init_key( blake2xs_state *S, const size_t outlen, const void *key, size_t keylen );
|
||||
int blake2xs_update( blake2xs_state *S, const void *in, size_t inlen );
|
||||
int blake2xs_final(blake2xs_state *S, void *out, size_t outlen);
|
||||
|
||||
int blake2xb_init( blake2xb_state *S, const size_t outlen );
|
||||
int blake2xb_init_key( blake2xb_state *S, const size_t outlen, const void *key, size_t keylen );
|
||||
int blake2xb_update( blake2xb_state *S, const void *in, size_t inlen );
|
||||
int blake2xb_final(blake2xb_state *S, void *out, size_t outlen);
|
||||
|
||||
/* Simple API */
|
||||
int blake2s( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen );
|
||||
int blake2b( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen );
|
||||
|
||||
int blake2sp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen );
|
||||
int blake2bp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen );
|
||||
|
||||
int blake2xs( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen );
|
||||
int blake2xb( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen );
|
||||
|
||||
/* This is simply an alias for blake2b */
|
||||
int blake2( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen );
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
539
src/blake2b.c
539
src/blake2b.c
@ -1,539 +0,0 @@
|
||||
/*
|
||||
BLAKE2 reference source code package - reference C implementations
|
||||
|
||||
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
|
||||
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
|
||||
your option. The terms of these licenses can be found at:
|
||||
|
||||
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
|
||||
- OpenSSL license : https://www.openssl.org/source/license.html
|
||||
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
More information about the BLAKE2 hash function can be found at
|
||||
https://blake2.net.
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#include "blake2.h"
|
||||
/*
|
||||
BLAKE2 reference source code package - reference C implementations
|
||||
|
||||
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
|
||||
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
|
||||
your option. The terms of these licenses can be found at:
|
||||
|
||||
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
|
||||
- OpenSSL license : https://www.openssl.org/source/license.html
|
||||
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
More information about the BLAKE2 hash function can be found at
|
||||
https://blake2.net.
|
||||
*/
|
||||
#ifndef BLAKE2_IMPL_H
|
||||
#define BLAKE2_IMPL_H
|
||||
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
|
||||
#if !defined(__cplusplus) && (!defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901L)
|
||||
#if defined(_MSC_VER)
|
||||
#define BLAKE2_INLINE __inline
|
||||
#elif defined(__GNUC__)
|
||||
#define BLAKE2_INLINE __inline__
|
||||
#else
|
||||
#define BLAKE2_INLINE
|
||||
#endif
|
||||
#else
|
||||
#define BLAKE2_INLINE inline
|
||||
#endif
|
||||
|
||||
static BLAKE2_INLINE uint32_t load32( const void *src )
|
||||
{
|
||||
#if defined(NATIVE_LITTLE_ENDIAN)
|
||||
uint32_t w;
|
||||
memcpy(&w, src, sizeof w);
|
||||
return w;
|
||||
#else
|
||||
const uint8_t *p = ( const uint8_t * )src;
|
||||
return (( uint32_t )( p[0] ) << 0) |
|
||||
(( uint32_t )( p[1] ) << 8) |
|
||||
(( uint32_t )( p[2] ) << 16) |
|
||||
(( uint32_t )( p[3] ) << 24) ;
|
||||
#endif
|
||||
}
|
||||
|
||||
static BLAKE2_INLINE uint64_t load64( const void *src )
|
||||
{
|
||||
#if defined(NATIVE_LITTLE_ENDIAN)
|
||||
uint64_t w;
|
||||
memcpy(&w, src, sizeof w);
|
||||
return w;
|
||||
#else
|
||||
const uint8_t *p = ( const uint8_t * )src;
|
||||
return (( uint64_t )( p[0] ) << 0) |
|
||||
(( uint64_t )( p[1] ) << 8) |
|
||||
(( uint64_t )( p[2] ) << 16) |
|
||||
(( uint64_t )( p[3] ) << 24) |
|
||||
(( uint64_t )( p[4] ) << 32) |
|
||||
(( uint64_t )( p[5] ) << 40) |
|
||||
(( uint64_t )( p[6] ) << 48) |
|
||||
(( uint64_t )( p[7] ) << 56) ;
|
||||
#endif
|
||||
}
|
||||
|
||||
static BLAKE2_INLINE uint16_t load16( const void *src )
|
||||
{
|
||||
#if defined(NATIVE_LITTLE_ENDIAN)
|
||||
uint16_t w;
|
||||
memcpy(&w, src, sizeof w);
|
||||
return w;
|
||||
#else
|
||||
const uint8_t *p = ( const uint8_t * )src;
|
||||
return ( uint16_t )((( uint32_t )( p[0] ) << 0) |
|
||||
(( uint32_t )( p[1] ) << 8));
|
||||
#endif
|
||||
}
|
||||
|
||||
static BLAKE2_INLINE void store16( void *dst, uint16_t w )
|
||||
{
|
||||
#if defined(NATIVE_LITTLE_ENDIAN)
|
||||
memcpy(dst, &w, sizeof w);
|
||||
#else
|
||||
uint8_t *p = ( uint8_t * )dst;
|
||||
*p++ = ( uint8_t )w; w >>= 8;
|
||||
*p++ = ( uint8_t )w;
|
||||
#endif
|
||||
}
|
||||
|
||||
static BLAKE2_INLINE void store32( void *dst, uint32_t w )
|
||||
{
|
||||
#if defined(NATIVE_LITTLE_ENDIAN)
|
||||
memcpy(dst, &w, sizeof w);
|
||||
#else
|
||||
uint8_t *p = ( uint8_t * )dst;
|
||||
p[0] = (uint8_t)(w >> 0);
|
||||
p[1] = (uint8_t)(w >> 8);
|
||||
p[2] = (uint8_t)(w >> 16);
|
||||
p[3] = (uint8_t)(w >> 24);
|
||||
#endif
|
||||
}
|
||||
|
||||
static BLAKE2_INLINE void store64( void *dst, uint64_t w )
|
||||
{
|
||||
#if defined(NATIVE_LITTLE_ENDIAN)
|
||||
memcpy(dst, &w, sizeof w);
|
||||
#else
|
||||
uint8_t *p = ( uint8_t * )dst;
|
||||
p[0] = (uint8_t)(w >> 0);
|
||||
p[1] = (uint8_t)(w >> 8);
|
||||
p[2] = (uint8_t)(w >> 16);
|
||||
p[3] = (uint8_t)(w >> 24);
|
||||
p[4] = (uint8_t)(w >> 32);
|
||||
p[5] = (uint8_t)(w >> 40);
|
||||
p[6] = (uint8_t)(w >> 48);
|
||||
p[7] = (uint8_t)(w >> 56);
|
||||
#endif
|
||||
}
|
||||
|
||||
static BLAKE2_INLINE uint64_t load48( const void *src )
|
||||
{
|
||||
const uint8_t *p = ( const uint8_t * )src;
|
||||
return (( uint64_t )( p[0] ) << 0) |
|
||||
(( uint64_t )( p[1] ) << 8) |
|
||||
(( uint64_t )( p[2] ) << 16) |
|
||||
(( uint64_t )( p[3] ) << 24) |
|
||||
(( uint64_t )( p[4] ) << 32) |
|
||||
(( uint64_t )( p[5] ) << 40) ;
|
||||
}
|
||||
|
||||
static BLAKE2_INLINE void store48( void *dst, uint64_t w )
|
||||
{
|
||||
uint8_t *p = ( uint8_t * )dst;
|
||||
p[0] = (uint8_t)(w >> 0);
|
||||
p[1] = (uint8_t)(w >> 8);
|
||||
p[2] = (uint8_t)(w >> 16);
|
||||
p[3] = (uint8_t)(w >> 24);
|
||||
p[4] = (uint8_t)(w >> 32);
|
||||
p[5] = (uint8_t)(w >> 40);
|
||||
}
|
||||
|
||||
static BLAKE2_INLINE uint32_t rotr32( const uint32_t w, const unsigned c )
|
||||
{
|
||||
return ( w >> c ) | ( w << ( 32 - c ) );
|
||||
}
|
||||
|
||||
static BLAKE2_INLINE uint64_t rotr64( const uint64_t w, const unsigned c )
|
||||
{
|
||||
return ( w >> c ) | ( w << ( 64 - c ) );
|
||||
}
|
||||
|
||||
/* prevents compiler optimizing out memset() */
|
||||
static BLAKE2_INLINE void secure_zero_memory(void *v, size_t n)
|
||||
{
|
||||
static void *(*const volatile memset_v)(void *, int, size_t) = &memset;
|
||||
memset_v(v, 0, n);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
static const uint64_t blake2b_IV[8] =
|
||||
{
|
||||
0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL,
|
||||
0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
|
||||
0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL,
|
||||
0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL
|
||||
};
|
||||
|
||||
static const uint8_t blake2b_sigma[12][16] =
|
||||
{
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } ,
|
||||
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } ,
|
||||
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } ,
|
||||
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } ,
|
||||
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 } ,
|
||||
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } ,
|
||||
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 } ,
|
||||
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 } ,
|
||||
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 } ,
|
||||
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0 } ,
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } ,
|
||||
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
|
||||
};
|
||||
|
||||
|
||||
static void blake2b_set_lastnode( blake2b_state *S )
|
||||
{
|
||||
S->f[1] = (uint64_t)-1;
|
||||
}
|
||||
|
||||
/* Some helper functions, not necessarily useful */
|
||||
static int blake2b_is_lastblock( const blake2b_state *S )
|
||||
{
|
||||
return S->f[0] != 0;
|
||||
}
|
||||
|
||||
static void blake2b_set_lastblock( blake2b_state *S )
|
||||
{
|
||||
if( S->last_node ) blake2b_set_lastnode( S );
|
||||
|
||||
S->f[0] = (uint64_t)-1;
|
||||
}
|
||||
|
||||
static void blake2b_increment_counter( blake2b_state *S, const uint64_t inc )
|
||||
{
|
||||
S->t[0] += inc;
|
||||
S->t[1] += ( S->t[0] < inc );
|
||||
}
|
||||
|
||||
static void blake2b_init0( blake2b_state *S )
|
||||
{
|
||||
size_t i;
|
||||
memset( S, 0, sizeof( blake2b_state ) );
|
||||
|
||||
for( i = 0; i < 8; ++i ) S->h[i] = blake2b_IV[i];
|
||||
}
|
||||
|
||||
/* init xors IV with input parameter block */
|
||||
int blake2b_init_param( blake2b_state *S, const blake2b_param *P )
|
||||
{
|
||||
const uint8_t *p = ( const uint8_t * )( P );
|
||||
size_t i;
|
||||
|
||||
blake2b_init0( S );
|
||||
|
||||
/* IV XOR ParamBlock */
|
||||
for( i = 0; i < 8; ++i )
|
||||
S->h[i] ^= load64( p + sizeof( S->h[i] ) * i );
|
||||
|
||||
S->outlen = P->digest_length;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
int blake2b_init( blake2b_state *S, size_t outlen )
|
||||
{
|
||||
blake2b_param P[1];
|
||||
|
||||
if ( ( !outlen ) || ( outlen > BLAKE2B_OUTBYTES ) ) return -1;
|
||||
|
||||
P->digest_length = (uint8_t)outlen;
|
||||
P->key_length = 0;
|
||||
P->fanout = 1;
|
||||
P->depth = 1;
|
||||
store32( &P->leaf_length, 0 );
|
||||
store32( &P->node_offset, 0 );
|
||||
store32( &P->xof_length, 0 );
|
||||
P->node_depth = 0;
|
||||
P->inner_length = 0;
|
||||
memset( P->reserved, 0, sizeof( P->reserved ) );
|
||||
memset( P->salt, 0, sizeof( P->salt ) );
|
||||
memset( P->personal, 0, sizeof( P->personal ) );
|
||||
return blake2b_init_param( S, P );
|
||||
}
|
||||
|
||||
|
||||
int blake2b_init_key( blake2b_state *S, size_t outlen, const void *key, size_t keylen )
|
||||
{
|
||||
blake2b_param P[1];
|
||||
|
||||
if ( ( !outlen ) || ( outlen > BLAKE2B_OUTBYTES ) ) return -1;
|
||||
|
||||
if ( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;
|
||||
|
||||
P->digest_length = (uint8_t)outlen;
|
||||
P->key_length = (uint8_t)keylen;
|
||||
P->fanout = 1;
|
||||
P->depth = 1;
|
||||
store32( &P->leaf_length, 0 );
|
||||
store32( &P->node_offset, 0 );
|
||||
store32( &P->xof_length, 0 );
|
||||
P->node_depth = 0;
|
||||
P->inner_length = 0;
|
||||
memset( P->reserved, 0, sizeof( P->reserved ) );
|
||||
memset( P->salt, 0, sizeof( P->salt ) );
|
||||
memset( P->personal, 0, sizeof( P->personal ) );
|
||||
|
||||
if( blake2b_init_param( S, P ) < 0 ) return -1;
|
||||
|
||||
{
|
||||
uint8_t block[BLAKE2B_BLOCKBYTES];
|
||||
memset( block, 0, BLAKE2B_BLOCKBYTES );
|
||||
memcpy( block, key, keylen );
|
||||
blake2b_update( S, block, BLAKE2B_BLOCKBYTES );
|
||||
secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define G(r,i,a,b,c,d) \
|
||||
do { \
|
||||
a = a + b + m[blake2b_sigma[r][2*i+0]]; \
|
||||
d = rotr64(d ^ a, 32); \
|
||||
c = c + d; \
|
||||
b = rotr64(b ^ c, 24); \
|
||||
a = a + b + m[blake2b_sigma[r][2*i+1]]; \
|
||||
d = rotr64(d ^ a, 16); \
|
||||
c = c + d; \
|
||||
b = rotr64(b ^ c, 63); \
|
||||
} while(0)
|
||||
|
||||
#define ROUND(r) \
|
||||
do { \
|
||||
G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
|
||||
G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \
|
||||
G(r,2,v[ 2],v[ 6],v[10],v[14]); \
|
||||
G(r,3,v[ 3],v[ 7],v[11],v[15]); \
|
||||
G(r,4,v[ 0],v[ 5],v[10],v[15]); \
|
||||
G(r,5,v[ 1],v[ 6],v[11],v[12]); \
|
||||
G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
|
||||
G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \
|
||||
} while(0)
|
||||
|
||||
static void blake2b_compress( blake2b_state *S, const uint8_t block[BLAKE2B_BLOCKBYTES] )
|
||||
{
|
||||
uint64_t m[16];
|
||||
uint64_t v[16];
|
||||
size_t i;
|
||||
|
||||
for( i = 0; i < 16; ++i ) {
|
||||
m[i] = load64( block + i * sizeof( m[i] ) );
|
||||
}
|
||||
|
||||
for( i = 0; i < 8; ++i ) {
|
||||
v[i] = S->h[i];
|
||||
}
|
||||
|
||||
v[ 8] = blake2b_IV[0];
|
||||
v[ 9] = blake2b_IV[1];
|
||||
v[10] = blake2b_IV[2];
|
||||
v[11] = blake2b_IV[3];
|
||||
v[12] = blake2b_IV[4] ^ S->t[0];
|
||||
v[13] = blake2b_IV[5] ^ S->t[1];
|
||||
v[14] = blake2b_IV[6] ^ S->f[0];
|
||||
v[15] = blake2b_IV[7] ^ S->f[1];
|
||||
|
||||
ROUND( 0 );
|
||||
ROUND( 1 );
|
||||
ROUND( 2 );
|
||||
ROUND( 3 );
|
||||
ROUND( 4 );
|
||||
ROUND( 5 );
|
||||
ROUND( 6 );
|
||||
ROUND( 7 );
|
||||
ROUND( 8 );
|
||||
ROUND( 9 );
|
||||
ROUND( 10 );
|
||||
ROUND( 11 );
|
||||
|
||||
for( i = 0; i < 8; ++i ) {
|
||||
S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];
|
||||
}
|
||||
}
|
||||
|
||||
#undef G
|
||||
#undef ROUND
|
||||
|
||||
int blake2b_update( blake2b_state *S, const void *pin, size_t inlen )
|
||||
{
|
||||
const unsigned char * in = (const unsigned char *)pin;
|
||||
if( inlen > 0 )
|
||||
{
|
||||
size_t left = S->buflen;
|
||||
size_t fill = BLAKE2B_BLOCKBYTES - left;
|
||||
if( inlen > fill )
|
||||
{
|
||||
S->buflen = 0;
|
||||
memcpy( S->buf + left, in, fill ); /* Fill buffer */
|
||||
blake2b_increment_counter( S, BLAKE2B_BLOCKBYTES );
|
||||
blake2b_compress( S, S->buf ); /* Compress */
|
||||
in += fill; inlen -= fill;
|
||||
while(inlen > BLAKE2B_BLOCKBYTES) {
|
||||
blake2b_increment_counter(S, BLAKE2B_BLOCKBYTES);
|
||||
blake2b_compress( S, in );
|
||||
in += BLAKE2B_BLOCKBYTES;
|
||||
inlen -= BLAKE2B_BLOCKBYTES;
|
||||
}
|
||||
}
|
||||
memcpy( S->buf + S->buflen, in, inlen );
|
||||
S->buflen += inlen;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int blake2b_final( blake2b_state *S, void *out, size_t outlen )
|
||||
{
|
||||
uint8_t buffer[BLAKE2B_OUTBYTES] = {0};
|
||||
size_t i;
|
||||
|
||||
if( out == NULL || outlen < S->outlen )
|
||||
return -1;
|
||||
|
||||
if( blake2b_is_lastblock( S ) )
|
||||
return -1;
|
||||
|
||||
blake2b_increment_counter( S, S->buflen );
|
||||
blake2b_set_lastblock( S );
|
||||
memset( S->buf + S->buflen, 0, BLAKE2B_BLOCKBYTES - S->buflen ); /* Padding */
|
||||
blake2b_compress( S, S->buf );
|
||||
|
||||
for( i = 0; i < 8; ++i ) /* Output full hash to temp buffer */
|
||||
store64( buffer + sizeof( S->h[i] ) * i, S->h[i] );
|
||||
|
||||
memcpy( out, buffer, S->outlen );
|
||||
secure_zero_memory(buffer, sizeof(buffer));
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* inlen, at least, should be uint64_t. Others can be size_t. */
|
||||
int blake2b( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen )
|
||||
{
|
||||
blake2b_state S[1];
|
||||
|
||||
/* Verify parameters */
|
||||
if ( NULL == in && inlen > 0 ) return -1;
|
||||
|
||||
if ( NULL == out ) return -1;
|
||||
|
||||
if( NULL == key && keylen > 0 ) return -1;
|
||||
|
||||
if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
|
||||
|
||||
if( keylen > BLAKE2B_KEYBYTES ) return -1;
|
||||
|
||||
if( keylen > 0 )
|
||||
{
|
||||
if( blake2b_init_key( S, outlen, key, keylen ) < 0 ) return -1;
|
||||
}
|
||||
else
|
||||
{
|
||||
if( blake2b_init( S, outlen ) < 0 ) return -1;
|
||||
}
|
||||
|
||||
blake2b_update( S, ( const uint8_t * )in, inlen );
|
||||
blake2b_final( S, out, outlen );
|
||||
return 0;
|
||||
}
|
||||
|
||||
int blake2( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen ) {
|
||||
return blake2b(out, outlen, in, inlen, key, keylen);
|
||||
}
|
||||
|
||||
#if defined(SUPERCOP)
|
||||
int crypto_hash( unsigned char *out, unsigned char *in, unsigned long long inlen )
|
||||
{
|
||||
return blake2b( out, BLAKE2B_OUTBYTES, in, inlen, NULL, 0 );
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(BLAKE2B_SELFTEST)
|
||||
#include <string.h>
|
||||
#include "blake2-kat.h"
|
||||
int main( void )
|
||||
{
|
||||
uint8_t key[BLAKE2B_KEYBYTES];
|
||||
uint8_t buf[BLAKE2_KAT_LENGTH];
|
||||
size_t i, step;
|
||||
|
||||
for( i = 0; i < BLAKE2B_KEYBYTES; ++i )
|
||||
key[i] = ( uint8_t )i;
|
||||
|
||||
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
|
||||
buf[i] = ( uint8_t )i;
|
||||
|
||||
/* Test simple API */
|
||||
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
|
||||
{
|
||||
uint8_t hash[BLAKE2B_OUTBYTES];
|
||||
blake2b( hash, BLAKE2B_OUTBYTES, buf, i, key, BLAKE2B_KEYBYTES );
|
||||
|
||||
if( 0 != memcmp( hash, blake2b_keyed_kat[i], BLAKE2B_OUTBYTES ) )
|
||||
{
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
/* Test streaming API */
|
||||
for(step = 1; step < BLAKE2B_BLOCKBYTES; ++step) {
|
||||
for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
|
||||
uint8_t hash[BLAKE2B_OUTBYTES];
|
||||
blake2b_state S;
|
||||
uint8_t * p = buf;
|
||||
size_t mlen = i;
|
||||
int err = 0;
|
||||
|
||||
if( (err = blake2b_init_key(&S, BLAKE2B_OUTBYTES, key, BLAKE2B_KEYBYTES)) < 0 ) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
while (mlen >= step) {
|
||||
if ( (err = blake2b_update(&S, p, step)) < 0 ) {
|
||||
goto fail;
|
||||
}
|
||||
mlen -= step;
|
||||
p += step;
|
||||
}
|
||||
if ( (err = blake2b_update(&S, p, mlen)) < 0) {
|
||||
goto fail;
|
||||
}
|
||||
if ( (err = blake2b_final(&S, hash, BLAKE2B_OUTBYTES)) < 0) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (0 != memcmp(hash, blake2b_keyed_kat[i], BLAKE2B_OUTBYTES)) {
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
puts( "ok" );
|
||||
return 0;
|
||||
fail:
|
||||
puts("error");
|
||||
return -1;
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -1,595 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2018 Andrew Kelley
|
||||
*
|
||||
* This file is part of zig, which is MIT licensed.
|
||||
* See http://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
#include "stage2.h"
|
||||
#include "cache_hash.hpp"
|
||||
#include "all_types.hpp"
|
||||
#include "buffer.hpp"
|
||||
#include "os.hpp"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
void cache_init(CacheHash *ch, Buf *manifest_dir) {
|
||||
int rc = blake2b_init(&ch->blake, 48);
|
||||
assert(rc == 0);
|
||||
ch->files = {};
|
||||
ch->manifest_dir = manifest_dir;
|
||||
ch->manifest_file_path = nullptr;
|
||||
ch->manifest_dirty = false;
|
||||
ch->force_check_manifest = false;
|
||||
ch->b64_digest = BUF_INIT;
|
||||
}
|
||||
|
||||
void cache_mem(CacheHash *ch, const char *ptr, size_t len) {
|
||||
assert(ch->manifest_file_path == nullptr);
|
||||
assert(ptr != nullptr);
|
||||
blake2b_update(&ch->blake, ptr, len);
|
||||
}
|
||||
|
||||
void cache_slice(CacheHash *ch, Slice<const char> slice) {
|
||||
// mix the length into the hash so that two juxtaposed cached slices can't collide
|
||||
cache_usize(ch, slice.len);
|
||||
cache_mem(ch, slice.ptr, slice.len);
|
||||
}
|
||||
|
||||
void cache_str(CacheHash *ch, const char *ptr) {
|
||||
// + 1 to include the null byte
|
||||
cache_mem(ch, ptr, strlen(ptr) + 1);
|
||||
}
|
||||
|
||||
void cache_int(CacheHash *ch, int x) {
|
||||
assert(ch->manifest_file_path == nullptr);
|
||||
// + 1 to include the null byte
|
||||
uint8_t buf[sizeof(int) + 1];
|
||||
memcpy(buf, &x, sizeof(int));
|
||||
buf[sizeof(int)] = 0;
|
||||
blake2b_update(&ch->blake, buf, sizeof(int) + 1);
|
||||
}
|
||||
|
||||
void cache_usize(CacheHash *ch, size_t x) {
|
||||
assert(ch->manifest_file_path == nullptr);
|
||||
// + 1 to include the null byte
|
||||
uint8_t buf[sizeof(size_t) + 1];
|
||||
memcpy(buf, &x, sizeof(size_t));
|
||||
buf[sizeof(size_t)] = 0;
|
||||
blake2b_update(&ch->blake, buf, sizeof(size_t) + 1);
|
||||
}
|
||||
|
||||
void cache_bool(CacheHash *ch, bool x) {
|
||||
assert(ch->manifest_file_path == nullptr);
|
||||
blake2b_update(&ch->blake, &x, 1);
|
||||
}
|
||||
|
||||
void cache_buf(CacheHash *ch, Buf *buf) {
|
||||
assert(ch->manifest_file_path == nullptr);
|
||||
assert(buf != nullptr);
|
||||
// + 1 to include the null byte
|
||||
blake2b_update(&ch->blake, buf_ptr(buf), buf_len(buf) + 1);
|
||||
}
|
||||
|
||||
void cache_buf_opt(CacheHash *ch, Buf *buf) {
|
||||
assert(ch->manifest_file_path == nullptr);
|
||||
if (buf == nullptr) {
|
||||
cache_str(ch, "");
|
||||
cache_str(ch, "");
|
||||
} else {
|
||||
cache_buf(ch, buf);
|
||||
}
|
||||
}
|
||||
|
||||
void cache_list_of_link_lib(CacheHash *ch, LinkLib **ptr, size_t len) {
|
||||
assert(ch->manifest_file_path == nullptr);
|
||||
for (size_t i = 0; i < len; i += 1) {
|
||||
LinkLib *lib = ptr[i];
|
||||
if (lib->provided_explicitly) {
|
||||
cache_buf(ch, lib->name);
|
||||
}
|
||||
}
|
||||
cache_str(ch, "");
|
||||
}
|
||||
|
||||
void cache_list_of_buf(CacheHash *ch, Buf **ptr, size_t len) {
|
||||
assert(ch->manifest_file_path == nullptr);
|
||||
for (size_t i = 0; i < len; i += 1) {
|
||||
Buf *buf = ptr[i];
|
||||
cache_buf(ch, buf);
|
||||
}
|
||||
cache_str(ch, "");
|
||||
}
|
||||
|
||||
void cache_list_of_file(CacheHash *ch, Buf **ptr, size_t len) {
|
||||
assert(ch->manifest_file_path == nullptr);
|
||||
|
||||
for (size_t i = 0; i < len; i += 1) {
|
||||
Buf *buf = ptr[i];
|
||||
cache_file(ch, buf);
|
||||
}
|
||||
cache_str(ch, "");
|
||||
}
|
||||
|
||||
void cache_list_of_str(CacheHash *ch, const char **ptr, size_t len) {
|
||||
assert(ch->manifest_file_path == nullptr);
|
||||
|
||||
for (size_t i = 0; i < len; i += 1) {
|
||||
const char *s = ptr[i];
|
||||
cache_str(ch, s);
|
||||
}
|
||||
cache_str(ch, "");
|
||||
}
|
||||
|
||||
void cache_file(CacheHash *ch, Buf *file_path) {
|
||||
assert(ch->manifest_file_path == nullptr);
|
||||
assert(file_path != nullptr);
|
||||
Buf *resolved_path = buf_alloc();
|
||||
*resolved_path = os_path_resolve(&file_path, 1);
|
||||
CacheHashFile *chf = ch->files.add_one();
|
||||
chf->path = resolved_path;
|
||||
cache_buf(ch, resolved_path);
|
||||
}
|
||||
|
||||
void cache_file_opt(CacheHash *ch, Buf *file_path) {
|
||||
assert(ch->manifest_file_path == nullptr);
|
||||
if (file_path == nullptr) {
|
||||
cache_str(ch, "");
|
||||
cache_str(ch, "");
|
||||
} else {
|
||||
cache_file(ch, file_path);
|
||||
}
|
||||
}
|
||||
|
||||
// Ported from std/base64.zig
|
||||
static uint8_t base64_fs_alphabet[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
|
||||
static void base64_encode(Slice<uint8_t> dest, Slice<uint8_t> source) {
|
||||
size_t dest_len = ((source.len + 2) / 3) * 4;
|
||||
assert(dest.len == dest_len);
|
||||
|
||||
size_t i = 0;
|
||||
size_t out_index = 0;
|
||||
for (; i + 2 < source.len; i += 3) {
|
||||
dest.ptr[out_index] = base64_fs_alphabet[(source.ptr[i] >> 2) & 0x3f];
|
||||
out_index += 1;
|
||||
|
||||
dest.ptr[out_index] = base64_fs_alphabet[((source.ptr[i] & 0x3) << 4) | ((source.ptr[i + 1] & 0xf0) >> 4)];
|
||||
out_index += 1;
|
||||
|
||||
dest.ptr[out_index] = base64_fs_alphabet[((source.ptr[i + 1] & 0xf) << 2) | ((source.ptr[i + 2] & 0xc0) >> 6)];
|
||||
out_index += 1;
|
||||
|
||||
dest.ptr[out_index] = base64_fs_alphabet[source.ptr[i + 2] & 0x3f];
|
||||
out_index += 1;
|
||||
}
|
||||
|
||||
// Assert that we never need pad characters.
|
||||
assert(i == source.len);
|
||||
}
|
||||
|
||||
// Ported from std/base64.zig
|
||||
static Error base64_decode(Slice<uint8_t> dest, Slice<uint8_t> source) {
|
||||
if (source.len % 4 != 0)
|
||||
return ErrorInvalidFormat;
|
||||
if (dest.len != (source.len / 4) * 3)
|
||||
return ErrorInvalidFormat;
|
||||
|
||||
// In Zig this is comptime computed. In C++ it's not worth it to do that.
|
||||
uint8_t char_to_index[256];
|
||||
bool char_in_alphabet[256] = {0};
|
||||
for (size_t i = 0; i < 64; i += 1) {
|
||||
uint8_t c = base64_fs_alphabet[i];
|
||||
assert(!char_in_alphabet[c]);
|
||||
char_in_alphabet[c] = true;
|
||||
char_to_index[c] = i;
|
||||
}
|
||||
|
||||
size_t src_cursor = 0;
|
||||
size_t dest_cursor = 0;
|
||||
|
||||
for (;src_cursor < source.len; src_cursor += 4) {
|
||||
if (!char_in_alphabet[source.ptr[src_cursor + 0]]) return ErrorInvalidFormat;
|
||||
if (!char_in_alphabet[source.ptr[src_cursor + 1]]) return ErrorInvalidFormat;
|
||||
if (!char_in_alphabet[source.ptr[src_cursor + 2]]) return ErrorInvalidFormat;
|
||||
if (!char_in_alphabet[source.ptr[src_cursor + 3]]) return ErrorInvalidFormat;
|
||||
dest.ptr[dest_cursor + 0] = (char_to_index[source.ptr[src_cursor + 0]] << 2) | (char_to_index[source.ptr[src_cursor + 1]] >> 4);
|
||||
dest.ptr[dest_cursor + 1] = (char_to_index[source.ptr[src_cursor + 1]] << 4) | (char_to_index[source.ptr[src_cursor + 2]] >> 2);
|
||||
dest.ptr[dest_cursor + 2] = (char_to_index[source.ptr[src_cursor + 2]] << 6) | (char_to_index[source.ptr[src_cursor + 3]]);
|
||||
dest_cursor += 3;
|
||||
}
|
||||
|
||||
assert(src_cursor == source.len);
|
||||
assert(dest_cursor == dest.len);
|
||||
return ErrorNone;
|
||||
}
|
||||
|
||||
static Error hash_file(uint8_t *digest, OsFile handle, Buf *contents) {
|
||||
Error err;
|
||||
|
||||
if (contents) {
|
||||
buf_resize(contents, 0);
|
||||
}
|
||||
|
||||
blake2b_state blake;
|
||||
int rc = blake2b_init(&blake, 48);
|
||||
assert(rc == 0);
|
||||
|
||||
for (;;) {
|
||||
uint8_t buf[4096];
|
||||
size_t amt = 4096;
|
||||
if ((err = os_file_read(handle, buf, &amt)))
|
||||
return err;
|
||||
if (amt == 0) {
|
||||
rc = blake2b_final(&blake, digest, 48);
|
||||
assert(rc == 0);
|
||||
return ErrorNone;
|
||||
}
|
||||
blake2b_update(&blake, buf, amt);
|
||||
if (contents) {
|
||||
buf_append_mem(contents, (char*)buf, amt);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If the wall clock time, rounded to the same precision as the
|
||||
// mtime, is equal to the mtime, then we cannot rely on this mtime
|
||||
// yet. We will instead save an mtime value that indicates the hash
|
||||
// must be unconditionally computed.
|
||||
static bool is_problematic_timestamp(const OsTimeStamp *fs_clock) {
|
||||
OsTimeStamp wall_clock = os_timestamp_calendar();
|
||||
// First make all the least significant zero bits in the fs_clock, also zero bits in the wall clock.
|
||||
if (fs_clock->nsec == 0) {
|
||||
wall_clock.nsec = 0;
|
||||
if (fs_clock->sec == 0) {
|
||||
wall_clock.sec = 0;
|
||||
} else {
|
||||
wall_clock.sec &= (-1ull) << ctzll(fs_clock->sec);
|
||||
}
|
||||
} else {
|
||||
wall_clock.nsec &= (-1ull) << ctzll(fs_clock->nsec);
|
||||
}
|
||||
return wall_clock.nsec == fs_clock->nsec && wall_clock.sec == fs_clock->sec;
|
||||
}
|
||||
|
||||
static Error populate_file_hash(CacheHash *ch, CacheHashFile *chf, Buf *contents) {
|
||||
Error err;
|
||||
|
||||
assert(chf->path != nullptr);
|
||||
|
||||
OsFile this_file;
|
||||
if ((err = os_file_open_r(chf->path, &this_file, &chf->attr)))
|
||||
return err;
|
||||
|
||||
if (is_problematic_timestamp(&chf->attr.mtime)) {
|
||||
chf->attr.mtime.sec = 0;
|
||||
chf->attr.mtime.nsec = 0;
|
||||
chf->attr.inode = 0;
|
||||
}
|
||||
|
||||
if ((err = hash_file(chf->bin_digest, this_file, contents))) {
|
||||
os_file_close(&this_file);
|
||||
return err;
|
||||
}
|
||||
os_file_close(&this_file);
|
||||
|
||||
blake2b_update(&ch->blake, chf->bin_digest, 48);
|
||||
|
||||
return ErrorNone;
|
||||
}
|
||||
|
||||
Error cache_hit(CacheHash *ch, Buf *out_digest) {
|
||||
Error err;
|
||||
|
||||
uint8_t bin_digest[48];
|
||||
int rc = blake2b_final(&ch->blake, bin_digest, 48);
|
||||
assert(rc == 0);
|
||||
|
||||
buf_resize(&ch->b64_digest, 64);
|
||||
base64_encode(buf_to_slice(&ch->b64_digest), {bin_digest, 48});
|
||||
|
||||
if (ch->files.length == 0 && !ch->force_check_manifest) {
|
||||
buf_resize(out_digest, 64);
|
||||
base64_encode(buf_to_slice(out_digest), {bin_digest, 48});
|
||||
return ErrorNone;
|
||||
}
|
||||
|
||||
rc = blake2b_init(&ch->blake, 48);
|
||||
assert(rc == 0);
|
||||
blake2b_update(&ch->blake, bin_digest, 48);
|
||||
|
||||
ch->manifest_file_path = buf_alloc();
|
||||
os_path_join(ch->manifest_dir, &ch->b64_digest, ch->manifest_file_path);
|
||||
|
||||
buf_append_str(ch->manifest_file_path, ".txt");
|
||||
|
||||
if ((err = os_make_path(ch->manifest_dir)))
|
||||
return err;
|
||||
|
||||
if ((err = os_file_open_lock_rw(ch->manifest_file_path, &ch->manifest_file)))
|
||||
return err;
|
||||
|
||||
Buf line_buf = BUF_INIT;
|
||||
buf_resize(&line_buf, 512);
|
||||
if ((err = os_file_read_all(ch->manifest_file, &line_buf))) {
|
||||
os_file_close(&ch->manifest_file);
|
||||
return err;
|
||||
}
|
||||
|
||||
size_t input_file_count = ch->files.length;
|
||||
bool any_file_changed = false;
|
||||
Error return_code = ErrorNone;
|
||||
size_t file_i = 0;
|
||||
SplitIterator line_it = memSplit(buf_to_slice(&line_buf), str("\n"));
|
||||
for (;; file_i += 1) {
|
||||
Optional<Slice<uint8_t>> opt_line = SplitIterator_next(&line_it);
|
||||
|
||||
CacheHashFile *chf;
|
||||
if (file_i < input_file_count) {
|
||||
chf = &ch->files.at(file_i);
|
||||
} else if (any_file_changed) {
|
||||
// cache miss.
|
||||
// keep the manifest file open with the rw lock
|
||||
// reset the hash
|
||||
rc = blake2b_init(&ch->blake, 48);
|
||||
assert(rc == 0);
|
||||
blake2b_update(&ch->blake, bin_digest, 48);
|
||||
ch->files.resize(input_file_count);
|
||||
// bring the hash up to the input file hashes
|
||||
for (file_i = 0; file_i < input_file_count; file_i += 1) {
|
||||
blake2b_update(&ch->blake, ch->files.at(file_i).bin_digest, 48);
|
||||
}
|
||||
// caller can notice that out_digest is unmodified.
|
||||
return return_code;
|
||||
} else if (!opt_line.is_some) {
|
||||
break;
|
||||
} else {
|
||||
chf = ch->files.add_one();
|
||||
chf->path = nullptr;
|
||||
}
|
||||
|
||||
if (!opt_line.is_some)
|
||||
break;
|
||||
|
||||
SplitIterator it = memSplit(opt_line.value, str(" "));
|
||||
|
||||
Optional<Slice<uint8_t>> opt_inode = SplitIterator_next(&it);
|
||||
if (!opt_inode.is_some) {
|
||||
return_code = ErrorInvalidFormat;
|
||||
break;
|
||||
}
|
||||
chf->attr.inode = strtoull((const char *)opt_inode.value.ptr, nullptr, 10);
|
||||
|
||||
Optional<Slice<uint8_t>> opt_mtime_sec = SplitIterator_next(&it);
|
||||
if (!opt_mtime_sec.is_some) {
|
||||
return_code = ErrorInvalidFormat;
|
||||
break;
|
||||
}
|
||||
chf->attr.mtime.sec = strtoull((const char *)opt_mtime_sec.value.ptr, nullptr, 10);
|
||||
|
||||
Optional<Slice<uint8_t>> opt_mtime_nsec = SplitIterator_next(&it);
|
||||
if (!opt_mtime_nsec.is_some) {
|
||||
return_code = ErrorInvalidFormat;
|
||||
break;
|
||||
}
|
||||
chf->attr.mtime.nsec = strtoull((const char *)opt_mtime_nsec.value.ptr, nullptr, 10);
|
||||
|
||||
Optional<Slice<uint8_t>> opt_digest = SplitIterator_next(&it);
|
||||
if (!opt_digest.is_some) {
|
||||
return_code = ErrorInvalidFormat;
|
||||
break;
|
||||
}
|
||||
if ((err = base64_decode({chf->bin_digest, 48}, opt_digest.value))) {
|
||||
return_code = ErrorInvalidFormat;
|
||||
break;
|
||||
}
|
||||
|
||||
Slice<uint8_t> file_path = SplitIterator_rest(&it);
|
||||
if (file_path.len == 0) {
|
||||
return_code = ErrorInvalidFormat;
|
||||
break;
|
||||
}
|
||||
Buf *this_path = buf_create_from_slice(file_path);
|
||||
if (chf->path != nullptr && !buf_eql_buf(this_path, chf->path)) {
|
||||
return_code = ErrorInvalidFormat;
|
||||
break;
|
||||
}
|
||||
chf->path = this_path;
|
||||
|
||||
// if the mtime matches we can trust the digest
|
||||
OsFile this_file;
|
||||
OsFileAttr actual_attr;
|
||||
if ((err = os_file_open_r(chf->path, &this_file, &actual_attr))) {
|
||||
fprintf(stderr, "Unable to open %s\n: %s", buf_ptr(chf->path), err_str(err));
|
||||
os_file_close(&ch->manifest_file);
|
||||
return ErrorCacheUnavailable;
|
||||
}
|
||||
if (chf->attr.mtime.sec == actual_attr.mtime.sec &&
|
||||
chf->attr.mtime.nsec == actual_attr.mtime.nsec &&
|
||||
chf->attr.inode == actual_attr.inode)
|
||||
{
|
||||
os_file_close(&this_file);
|
||||
} else {
|
||||
// we have to recompute the digest.
|
||||
// later we'll rewrite the manifest with the new mtime/digest values
|
||||
ch->manifest_dirty = true;
|
||||
chf->attr = actual_attr;
|
||||
|
||||
if (is_problematic_timestamp(&actual_attr.mtime)) {
|
||||
chf->attr.mtime.sec = 0;
|
||||
chf->attr.mtime.nsec = 0;
|
||||
chf->attr.inode = 0;
|
||||
}
|
||||
|
||||
uint8_t actual_digest[48];
|
||||
if ((err = hash_file(actual_digest, this_file, nullptr))) {
|
||||
os_file_close(&this_file);
|
||||
os_file_close(&ch->manifest_file);
|
||||
return err;
|
||||
}
|
||||
os_file_close(&this_file);
|
||||
if (memcmp(chf->bin_digest, actual_digest, 48) != 0) {
|
||||
memcpy(chf->bin_digest, actual_digest, 48);
|
||||
// keep going until we have the input file digests
|
||||
any_file_changed = true;
|
||||
}
|
||||
}
|
||||
if (!any_file_changed) {
|
||||
blake2b_update(&ch->blake, chf->bin_digest, 48);
|
||||
}
|
||||
}
|
||||
if (file_i < input_file_count || file_i == 0 || return_code != ErrorNone) {
|
||||
// manifest file is empty or missing entries, so this is a cache miss
|
||||
ch->manifest_dirty = true;
|
||||
for (; file_i < input_file_count; file_i += 1) {
|
||||
CacheHashFile *chf = &ch->files.at(file_i);
|
||||
if ((err = populate_file_hash(ch, chf, nullptr))) {
|
||||
fprintf(stderr, "Unable to hash %s: %s\n", buf_ptr(chf->path), err_str(err));
|
||||
os_file_close(&ch->manifest_file);
|
||||
return ErrorCacheUnavailable;
|
||||
}
|
||||
}
|
||||
if (return_code != ErrorNone && return_code != ErrorInvalidFormat) {
|
||||
os_file_close(&ch->manifest_file);
|
||||
}
|
||||
return return_code;
|
||||
}
|
||||
// Cache Hit
|
||||
return cache_final(ch, out_digest);
|
||||
}
|
||||
|
||||
Error cache_add_file_fetch(CacheHash *ch, Buf *resolved_path, Buf *contents) {
|
||||
Error err;
|
||||
|
||||
assert(ch->manifest_file_path != nullptr);
|
||||
CacheHashFile *chf = ch->files.add_one();
|
||||
chf->path = resolved_path;
|
||||
if ((err = populate_file_hash(ch, chf, contents))) {
|
||||
os_file_close(&ch->manifest_file);
|
||||
return err;
|
||||
}
|
||||
|
||||
return ErrorNone;
|
||||
}
|
||||
|
||||
Error cache_add_file(CacheHash *ch, Buf *path) {
|
||||
Buf *resolved_path = buf_alloc();
|
||||
*resolved_path = os_path_resolve(&path, 1);
|
||||
return cache_add_file_fetch(ch, resolved_path, nullptr);
|
||||
}
|
||||
|
||||
Error cache_add_dep_file(CacheHash *ch, Buf *dep_file_path, bool verbose) {
|
||||
Error err;
|
||||
Buf *contents = buf_alloc();
|
||||
if ((err = os_fetch_file_path(dep_file_path, contents))) {
|
||||
if (err == ErrorFileNotFound)
|
||||
return err;
|
||||
if (verbose) {
|
||||
fprintf(stderr, "%s: unable to read .d file: %s\n", err_str(err), buf_ptr(dep_file_path));
|
||||
}
|
||||
return ErrorReadingDepFile;
|
||||
}
|
||||
auto it = stage2_DepTokenizer_init(buf_ptr(contents), buf_len(contents));
|
||||
// skip first token: target
|
||||
{
|
||||
auto result = stage2_DepTokenizer_next(&it);
|
||||
switch (result.type_id) {
|
||||
case stage2_DepNextResult::error:
|
||||
if (verbose) {
|
||||
fprintf(stderr, "%s: failed processing .d file: %s\n", result.textz, buf_ptr(dep_file_path));
|
||||
}
|
||||
err = ErrorInvalidDepFile;
|
||||
goto finish;
|
||||
case stage2_DepNextResult::null:
|
||||
err = ErrorNone;
|
||||
goto finish;
|
||||
case stage2_DepNextResult::target:
|
||||
case stage2_DepNextResult::prereq:
|
||||
err = ErrorNone;
|
||||
break;
|
||||
}
|
||||
}
|
||||
// Process 0+ preqreqs.
|
||||
// clang is invoked in single-source mode so we never get more targets.
|
||||
for (;;) {
|
||||
auto result = stage2_DepTokenizer_next(&it);
|
||||
switch (result.type_id) {
|
||||
case stage2_DepNextResult::error:
|
||||
if (verbose) {
|
||||
fprintf(stderr, "%s: failed processing .d file: %s\n", result.textz, buf_ptr(dep_file_path));
|
||||
}
|
||||
err = ErrorInvalidDepFile;
|
||||
goto finish;
|
||||
case stage2_DepNextResult::null:
|
||||
case stage2_DepNextResult::target:
|
||||
err = ErrorNone;
|
||||
goto finish;
|
||||
case stage2_DepNextResult::prereq:
|
||||
break;
|
||||
}
|
||||
auto textbuf = buf_alloc();
|
||||
buf_init_from_str(textbuf, result.textz);
|
||||
if ((err = cache_add_file(ch, textbuf))) {
|
||||
if (verbose) {
|
||||
fprintf(stderr, "unable to add %s to cache: %s\n", result.textz, err_str(err));
|
||||
fprintf(stderr, "when processing .d file: %s\n", buf_ptr(dep_file_path));
|
||||
}
|
||||
goto finish;
|
||||
}
|
||||
}
|
||||
|
||||
finish:
|
||||
stage2_DepTokenizer_deinit(&it);
|
||||
return err;
|
||||
}
|
||||
|
||||
static Error write_manifest_file(CacheHash *ch) {
|
||||
Error err;
|
||||
Buf contents = BUF_INIT;
|
||||
buf_resize(&contents, 0);
|
||||
uint8_t encoded_digest[65];
|
||||
encoded_digest[64] = 0;
|
||||
for (size_t i = 0; i < ch->files.length; i += 1) {
|
||||
CacheHashFile *chf = &ch->files.at(i);
|
||||
base64_encode({encoded_digest, 64}, {chf->bin_digest, 48});
|
||||
buf_appendf(&contents, "%" ZIG_PRI_u64 " %" ZIG_PRI_u64 " %" ZIG_PRI_u64 " %s %s\n",
|
||||
chf->attr.inode, chf->attr.mtime.sec, chf->attr.mtime.nsec, encoded_digest, buf_ptr(chf->path));
|
||||
}
|
||||
if ((err = os_file_overwrite(ch->manifest_file, &contents)))
|
||||
return err;
|
||||
|
||||
return ErrorNone;
|
||||
}
|
||||
|
||||
Error cache_final(CacheHash *ch, Buf *out_digest) {
|
||||
assert(ch->manifest_file_path != nullptr);
|
||||
|
||||
// We don't close the manifest file yet, because we want to
|
||||
// keep it locked until the API user is done using it.
|
||||
// We also don't write out the manifest yet, because until
|
||||
// cache_release is called we still might be working on creating
|
||||
// the artifacts to cache.
|
||||
|
||||
uint8_t bin_digest[48];
|
||||
int rc = blake2b_final(&ch->blake, bin_digest, 48);
|
||||
assert(rc == 0);
|
||||
buf_resize(out_digest, 64);
|
||||
base64_encode(buf_to_slice(out_digest), {bin_digest, 48});
|
||||
|
||||
return ErrorNone;
|
||||
}
|
||||
|
||||
void cache_release(CacheHash *ch) {
|
||||
assert(ch->manifest_file_path != nullptr);
|
||||
|
||||
Error err;
|
||||
|
||||
if (ch->manifest_dirty) {
|
||||
if ((err = write_manifest_file(ch))) {
|
||||
fprintf(stderr, "Warning: Unable to write cache file '%s': %s\n",
|
||||
buf_ptr(ch->manifest_file_path), err_str(err));
|
||||
}
|
||||
}
|
||||
|
||||
os_file_close(&ch->manifest_file);
|
||||
}
|
||||
|
||||
@ -1,83 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2018 Andrew Kelley
|
||||
*
|
||||
* This file is part of zig, which is MIT licensed.
|
||||
* See http://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
#ifndef ZIG_CACHE_HASH_HPP
|
||||
#define ZIG_CACHE_HASH_HPP
|
||||
|
||||
#include "blake2.h"
|
||||
#include "os.hpp"
|
||||
|
||||
struct LinkLib;
|
||||
|
||||
struct CacheHashFile {
|
||||
Buf *path;
|
||||
OsFileAttr attr;
|
||||
uint8_t bin_digest[48];
|
||||
Buf *contents;
|
||||
};
|
||||
|
||||
struct CacheHash {
|
||||
blake2b_state blake;
|
||||
ZigList<CacheHashFile> files;
|
||||
Buf *manifest_dir;
|
||||
Buf *manifest_file_path;
|
||||
Buf b64_digest;
|
||||
OsFile manifest_file;
|
||||
bool manifest_dirty;
|
||||
bool force_check_manifest;
|
||||
};
|
||||
|
||||
// Always call this first to set up.
|
||||
void cache_init(CacheHash *ch, Buf *manifest_dir);
|
||||
|
||||
// Next, use the hash population functions to add the initial parameters.
|
||||
void cache_mem(CacheHash *ch, const char *ptr, size_t len);
|
||||
void cache_slice(CacheHash *ch, Slice<const char> slice);
|
||||
void cache_str(CacheHash *ch, const char *ptr);
|
||||
void cache_int(CacheHash *ch, int x);
|
||||
void cache_bool(CacheHash *ch, bool x);
|
||||
void cache_usize(CacheHash *ch, size_t x);
|
||||
void cache_buf(CacheHash *ch, Buf *buf);
|
||||
void cache_buf_opt(CacheHash *ch, Buf *buf);
|
||||
void cache_list_of_link_lib(CacheHash *ch, LinkLib **ptr, size_t len);
|
||||
void cache_list_of_buf(CacheHash *ch, Buf **ptr, size_t len);
|
||||
void cache_list_of_file(CacheHash *ch, Buf **ptr, size_t len);
|
||||
void cache_list_of_str(CacheHash *ch, const char **ptr, size_t len);
|
||||
void cache_file(CacheHash *ch, Buf *path);
|
||||
void cache_file_opt(CacheHash *ch, Buf *path);
|
||||
|
||||
// Then call cache_hit when you're ready to see if you can skip the next step.
|
||||
// out_b64_digest will be left unchanged if it was a cache miss.
|
||||
// If you got a cache hit, the next step is cache_release.
|
||||
// From this point on, there is a lock on the input params. Release
|
||||
// the lock with cache_release.
|
||||
// Set force_check_manifest if you plan to add files later, but have not
|
||||
// added any files before calling cache_hit. CacheHash::b64_digest becomes
|
||||
// available for use after this call, even in the case of a miss, and it
|
||||
// is a hash of the input parameters only.
|
||||
// If this function returns ErrorInvalidFormat, that error may be treated
|
||||
// as a cache miss.
|
||||
Error ATTRIBUTE_MUST_USE cache_hit(CacheHash *ch, Buf *out_b64_digest);
|
||||
|
||||
// If you did not get a cache hit, call this function for every file
|
||||
// that is depended on, and then finish with cache_final.
|
||||
Error ATTRIBUTE_MUST_USE cache_add_file(CacheHash *ch, Buf *path);
|
||||
// This opens a file created by -MD -MF args to Clang
|
||||
Error ATTRIBUTE_MUST_USE cache_add_dep_file(CacheHash *ch, Buf *path, bool verbose);
|
||||
|
||||
// This variant of cache_add_file returns the file contents.
|
||||
// Also the file path argument must be already resolved.
|
||||
Error ATTRIBUTE_MUST_USE cache_add_file_fetch(CacheHash *ch, Buf *resolved_path, Buf *contents);
|
||||
|
||||
// out_b64_digest will be the same thing that cache_hit returns if you got a cache hit
|
||||
Error ATTRIBUTE_MUST_USE cache_final(CacheHash *ch, Buf *out_b64_digest);
|
||||
|
||||
// Until this function is called, no one will be able to get a lock on your input params.
|
||||
void cache_release(CacheHash *ch);
|
||||
|
||||
|
||||
#endif
|
||||
@ -7,9 +7,7 @@ pub const CliArg = struct {
|
||||
name: []const u8,
|
||||
syntax: Syntax,
|
||||
|
||||
/// TODO we're going to want to change this when we start shipping self-hosted because this causes
|
||||
/// all the functions in stage2.zig to get exported.
|
||||
zig_equivalent: @import("stage2.zig").ClangArgIterator.ZigEquivalent,
|
||||
zig_equivalent: @import("main.zig").ClangArgIterator.ZigEquivalent,
|
||||
|
||||
/// Prefixed by "-"
|
||||
pd1: bool = false,
|
||||
@ -7,7 +7,7 @@ flagpd1("CC"),
|
||||
.{
|
||||
.name = "E",
|
||||
.syntax = .flag,
|
||||
.zig_equivalent = .pp_or_asm,
|
||||
.zig_equivalent = .preprocess_only,
|
||||
.pd1 = true,
|
||||
.pd2 = false,
|
||||
.psl = false,
|
||||
@ -95,7 +95,7 @@ flagpd1("Qy"),
|
||||
.{
|
||||
.name = "S",
|
||||
.syntax = .flag,
|
||||
.zig_equivalent = .pp_or_asm,
|
||||
.zig_equivalent = .asm_only,
|
||||
.pd1 = true,
|
||||
.pd2 = false,
|
||||
.psl = false,
|
||||
@ -196,7 +196,7 @@ sepd1("Zlinker-input"),
|
||||
.{
|
||||
.name = "E",
|
||||
.syntax = .flag,
|
||||
.zig_equivalent = .pp_or_asm,
|
||||
.zig_equivalent = .preprocess_only,
|
||||
.pd1 = true,
|
||||
.pd2 = false,
|
||||
.psl = true,
|
||||
@ -1477,7 +1477,7 @@ flagpsl("MT"),
|
||||
.{
|
||||
.name = "assemble",
|
||||
.syntax = .flag,
|
||||
.zig_equivalent = .pp_or_asm,
|
||||
.zig_equivalent = .asm_only,
|
||||
.pd1 = false,
|
||||
.pd2 = true,
|
||||
.psl = false,
|
||||
@ -1805,7 +1805,7 @@ flagpsl("MT"),
|
||||
.{
|
||||
.name = "preprocess",
|
||||
.syntax = .flag,
|
||||
.zig_equivalent = .pp_or_asm,
|
||||
.zig_equivalent = .preprocess_only,
|
||||
.pd1 = false,
|
||||
.pd2 = true,
|
||||
.psl = false,
|
||||
@ -3406,6 +3406,8 @@ flagpd1("mlong-double-128"),
|
||||
flagpd1("mlong-double-64"),
|
||||
flagpd1("mlong-double-80"),
|
||||
flagpd1("mlongcall"),
|
||||
flagpd1("mlvi-cfi"),
|
||||
flagpd1("mlvi-hardening"),
|
||||
flagpd1("mlwp"),
|
||||
flagpd1("mlzcnt"),
|
||||
flagpd1("mmadd4"),
|
||||
@ -3499,6 +3501,8 @@ flagpd1("mno-ldc1-sdc1"),
|
||||
flagpd1("mno-local-sdata"),
|
||||
flagpd1("mno-long-calls"),
|
||||
flagpd1("mno-longcall"),
|
||||
flagpd1("mno-lvi-cfi"),
|
||||
flagpd1("mno-lvi-hardening"),
|
||||
flagpd1("mno-lwp"),
|
||||
flagpd1("mno-lzcnt"),
|
||||
flagpd1("mno-madd4"),
|
||||
@ -1,68 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015 Andrew Kelley
|
||||
*
|
||||
* This file is part of zig, which is MIT licensed.
|
||||
* See http://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
#ifndef ZIG_CODEGEN_HPP
|
||||
#define ZIG_CODEGEN_HPP
|
||||
|
||||
#include "parser.hpp"
|
||||
#include "errmsg.hpp"
|
||||
#include "target.hpp"
|
||||
#include "stage2.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
CodeGen *codegen_create(Buf *main_pkg_path, Buf *root_src_path, const ZigTarget *target,
|
||||
OutType out_type, BuildMode build_mode, Buf *zig_lib_dir,
|
||||
Stage2LibCInstallation *libc, Buf *cache_dir, bool is_test_build, Stage2ProgressNode *progress_node);
|
||||
|
||||
CodeGen *create_child_codegen(CodeGen *parent_gen, Buf *root_src_path, OutType out_type,
|
||||
Stage2LibCInstallation *libc, const char *name, Stage2ProgressNode *progress_node);
|
||||
|
||||
void codegen_set_clang_argv(CodeGen *codegen, const char **args, size_t len);
|
||||
void codegen_set_llvm_argv(CodeGen *codegen, const char **args, size_t len);
|
||||
void codegen_set_each_lib_rpath(CodeGen *codegen, bool each_lib_rpath);
|
||||
|
||||
void codegen_set_strip(CodeGen *codegen, bool strip);
|
||||
void codegen_set_errmsg_color(CodeGen *codegen, ErrColor err_color);
|
||||
void codegen_set_out_name(CodeGen *codegen, Buf *out_name);
|
||||
void codegen_add_lib_dir(CodeGen *codegen, const char *dir);
|
||||
void codegen_add_forbidden_lib(CodeGen *codegen, Buf *lib);
|
||||
LinkLib *codegen_add_link_lib(CodeGen *codegen, Buf *lib);
|
||||
void codegen_add_framework(CodeGen *codegen, const char *name);
|
||||
void codegen_add_rpath(CodeGen *codegen, const char *name);
|
||||
void codegen_set_rdynamic(CodeGen *g, bool rdynamic);
|
||||
void codegen_set_linker_script(CodeGen *g, const char *linker_script);
|
||||
void codegen_set_test_filter(CodeGen *g, Buf *filter);
|
||||
void codegen_set_test_name_prefix(CodeGen *g, Buf *prefix);
|
||||
void codegen_set_lib_version(CodeGen *g, bool is_versioned, size_t major, size_t minor, size_t patch);
|
||||
void codegen_add_time_event(CodeGen *g, const char *name);
|
||||
void codegen_print_timing_report(CodeGen *g, FILE *f);
|
||||
void codegen_link(CodeGen *g);
|
||||
void zig_link_add_compiler_rt(CodeGen *g, Stage2ProgressNode *progress_node);
|
||||
void codegen_build_and_link(CodeGen *g);
|
||||
|
||||
ZigPackage *codegen_create_package(CodeGen *g, const char *root_src_dir, const char *root_src_path,
|
||||
const char *pkg_path);
|
||||
void codegen_add_assembly(CodeGen *g, Buf *path);
|
||||
void codegen_add_object(CodeGen *g, Buf *object_path);
|
||||
|
||||
void codegen_translate_c(CodeGen *g, Buf *full_path);
|
||||
|
||||
Buf *codegen_generate_builtin_source(CodeGen *g);
|
||||
|
||||
TargetSubsystem detect_subsystem(CodeGen *g);
|
||||
|
||||
void codegen_release_caches(CodeGen *codegen);
|
||||
bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type);
|
||||
bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn, bool is_async);
|
||||
|
||||
ATTRIBUTE_NORETURN
|
||||
void codegen_report_errors_and_exit(CodeGen *g);
|
||||
|
||||
void codegen_switch_sub_prog_node(CodeGen *g, Stage2ProgressNode *node);
|
||||
|
||||
#endif
|
||||
@ -8,7 +8,8 @@ const Value = @import("value.zig").Value;
|
||||
const TypedValue = @import("TypedValue.zig");
|
||||
const link = @import("link.zig");
|
||||
const Module = @import("Module.zig");
|
||||
const ErrorMsg = Module.ErrorMsg;
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const ErrorMsg = Compilation.ErrorMsg;
|
||||
const Target = std.Target;
|
||||
const Allocator = mem.Allocator;
|
||||
const trace = @import("tracy.zig").trace;
|
||||
@ -50,7 +51,7 @@ pub const Result = union(enum) {
|
||||
appended: void,
|
||||
/// The value is available externally, `code` is unused.
|
||||
externally_managed: []const u8,
|
||||
fail: *Module.ErrorMsg,
|
||||
fail: *ErrorMsg,
|
||||
};
|
||||
|
||||
pub const GenerateSymbolError = error{
|
||||
125
src/codegen/llvm.zig
Normal file
125
src/codegen/llvm.zig
Normal file
@ -0,0 +1,125 @@
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
pub fn targetTriple(allocator: *Allocator, target: std.Target) ![]u8 {
|
||||
const llvm_arch = switch (target.cpu.arch) {
|
||||
.arm => "arm",
|
||||
.armeb => "armeb",
|
||||
.aarch64 => "aarch64",
|
||||
.aarch64_be => "aarch64_be",
|
||||
.aarch64_32 => "aarch64_32",
|
||||
.arc => "arc",
|
||||
.avr => "avr",
|
||||
.bpfel => "bpfel",
|
||||
.bpfeb => "bpfeb",
|
||||
.hexagon => "hexagon",
|
||||
.mips => "mips",
|
||||
.mipsel => "mipsel",
|
||||
.mips64 => "mips64",
|
||||
.mips64el => "mips64el",
|
||||
.msp430 => "msp430",
|
||||
.powerpc => "powerpc",
|
||||
.powerpc64 => "powerpc64",
|
||||
.powerpc64le => "powerpc64le",
|
||||
.r600 => "r600",
|
||||
.amdgcn => "amdgcn",
|
||||
.riscv32 => "riscv32",
|
||||
.riscv64 => "riscv64",
|
||||
.sparc => "sparc",
|
||||
.sparcv9 => "sparcv9",
|
||||
.sparcel => "sparcel",
|
||||
.s390x => "s390x",
|
||||
.tce => "tce",
|
||||
.tcele => "tcele",
|
||||
.thumb => "thumb",
|
||||
.thumbeb => "thumbeb",
|
||||
.i386 => "i386",
|
||||
.x86_64 => "x86_64",
|
||||
.xcore => "xcore",
|
||||
.nvptx => "nvptx",
|
||||
.nvptx64 => "nvptx64",
|
||||
.le32 => "le32",
|
||||
.le64 => "le64",
|
||||
.amdil => "amdil",
|
||||
.amdil64 => "amdil64",
|
||||
.hsail => "hsail",
|
||||
.hsail64 => "hsail64",
|
||||
.spir => "spir",
|
||||
.spir64 => "spir64",
|
||||
.kalimba => "kalimba",
|
||||
.shave => "shave",
|
||||
.lanai => "lanai",
|
||||
.wasm32 => "wasm32",
|
||||
.wasm64 => "wasm64",
|
||||
.renderscript32 => "renderscript32",
|
||||
.renderscript64 => "renderscript64",
|
||||
.ve => "ve",
|
||||
.spu_2 => return error.LLVMBackendDoesNotSupportSPUMarkII,
|
||||
};
|
||||
// TODO Add a sub-arch for some architectures depending on CPU features.
|
||||
|
||||
const llvm_os = switch (target.os.tag) {
|
||||
.freestanding => "unknown",
|
||||
.ananas => "ananas",
|
||||
.cloudabi => "cloudabi",
|
||||
.dragonfly => "dragonfly",
|
||||
.freebsd => "freebsd",
|
||||
.fuchsia => "fuchsia",
|
||||
.ios => "ios",
|
||||
.kfreebsd => "kfreebsd",
|
||||
.linux => "linux",
|
||||
.lv2 => "lv2",
|
||||
.macosx => "macosx",
|
||||
.netbsd => "netbsd",
|
||||
.openbsd => "openbsd",
|
||||
.solaris => "solaris",
|
||||
.windows => "windows",
|
||||
.haiku => "haiku",
|
||||
.minix => "minix",
|
||||
.rtems => "rtems",
|
||||
.nacl => "nacl",
|
||||
.cnk => "cnk",
|
||||
.aix => "aix",
|
||||
.cuda => "cuda",
|
||||
.nvcl => "nvcl",
|
||||
.amdhsa => "amdhsa",
|
||||
.ps4 => "ps4",
|
||||
.elfiamcu => "elfiamcu",
|
||||
.tvos => "tvos",
|
||||
.watchos => "watchos",
|
||||
.mesa3d => "mesa3d",
|
||||
.contiki => "contiki",
|
||||
.amdpal => "amdpal",
|
||||
.hermit => "hermit",
|
||||
.hurd => "hurd",
|
||||
.wasi => "wasi",
|
||||
.emscripten => "emscripten",
|
||||
.uefi => "windows",
|
||||
.other => "unknown",
|
||||
};
|
||||
|
||||
const llvm_abi = switch (target.abi) {
|
||||
.none => "unknown",
|
||||
.gnu => "gnu",
|
||||
.gnuabin32 => "gnuabin32",
|
||||
.gnuabi64 => "gnuabi64",
|
||||
.gnueabi => "gnueabi",
|
||||
.gnueabihf => "gnueabihf",
|
||||
.gnux32 => "gnux32",
|
||||
.code16 => "code16",
|
||||
.eabi => "eabi",
|
||||
.eabihf => "eabihf",
|
||||
.android => "android",
|
||||
.musl => "musl",
|
||||
.musleabi => "musleabi",
|
||||
.musleabihf => "musleabihf",
|
||||
.msvc => "msvc",
|
||||
.itanium => "itanium",
|
||||
.cygnus => "cygnus",
|
||||
.coreclr => "coreclr",
|
||||
.simulator => "simulator",
|
||||
.macabi => "macabi",
|
||||
};
|
||||
|
||||
return std.fmt.allocPrint(allocator, "{}-unknown-{}-{}", .{ llvm_arch, llvm_os, llvm_abi });
|
||||
}
|
||||
@ -5,7 +5,8 @@ const assert = std.debug.assert;
|
||||
const leb = std.debug.leb;
|
||||
const mem = std.mem;
|
||||
|
||||
const Decl = @import("../Module.zig").Decl;
|
||||
const Module = @import("../Module.zig");
|
||||
const Decl = Module.Decl;
|
||||
const Inst = @import("../ir.zig").Inst;
|
||||
const Type = @import("../type.zig").Type;
|
||||
const Value = @import("../value.zig").Value;
|
||||
196
src/compiler.cpp
196
src/compiler.cpp
@ -1,196 +0,0 @@
|
||||
#include "cache_hash.hpp"
|
||||
#include "os.hpp"
|
||||
#include "compiler.hpp"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
Error get_compiler_id(Buf **result) {
|
||||
static Buf saved_compiler_id = BUF_INIT;
|
||||
|
||||
if (saved_compiler_id.list.length != 0) {
|
||||
*result = &saved_compiler_id;
|
||||
return ErrorNone;
|
||||
}
|
||||
|
||||
Error err;
|
||||
Buf *manifest_dir = buf_alloc();
|
||||
os_path_join(get_global_cache_dir(), buf_create_from_str("exe"), manifest_dir);
|
||||
|
||||
CacheHash cache_hash;
|
||||
CacheHash *ch = &cache_hash;
|
||||
cache_init(ch, manifest_dir);
|
||||
Buf self_exe_path = BUF_INIT;
|
||||
if ((err = os_self_exe_path(&self_exe_path)))
|
||||
return err;
|
||||
|
||||
cache_file(ch, &self_exe_path);
|
||||
|
||||
buf_resize(&saved_compiler_id, 0);
|
||||
if ((err = cache_hit(ch, &saved_compiler_id))) {
|
||||
if (err != ErrorInvalidFormat)
|
||||
return err;
|
||||
}
|
||||
if (buf_len(&saved_compiler_id) != 0) {
|
||||
cache_release(ch);
|
||||
*result = &saved_compiler_id;
|
||||
return ErrorNone;
|
||||
}
|
||||
ZigList<Buf *> lib_paths = {};
|
||||
if ((err = os_self_exe_shared_libs(lib_paths)))
|
||||
return err;
|
||||
#if defined(ZIG_OS_DARWIN)
|
||||
// only add the self exe path on mac os
|
||||
Buf *lib_path = lib_paths.at(0);
|
||||
if ((err = cache_add_file(ch, lib_path)))
|
||||
return err;
|
||||
#else
|
||||
for (size_t i = 0; i < lib_paths.length; i += 1) {
|
||||
Buf *lib_path = lib_paths.at(i);
|
||||
if ((err = cache_add_file(ch, lib_path)))
|
||||
return err;
|
||||
}
|
||||
#endif
|
||||
|
||||
if ((err = cache_final(ch, &saved_compiler_id)))
|
||||
return err;
|
||||
|
||||
cache_release(ch);
|
||||
|
||||
*result = &saved_compiler_id;
|
||||
return ErrorNone;
|
||||
}
|
||||
|
||||
static bool test_zig_install_prefix(Buf *test_path, Buf *out_zig_lib_dir) {
|
||||
{
|
||||
Buf *test_zig_dir = buf_sprintf("%s" OS_SEP "lib" OS_SEP "zig", buf_ptr(test_path));
|
||||
Buf *test_index_file = buf_sprintf("%s" OS_SEP "std" OS_SEP "std.zig", buf_ptr(test_zig_dir));
|
||||
int err;
|
||||
bool exists;
|
||||
if ((err = os_file_exists(test_index_file, &exists))) {
|
||||
exists = false;
|
||||
}
|
||||
if (exists) {
|
||||
buf_init_from_buf(out_zig_lib_dir, test_zig_dir);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// Also try without "zig"
|
||||
{
|
||||
Buf *test_zig_dir = buf_sprintf("%s" OS_SEP "lib", buf_ptr(test_path));
|
||||
Buf *test_index_file = buf_sprintf("%s" OS_SEP "std" OS_SEP "std.zig", buf_ptr(test_zig_dir));
|
||||
int err;
|
||||
bool exists;
|
||||
if ((err = os_file_exists(test_index_file, &exists))) {
|
||||
exists = false;
|
||||
}
|
||||
if (exists) {
|
||||
buf_init_from_buf(out_zig_lib_dir, test_zig_dir);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int find_zig_lib_dir(Buf *out_path) {
|
||||
int err;
|
||||
|
||||
Buf self_exe_path = BUF_INIT;
|
||||
buf_resize(&self_exe_path, 0);
|
||||
if (!(err = os_self_exe_path(&self_exe_path))) {
|
||||
Buf *cur_path = &self_exe_path;
|
||||
|
||||
for (;;) {
|
||||
Buf *test_dir = buf_alloc();
|
||||
os_path_dirname(cur_path, test_dir);
|
||||
|
||||
if (buf_eql_buf(test_dir, cur_path)) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (test_zig_install_prefix(test_dir, out_path)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
cur_path = test_dir;
|
||||
}
|
||||
}
|
||||
|
||||
return ErrorFileNotFound;
|
||||
}
|
||||
|
||||
Buf *get_zig_lib_dir(void) {
|
||||
static Buf saved_lib_dir = BUF_INIT;
|
||||
if (saved_lib_dir.list.length != 0)
|
||||
return &saved_lib_dir;
|
||||
buf_resize(&saved_lib_dir, 0);
|
||||
|
||||
int err;
|
||||
if ((err = find_zig_lib_dir(&saved_lib_dir))) {
|
||||
fprintf(stderr, "Unable to find zig lib directory\n");
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
return &saved_lib_dir;
|
||||
}
|
||||
|
||||
Buf *get_zig_std_dir(Buf *zig_lib_dir) {
|
||||
static Buf saved_std_dir = BUF_INIT;
|
||||
if (saved_std_dir.list.length != 0)
|
||||
return &saved_std_dir;
|
||||
buf_resize(&saved_std_dir, 0);
|
||||
|
||||
os_path_join(zig_lib_dir, buf_create_from_str("std"), &saved_std_dir);
|
||||
|
||||
return &saved_std_dir;
|
||||
}
|
||||
|
||||
Buf *get_zig_special_dir(Buf *zig_lib_dir) {
|
||||
static Buf saved_special_dir = BUF_INIT;
|
||||
if (saved_special_dir.list.length != 0)
|
||||
return &saved_special_dir;
|
||||
buf_resize(&saved_special_dir, 0);
|
||||
|
||||
os_path_join(get_zig_std_dir(zig_lib_dir), buf_sprintf("special"), &saved_special_dir);
|
||||
|
||||
return &saved_special_dir;
|
||||
}
|
||||
|
||||
Buf *get_global_cache_dir(void) {
|
||||
static Buf saved_global_cache_dir = BUF_INIT;
|
||||
if (saved_global_cache_dir.list.length != 0)
|
||||
return &saved_global_cache_dir;
|
||||
buf_resize(&saved_global_cache_dir, 0);
|
||||
|
||||
Buf app_data_dir = BUF_INIT;
|
||||
Error err;
|
||||
if ((err = os_get_app_data_dir(&app_data_dir, "zig"))) {
|
||||
fprintf(stderr, "Unable to get application data dir: %s\n", err_str(err));
|
||||
exit(1);
|
||||
}
|
||||
os_path_join(&app_data_dir, buf_create_from_str("stage1"), &saved_global_cache_dir);
|
||||
buf_deinit(&app_data_dir);
|
||||
return &saved_global_cache_dir;
|
||||
}
|
||||
|
||||
FileExt classify_file_ext(const char *filename_ptr, size_t filename_len) {
|
||||
if (mem_ends_with_str(filename_ptr, filename_len, ".c")) {
|
||||
return FileExtC;
|
||||
} else if (mem_ends_with_str(filename_ptr, filename_len, ".C") ||
|
||||
mem_ends_with_str(filename_ptr, filename_len, ".cc") ||
|
||||
mem_ends_with_str(filename_ptr, filename_len, ".cpp") ||
|
||||
mem_ends_with_str(filename_ptr, filename_len, ".cxx"))
|
||||
{
|
||||
return FileExtCpp;
|
||||
} else if (mem_ends_with_str(filename_ptr, filename_len, ".ll")) {
|
||||
return FileExtLLVMIr;
|
||||
} else if (mem_ends_with_str(filename_ptr, filename_len, ".bc")) {
|
||||
return FileExtLLVMBitCode;
|
||||
} else if (mem_ends_with_str(filename_ptr, filename_len, ".s") ||
|
||||
mem_ends_with_str(filename_ptr, filename_len, ".S"))
|
||||
{
|
||||
return FileExtAsm;
|
||||
}
|
||||
// TODO look for .so, .so.X, .so.X.Y, .so.X.Y.Z
|
||||
return FileExtUnknown;
|
||||
}
|
||||
@ -1,24 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2018 Andrew Kelley
|
||||
*
|
||||
* This file is part of zig, which is MIT licensed.
|
||||
* See http://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
#ifndef ZIG_COMPILER_HPP
|
||||
#define ZIG_COMPILER_HPP
|
||||
|
||||
#include "all_types.hpp"
|
||||
|
||||
Error get_compiler_id(Buf **result);
|
||||
|
||||
Buf *get_zig_lib_dir(void);
|
||||
Buf *get_zig_special_dir(Buf *zig_lib_dir);
|
||||
Buf *get_zig_std_dir(Buf *zig_lib_dir);
|
||||
|
||||
Buf *get_global_cache_dir(void);
|
||||
|
||||
|
||||
FileExt classify_file_ext(const char *filename_ptr, size_t filename_len);
|
||||
|
||||
#endif
|
||||
@ -1,3 +1,6 @@
|
||||
pub const have_llvm = true;
|
||||
pub const version: []const u8 = "@ZIG_VERSION@";
|
||||
pub const log_scopes: []const []const u8 = &[_][]const u8{};
|
||||
pub const zir_dumps: []const []const u8 = &[_][]const u8{};
|
||||
pub const enable_tracy = false;
|
||||
pub const is_stage1 = true;
|
||||
|
||||
392
src/glibc.cpp
392
src/glibc.cpp
@ -1,392 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2019 Andrew Kelley
|
||||
*
|
||||
* This file is part of zig, which is MIT licensed.
|
||||
* See http://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
#include "glibc.hpp"
|
||||
#include "compiler.hpp"
|
||||
#include "cache_hash.hpp"
|
||||
#include "codegen.hpp"
|
||||
|
||||
static const ZigGLibCLib glibc_libs[] = {
|
||||
{"c", 6},
|
||||
{"m", 6},
|
||||
{"pthread", 0},
|
||||
{"dl", 2},
|
||||
{"rt", 1},
|
||||
{"ld", 2},
|
||||
{"util", 1},
|
||||
};
|
||||
|
||||
Error glibc_load_metadata(ZigGLibCAbi **out_result, Buf *zig_lib_dir, bool verbose) {
|
||||
Error err;
|
||||
|
||||
ZigGLibCAbi *glibc_abi = heap::c_allocator.create<ZigGLibCAbi>();
|
||||
glibc_abi->vers_txt_path = buf_sprintf("%s" OS_SEP "libc" OS_SEP "glibc" OS_SEP "vers.txt", buf_ptr(zig_lib_dir));
|
||||
glibc_abi->fns_txt_path = buf_sprintf("%s" OS_SEP "libc" OS_SEP "glibc" OS_SEP "fns.txt", buf_ptr(zig_lib_dir));
|
||||
glibc_abi->abi_txt_path = buf_sprintf("%s" OS_SEP "libc" OS_SEP "glibc" OS_SEP "abi.txt", buf_ptr(zig_lib_dir));
|
||||
glibc_abi->version_table.init(16);
|
||||
|
||||
Buf *vers_txt_contents = buf_alloc();
|
||||
if ((err = os_fetch_file_path(glibc_abi->vers_txt_path, vers_txt_contents))) {
|
||||
if (verbose) {
|
||||
fprintf(stderr, "Unable to read %s: %s\n", buf_ptr(glibc_abi->vers_txt_path), err_str(err));
|
||||
}
|
||||
return err;
|
||||
}
|
||||
Buf *fns_txt_contents = buf_alloc();
|
||||
if ((err = os_fetch_file_path(glibc_abi->fns_txt_path, fns_txt_contents))) {
|
||||
if (verbose) {
|
||||
fprintf(stderr, "Unable to read %s: %s\n", buf_ptr(glibc_abi->fns_txt_path), err_str(err));
|
||||
}
|
||||
return err;
|
||||
}
|
||||
Buf *abi_txt_contents = buf_alloc();
|
||||
if ((err = os_fetch_file_path(glibc_abi->abi_txt_path, abi_txt_contents))) {
|
||||
if (verbose) {
|
||||
fprintf(stderr, "Unable to read %s: %s\n", buf_ptr(glibc_abi->abi_txt_path), err_str(err));
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
{
|
||||
SplitIterator it = memSplit(buf_to_slice(vers_txt_contents), str("\r\n"));
|
||||
for (;;) {
|
||||
Optional<Slice<uint8_t>> opt_component = SplitIterator_next(&it);
|
||||
if (!opt_component.is_some) break;
|
||||
Buf *ver_buf = buf_create_from_slice(opt_component.value);
|
||||
Stage2SemVer *this_ver = glibc_abi->all_versions.add_one();
|
||||
if ((err = target_parse_glibc_version(this_ver, buf_ptr(ver_buf)))) {
|
||||
if (verbose) {
|
||||
fprintf(stderr, "Unable to parse glibc version '%s': %s\n", buf_ptr(ver_buf), err_str(err));
|
||||
}
|
||||
return err;
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
SplitIterator it = memSplit(buf_to_slice(fns_txt_contents), str("\r\n"));
|
||||
for (;;) {
|
||||
Optional<Slice<uint8_t>> opt_component = SplitIterator_next(&it);
|
||||
if (!opt_component.is_some) break;
|
||||
SplitIterator line_it = memSplit(opt_component.value, str(" "));
|
||||
Optional<Slice<uint8_t>> opt_fn_name = SplitIterator_next(&line_it);
|
||||
if (!opt_fn_name.is_some) {
|
||||
if (verbose) {
|
||||
fprintf(stderr, "%s: Expected function name\n", buf_ptr(glibc_abi->fns_txt_path));
|
||||
}
|
||||
return ErrorInvalidFormat;
|
||||
}
|
||||
Optional<Slice<uint8_t>> opt_lib_name = SplitIterator_next(&line_it);
|
||||
if (!opt_lib_name.is_some) {
|
||||
if (verbose) {
|
||||
fprintf(stderr, "%s: Expected lib name\n", buf_ptr(glibc_abi->fns_txt_path));
|
||||
}
|
||||
return ErrorInvalidFormat;
|
||||
}
|
||||
|
||||
Buf *this_fn_name = buf_create_from_slice(opt_fn_name.value);
|
||||
Buf *this_lib_name = buf_create_from_slice(opt_lib_name.value);
|
||||
glibc_abi->all_functions.append({ this_fn_name, glibc_lib_find(buf_ptr(this_lib_name)) });
|
||||
}
|
||||
}
|
||||
{
|
||||
SplitIterator it = memSplit(buf_to_slice(abi_txt_contents), str("\r\n"));
|
||||
ZigGLibCVerList *ver_list_base = nullptr;
|
||||
int line_num = 0;
|
||||
for (;;) {
|
||||
if (ver_list_base == nullptr) {
|
||||
line_num += 1;
|
||||
Optional<Slice<uint8_t>> opt_line = SplitIterator_next_separate(&it);
|
||||
if (!opt_line.is_some) break;
|
||||
|
||||
ver_list_base = heap::c_allocator.allocate<ZigGLibCVerList>(glibc_abi->all_functions.length);
|
||||
SplitIterator line_it = memSplit(opt_line.value, str(" "));
|
||||
for (;;) {
|
||||
ZigTarget *target = heap::c_allocator.create<ZigTarget>();
|
||||
Optional<Slice<uint8_t>> opt_target = SplitIterator_next(&line_it);
|
||||
if (!opt_target.is_some) break;
|
||||
|
||||
SplitIterator component_it = memSplit(opt_target.value, str("-"));
|
||||
Optional<Slice<uint8_t>> opt_arch = SplitIterator_next(&component_it);
|
||||
assert(opt_arch.is_some);
|
||||
Optional<Slice<uint8_t>> opt_os = SplitIterator_next(&component_it);
|
||||
assert(opt_os.is_some); // it's always "linux" so we ignore it
|
||||
Optional<Slice<uint8_t>> opt_abi = SplitIterator_next(&component_it);
|
||||
assert(opt_abi.is_some);
|
||||
|
||||
|
||||
err = target_parse_arch(&target->arch, (char*)opt_arch.value.ptr, opt_arch.value.len);
|
||||
assert(err == ErrorNone);
|
||||
|
||||
target->os = OsLinux;
|
||||
|
||||
err = target_parse_abi(&target->abi, (char*)opt_abi.value.ptr, opt_abi.value.len);
|
||||
if (err != ErrorNone) {
|
||||
fprintf(stderr, "Error parsing %s:%d: %s\n", buf_ptr(glibc_abi->abi_txt_path),
|
||||
line_num, err_str(err));
|
||||
fprintf(stderr, "arch: '%.*s', os: '%.*s', abi: '%.*s'\n",
|
||||
(int)opt_arch.value.len, (const char*)opt_arch.value.ptr,
|
||||
(int)opt_os.value.len, (const char*)opt_os.value.ptr,
|
||||
(int)opt_abi.value.len, (const char*)opt_abi.value.ptr);
|
||||
fprintf(stderr, "parsed from target: '%.*s'\n",
|
||||
(int)opt_target.value.len, (const char*)opt_target.value.ptr);
|
||||
fprintf(stderr, "parsed from line:\n%.*s\n", (int)opt_line.value.len, opt_line.value.ptr);
|
||||
fprintf(stderr, "Zig installation appears to be corrupted.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
glibc_abi->version_table.put(target, ver_list_base);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
for (size_t fn_i = 0; fn_i < glibc_abi->all_functions.length; fn_i += 1) {
|
||||
ZigGLibCVerList *ver_list = &ver_list_base[fn_i];
|
||||
line_num += 1;
|
||||
Optional<Slice<uint8_t>> opt_line = SplitIterator_next_separate(&it);
|
||||
assert(opt_line.is_some);
|
||||
|
||||
SplitIterator line_it = memSplit(opt_line.value, str(" "));
|
||||
for (;;) {
|
||||
Optional<Slice<uint8_t>> opt_ver = SplitIterator_next(&line_it);
|
||||
if (!opt_ver.is_some) break;
|
||||
assert(ver_list->len < 8); // increase the array len in the type
|
||||
|
||||
unsigned long ver_index = strtoul(buf_ptr(buf_create_from_slice(opt_ver.value)), nullptr, 10);
|
||||
assert(ver_index < 255); // use a bigger integer in the type
|
||||
ver_list->versions[ver_list->len] = ver_index;
|
||||
ver_list->len += 1;
|
||||
}
|
||||
}
|
||||
ver_list_base = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
*out_result = glibc_abi;
|
||||
return ErrorNone;
|
||||
}
|
||||
|
||||
Error glibc_build_dummies_and_maps(CodeGen *g, const ZigGLibCAbi *glibc_abi, const ZigTarget *target,
|
||||
Buf **out_dir, bool verbose, Stage2ProgressNode *progress_node)
|
||||
{
|
||||
Error err;
|
||||
|
||||
Buf *cache_dir = get_global_cache_dir();
|
||||
CacheHash *cache_hash = heap::c_allocator.create<CacheHash>();
|
||||
Buf *manifest_dir = buf_sprintf("%s" OS_SEP CACHE_HASH_SUBDIR, buf_ptr(cache_dir));
|
||||
cache_init(cache_hash, manifest_dir);
|
||||
|
||||
Buf *compiler_id;
|
||||
if ((err = get_compiler_id(&compiler_id))) {
|
||||
if (verbose) {
|
||||
fprintf(stderr, "unable to get compiler id: %s\n", err_str(err));
|
||||
}
|
||||
return err;
|
||||
}
|
||||
cache_buf(cache_hash, compiler_id);
|
||||
cache_int(cache_hash, target->arch);
|
||||
cache_int(cache_hash, target->abi);
|
||||
cache_int(cache_hash, target->glibc_or_darwin_version->major);
|
||||
cache_int(cache_hash, target->glibc_or_darwin_version->minor);
|
||||
cache_int(cache_hash, target->glibc_or_darwin_version->patch);
|
||||
|
||||
Buf digest = BUF_INIT;
|
||||
buf_resize(&digest, 0);
|
||||
if ((err = cache_hit(cache_hash, &digest))) {
|
||||
// Treat an invalid format error as a cache miss.
|
||||
if (err != ErrorInvalidFormat)
|
||||
return err;
|
||||
}
|
||||
// We should always get a cache hit because there are no
|
||||
// files in the input hash.
|
||||
assert(buf_len(&digest) != 0);
|
||||
|
||||
Buf *dummy_dir = buf_alloc();
|
||||
os_path_join(manifest_dir, &digest, dummy_dir);
|
||||
|
||||
if ((err = os_make_path(dummy_dir)))
|
||||
return err;
|
||||
|
||||
Buf *test_if_exists_path = buf_alloc();
|
||||
os_path_join(dummy_dir, buf_create_from_str("ok"), test_if_exists_path);
|
||||
|
||||
bool hit;
|
||||
if ((err = os_file_exists(test_if_exists_path, &hit)))
|
||||
return err;
|
||||
|
||||
if (hit) {
|
||||
*out_dir = dummy_dir;
|
||||
return ErrorNone;
|
||||
}
|
||||
|
||||
|
||||
ZigGLibCVerList *ver_list_base = glibc_abi->version_table.get(target);
|
||||
|
||||
uint8_t target_ver_index = 0;
|
||||
for (;target_ver_index < glibc_abi->all_versions.length; target_ver_index += 1) {
|
||||
const Stage2SemVer *this_ver = &glibc_abi->all_versions.at(target_ver_index);
|
||||
if (this_ver->major == target->glibc_or_darwin_version->major &&
|
||||
this_ver->minor == target->glibc_or_darwin_version->minor &&
|
||||
this_ver->patch == target->glibc_or_darwin_version->patch)
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (target_ver_index == glibc_abi->all_versions.length) {
|
||||
if (verbose) {
|
||||
fprintf(stderr, "Unrecognized glibc version: %d.%d.%d\n",
|
||||
target->glibc_or_darwin_version->major,
|
||||
target->glibc_or_darwin_version->minor,
|
||||
target->glibc_or_darwin_version->patch);
|
||||
}
|
||||
return ErrorUnknownABI;
|
||||
}
|
||||
|
||||
Buf *map_file_path = buf_sprintf("%s" OS_SEP "all.map", buf_ptr(dummy_dir));
|
||||
Buf *map_contents = buf_alloc();
|
||||
|
||||
for (uint8_t ver_i = 0; ver_i < glibc_abi->all_versions.length; ver_i += 1) {
|
||||
const Stage2SemVer *ver = &glibc_abi->all_versions.at(ver_i);
|
||||
if (ver->patch == 0) {
|
||||
buf_appendf(map_contents, "GLIBC_%d.%d { };\n", ver->major, ver->minor);
|
||||
} else {
|
||||
buf_appendf(map_contents, "GLIBC_%d.%d.%d { };\n", ver->major, ver->minor, ver->patch);
|
||||
}
|
||||
}
|
||||
|
||||
if ((err = os_write_file(map_file_path, map_contents))) {
|
||||
if (verbose) {
|
||||
fprintf(stderr, "unable to write %s: %s", buf_ptr(map_file_path), err_str(err));
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
for (size_t lib_i = 0; lib_i < array_length(glibc_libs); lib_i += 1) {
|
||||
const ZigGLibCLib *lib = &glibc_libs[lib_i];
|
||||
Buf *zig_file_path = buf_sprintf("%s" OS_SEP "%s.zig", buf_ptr(dummy_dir), lib->name);
|
||||
Buf *zig_body = buf_alloc();
|
||||
Buf *zig_footer = buf_alloc();
|
||||
|
||||
buf_appendf(zig_body, "comptime {\n");
|
||||
buf_appendf(zig_body, " asm (\n");
|
||||
|
||||
for (size_t fn_i = 0; fn_i < glibc_abi->all_functions.length; fn_i += 1) {
|
||||
const ZigGLibCFn *libc_fn = &glibc_abi->all_functions.at(fn_i);
|
||||
if (libc_fn->lib != lib) continue;
|
||||
ZigGLibCVerList *ver_list = &ver_list_base[fn_i];
|
||||
// Pick the default symbol version:
|
||||
// - If there are no versions, don't emit it
|
||||
// - Take the greatest one <= than the target one
|
||||
// - If none of them is <= than the
|
||||
// specified one don't pick any default version
|
||||
if (ver_list->len == 0) continue;
|
||||
uint8_t chosen_def_ver_index = 255;
|
||||
for (uint8_t ver_i = 0; ver_i < ver_list->len; ver_i += 1) {
|
||||
uint8_t ver_index = ver_list->versions[ver_i];
|
||||
if ((chosen_def_ver_index == 255 || ver_index > chosen_def_ver_index) &&
|
||||
target_ver_index >= ver_index)
|
||||
{
|
||||
chosen_def_ver_index = ver_index;
|
||||
}
|
||||
}
|
||||
for (uint8_t ver_i = 0; ver_i < ver_list->len; ver_i += 1) {
|
||||
uint8_t ver_index = ver_list->versions[ver_i];
|
||||
|
||||
Buf *stub_name;
|
||||
const Stage2SemVer *ver = &glibc_abi->all_versions.at(ver_index);
|
||||
const char *sym_name = buf_ptr(libc_fn->name);
|
||||
if (ver->patch == 0) {
|
||||
stub_name = buf_sprintf("%s_%d_%d", sym_name, ver->major, ver->minor);
|
||||
} else {
|
||||
stub_name = buf_sprintf("%s_%d_%d_%d", sym_name, ver->major, ver->minor, ver->patch);
|
||||
}
|
||||
|
||||
buf_appendf(zig_footer, "export fn %s() void {}\n", buf_ptr(stub_name));
|
||||
|
||||
// Default symbol version definition vs normal symbol version definition
|
||||
const char *at_sign_str = (chosen_def_ver_index != 255 &&
|
||||
ver_index == chosen_def_ver_index) ? "@@" : "@";
|
||||
if (ver->patch == 0) {
|
||||
buf_appendf(zig_body, " \\\\ .symver %s, %s%sGLIBC_%d.%d\n",
|
||||
buf_ptr(stub_name), sym_name, at_sign_str, ver->major, ver->minor);
|
||||
} else {
|
||||
buf_appendf(zig_body, " \\\\ .symver %s, %s%sGLIBC_%d.%d.%d\n",
|
||||
buf_ptr(stub_name), sym_name, at_sign_str, ver->major, ver->minor, ver->patch);
|
||||
}
|
||||
// Hide the stub to keep the symbol table clean
|
||||
buf_appendf(zig_body, " \\\\ .hidden %s\n", buf_ptr(stub_name));
|
||||
}
|
||||
}
|
||||
|
||||
buf_appendf(zig_body, " );\n");
|
||||
buf_appendf(zig_body, "}\n");
|
||||
buf_append_buf(zig_body, zig_footer);
|
||||
|
||||
if ((err = os_write_file(zig_file_path, zig_body))) {
|
||||
if (verbose) {
|
||||
fprintf(stderr, "unable to write %s: %s", buf_ptr(zig_file_path), err_str(err));
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
bool is_ld = (strcmp(lib->name, "ld") == 0);
|
||||
|
||||
CodeGen *child_gen = create_child_codegen(g, zig_file_path, OutTypeLib, nullptr, lib->name, progress_node);
|
||||
codegen_set_lib_version(child_gen, true, lib->sover, 0, 0);
|
||||
child_gen->is_dynamic = true;
|
||||
child_gen->is_dummy_so = true;
|
||||
child_gen->version_script_path = map_file_path;
|
||||
child_gen->enable_cache = false;
|
||||
child_gen->output_dir = dummy_dir;
|
||||
if (is_ld) {
|
||||
assert(g->zig_target->standard_dynamic_linker_path != nullptr);
|
||||
Buf *ld_basename = buf_alloc();
|
||||
os_path_split(buf_create_from_str(g->zig_target->standard_dynamic_linker_path),
|
||||
nullptr, ld_basename);
|
||||
child_gen->override_soname = ld_basename;
|
||||
}
|
||||
codegen_build_and_link(child_gen);
|
||||
}
|
||||
|
||||
if ((err = os_write_file(test_if_exists_path, buf_alloc()))) {
|
||||
if (verbose) {
|
||||
fprintf(stderr, "unable to write %s: %s", buf_ptr(test_if_exists_path), err_str(err));
|
||||
}
|
||||
return err;
|
||||
}
|
||||
*out_dir = dummy_dir;
|
||||
return ErrorNone;
|
||||
}
|
||||
|
||||
uint32_t hash_glibc_target(const ZigTarget *x) {
|
||||
return x->arch * (uint32_t)3250106448 +
|
||||
x->os * (uint32_t)542534372 +
|
||||
x->abi * (uint32_t)59162639;
|
||||
}
|
||||
|
||||
bool eql_glibc_target(const ZigTarget *a, const ZigTarget *b) {
|
||||
return a->arch == b->arch &&
|
||||
a->os == b->os &&
|
||||
a->abi == b->abi;
|
||||
}
|
||||
|
||||
size_t glibc_lib_count(void) {
|
||||
return array_length(glibc_libs);
|
||||
}
|
||||
|
||||
const ZigGLibCLib *glibc_lib_enum(size_t index) {
|
||||
assert(index < array_length(glibc_libs));
|
||||
return &glibc_libs[index];
|
||||
}
|
||||
|
||||
const ZigGLibCLib *glibc_lib_find(const char *name) {
|
||||
for (size_t i = 0; i < array_length(glibc_libs); i += 1) {
|
||||
if (strcmp(glibc_libs[i].name, name) == 0) {
|
||||
return &glibc_libs[i];
|
||||
}
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
@ -1,50 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2019 Andrew Kelley
|
||||
*
|
||||
* This file is part of zig, which is MIT licensed.
|
||||
* See http://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
#ifndef ZIG_GLIBC_HPP
|
||||
#define ZIG_GLIBC_HPP
|
||||
|
||||
#include "all_types.hpp"
|
||||
|
||||
struct ZigGLibCLib {
|
||||
const char *name;
|
||||
uint8_t sover;
|
||||
};
|
||||
|
||||
struct ZigGLibCFn {
|
||||
Buf *name;
|
||||
const ZigGLibCLib *lib;
|
||||
};
|
||||
|
||||
struct ZigGLibCVerList {
|
||||
uint8_t versions[8]; // 8 is just the max number, we know statically it's big enough
|
||||
uint8_t len;
|
||||
};
|
||||
|
||||
uint32_t hash_glibc_target(const ZigTarget *x);
|
||||
bool eql_glibc_target(const ZigTarget *a, const ZigTarget *b);
|
||||
|
||||
struct ZigGLibCAbi {
|
||||
Buf *abi_txt_path;
|
||||
Buf *vers_txt_path;
|
||||
Buf *fns_txt_path;
|
||||
ZigList<Stage2SemVer> all_versions;
|
||||
ZigList<ZigGLibCFn> all_functions;
|
||||
// The value is a pointer to all_functions.length items and each item is an index
|
||||
// into all_functions.
|
||||
HashMap<const ZigTarget *, ZigGLibCVerList *, hash_glibc_target, eql_glibc_target> version_table;
|
||||
};
|
||||
|
||||
Error glibc_load_metadata(ZigGLibCAbi **out_result, Buf *zig_lib_dir, bool verbose);
|
||||
Error glibc_build_dummies_and_maps(CodeGen *codegen, const ZigGLibCAbi *glibc_abi, const ZigTarget *target,
|
||||
Buf **out_dir, bool verbose, Stage2ProgressNode *progress_node);
|
||||
|
||||
size_t glibc_lib_count(void);
|
||||
const ZigGLibCLib *glibc_lib_enum(size_t index);
|
||||
const ZigGLibCLib *glibc_lib_find(const char *name);
|
||||
|
||||
#endif
|
||||
956
src/glibc.zig
Normal file
956
src/glibc.zig
Normal file
@ -0,0 +1,956 @@
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const mem = std.mem;
|
||||
const path = std.fs.path;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const target_util = @import("target.zig");
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const build_options = @import("build_options");
|
||||
const trace = @import("tracy.zig").trace;
|
||||
const Cache = @import("Cache.zig");
|
||||
const Package = @import("Package.zig");
|
||||
|
||||
pub const Lib = struct {
|
||||
name: []const u8,
|
||||
sover: u8,
|
||||
};
|
||||
|
||||
pub const Fn = struct {
|
||||
name: []const u8,
|
||||
lib: *const Lib,
|
||||
};
|
||||
|
||||
pub const VerList = struct {
|
||||
/// 7 is just the max number, we know statically it's big enough.
|
||||
versions: [7]u8,
|
||||
len: u8,
|
||||
};
|
||||
|
||||
pub const ABI = struct {
|
||||
all_versions: []const std.builtin.Version,
|
||||
all_functions: []const Fn,
|
||||
/// The value is a pointer to all_functions.len items and each item is an index into all_functions.
|
||||
version_table: std.AutoHashMapUnmanaged(target_util.ArchOsAbi, [*]VerList),
|
||||
arena_state: std.heap.ArenaAllocator.State,
|
||||
|
||||
pub fn destroy(abi: *ABI, gpa: *Allocator) void {
|
||||
abi.version_table.deinit(gpa);
|
||||
abi.arena_state.promote(gpa).deinit(); // Frees the ABI memory too.
|
||||
}
|
||||
};
|
||||
|
||||
pub const libs = [_]Lib{
|
||||
.{ .name = "c", .sover = 6 },
|
||||
.{ .name = "m", .sover = 6 },
|
||||
.{ .name = "pthread", .sover = 0 },
|
||||
.{ .name = "dl", .sover = 2 },
|
||||
.{ .name = "rt", .sover = 1 },
|
||||
.{ .name = "ld", .sover = 2 },
|
||||
.{ .name = "util", .sover = 1 },
|
||||
};
|
||||
|
||||
pub const LoadMetaDataError = error{
|
||||
/// The files that ship with the Zig compiler were unable to be read, or otherwise had malformed data.
|
||||
ZigInstallationCorrupt,
|
||||
OutOfMemory,
|
||||
};
|
||||
|
||||
/// This function will emit a log error when there is a problem with the zig installation and then return
|
||||
/// `error.ZigInstallationCorrupt`.
|
||||
pub fn loadMetaData(gpa: *Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError!*ABI {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
|
||||
errdefer arena_allocator.deinit();
|
||||
const arena = &arena_allocator.allocator;
|
||||
|
||||
var all_versions = std.ArrayListUnmanaged(std.builtin.Version){};
|
||||
var all_functions = std.ArrayListUnmanaged(Fn){};
|
||||
var version_table = std.AutoHashMapUnmanaged(target_util.ArchOsAbi, [*]VerList){};
|
||||
errdefer version_table.deinit(gpa);
|
||||
|
||||
var glibc_dir = zig_lib_dir.openDir("libc" ++ path.sep_str ++ "glibc", .{}) catch |err| {
|
||||
std.log.err("unable to open glibc dir: {}", .{@errorName(err)});
|
||||
return error.ZigInstallationCorrupt;
|
||||
};
|
||||
defer glibc_dir.close();
|
||||
|
||||
const max_txt_size = 500 * 1024; // Bigger than this and something is definitely borked.
|
||||
const vers_txt_contents = glibc_dir.readFileAlloc(gpa, "vers.txt", max_txt_size) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
else => {
|
||||
std.log.err("unable to read vers.txt: {}", .{@errorName(err)});
|
||||
return error.ZigInstallationCorrupt;
|
||||
},
|
||||
};
|
||||
defer gpa.free(vers_txt_contents);
|
||||
|
||||
// Arena allocated because the result contains references to function names.
|
||||
const fns_txt_contents = glibc_dir.readFileAlloc(arena, "fns.txt", max_txt_size) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
else => {
|
||||
std.log.err("unable to read fns.txt: {}", .{@errorName(err)});
|
||||
return error.ZigInstallationCorrupt;
|
||||
},
|
||||
};
|
||||
|
||||
const abi_txt_contents = glibc_dir.readFileAlloc(gpa, "abi.txt", max_txt_size) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
else => {
|
||||
std.log.err("unable to read abi.txt: {}", .{@errorName(err)});
|
||||
return error.ZigInstallationCorrupt;
|
||||
},
|
||||
};
|
||||
defer gpa.free(abi_txt_contents);
|
||||
|
||||
{
|
||||
var it = mem.tokenize(vers_txt_contents, "\r\n");
|
||||
var line_i: usize = 1;
|
||||
while (it.next()) |line| : (line_i += 1) {
|
||||
const prefix = "GLIBC_";
|
||||
if (!mem.startsWith(u8, line, prefix)) {
|
||||
std.log.err("vers.txt:{}: expected 'GLIBC_' prefix", .{line_i});
|
||||
return error.ZigInstallationCorrupt;
|
||||
}
|
||||
const adjusted_line = line[prefix.len..];
|
||||
const ver = std.builtin.Version.parse(adjusted_line) catch |err| {
|
||||
std.log.err("vers.txt:{}: unable to parse glibc version '{}': {}", .{ line_i, line, @errorName(err) });
|
||||
return error.ZigInstallationCorrupt;
|
||||
};
|
||||
try all_versions.append(arena, ver);
|
||||
}
|
||||
}
|
||||
{
|
||||
var file_it = mem.tokenize(fns_txt_contents, "\r\n");
|
||||
var line_i: usize = 1;
|
||||
while (file_it.next()) |line| : (line_i += 1) {
|
||||
var line_it = mem.tokenize(line, " ");
|
||||
const fn_name = line_it.next() orelse {
|
||||
std.log.err("fns.txt:{}: expected function name", .{line_i});
|
||||
return error.ZigInstallationCorrupt;
|
||||
};
|
||||
const lib_name = line_it.next() orelse {
|
||||
std.log.err("fns.txt:{}: expected library name", .{line_i});
|
||||
return error.ZigInstallationCorrupt;
|
||||
};
|
||||
const lib = findLib(lib_name) orelse {
|
||||
std.log.err("fns.txt:{}: unknown library name: {}", .{ line_i, lib_name });
|
||||
return error.ZigInstallationCorrupt;
|
||||
};
|
||||
try all_functions.append(arena, .{
|
||||
.name = fn_name,
|
||||
.lib = lib,
|
||||
});
|
||||
}
|
||||
}
|
||||
{
|
||||
var file_it = mem.split(abi_txt_contents, "\n");
|
||||
var line_i: usize = 0;
|
||||
while (true) {
|
||||
const ver_list_base: []VerList = blk: {
|
||||
const line = file_it.next() orelse break;
|
||||
if (line.len == 0) break;
|
||||
line_i += 1;
|
||||
const ver_list_base = try arena.alloc(VerList, all_functions.items.len);
|
||||
var line_it = mem.tokenize(line, " ");
|
||||
while (line_it.next()) |target_string| {
|
||||
var component_it = mem.tokenize(target_string, "-");
|
||||
const arch_name = component_it.next() orelse {
|
||||
std.log.err("abi.txt:{}: expected arch name", .{line_i});
|
||||
return error.ZigInstallationCorrupt;
|
||||
};
|
||||
const os_name = component_it.next() orelse {
|
||||
std.log.err("abi.txt:{}: expected OS name", .{line_i});
|
||||
return error.ZigInstallationCorrupt;
|
||||
};
|
||||
const abi_name = component_it.next() orelse {
|
||||
std.log.err("abi.txt:{}: expected ABI name", .{line_i});
|
||||
return error.ZigInstallationCorrupt;
|
||||
};
|
||||
const arch_tag = std.meta.stringToEnum(std.Target.Cpu.Arch, arch_name) orelse {
|
||||
std.log.err("abi.txt:{}: unrecognized arch: '{}'", .{ line_i, arch_name });
|
||||
return error.ZigInstallationCorrupt;
|
||||
};
|
||||
if (!mem.eql(u8, os_name, "linux")) {
|
||||
std.log.err("abi.txt:{}: expected OS 'linux', found '{}'", .{ line_i, os_name });
|
||||
return error.ZigInstallationCorrupt;
|
||||
}
|
||||
const abi_tag = std.meta.stringToEnum(std.Target.Abi, abi_name) orelse {
|
||||
std.log.err("abi.txt:{}: unrecognized ABI: '{}'", .{ line_i, abi_name });
|
||||
return error.ZigInstallationCorrupt;
|
||||
};
|
||||
|
||||
const triple = target_util.ArchOsAbi{
|
||||
.arch = arch_tag,
|
||||
.os = .linux,
|
||||
.abi = abi_tag,
|
||||
};
|
||||
try version_table.put(gpa, triple, ver_list_base.ptr);
|
||||
}
|
||||
break :blk ver_list_base;
|
||||
};
|
||||
for (ver_list_base) |*ver_list| {
|
||||
const line = file_it.next() orelse {
|
||||
std.log.err("abi.txt:{}: missing version number line", .{line_i});
|
||||
return error.ZigInstallationCorrupt;
|
||||
};
|
||||
line_i += 1;
|
||||
|
||||
ver_list.* = .{
|
||||
.versions = undefined,
|
||||
.len = 0,
|
||||
};
|
||||
var line_it = mem.tokenize(line, " ");
|
||||
while (line_it.next()) |version_index_string| {
|
||||
if (ver_list.len >= ver_list.versions.len) {
|
||||
// If this happens with legit data, increase the array len in the type.
|
||||
std.log.err("abi.txt:{}: too many versions", .{line_i});
|
||||
return error.ZigInstallationCorrupt;
|
||||
}
|
||||
const version_index = std.fmt.parseInt(u8, version_index_string, 10) catch |err| {
|
||||
// If this happens with legit data, increase the size of the integer type in the struct.
|
||||
std.log.err("abi.txt:{}: unable to parse version: {}", .{ line_i, @errorName(err) });
|
||||
return error.ZigInstallationCorrupt;
|
||||
};
|
||||
|
||||
ver_list.versions[ver_list.len] = version_index;
|
||||
ver_list.len += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const abi = try arena.create(ABI);
|
||||
abi.* = .{
|
||||
.all_versions = all_versions.items,
|
||||
.all_functions = all_functions.items,
|
||||
.version_table = version_table,
|
||||
.arena_state = arena_allocator.state,
|
||||
};
|
||||
return abi;
|
||||
}
|
||||
|
||||
fn findLib(name: []const u8) ?*const Lib {
|
||||
for (libs) |*lib| {
|
||||
if (mem.eql(u8, lib.name, name)) {
|
||||
return lib;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
pub const CRTFile = enum {
|
||||
crti_o,
|
||||
crtn_o,
|
||||
scrt1_o,
|
||||
libc_nonshared_a,
|
||||
};
|
||||
|
||||
pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
|
||||
if (!build_options.have_llvm) {
|
||||
return error.ZigCompilerNotBuiltWithLLVMExtensions;
|
||||
}
|
||||
const gpa = comp.gpa;
|
||||
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
|
||||
defer arena_allocator.deinit();
|
||||
const arena = &arena_allocator.allocator;
|
||||
|
||||
switch (crt_file) {
|
||||
.crti_o => {
|
||||
var args = std.ArrayList([]const u8).init(arena);
|
||||
try add_include_dirs(comp, arena, &args);
|
||||
try args.appendSlice(&[_][]const u8{
|
||||
"-D_LIBC_REENTRANT",
|
||||
"-include",
|
||||
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-modules.h"),
|
||||
"-DMODULE_NAME=libc",
|
||||
"-Wno-nonportable-include-path",
|
||||
"-include",
|
||||
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-symbols.h"),
|
||||
"-DTOP_NAMESPACE=glibc",
|
||||
"-DASSEMBLER",
|
||||
"-g",
|
||||
"-Wa,--noexecstack",
|
||||
});
|
||||
return comp.build_crt_file("crti", .Obj, &[1]Compilation.CSourceFile{
|
||||
.{
|
||||
.src_path = try start_asm_path(comp, arena, "crti.S"),
|
||||
.extra_flags = args.items,
|
||||
},
|
||||
});
|
||||
},
|
||||
.crtn_o => {
|
||||
var args = std.ArrayList([]const u8).init(arena);
|
||||
try add_include_dirs(comp, arena, &args);
|
||||
try args.appendSlice(&[_][]const u8{
|
||||
"-D_LIBC_REENTRANT",
|
||||
"-DMODULE_NAME=libc",
|
||||
"-DTOP_NAMESPACE=glibc",
|
||||
"-DASSEMBLER",
|
||||
"-g",
|
||||
"-Wa,--noexecstack",
|
||||
});
|
||||
return comp.build_crt_file("crtn", .Obj, &[1]Compilation.CSourceFile{
|
||||
.{
|
||||
.src_path = try start_asm_path(comp, arena, "crtn.S"),
|
||||
.extra_flags = args.items,
|
||||
},
|
||||
});
|
||||
},
|
||||
.scrt1_o => {
|
||||
const start_os: Compilation.CSourceFile = blk: {
|
||||
var args = std.ArrayList([]const u8).init(arena);
|
||||
try add_include_dirs(comp, arena, &args);
|
||||
try args.appendSlice(&[_][]const u8{
|
||||
"-D_LIBC_REENTRANT",
|
||||
"-include",
|
||||
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-modules.h"),
|
||||
"-DMODULE_NAME=libc",
|
||||
"-Wno-nonportable-include-path",
|
||||
"-include",
|
||||
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-symbols.h"),
|
||||
"-DPIC",
|
||||
"-DSHARED",
|
||||
"-DTOP_NAMESPACE=glibc",
|
||||
"-DASSEMBLER",
|
||||
"-g",
|
||||
"-Wa,--noexecstack",
|
||||
});
|
||||
break :blk .{
|
||||
.src_path = try start_asm_path(comp, arena, "start.S"),
|
||||
.extra_flags = args.items,
|
||||
};
|
||||
};
|
||||
const abi_note_o: Compilation.CSourceFile = blk: {
|
||||
var args = std.ArrayList([]const u8).init(arena);
|
||||
try args.appendSlice(&[_][]const u8{
|
||||
"-I",
|
||||
try lib_path(comp, arena, lib_libc_glibc ++ "csu"),
|
||||
});
|
||||
try add_include_dirs(comp, arena, &args);
|
||||
try args.appendSlice(&[_][]const u8{
|
||||
"-D_LIBC_REENTRANT",
|
||||
"-DMODULE_NAME=libc",
|
||||
"-DTOP_NAMESPACE=glibc",
|
||||
"-DASSEMBLER",
|
||||
"-g",
|
||||
"-Wa,--noexecstack",
|
||||
});
|
||||
break :blk .{
|
||||
.src_path = try lib_path(comp, arena, lib_libc_glibc ++ "csu" ++ path.sep_str ++ "abi-note.S"),
|
||||
.extra_flags = args.items,
|
||||
};
|
||||
};
|
||||
return comp.build_crt_file("Scrt1", .Obj, &[_]Compilation.CSourceFile{ start_os, abi_note_o });
|
||||
},
|
||||
.libc_nonshared_a => {
|
||||
const deps = [_][]const u8{
|
||||
lib_libc_glibc ++ "stdlib" ++ path.sep_str ++ "atexit.c",
|
||||
lib_libc_glibc ++ "stdlib" ++ path.sep_str ++ "at_quick_exit.c",
|
||||
lib_libc_glibc ++ "io" ++ path.sep_str ++ "stat.c",
|
||||
lib_libc_glibc ++ "io" ++ path.sep_str ++ "fstat.c",
|
||||
lib_libc_glibc ++ "io" ++ path.sep_str ++ "lstat.c",
|
||||
lib_libc_glibc ++ "io" ++ path.sep_str ++ "stat64.c",
|
||||
lib_libc_glibc ++ "io" ++ path.sep_str ++ "fstat64.c",
|
||||
lib_libc_glibc ++ "io" ++ path.sep_str ++ "lstat64.c",
|
||||
lib_libc_glibc ++ "io" ++ path.sep_str ++ "fstatat.c",
|
||||
lib_libc_glibc ++ "io" ++ path.sep_str ++ "fstatat64.c",
|
||||
lib_libc_glibc ++ "io" ++ path.sep_str ++ "mknod.c",
|
||||
lib_libc_glibc ++ "io" ++ path.sep_str ++ "mknodat.c",
|
||||
lib_libc_glibc ++ "nptl" ++ path.sep_str ++ "pthread_atfork.c",
|
||||
lib_libc_glibc ++ "debug" ++ path.sep_str ++ "stack_chk_fail_local.c",
|
||||
};
|
||||
|
||||
var c_source_files: [deps.len + 1]Compilation.CSourceFile = undefined;
|
||||
|
||||
c_source_files[0] = blk: {
|
||||
var args = std.ArrayList([]const u8).init(arena);
|
||||
try args.appendSlice(&[_][]const u8{
|
||||
"-std=gnu11",
|
||||
"-fgnu89-inline",
|
||||
"-g",
|
||||
"-O2",
|
||||
"-fmerge-all-constants",
|
||||
"-fno-stack-protector",
|
||||
"-fmath-errno",
|
||||
"-fno-stack-protector",
|
||||
"-I",
|
||||
try lib_path(comp, arena, lib_libc_glibc ++ "csu"),
|
||||
});
|
||||
try add_include_dirs(comp, arena, &args);
|
||||
try args.appendSlice(&[_][]const u8{
|
||||
"-DSTACK_PROTECTOR_LEVEL=0",
|
||||
"-fPIC",
|
||||
"-fno-stack-protector",
|
||||
"-ftls-model=initial-exec",
|
||||
"-D_LIBC_REENTRANT",
|
||||
"-include",
|
||||
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-modules.h"),
|
||||
"-DMODULE_NAME=libc",
|
||||
"-Wno-nonportable-include-path",
|
||||
"-include",
|
||||
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-symbols.h"),
|
||||
"-DPIC",
|
||||
"-DLIBC_NONSHARED=1",
|
||||
"-DTOP_NAMESPACE=glibc",
|
||||
});
|
||||
break :blk .{
|
||||
.src_path = try lib_path(comp, arena, lib_libc_glibc ++ "csu" ++ path.sep_str ++ "elf-init.c"),
|
||||
.extra_flags = args.items,
|
||||
};
|
||||
};
|
||||
|
||||
for (deps) |dep, i| {
|
||||
var args = std.ArrayList([]const u8).init(arena);
|
||||
try args.appendSlice(&[_][]const u8{
|
||||
"-std=gnu11",
|
||||
"-fgnu89-inline",
|
||||
"-g",
|
||||
"-O2",
|
||||
"-fmerge-all-constants",
|
||||
"-fno-stack-protector",
|
||||
"-fmath-errno",
|
||||
"-ftls-model=initial-exec",
|
||||
"-Wno-ignored-attributes",
|
||||
});
|
||||
try add_include_dirs(comp, arena, &args);
|
||||
try args.appendSlice(&[_][]const u8{
|
||||
"-D_LIBC_REENTRANT",
|
||||
"-include",
|
||||
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-modules.h"),
|
||||
"-DMODULE_NAME=libc",
|
||||
"-Wno-nonportable-include-path",
|
||||
"-include",
|
||||
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-symbols.h"),
|
||||
"-DPIC",
|
||||
"-DLIBC_NONSHARED=1",
|
||||
"-DTOP_NAMESPACE=glibc",
|
||||
});
|
||||
c_source_files[i + 1] = .{
|
||||
.src_path = try lib_path(comp, arena, dep),
|
||||
.extra_flags = args.items,
|
||||
};
|
||||
}
|
||||
return comp.build_crt_file("c_nonshared", .Lib, &c_source_files);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn start_asm_path(comp: *Compilation, arena: *Allocator, basename: []const u8) ![]const u8 {
|
||||
const arch = comp.getTarget().cpu.arch;
|
||||
const is_ppc = arch == .powerpc or arch == .powerpc64 or arch == .powerpc64le;
|
||||
const is_aarch64 = arch == .aarch64 or arch == .aarch64_be;
|
||||
const is_sparc = arch == .sparc or arch == .sparcel or arch == .sparcv9;
|
||||
const is_64 = arch.ptrBitWidth() == 64;
|
||||
|
||||
const s = path.sep_str;
|
||||
|
||||
var result = std.ArrayList(u8).init(arena);
|
||||
try result.appendSlice(comp.zig_lib_directory.path.?);
|
||||
try result.appendSlice(s ++ "libc" ++ s ++ "glibc" ++ s ++ "sysdeps" ++ s);
|
||||
if (is_sparc) {
|
||||
if (is_64) {
|
||||
try result.appendSlice("sparc" ++ s ++ "sparc64");
|
||||
} else {
|
||||
try result.appendSlice("sparc" ++ s ++ "sparc32");
|
||||
}
|
||||
} else if (arch.isARM()) {
|
||||
try result.appendSlice("arm");
|
||||
} else if (arch.isMIPS()) {
|
||||
try result.appendSlice("mips");
|
||||
} else if (arch == .x86_64) {
|
||||
try result.appendSlice("x86_64");
|
||||
} else if (arch == .i386) {
|
||||
try result.appendSlice("i386");
|
||||
} else if (is_aarch64) {
|
||||
try result.appendSlice("aarch64");
|
||||
} else if (arch.isRISCV()) {
|
||||
try result.appendSlice("riscv");
|
||||
} else if (is_ppc) {
|
||||
if (is_64) {
|
||||
try result.appendSlice("powerpc" ++ s ++ "powerpc64");
|
||||
} else {
|
||||
try result.appendSlice("powerpc" ++ s ++ "powerpc32");
|
||||
}
|
||||
}
|
||||
|
||||
try result.appendSlice(s);
|
||||
try result.appendSlice(basename);
|
||||
return result.items;
|
||||
}
|
||||
|
||||
fn add_include_dirs(comp: *Compilation, arena: *Allocator, args: *std.ArrayList([]const u8)) error{OutOfMemory}!void {
|
||||
const target = comp.getTarget();
|
||||
const arch = target.cpu.arch;
|
||||
const opt_nptl: ?[]const u8 = if (target.os.tag == .linux) "nptl" else "htl";
|
||||
const glibc = try lib_path(comp, arena, lib_libc ++ "glibc");
|
||||
|
||||
const s = path.sep_str;
|
||||
|
||||
try args.append("-I");
|
||||
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "include"));
|
||||
|
||||
if (target.os.tag == .linux) {
|
||||
try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix" ++ s ++ "sysv" ++ s ++ "linux"));
|
||||
}
|
||||
|
||||
if (opt_nptl) |nptl| {
|
||||
try add_include_dirs_arch(arena, args, arch, nptl, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps"));
|
||||
}
|
||||
|
||||
if (target.os.tag == .linux) {
|
||||
try args.append("-I");
|
||||
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++
|
||||
"unix" ++ s ++ "sysv" ++ s ++ "linux" ++ s ++ "generic"));
|
||||
|
||||
try args.append("-I");
|
||||
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++
|
||||
"unix" ++ s ++ "sysv" ++ s ++ "linux" ++ s ++ "include"));
|
||||
try args.append("-I");
|
||||
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++
|
||||
"unix" ++ s ++ "sysv" ++ s ++ "linux"));
|
||||
}
|
||||
if (opt_nptl) |nptl| {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, lib_libc_glibc ++ "sysdeps", nptl }));
|
||||
}
|
||||
|
||||
try args.append("-I");
|
||||
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "pthread"));
|
||||
|
||||
try args.append("-I");
|
||||
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix" ++ s ++ "sysv"));
|
||||
|
||||
try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix"));
|
||||
|
||||
try args.append("-I");
|
||||
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix"));
|
||||
|
||||
try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps"));
|
||||
|
||||
try args.append("-I");
|
||||
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "generic"));
|
||||
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, lib_libc ++ "glibc" }));
|
||||
|
||||
try args.append("-I");
|
||||
try args.append(try std.fmt.allocPrint(arena, "{}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{}-{}-{}", .{
|
||||
comp.zig_lib_directory.path.?, @tagName(arch), @tagName(target.os.tag), @tagName(target.abi),
|
||||
}));
|
||||
|
||||
try args.append("-I");
|
||||
try args.append(try lib_path(comp, arena, lib_libc ++ "include" ++ s ++ "generic-glibc"));
|
||||
|
||||
try args.append("-I");
|
||||
try args.append(try std.fmt.allocPrint(arena, "{}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{}-linux-any", .{
|
||||
comp.zig_lib_directory.path.?, @tagName(arch),
|
||||
}));
|
||||
|
||||
try args.append("-I");
|
||||
try args.append(try lib_path(comp, arena, lib_libc ++ "include" ++ s ++ "any-linux-any"));
|
||||
}
|
||||
|
||||
fn add_include_dirs_arch(
|
||||
arena: *Allocator,
|
||||
args: *std.ArrayList([]const u8),
|
||||
arch: std.Target.Cpu.Arch,
|
||||
opt_nptl: ?[]const u8,
|
||||
dir: []const u8,
|
||||
) error{OutOfMemory}!void {
|
||||
const is_x86 = arch == .i386 or arch == .x86_64;
|
||||
const is_aarch64 = arch == .aarch64 or arch == .aarch64_be;
|
||||
const is_ppc = arch == .powerpc or arch == .powerpc64 or arch == .powerpc64le;
|
||||
const is_sparc = arch == .sparc or arch == .sparcel or arch == .sparcv9;
|
||||
const is_64 = arch.ptrBitWidth() == 64;
|
||||
|
||||
const s = path.sep_str;
|
||||
|
||||
if (is_x86) {
|
||||
if (arch == .x86_64) {
|
||||
if (opt_nptl) |nptl| {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "x86_64", nptl }));
|
||||
} else {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "x86_64" }));
|
||||
}
|
||||
} else if (arch == .i386) {
|
||||
if (opt_nptl) |nptl| {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "i386", nptl }));
|
||||
} else {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "i386" }));
|
||||
}
|
||||
}
|
||||
if (opt_nptl) |nptl| {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "x86", nptl }));
|
||||
} else {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "x86" }));
|
||||
}
|
||||
} else if (arch.isARM()) {
|
||||
if (opt_nptl) |nptl| {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "arm", nptl }));
|
||||
} else {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "arm" }));
|
||||
}
|
||||
} else if (arch.isMIPS()) {
|
||||
if (opt_nptl) |nptl| {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "mips", nptl }));
|
||||
} else {
|
||||
if (is_64) {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "mips" ++ s ++ "mips64" }));
|
||||
} else {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "mips" ++ s ++ "mips32" }));
|
||||
}
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "mips" }));
|
||||
}
|
||||
} else if (is_sparc) {
|
||||
if (opt_nptl) |nptl| {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "sparc", nptl }));
|
||||
} else {
|
||||
if (is_64) {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "sparc" ++ s ++ "sparc64" }));
|
||||
} else {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "sparc" ++ s ++ "sparc32" }));
|
||||
}
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "sparc" }));
|
||||
}
|
||||
} else if (is_aarch64) {
|
||||
if (opt_nptl) |nptl| {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "aarch64", nptl }));
|
||||
} else {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "aarch64" }));
|
||||
}
|
||||
} else if (is_ppc) {
|
||||
if (opt_nptl) |nptl| {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "powerpc", nptl }));
|
||||
} else {
|
||||
if (is_64) {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "powerpc" ++ s ++ "powerpc64" }));
|
||||
} else {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "powerpc" ++ s ++ "powerpc32" }));
|
||||
}
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "powerpc" }));
|
||||
}
|
||||
} else if (arch.isRISCV()) {
|
||||
if (opt_nptl) |nptl| {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "riscv", nptl }));
|
||||
} else {
|
||||
try args.append("-I");
|
||||
try args.append(try path.join(arena, &[_][]const u8{ dir, "riscv" }));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn path_from_lib(comp: *Compilation, arena: *Allocator, sub_path: []const u8) ![]const u8 {
|
||||
return path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, sub_path });
|
||||
}
|
||||
|
||||
const lib_libc = "libc" ++ path.sep_str;
|
||||
const lib_libc_glibc = lib_libc ++ "glibc" ++ path.sep_str;
|
||||
|
||||
fn lib_path(comp: *Compilation, arena: *Allocator, sub_path: []const u8) ![]const u8 {
|
||||
return path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, sub_path });
|
||||
}
|
||||
|
||||
pub const BuiltSharedObjects = struct {
|
||||
lock: Cache.Lock,
|
||||
dir_path: []u8,
|
||||
|
||||
pub fn deinit(self: *BuiltSharedObjects, gpa: *Allocator) void {
|
||||
self.lock.release();
|
||||
gpa.free(self.dir_path);
|
||||
self.* = undefined;
|
||||
}
|
||||
};
|
||||
|
||||
const all_map_basename = "all.map";
|
||||
|
||||
// TODO Turn back on zig fmt when https://github.com/ziglang/zig/issues/5948 is implemented.
|
||||
// zig fmt: off
|
||||
|
||||
pub fn buildSharedObjects(comp: *Compilation) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
if (!build_options.have_llvm) {
|
||||
return error.ZigCompilerNotBuiltWithLLVMExtensions;
|
||||
}
|
||||
|
||||
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
|
||||
defer arena_allocator.deinit();
|
||||
const arena = &arena_allocator.allocator;
|
||||
|
||||
const target = comp.getTarget();
|
||||
const target_version = target.os.version_range.linux.glibc;
|
||||
|
||||
// Use the global cache directory.
|
||||
var cache_parent: Cache = .{
|
||||
.gpa = comp.gpa,
|
||||
.manifest_dir = try comp.global_cache_directory.handle.makeOpenPath("h", .{}),
|
||||
};
|
||||
defer cache_parent.manifest_dir.close();
|
||||
|
||||
var cache = cache_parent.obtain();
|
||||
defer cache.deinit();
|
||||
cache.hash.addBytes(build_options.version);
|
||||
cache.hash.addBytes(comp.zig_lib_directory.path orelse ".");
|
||||
cache.hash.add(target.cpu.arch);
|
||||
cache.hash.add(target.abi);
|
||||
cache.hash.add(target_version);
|
||||
|
||||
const hit = try cache.hit();
|
||||
const digest = cache.final();
|
||||
const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest });
|
||||
|
||||
// Even if we get a hit, it doesn't guarantee that we finished the job last time.
|
||||
// We use the presence of an "ok" file to determine if it is a true hit.
|
||||
|
||||
var o_directory: Compilation.Directory = .{
|
||||
.handle = try comp.global_cache_directory.handle.makeOpenPath(o_sub_path, .{}),
|
||||
.path = try path.join(arena, &[_][]const u8{ comp.global_cache_directory.path.?, o_sub_path }),
|
||||
};
|
||||
defer o_directory.handle.close();
|
||||
|
||||
const ok_basename = "ok";
|
||||
const actual_hit = if (hit) blk: {
|
||||
o_directory.handle.access(ok_basename, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => break :blk false,
|
||||
else => |e| return e,
|
||||
};
|
||||
break :blk true;
|
||||
} else false;
|
||||
|
||||
if (!actual_hit) {
|
||||
const metadata = try loadMetaData(comp.gpa, comp.zig_lib_directory.handle);
|
||||
defer metadata.destroy(comp.gpa);
|
||||
|
||||
const ver_list_base = metadata.version_table.get(.{
|
||||
.arch = target.cpu.arch,
|
||||
.os = target.os.tag,
|
||||
.abi = target.abi,
|
||||
}) orelse return error.GLibCUnavailableForThisTarget;
|
||||
const target_ver_index = for (metadata.all_versions) |ver, i| {
|
||||
switch (ver.order(target_version)) {
|
||||
.eq => break i,
|
||||
.lt => continue,
|
||||
.gt => {
|
||||
// TODO Expose via compile error mechanism instead of log.
|
||||
std.log.warn("invalid target glibc version: {}", .{target_version});
|
||||
return error.InvalidTargetGLibCVersion;
|
||||
},
|
||||
}
|
||||
} else blk: {
|
||||
const latest_index = metadata.all_versions.len - 1;
|
||||
std.log.warn("zig cannot build new glibc version {}; providing instead {}", .{
|
||||
target_version, metadata.all_versions[latest_index],
|
||||
});
|
||||
break :blk latest_index;
|
||||
};
|
||||
{
|
||||
var map_contents = std.ArrayList(u8).init(arena);
|
||||
for (metadata.all_versions) |ver| {
|
||||
if (ver.patch == 0) {
|
||||
try map_contents.writer().print("GLIBC_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
|
||||
} else {
|
||||
try map_contents.writer().print("GLIBC_{d}.{d}.{d} {{ }};\n", .{ ver.major, ver.minor, ver.patch });
|
||||
}
|
||||
}
|
||||
try o_directory.handle.writeFile(all_map_basename, map_contents.items);
|
||||
map_contents.deinit(); // The most recent allocation of an arena can be freed :)
|
||||
}
|
||||
var zig_body = std.ArrayList(u8).init(comp.gpa);
|
||||
defer zig_body.deinit();
|
||||
for (libs) |*lib| {
|
||||
zig_body.shrinkRetainingCapacity(0);
|
||||
|
||||
for (metadata.all_functions) |*libc_fn, fn_i| {
|
||||
if (libc_fn.lib != lib) continue;
|
||||
|
||||
const ver_list = ver_list_base[fn_i];
|
||||
// Pick the default symbol version:
|
||||
// - If there are no versions, don't emit it
|
||||
// - Take the greatest one <= than the target one
|
||||
// - If none of them is <= than the
|
||||
// specified one don't pick any default version
|
||||
if (ver_list.len == 0) continue;
|
||||
var chosen_def_ver_index: u8 = 255;
|
||||
{
|
||||
var ver_i: u8 = 0;
|
||||
while (ver_i < ver_list.len) : (ver_i += 1) {
|
||||
const ver_index = ver_list.versions[ver_i];
|
||||
if ((chosen_def_ver_index == 255 or ver_index > chosen_def_ver_index) and
|
||||
target_ver_index >= ver_index)
|
||||
{
|
||||
chosen_def_ver_index = ver_index;
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
var ver_i: u8 = 0;
|
||||
while (ver_i < ver_list.len) : (ver_i += 1) {
|
||||
// Example:
|
||||
// .globl _Exit_2_2_5
|
||||
// .type _Exit_2_2_5, @function;
|
||||
// .symver _Exit_2_2_5, _Exit@@GLIBC_2.2.5
|
||||
// .hidden _Exit_2_2_5
|
||||
// _Exit_2_2_5:
|
||||
const ver_index = ver_list.versions[ver_i];
|
||||
const ver = metadata.all_versions[ver_index];
|
||||
const sym_name = libc_fn.name;
|
||||
// Default symbol version definition vs normal symbol version definition
|
||||
const want_two_ats = chosen_def_ver_index != 255 and ver_index == chosen_def_ver_index;
|
||||
const at_sign_str = "@@"[0 .. @boolToInt(want_two_ats) + @as(usize, 1)];
|
||||
|
||||
if (ver.patch == 0) {
|
||||
const sym_plus_ver = try std.fmt.allocPrint(
|
||||
arena, "{s}_{d}_{d}",
|
||||
.{sym_name, ver.major, ver.minor},
|
||||
);
|
||||
try zig_body.writer().print(
|
||||
\\.globl {s}
|
||||
\\.type {s}, @function;
|
||||
\\.symver {s}, {s}{s}GLIBC_{d}.{d}
|
||||
\\.hidden {s}
|
||||
\\{s}:
|
||||
\\
|
||||
, .{
|
||||
sym_plus_ver,
|
||||
sym_plus_ver,
|
||||
sym_plus_ver, sym_name, at_sign_str, ver.major, ver.minor,
|
||||
sym_plus_ver,
|
||||
sym_plus_ver,
|
||||
});
|
||||
} else {
|
||||
const sym_plus_ver = try std.fmt.allocPrint(arena, "{s}_{d}_{d}_{d}",
|
||||
.{sym_name, ver.major, ver.minor, ver.patch},
|
||||
);
|
||||
try zig_body.writer().print(
|
||||
\\.globl {s}
|
||||
\\.type {s}, @function;
|
||||
\\.symver {s}, {s}{s}GLIBC_{d}.{d}.{d}
|
||||
\\.hidden {s}
|
||||
\\{s}:
|
||||
\\
|
||||
, .{
|
||||
sym_plus_ver,
|
||||
sym_plus_ver,
|
||||
sym_plus_ver, sym_name, at_sign_str, ver.major, ver.minor, ver.patch,
|
||||
sym_plus_ver,
|
||||
sym_plus_ver,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "pthread", etc.
|
||||
const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable;
|
||||
try o_directory.handle.writeFile(asm_file_basename, zig_body.items);
|
||||
|
||||
try buildSharedLib(comp, arena, comp.global_cache_directory, o_directory, asm_file_basename, lib);
|
||||
}
|
||||
// No need to write the manifest because there are no file inputs associated with this cache hash.
|
||||
// However we do need to write the ok file now.
|
||||
if (o_directory.handle.createFile(ok_basename, .{})) |file| {
|
||||
file.close();
|
||||
} else |err| {
|
||||
std.log.warn("glibc shared objects: failed to mark completion: {}", .{@errorName(err)});
|
||||
}
|
||||
}
|
||||
|
||||
assert(comp.glibc_so_files == null);
|
||||
comp.glibc_so_files = BuiltSharedObjects{
|
||||
.lock = cache.toOwnedLock(),
|
||||
.dir_path = try path.join(comp.gpa, &[_][]const u8{ comp.global_cache_directory.path.?, o_sub_path }),
|
||||
};
|
||||
}
|
||||
|
||||
// zig fmt: on
|
||||
|
||||
fn buildSharedLib(
|
||||
comp: *Compilation,
|
||||
arena: *Allocator,
|
||||
zig_cache_directory: Compilation.Directory,
|
||||
bin_directory: Compilation.Directory,
|
||||
asm_file_basename: []const u8,
|
||||
lib: *const Lib,
|
||||
) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
const emit_bin = Compilation.EmitLoc{
|
||||
.directory = bin_directory,
|
||||
.basename = try std.fmt.allocPrint(arena, "lib{s}.so.{d}", .{ lib.name, lib.sover }),
|
||||
};
|
||||
const version: std.builtin.Version = .{ .major = lib.sover, .minor = 0, .patch = 0 };
|
||||
const ld_basename = path.basename(comp.getTarget().standardDynamicLinkerPath().get().?);
|
||||
const override_soname = if (mem.eql(u8, lib.name, "ld")) ld_basename else null;
|
||||
const map_file_path = try path.join(arena, &[_][]const u8{ bin_directory.path.?, all_map_basename });
|
||||
const c_source_files = [1]Compilation.CSourceFile{
|
||||
.{
|
||||
.src_path = try path.join(arena, &[_][]const u8{ bin_directory.path.?, asm_file_basename }),
|
||||
},
|
||||
};
|
||||
const sub_compilation = try Compilation.create(comp.gpa, .{
|
||||
.local_cache_directory = zig_cache_directory,
|
||||
.global_cache_directory = comp.global_cache_directory,
|
||||
.zig_lib_directory = comp.zig_lib_directory,
|
||||
.target = comp.getTarget(),
|
||||
.root_name = lib.name,
|
||||
.root_pkg = null,
|
||||
.output_mode = .Lib,
|
||||
.link_mode = .Dynamic,
|
||||
.rand = comp.rand,
|
||||
.libc_installation = comp.bin_file.options.libc_installation,
|
||||
.emit_bin = emit_bin,
|
||||
.optimize_mode = comp.bin_file.options.optimize_mode,
|
||||
.want_sanitize_c = false,
|
||||
.want_stack_check = false,
|
||||
.want_valgrind = false,
|
||||
.emit_h = null,
|
||||
.strip = comp.bin_file.options.strip,
|
||||
.is_native_os = false,
|
||||
.self_exe_path = comp.self_exe_path,
|
||||
.verbose_cc = comp.verbose_cc,
|
||||
.verbose_link = comp.bin_file.options.verbose_link,
|
||||
.verbose_tokenize = comp.verbose_tokenize,
|
||||
.verbose_ast = comp.verbose_ast,
|
||||
.verbose_ir = comp.verbose_ir,
|
||||
.verbose_llvm_ir = comp.verbose_llvm_ir,
|
||||
.verbose_cimport = comp.verbose_cimport,
|
||||
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
|
||||
.clang_passthrough_mode = comp.clang_passthrough_mode,
|
||||
.version = version,
|
||||
.version_script = map_file_path,
|
||||
.override_soname = override_soname,
|
||||
.c_source_files = &c_source_files,
|
||||
.is_compiler_rt_or_libc = true,
|
||||
});
|
||||
defer sub_compilation.destroy();
|
||||
|
||||
try sub_compilation.updateSubCompilation();
|
||||
}
|
||||
1907
src/install_files.h
1907
src/install_files.h
File diff suppressed because it is too large
Load Diff
75
src/introspect.zig
Normal file
75
src/introspect.zig
Normal file
@ -0,0 +1,75 @@
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
const fs = std.fs;
|
||||
const Compilation = @import("Compilation.zig");
|
||||
|
||||
/// Returns the sub_path that worked, or `null` if none did.
|
||||
/// The path of the returned Directory is relative to `base`.
|
||||
/// The handle of the returned Directory is open.
|
||||
fn testZigInstallPrefix(base_dir: fs.Dir) ?Compilation.Directory {
|
||||
const test_index_file = "std" ++ fs.path.sep_str ++ "std.zig";
|
||||
|
||||
zig_dir: {
|
||||
// Try lib/zig/std/std.zig
|
||||
const lib_zig = "lib" ++ fs.path.sep_str ++ "zig";
|
||||
var test_zig_dir = base_dir.openDir(lib_zig, .{}) catch break :zig_dir;
|
||||
const file = test_zig_dir.openFile(test_index_file, .{}) catch {
|
||||
test_zig_dir.close();
|
||||
break :zig_dir;
|
||||
};
|
||||
file.close();
|
||||
return Compilation.Directory{ .handle = test_zig_dir, .path = lib_zig };
|
||||
}
|
||||
|
||||
// Try lib/std/std.zig
|
||||
var test_zig_dir = base_dir.openDir("lib", .{}) catch return null;
|
||||
const file = test_zig_dir.openFile(test_index_file, .{}) catch {
|
||||
test_zig_dir.close();
|
||||
return null;
|
||||
};
|
||||
file.close();
|
||||
return Compilation.Directory{ .handle = test_zig_dir, .path = "lib" };
|
||||
}
|
||||
|
||||
/// Both the directory handle and the path are newly allocated resources which the caller now owns.
|
||||
pub fn findZigLibDir(gpa: *mem.Allocator) !Compilation.Directory {
|
||||
const self_exe_path = try fs.selfExePathAlloc(gpa);
|
||||
defer gpa.free(self_exe_path);
|
||||
|
||||
return findZigLibDirFromSelfExe(gpa, self_exe_path);
|
||||
}
|
||||
|
||||
/// Both the directory handle and the path are newly allocated resources which the caller now owns.
|
||||
pub fn findZigLibDirFromSelfExe(
|
||||
allocator: *mem.Allocator,
|
||||
self_exe_path: []const u8,
|
||||
) error{ OutOfMemory, FileNotFound }!Compilation.Directory {
|
||||
const cwd = fs.cwd();
|
||||
var cur_path: []const u8 = self_exe_path;
|
||||
while (fs.path.dirname(cur_path)) |dirname| : (cur_path = dirname) {
|
||||
var base_dir = cwd.openDir(dirname, .{}) catch continue;
|
||||
defer base_dir.close();
|
||||
|
||||
const sub_directory = testZigInstallPrefix(base_dir) orelse continue;
|
||||
return Compilation.Directory{
|
||||
.handle = sub_directory.handle,
|
||||
.path = try fs.path.join(allocator, &[_][]const u8{ dirname, sub_directory.path.? }),
|
||||
};
|
||||
}
|
||||
return error.FileNotFound;
|
||||
}
|
||||
|
||||
/// Caller owns returned memory.
|
||||
pub fn resolveGlobalCacheDir(allocator: *mem.Allocator) ![]u8 {
|
||||
const appname = "zig";
|
||||
|
||||
if (std.Target.current.os.tag != .windows) {
|
||||
if (std.os.getenv("XDG_CACHE_HOME")) |cache_root| {
|
||||
return fs.path.join(allocator, &[_][]const u8{ cache_root, appname });
|
||||
} else if (std.os.getenv("HOME")) |home| {
|
||||
return fs.path.join(allocator, &[_][]const u8{ home, ".cache", appname });
|
||||
}
|
||||
}
|
||||
|
||||
return fs.getAppDataDir(allocator, appname);
|
||||
}
|
||||
@ -4,6 +4,7 @@ const Target = std.Target;
|
||||
const fs = std.fs;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const Batch = std.event.Batch;
|
||||
const build_options = @import("build_options");
|
||||
|
||||
const is_darwin = Target.current.isDarwin();
|
||||
const is_windows = Target.current.os.tag == .windows;
|
||||
@ -13,6 +14,8 @@ const log = std.log.scoped(.libc_installation);
|
||||
|
||||
usingnamespace @import("windows_sdk.zig");
|
||||
|
||||
// TODO https://github.com/ziglang/zig/issues/6345
|
||||
|
||||
/// See the render function implementation for documentation of the fields.
|
||||
pub const LibCInstallation = struct {
|
||||
include_dir: ?[]const u8 = null,
|
||||
@ -168,6 +171,8 @@ pub const LibCInstallation = struct {
|
||||
var self: LibCInstallation = .{};
|
||||
|
||||
if (is_windows) {
|
||||
if (!build_options.have_llvm)
|
||||
return error.WindowsSdkNotFound;
|
||||
var sdk: *ZigWindowsSDK = undefined;
|
||||
switch (zig_find_windows_sdk(&sdk)) {
|
||||
.None => {
|
||||
315
src/libcxx.zig
Normal file
315
src/libcxx.zig
Normal file
@ -0,0 +1,315 @@
|
||||
const std = @import("std");
|
||||
const path = std.fs.path;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const target_util = @import("target.zig");
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const build_options = @import("build_options");
|
||||
const trace = @import("tracy.zig").trace;
|
||||
|
||||
const libcxxabi_files = [_][]const u8{
|
||||
"src/abort_message.cpp",
|
||||
"src/cxa_aux_runtime.cpp",
|
||||
"src/cxa_default_handlers.cpp",
|
||||
"src/cxa_demangle.cpp",
|
||||
"src/cxa_exception.cpp",
|
||||
"src/cxa_exception_storage.cpp",
|
||||
"src/cxa_guard.cpp",
|
||||
"src/cxa_handlers.cpp",
|
||||
"src/cxa_noexception.cpp",
|
||||
"src/cxa_personality.cpp",
|
||||
"src/cxa_thread_atexit.cpp",
|
||||
"src/cxa_unexpected.cpp",
|
||||
"src/cxa_vector.cpp",
|
||||
"src/cxa_virtual.cpp",
|
||||
"src/fallback_malloc.cpp",
|
||||
"src/private_typeinfo.cpp",
|
||||
"src/stdlib_exception.cpp",
|
||||
"src/stdlib_stdexcept.cpp",
|
||||
"src/stdlib_typeinfo.cpp",
|
||||
};
|
||||
|
||||
const libcxx_files = [_][]const u8{
|
||||
"src/algorithm.cpp",
|
||||
"src/any.cpp",
|
||||
"src/bind.cpp",
|
||||
"src/charconv.cpp",
|
||||
"src/chrono.cpp",
|
||||
"src/condition_variable.cpp",
|
||||
"src/condition_variable_destructor.cpp",
|
||||
"src/debug.cpp",
|
||||
"src/exception.cpp",
|
||||
"src/experimental/memory_resource.cpp",
|
||||
"src/filesystem/directory_iterator.cpp",
|
||||
"src/filesystem/operations.cpp",
|
||||
"src/functional.cpp",
|
||||
"src/future.cpp",
|
||||
"src/hash.cpp",
|
||||
"src/ios.cpp",
|
||||
"src/iostream.cpp",
|
||||
"src/locale.cpp",
|
||||
"src/memory.cpp",
|
||||
"src/mutex.cpp",
|
||||
"src/mutex_destructor.cpp",
|
||||
"src/new.cpp",
|
||||
"src/optional.cpp",
|
||||
"src/random.cpp",
|
||||
"src/regex.cpp",
|
||||
"src/shared_mutex.cpp",
|
||||
"src/stdexcept.cpp",
|
||||
"src/string.cpp",
|
||||
"src/strstream.cpp",
|
||||
"src/support/solaris/xlocale.cpp",
|
||||
"src/support/win32/locale_win32.cpp",
|
||||
"src/support/win32/support.cpp",
|
||||
"src/support/win32/thread_win32.cpp",
|
||||
"src/system_error.cpp",
|
||||
"src/thread.cpp",
|
||||
"src/typeinfo.cpp",
|
||||
"src/utility.cpp",
|
||||
"src/valarray.cpp",
|
||||
"src/variant.cpp",
|
||||
"src/vector.cpp",
|
||||
};
|
||||
|
||||
pub fn buildLibCXX(comp: *Compilation) !void {
|
||||
if (!build_options.have_llvm) {
|
||||
return error.ZigCompilerNotBuiltWithLLVMExtensions;
|
||||
}
|
||||
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
|
||||
defer arena_allocator.deinit();
|
||||
const arena = &arena_allocator.allocator;
|
||||
|
||||
const root_name = "c++";
|
||||
const output_mode = .Lib;
|
||||
const link_mode = .Static;
|
||||
const target = comp.getTarget();
|
||||
const basename = try std.zig.binNameAlloc(arena, .{
|
||||
.root_name = root_name,
|
||||
.target = target,
|
||||
.output_mode = output_mode,
|
||||
.link_mode = link_mode,
|
||||
});
|
||||
|
||||
const emit_bin = Compilation.EmitLoc{
|
||||
.directory = null, // Put it in the cache directory.
|
||||
.basename = basename,
|
||||
};
|
||||
|
||||
const cxxabi_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", "include" });
|
||||
const cxx_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" });
|
||||
var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
|
||||
try c_source_files.ensureCapacity(libcxx_files.len);
|
||||
|
||||
for (libcxx_files) |cxx_src| {
|
||||
var cflags = std.ArrayList([]const u8).init(arena);
|
||||
|
||||
if (target.os.tag == .windows) {
|
||||
// Filesystem stuff isn't supported on Windows.
|
||||
if (std.mem.startsWith(u8, cxx_src, "src/filesystem/"))
|
||||
continue;
|
||||
} else {
|
||||
if (std.mem.startsWith(u8, cxx_src, "src/support/win32/"))
|
||||
continue;
|
||||
}
|
||||
|
||||
try cflags.append("-DNDEBUG");
|
||||
try cflags.append("-D_LIBCPP_BUILDING_LIBRARY");
|
||||
try cflags.append("-D_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER");
|
||||
try cflags.append("-DLIBCXX_BUILDING_LIBCXXABI");
|
||||
try cflags.append("-D_LIBCXXABI_DISABLE_VISIBILITY_ANNOTATIONS");
|
||||
try cflags.append("-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS");
|
||||
|
||||
if (target.abi.isMusl()) {
|
||||
try cflags.append("-D_LIBCPP_HAS_MUSL_LIBC");
|
||||
}
|
||||
|
||||
try cflags.append("-I");
|
||||
try cflags.append(cxx_include_path);
|
||||
|
||||
try cflags.append("-I");
|
||||
try cflags.append(cxxabi_include_path);
|
||||
|
||||
try cflags.append("-O3");
|
||||
try cflags.append("-DNDEBUG");
|
||||
if (target_util.supports_fpic(target)) {
|
||||
try cflags.append("-fPIC");
|
||||
}
|
||||
try cflags.append("-nostdinc++");
|
||||
try cflags.append("-fvisibility-inlines-hidden");
|
||||
try cflags.append("-std=c++14");
|
||||
try cflags.append("-Wno-user-defined-literals");
|
||||
|
||||
c_source_files.appendAssumeCapacity(.{
|
||||
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", cxx_src }),
|
||||
.extra_flags = cflags.items,
|
||||
});
|
||||
}
|
||||
|
||||
const sub_compilation = try Compilation.create(comp.gpa, .{
|
||||
.local_cache_directory = comp.global_cache_directory,
|
||||
.global_cache_directory = comp.global_cache_directory,
|
||||
.zig_lib_directory = comp.zig_lib_directory,
|
||||
.target = target,
|
||||
.root_name = root_name,
|
||||
.root_pkg = null,
|
||||
.output_mode = output_mode,
|
||||
.rand = comp.rand,
|
||||
.libc_installation = comp.bin_file.options.libc_installation,
|
||||
.emit_bin = emit_bin,
|
||||
.optimize_mode = comp.bin_file.options.optimize_mode,
|
||||
.link_mode = link_mode,
|
||||
.want_sanitize_c = false,
|
||||
.want_stack_check = false,
|
||||
.want_valgrind = false,
|
||||
.want_pic = comp.bin_file.options.pic,
|
||||
.emit_h = null,
|
||||
.strip = comp.bin_file.options.strip,
|
||||
.is_native_os = comp.bin_file.options.is_native_os,
|
||||
.self_exe_path = comp.self_exe_path,
|
||||
.c_source_files = c_source_files.items,
|
||||
.verbose_cc = comp.verbose_cc,
|
||||
.verbose_link = comp.bin_file.options.verbose_link,
|
||||
.verbose_tokenize = comp.verbose_tokenize,
|
||||
.verbose_ast = comp.verbose_ast,
|
||||
.verbose_ir = comp.verbose_ir,
|
||||
.verbose_llvm_ir = comp.verbose_llvm_ir,
|
||||
.verbose_cimport = comp.verbose_cimport,
|
||||
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
|
||||
.clang_passthrough_mode = comp.clang_passthrough_mode,
|
||||
.link_libc = true,
|
||||
});
|
||||
defer sub_compilation.destroy();
|
||||
|
||||
try sub_compilation.updateSubCompilation();
|
||||
|
||||
assert(comp.libcxx_static_lib == null);
|
||||
comp.libcxx_static_lib = Compilation.CRTFile{
|
||||
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(
|
||||
comp.gpa,
|
||||
&[_][]const u8{basename},
|
||||
),
|
||||
.lock = sub_compilation.bin_file.toOwnedLock(),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn buildLibCXXABI(comp: *Compilation) !void {
|
||||
if (!build_options.have_llvm) {
|
||||
return error.ZigCompilerNotBuiltWithLLVMExtensions;
|
||||
}
|
||||
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
|
||||
defer arena_allocator.deinit();
|
||||
const arena = &arena_allocator.allocator;
|
||||
|
||||
const root_name = "c++abi";
|
||||
const output_mode = .Lib;
|
||||
const link_mode = .Static;
|
||||
const target = comp.getTarget();
|
||||
const basename = try std.zig.binNameAlloc(arena, .{
|
||||
.root_name = root_name,
|
||||
.target = target,
|
||||
.output_mode = output_mode,
|
||||
.link_mode = link_mode,
|
||||
});
|
||||
|
||||
const emit_bin = Compilation.EmitLoc{
|
||||
.directory = null, // Put it in the cache directory.
|
||||
.basename = basename,
|
||||
};
|
||||
|
||||
const cxxabi_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", "include" });
|
||||
const cxx_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" });
|
||||
|
||||
var c_source_files: [libcxxabi_files.len]Compilation.CSourceFile = undefined;
|
||||
for (libcxxabi_files) |cxxabi_src, i| {
|
||||
var cflags = std.ArrayList([]const u8).init(arena);
|
||||
|
||||
try cflags.append("-DHAVE___CXA_THREAD_ATEXIT_IMPL");
|
||||
try cflags.append("-D_LIBCPP_DISABLE_EXTERN_TEMPLATE");
|
||||
try cflags.append("-D_LIBCPP_ENABLE_CXX17_REMOVED_UNEXPECTED_FUNCTIONS");
|
||||
try cflags.append("-D_LIBCXXABI_BUILDING_LIBRARY");
|
||||
try cflags.append("-D_LIBCXXABI_DISABLE_VISIBILITY_ANNOTATIONS");
|
||||
try cflags.append("-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS");
|
||||
|
||||
if (target.abi.isMusl()) {
|
||||
try cflags.append("-D_LIBCPP_HAS_MUSL_LIBC");
|
||||
}
|
||||
|
||||
try cflags.append("-I");
|
||||
try cflags.append(cxxabi_include_path);
|
||||
|
||||
try cflags.append("-I");
|
||||
try cflags.append(cxx_include_path);
|
||||
|
||||
try cflags.append("-O3");
|
||||
try cflags.append("-DNDEBUG");
|
||||
if (target_util.supports_fpic(target)) {
|
||||
try cflags.append("-fPIC");
|
||||
}
|
||||
try cflags.append("-nostdinc++");
|
||||
try cflags.append("-fstrict-aliasing");
|
||||
try cflags.append("-funwind-tables");
|
||||
try cflags.append("-D_DEBUG");
|
||||
try cflags.append("-UNDEBUG");
|
||||
try cflags.append("-std=c++11");
|
||||
|
||||
c_source_files[i] = .{
|
||||
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", cxxabi_src }),
|
||||
.extra_flags = cflags.items,
|
||||
};
|
||||
}
|
||||
|
||||
const sub_compilation = try Compilation.create(comp.gpa, .{
|
||||
.local_cache_directory = comp.global_cache_directory,
|
||||
.global_cache_directory = comp.global_cache_directory,
|
||||
.zig_lib_directory = comp.zig_lib_directory,
|
||||
.target = target,
|
||||
.root_name = root_name,
|
||||
.root_pkg = null,
|
||||
.output_mode = output_mode,
|
||||
.rand = comp.rand,
|
||||
.libc_installation = comp.bin_file.options.libc_installation,
|
||||
.emit_bin = emit_bin,
|
||||
.optimize_mode = comp.bin_file.options.optimize_mode,
|
||||
.link_mode = link_mode,
|
||||
.want_sanitize_c = false,
|
||||
.want_stack_check = false,
|
||||
.want_valgrind = false,
|
||||
.want_pic = comp.bin_file.options.pic,
|
||||
.emit_h = null,
|
||||
.strip = comp.bin_file.options.strip,
|
||||
.is_native_os = comp.bin_file.options.is_native_os,
|
||||
.self_exe_path = comp.self_exe_path,
|
||||
.c_source_files = &c_source_files,
|
||||
.verbose_cc = comp.verbose_cc,
|
||||
.verbose_link = comp.bin_file.options.verbose_link,
|
||||
.verbose_tokenize = comp.verbose_tokenize,
|
||||
.verbose_ast = comp.verbose_ast,
|
||||
.verbose_ir = comp.verbose_ir,
|
||||
.verbose_llvm_ir = comp.verbose_llvm_ir,
|
||||
.verbose_cimport = comp.verbose_cimport,
|
||||
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
|
||||
.clang_passthrough_mode = comp.clang_passthrough_mode,
|
||||
.link_libc = true,
|
||||
});
|
||||
defer sub_compilation.destroy();
|
||||
|
||||
try sub_compilation.updateSubCompilation();
|
||||
|
||||
assert(comp.libcxxabi_static_lib == null);
|
||||
comp.libcxxabi_static_lib = Compilation.CRTFile{
|
||||
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(
|
||||
comp.gpa,
|
||||
&[_][]const u8{basename},
|
||||
),
|
||||
.lock = sub_compilation.bin_file.toOwnedLock(),
|
||||
};
|
||||
}
|
||||
135
src/libunwind.zig
Normal file
135
src/libunwind.zig
Normal file
@ -0,0 +1,135 @@
|
||||
const std = @import("std");
|
||||
const path = std.fs.path;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const target_util = @import("target.zig");
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const build_options = @import("build_options");
|
||||
const trace = @import("tracy.zig").trace;
|
||||
|
||||
pub fn buildStaticLib(comp: *Compilation) !void {
|
||||
if (!build_options.have_llvm) {
|
||||
return error.ZigCompilerNotBuiltWithLLVMExtensions;
|
||||
}
|
||||
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
|
||||
defer arena_allocator.deinit();
|
||||
const arena = &arena_allocator.allocator;
|
||||
|
||||
const root_name = "unwind";
|
||||
const output_mode = .Lib;
|
||||
const link_mode = .Static;
|
||||
const target = comp.getTarget();
|
||||
const basename = try std.zig.binNameAlloc(arena, .{
|
||||
.root_name = root_name,
|
||||
.target = target,
|
||||
.output_mode = output_mode,
|
||||
.link_mode = link_mode,
|
||||
});
|
||||
const emit_bin = Compilation.EmitLoc{
|
||||
.directory = null, // Put it in the cache directory.
|
||||
.basename = basename,
|
||||
};
|
||||
const unwind_src_list = [_][]const u8{
|
||||
"libunwind" ++ path.sep_str ++ "src" ++ path.sep_str ++ "libunwind.cpp",
|
||||
"libunwind" ++ path.sep_str ++ "src" ++ path.sep_str ++ "Unwind-EHABI.cpp",
|
||||
"libunwind" ++ path.sep_str ++ "src" ++ path.sep_str ++ "Unwind-seh.cpp",
|
||||
"libunwind" ++ path.sep_str ++ "src" ++ path.sep_str ++ "UnwindLevel1.c",
|
||||
"libunwind" ++ path.sep_str ++ "src" ++ path.sep_str ++ "UnwindLevel1-gcc-ext.c",
|
||||
"libunwind" ++ path.sep_str ++ "src" ++ path.sep_str ++ "Unwind-sjlj.c",
|
||||
"libunwind" ++ path.sep_str ++ "src" ++ path.sep_str ++ "UnwindRegistersRestore.S",
|
||||
"libunwind" ++ path.sep_str ++ "src" ++ path.sep_str ++ "UnwindRegistersSave.S",
|
||||
};
|
||||
var c_source_files: [unwind_src_list.len]Compilation.CSourceFile = undefined;
|
||||
for (unwind_src_list) |unwind_src, i| {
|
||||
var cflags = std.ArrayList([]const u8).init(arena);
|
||||
|
||||
switch (Compilation.classifyFileExt(unwind_src)) {
|
||||
.c => {
|
||||
try cflags.append("-std=c99");
|
||||
},
|
||||
.cpp => {
|
||||
try cflags.appendSlice(&[_][]const u8{
|
||||
"-fno-rtti",
|
||||
"-I",
|
||||
try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" }),
|
||||
});
|
||||
},
|
||||
.assembly => {},
|
||||
else => unreachable, // You can see the entire list of files just above.
|
||||
}
|
||||
try cflags.append("-I");
|
||||
try cflags.append(try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libunwind", "include" }));
|
||||
if (target_util.supports_fpic(target)) {
|
||||
try cflags.append("-fPIC");
|
||||
}
|
||||
try cflags.append("-D_LIBUNWIND_DISABLE_VISIBILITY_ANNOTATIONS");
|
||||
try cflags.append("-Wa,--noexecstack");
|
||||
|
||||
// This is intentionally always defined because the macro definition means, should it only
|
||||
// build for the target specified by compiler defines. Since we pass -target the compiler
|
||||
// defines will be correct.
|
||||
try cflags.append("-D_LIBUNWIND_IS_NATIVE_ONLY");
|
||||
|
||||
if (comp.bin_file.options.optimize_mode == .Debug) {
|
||||
try cflags.append("-D_DEBUG");
|
||||
}
|
||||
if (comp.bin_file.options.single_threaded) {
|
||||
try cflags.append("-D_LIBUNWIND_HAS_NO_THREADS");
|
||||
}
|
||||
try cflags.append("-Wno-bitwise-conditional-parentheses");
|
||||
|
||||
c_source_files[i] = .{
|
||||
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{unwind_src}),
|
||||
.extra_flags = cflags.items,
|
||||
};
|
||||
}
|
||||
const sub_compilation = try Compilation.create(comp.gpa, .{
|
||||
.local_cache_directory = comp.global_cache_directory,
|
||||
.global_cache_directory = comp.global_cache_directory,
|
||||
.zig_lib_directory = comp.zig_lib_directory,
|
||||
.target = target,
|
||||
.root_name = root_name,
|
||||
.root_pkg = null,
|
||||
.output_mode = output_mode,
|
||||
.rand = comp.rand,
|
||||
.libc_installation = comp.bin_file.options.libc_installation,
|
||||
.emit_bin = emit_bin,
|
||||
.optimize_mode = comp.bin_file.options.optimize_mode,
|
||||
.link_mode = link_mode,
|
||||
.want_sanitize_c = false,
|
||||
.want_stack_check = false,
|
||||
.want_valgrind = false,
|
||||
.want_pic = comp.bin_file.options.pic,
|
||||
.emit_h = null,
|
||||
.strip = comp.bin_file.options.strip,
|
||||
.is_native_os = comp.bin_file.options.is_native_os,
|
||||
.self_exe_path = comp.self_exe_path,
|
||||
.c_source_files = &c_source_files,
|
||||
.verbose_cc = comp.verbose_cc,
|
||||
.verbose_link = comp.bin_file.options.verbose_link,
|
||||
.verbose_tokenize = comp.verbose_tokenize,
|
||||
.verbose_ast = comp.verbose_ast,
|
||||
.verbose_ir = comp.verbose_ir,
|
||||
.verbose_llvm_ir = comp.verbose_llvm_ir,
|
||||
.verbose_cimport = comp.verbose_cimport,
|
||||
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
|
||||
.clang_passthrough_mode = comp.clang_passthrough_mode,
|
||||
.link_libc = true,
|
||||
});
|
||||
defer sub_compilation.destroy();
|
||||
|
||||
try sub_compilation.updateSubCompilation();
|
||||
|
||||
assert(comp.libunwind_static_lib == null);
|
||||
comp.libunwind_static_lib = Compilation.CRTFile{
|
||||
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(
|
||||
comp.gpa,
|
||||
&[_][]const u8{basename},
|
||||
),
|
||||
.lock = sub_compilation.bin_file.toOwnedLock(),
|
||||
};
|
||||
}
|
||||
2984
src/link.cpp
2984
src/link.cpp
File diff suppressed because it is too large
Load Diff
549
src/link.zig
Normal file
549
src/link.zig
Normal file
@ -0,0 +1,549 @@
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const fs = std.fs;
|
||||
const log = std.log.scoped(.link);
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const Module = @import("Module.zig");
|
||||
const trace = @import("tracy.zig").trace;
|
||||
const Package = @import("Package.zig");
|
||||
const Type = @import("type.zig").Type;
|
||||
const Cache = @import("Cache.zig");
|
||||
const build_options = @import("build_options");
|
||||
const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
|
||||
|
||||
pub const producer_string = if (std.builtin.is_test) "zig test" else "zig " ++ build_options.version;
|
||||
|
||||
pub const Emit = struct {
|
||||
/// Where the output will go.
|
||||
directory: Compilation.Directory,
|
||||
/// Path to the output file, relative to `directory`.
|
||||
sub_path: []const u8,
|
||||
};
|
||||
|
||||
pub const Options = struct {
|
||||
/// This is `null` when -fno-emit-bin is used. When `openPath` or `flush` is called,
|
||||
/// it will have already been null-checked.
|
||||
emit: ?Emit,
|
||||
target: std.Target,
|
||||
output_mode: std.builtin.OutputMode,
|
||||
link_mode: std.builtin.LinkMode,
|
||||
object_format: std.builtin.ObjectFormat,
|
||||
optimize_mode: std.builtin.Mode,
|
||||
machine_code_model: std.builtin.CodeModel,
|
||||
root_name: []const u8,
|
||||
/// Not every Compilation compiles .zig code! For example you could do `zig build-exe foo.o`.
|
||||
module: ?*Module,
|
||||
dynamic_linker: ?[]const u8,
|
||||
/// Used for calculating how much space to reserve for symbols in case the binary file
|
||||
/// does not already have a symbol table.
|
||||
symbol_count_hint: u64 = 32,
|
||||
/// Used for calculating how much space to reserve for executable program code in case
|
||||
/// the binary file does not already have such a section.
|
||||
program_code_size_hint: u64 = 256 * 1024,
|
||||
entry_addr: ?u64 = null,
|
||||
stack_size_override: ?u64,
|
||||
/// Set to `true` to omit debug info.
|
||||
strip: bool,
|
||||
/// If this is true then this link code is responsible for outputting an object
|
||||
/// file and then using LLD to link it together with the link options and other objects.
|
||||
/// Otherwise (depending on `use_llvm`) this link code directly outputs and updates the final binary.
|
||||
use_lld: bool,
|
||||
/// If this is true then this link code is responsible for making an LLVM IR Module,
|
||||
/// outputting it to an object file, and then linking that together with link options and
|
||||
/// other objects.
|
||||
/// Otherwise (depending on `use_lld`) this link code directly outputs and updates the final binary.
|
||||
use_llvm: bool,
|
||||
link_libc: bool,
|
||||
link_libcpp: bool,
|
||||
function_sections: bool,
|
||||
eh_frame_hdr: bool,
|
||||
rdynamic: bool,
|
||||
z_nodelete: bool,
|
||||
z_defs: bool,
|
||||
bind_global_refs_locally: bool,
|
||||
is_native_os: bool,
|
||||
pic: bool,
|
||||
valgrind: bool,
|
||||
stack_check: bool,
|
||||
single_threaded: bool,
|
||||
verbose_link: bool,
|
||||
dll_export_fns: bool,
|
||||
error_return_tracing: bool,
|
||||
is_compiler_rt_or_libc: bool,
|
||||
parent_compilation_link_libc: bool,
|
||||
each_lib_rpath: bool,
|
||||
disable_lld_caching: bool,
|
||||
is_test: bool,
|
||||
gc_sections: ?bool = null,
|
||||
allow_shlib_undefined: ?bool,
|
||||
subsystem: ?std.Target.SubSystem,
|
||||
linker_script: ?[]const u8,
|
||||
version_script: ?[]const u8,
|
||||
override_soname: ?[]const u8,
|
||||
llvm_cpu_features: ?[*:0]const u8,
|
||||
/// Extra args passed directly to LLD. Ignored when not linking with LLD.
|
||||
extra_lld_args: []const []const u8,
|
||||
|
||||
objects: []const []const u8,
|
||||
framework_dirs: []const []const u8,
|
||||
frameworks: []const []const u8,
|
||||
system_libs: std.StringArrayHashMapUnmanaged(void),
|
||||
lib_dirs: []const []const u8,
|
||||
rpath_list: []const []const u8,
|
||||
|
||||
version: ?std.builtin.Version,
|
||||
libc_installation: ?*const LibCInstallation,
|
||||
|
||||
pub fn effectiveOutputMode(options: Options) std.builtin.OutputMode {
|
||||
return if (options.use_lld) .Obj else options.output_mode;
|
||||
}
|
||||
};
|
||||
|
||||
pub const File = struct {
|
||||
tag: Tag,
|
||||
options: Options,
|
||||
file: ?fs.File,
|
||||
allocator: *Allocator,
|
||||
/// When linking with LLD, this linker code will output an object file only at
|
||||
/// this location, and then this path can be placed on the LLD linker line.
|
||||
intermediary_basename: ?[]const u8 = null,
|
||||
|
||||
/// Prevents other processes from clobbering files in the output directory
|
||||
/// of this linking operation.
|
||||
lock: ?Cache.Lock = null,
|
||||
|
||||
pub const LinkBlock = union {
|
||||
elf: Elf.TextBlock,
|
||||
coff: Coff.TextBlock,
|
||||
macho: MachO.TextBlock,
|
||||
c: void,
|
||||
wasm: void,
|
||||
};
|
||||
|
||||
pub const LinkFn = union {
|
||||
elf: Elf.SrcFn,
|
||||
coff: Coff.SrcFn,
|
||||
macho: MachO.SrcFn,
|
||||
c: void,
|
||||
wasm: ?Wasm.FnData,
|
||||
};
|
||||
|
||||
/// For DWARF .debug_info.
|
||||
pub const DbgInfoTypeRelocsTable = std.HashMapUnmanaged(Type, DbgInfoTypeReloc, Type.hash, Type.eql, std.hash_map.DefaultMaxLoadPercentage);
|
||||
|
||||
/// For DWARF .debug_info.
|
||||
pub const DbgInfoTypeReloc = struct {
|
||||
/// Offset from `TextBlock.dbg_info_off` (the buffer that is local to a Decl).
|
||||
/// This is where the .debug_info tag for the type is.
|
||||
off: u32,
|
||||
/// Offset from `TextBlock.dbg_info_off` (the buffer that is local to a Decl).
|
||||
/// List of DW.AT_type / DW.FORM_ref4 that points to the type.
|
||||
relocs: std.ArrayListUnmanaged(u32),
|
||||
};
|
||||
|
||||
/// Attempts incremental linking, if the file already exists. If
|
||||
/// incremental linking fails, falls back to truncating the file and
|
||||
/// rewriting it. A malicious file is detected as incremental link failure
|
||||
/// and does not cause Illegal Behavior. This operation is not atomic.
|
||||
pub fn openPath(allocator: *Allocator, options: Options) !*File {
|
||||
const use_stage1 = build_options.is_stage1 and options.use_llvm;
|
||||
if (use_stage1 or options.emit == null) {
|
||||
return switch (options.object_format) {
|
||||
.coff, .pe => &(try Coff.createEmpty(allocator, options)).base,
|
||||
.elf => &(try Elf.createEmpty(allocator, options)).base,
|
||||
.macho => &(try MachO.createEmpty(allocator, options)).base,
|
||||
.wasm => &(try Wasm.createEmpty(allocator, options)).base,
|
||||
.c => unreachable, // Reported error earlier.
|
||||
.hex => return error.HexObjectFormatUnimplemented,
|
||||
.raw => return error.RawObjectFormatUnimplemented,
|
||||
};
|
||||
}
|
||||
const emit = options.emit.?;
|
||||
const use_lld = build_options.have_llvm and options.use_lld; // comptime known false when !have_llvm
|
||||
const sub_path = if (use_lld) blk: {
|
||||
if (options.module == null) {
|
||||
// No point in opening a file, we would not write anything to it. Initialize with empty.
|
||||
return switch (options.object_format) {
|
||||
.coff, .pe => &(try Coff.createEmpty(allocator, options)).base,
|
||||
.elf => &(try Elf.createEmpty(allocator, options)).base,
|
||||
.macho => &(try MachO.createEmpty(allocator, options)).base,
|
||||
.wasm => &(try Wasm.createEmpty(allocator, options)).base,
|
||||
.c => unreachable, // Reported error earlier.
|
||||
.hex => return error.HexObjectFormatUnimplemented,
|
||||
.raw => return error.RawObjectFormatUnimplemented,
|
||||
};
|
||||
}
|
||||
// Open a temporary object file, not the final output file because we want to link with LLD.
|
||||
break :blk try std.fmt.allocPrint(allocator, "{s}{s}", .{ emit.sub_path, options.target.oFileExt() });
|
||||
} else emit.sub_path;
|
||||
errdefer if (use_lld) allocator.free(sub_path);
|
||||
|
||||
const file: *File = switch (options.object_format) {
|
||||
.coff, .pe => &(try Coff.openPath(allocator, sub_path, options)).base,
|
||||
.elf => &(try Elf.openPath(allocator, sub_path, options)).base,
|
||||
.macho => &(try MachO.openPath(allocator, sub_path, options)).base,
|
||||
.wasm => &(try Wasm.openPath(allocator, sub_path, options)).base,
|
||||
.c => &(try C.openPath(allocator, sub_path, options)).base,
|
||||
.hex => return error.HexObjectFormatUnimplemented,
|
||||
.raw => return error.RawObjectFormatUnimplemented,
|
||||
};
|
||||
|
||||
if (use_lld) {
|
||||
file.intermediary_basename = sub_path;
|
||||
}
|
||||
|
||||
return file;
|
||||
}
|
||||
|
||||
pub fn cast(base: *File, comptime T: type) ?*T {
|
||||
if (base.tag != T.base_tag)
|
||||
return null;
|
||||
|
||||
return @fieldParentPtr(T, "base", base);
|
||||
}
|
||||
|
||||
pub fn makeWritable(base: *File) !void {
|
||||
switch (base.tag) {
|
||||
.coff, .elf, .macho => {
|
||||
if (base.file != null) return;
|
||||
const emit = base.options.emit orelse return;
|
||||
base.file = try emit.directory.handle.createFile(emit.sub_path, .{
|
||||
.truncate = false,
|
||||
.read = true,
|
||||
.mode = determineMode(base.options),
|
||||
});
|
||||
},
|
||||
.c, .wasm => {},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn makeExecutable(base: *File) !void {
|
||||
switch (base.tag) {
|
||||
.coff, .elf, .macho => if (base.file) |f| {
|
||||
if (base.intermediary_basename != null) {
|
||||
// The file we have open is not the final file that we want to
|
||||
// make executable, so we don't have to close it.
|
||||
return;
|
||||
}
|
||||
f.close();
|
||||
base.file = null;
|
||||
},
|
||||
.c, .wasm => {},
|
||||
}
|
||||
}
|
||||
|
||||
/// May be called before or after updateDeclExports but must be called
|
||||
/// after allocateDeclIndexes for any given Decl.
|
||||
pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) !void {
|
||||
switch (base.tag) {
|
||||
.coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl),
|
||||
.elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl),
|
||||
.macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl),
|
||||
.c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl),
|
||||
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) !void {
|
||||
switch (base.tag) {
|
||||
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl),
|
||||
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
|
||||
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
|
||||
.c, .wasm => {},
|
||||
}
|
||||
}
|
||||
|
||||
/// Must be called before any call to updateDecl or updateDeclExports for
|
||||
/// any given Decl.
|
||||
pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) !void {
|
||||
switch (base.tag) {
|
||||
.coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl),
|
||||
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl),
|
||||
.macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl),
|
||||
.c, .wasm => {},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn releaseLock(self: *File) void {
|
||||
if (self.lock) |*lock| {
|
||||
lock.release();
|
||||
self.lock = null;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn toOwnedLock(self: *File) Cache.Lock {
|
||||
const lock = self.lock.?;
|
||||
self.lock = null;
|
||||
return lock;
|
||||
}
|
||||
|
||||
pub fn destroy(base: *File) void {
|
||||
base.releaseLock();
|
||||
if (base.file) |f| f.close();
|
||||
if (base.intermediary_basename) |sub_path| base.allocator.free(sub_path);
|
||||
switch (base.tag) {
|
||||
.coff => {
|
||||
const parent = @fieldParentPtr(Coff, "base", base);
|
||||
parent.deinit();
|
||||
base.allocator.destroy(parent);
|
||||
},
|
||||
.elf => {
|
||||
const parent = @fieldParentPtr(Elf, "base", base);
|
||||
parent.deinit();
|
||||
base.allocator.destroy(parent);
|
||||
},
|
||||
.macho => {
|
||||
const parent = @fieldParentPtr(MachO, "base", base);
|
||||
parent.deinit();
|
||||
base.allocator.destroy(parent);
|
||||
},
|
||||
.c => {
|
||||
const parent = @fieldParentPtr(C, "base", base);
|
||||
parent.deinit();
|
||||
base.allocator.destroy(parent);
|
||||
},
|
||||
.wasm => {
|
||||
const parent = @fieldParentPtr(Wasm, "base", base);
|
||||
parent.deinit();
|
||||
base.allocator.destroy(parent);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Commit pending changes and write headers. Takes into account final output mode
|
||||
/// and `use_lld`, not only `effectiveOutputMode`.
|
||||
pub fn flush(base: *File, comp: *Compilation) !void {
|
||||
const emit = base.options.emit orelse return; // -fno-emit-bin
|
||||
|
||||
if (comp.clang_preprocessor_mode == .yes) {
|
||||
// TODO: avoid extra link step when it's just 1 object file (the `zig cc -c` case)
|
||||
// Until then, we do `lld -r -o output.o input.o` even though the output is the same
|
||||
// as the input. For the preprocessing case (`zig cc -E -o foo`) we copy the file
|
||||
// to the final location. See also the corresponding TODO in Coff linking.
|
||||
const full_out_path = try emit.directory.join(comp.gpa, &[_][]const u8{emit.sub_path});
|
||||
defer comp.gpa.free(full_out_path);
|
||||
assert(comp.c_object_table.count() == 1);
|
||||
const the_entry = comp.c_object_table.items()[0];
|
||||
const cached_pp_file_path = the_entry.key.status.success.object_path;
|
||||
try fs.cwd().copyFile(cached_pp_file_path, fs.cwd(), full_out_path, .{});
|
||||
return;
|
||||
}
|
||||
const use_lld = build_options.have_llvm and base.options.use_lld;
|
||||
if (use_lld and base.options.output_mode == .Lib and base.options.link_mode == .Static and
|
||||
!base.options.target.isWasm())
|
||||
{
|
||||
return base.linkAsArchive(comp);
|
||||
}
|
||||
switch (base.tag) {
|
||||
.coff => return @fieldParentPtr(Coff, "base", base).flush(comp),
|
||||
.elf => return @fieldParentPtr(Elf, "base", base).flush(comp),
|
||||
.macho => return @fieldParentPtr(MachO, "base", base).flush(comp),
|
||||
.c => return @fieldParentPtr(C, "base", base).flush(comp),
|
||||
.wasm => return @fieldParentPtr(Wasm, "base", base).flush(comp),
|
||||
}
|
||||
}
|
||||
|
||||
/// Commit pending changes and write headers. Works based on `effectiveOutputMode`
|
||||
/// rather than final output mode.
|
||||
pub fn flushModule(base: *File, comp: *Compilation) !void {
|
||||
switch (base.tag) {
|
||||
.coff => return @fieldParentPtr(Coff, "base", base).flushModule(comp),
|
||||
.elf => return @fieldParentPtr(Elf, "base", base).flushModule(comp),
|
||||
.macho => return @fieldParentPtr(MachO, "base", base).flushModule(comp),
|
||||
.c => return @fieldParentPtr(C, "base", base).flushModule(comp),
|
||||
.wasm => return @fieldParentPtr(Wasm, "base", base).flushModule(comp),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn freeDecl(base: *File, decl: *Module.Decl) void {
|
||||
switch (base.tag) {
|
||||
.coff => @fieldParentPtr(Coff, "base", base).freeDecl(decl),
|
||||
.elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl),
|
||||
.macho => @fieldParentPtr(MachO, "base", base).freeDecl(decl),
|
||||
.c => unreachable,
|
||||
.wasm => @fieldParentPtr(Wasm, "base", base).freeDecl(decl),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn errorFlags(base: *File) ErrorFlags {
|
||||
switch (base.tag) {
|
||||
.coff => return @fieldParentPtr(Coff, "base", base).error_flags,
|
||||
.elf => return @fieldParentPtr(Elf, "base", base).error_flags,
|
||||
.macho => return @fieldParentPtr(MachO, "base", base).error_flags,
|
||||
.c => return .{ .no_entry_point_found = false },
|
||||
.wasm => return ErrorFlags{},
|
||||
}
|
||||
}
|
||||
|
||||
/// May be called before or after updateDecl, but must be called after
|
||||
/// allocateDeclIndexes for any given Decl.
|
||||
pub fn updateDeclExports(
|
||||
base: *File,
|
||||
module: *Module,
|
||||
decl: *const Module.Decl,
|
||||
exports: []const *Module.Export,
|
||||
) !void {
|
||||
switch (base.tag) {
|
||||
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclExports(module, decl, exports),
|
||||
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclExports(module, decl, exports),
|
||||
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclExports(module, decl, exports),
|
||||
.c => return {},
|
||||
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclExports(module, decl, exports),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn getDeclVAddr(base: *File, decl: *const Module.Decl) u64 {
|
||||
switch (base.tag) {
|
||||
.coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl),
|
||||
.elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl),
|
||||
.macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl),
|
||||
.c => unreachable,
|
||||
.wasm => unreachable,
|
||||
}
|
||||
}
|
||||
|
||||
fn linkAsArchive(base: *File, comp: *Compilation) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
var arena_allocator = std.heap.ArenaAllocator.init(base.allocator);
|
||||
defer arena_allocator.deinit();
|
||||
const arena = &arena_allocator.allocator;
|
||||
|
||||
const directory = base.options.emit.?.directory; // Just an alias to make it shorter to type.
|
||||
|
||||
// If there is no Zig code to compile, then we should skip flushing the output file because it
|
||||
// will not be part of the linker line anyway.
|
||||
const module_obj_path: ?[]const u8 = if (base.options.module) |module| blk: {
|
||||
const use_stage1 = build_options.is_stage1 and base.options.use_llvm;
|
||||
if (use_stage1) {
|
||||
const obj_basename = try std.zig.binNameAlloc(arena, .{
|
||||
.root_name = base.options.root_name,
|
||||
.target = base.options.target,
|
||||
.output_mode = .Obj,
|
||||
});
|
||||
const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
|
||||
break :blk full_obj_path;
|
||||
}
|
||||
try base.flushModule(comp);
|
||||
const obj_basename = base.intermediary_basename.?;
|
||||
const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
|
||||
break :blk full_obj_path;
|
||||
} else null;
|
||||
|
||||
// This function follows the same pattern as link.Elf.linkWithLLD so if you want some
|
||||
// insight as to what's going on here you can read that function body which is more
|
||||
// well-commented.
|
||||
|
||||
const id_symlink_basename = "llvm-ar.id";
|
||||
|
||||
base.releaseLock();
|
||||
|
||||
var ch = comp.cache_parent.obtain();
|
||||
defer ch.deinit();
|
||||
|
||||
try ch.addListOfFiles(base.options.objects);
|
||||
for (comp.c_object_table.items()) |entry| {
|
||||
_ = try ch.addFile(entry.key.status.success.object_path, null);
|
||||
}
|
||||
try ch.addOptionalFile(module_obj_path);
|
||||
|
||||
// We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
|
||||
_ = try ch.hit();
|
||||
const digest = ch.final();
|
||||
|
||||
var prev_digest_buf: [digest.len]u8 = undefined;
|
||||
const prev_digest: []u8 = directory.handle.readLink(id_symlink_basename, &prev_digest_buf) catch |err| b: {
|
||||
log.debug("archive new_digest={} readlink error: {}", .{ digest, @errorName(err) });
|
||||
break :b prev_digest_buf[0..0];
|
||||
};
|
||||
if (mem.eql(u8, prev_digest, &digest)) {
|
||||
log.debug("archive digest={} match - skipping invocation", .{digest});
|
||||
base.lock = ch.toOwnedLock();
|
||||
return;
|
||||
}
|
||||
|
||||
// We are about to change the output file to be different, so we invalidate the build hash now.
|
||||
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
|
||||
error.FileNotFound => {},
|
||||
else => |e| return e,
|
||||
};
|
||||
|
||||
var object_files = std.ArrayList([*:0]const u8).init(base.allocator);
|
||||
defer object_files.deinit();
|
||||
|
||||
try object_files.ensureCapacity(base.options.objects.len + comp.c_object_table.items().len + 1);
|
||||
for (base.options.objects) |obj_path| {
|
||||
object_files.appendAssumeCapacity(try arena.dupeZ(u8, obj_path));
|
||||
}
|
||||
for (comp.c_object_table.items()) |entry| {
|
||||
object_files.appendAssumeCapacity(try arena.dupeZ(u8, entry.key.status.success.object_path));
|
||||
}
|
||||
if (module_obj_path) |p| {
|
||||
object_files.appendAssumeCapacity(try arena.dupeZ(u8, p));
|
||||
}
|
||||
|
||||
const full_out_path = try directory.join(arena, &[_][]const u8{base.options.emit.?.sub_path});
|
||||
const full_out_path_z = try arena.dupeZ(u8, full_out_path);
|
||||
|
||||
if (base.options.verbose_link) {
|
||||
std.debug.print("ar rcs {}", .{full_out_path_z});
|
||||
for (object_files.items) |arg| {
|
||||
std.debug.print(" {}", .{arg});
|
||||
}
|
||||
std.debug.print("\n", .{});
|
||||
}
|
||||
|
||||
const llvm = @import("llvm.zig");
|
||||
const os_type = @import("target.zig").osToLLVM(base.options.target.os.tag);
|
||||
const bad = llvm.WriteArchive(full_out_path_z, object_files.items.ptr, object_files.items.len, os_type);
|
||||
if (bad) return error.UnableToWriteArchive;
|
||||
|
||||
directory.handle.symLink(&digest, id_symlink_basename, .{}) catch |err| {
|
||||
std.log.warn("failed to save archive hash digest symlink: {}", .{@errorName(err)});
|
||||
};
|
||||
|
||||
ch.writeManifest() catch |err| {
|
||||
std.log.warn("failed to write cache manifest when archiving: {}", .{@errorName(err)});
|
||||
};
|
||||
|
||||
base.lock = ch.toOwnedLock();
|
||||
}
|
||||
|
||||
pub const Tag = enum {
|
||||
coff,
|
||||
elf,
|
||||
macho,
|
||||
c,
|
||||
wasm,
|
||||
};
|
||||
|
||||
pub const ErrorFlags = struct {
|
||||
no_entry_point_found: bool = false,
|
||||
};
|
||||
|
||||
pub const C = @import("link/C.zig");
|
||||
pub const Coff = @import("link/Coff.zig");
|
||||
pub const Elf = @import("link/Elf.zig");
|
||||
pub const MachO = @import("link/MachO.zig");
|
||||
pub const Wasm = @import("link/Wasm.zig");
|
||||
};
|
||||
|
||||
pub fn determineMode(options: Options) fs.File.Mode {
|
||||
// On common systems with a 0o022 umask, 0o777 will still result in a file created
|
||||
// with 0o755 permissions, but it works appropriately if the system is configured
|
||||
// more leniently. As another data point, C's fopen seems to open files with the
|
||||
// 666 mode.
|
||||
const executable_mode = if (std.Target.current.os.tag == .windows) 0 else 0o777;
|
||||
switch (options.effectiveOutputMode()) {
|
||||
.Lib => return switch (options.link_mode) {
|
||||
.Dynamic => executable_mode,
|
||||
.Static => fs.File.default_mode,
|
||||
},
|
||||
.Exe => return executable_mode,
|
||||
.Obj => return fs.File.default_mode,
|
||||
}
|
||||
}
|
||||
@ -3,9 +3,11 @@ const mem = std.mem;
|
||||
const assert = std.debug.assert;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const Module = @import("../Module.zig");
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
const fs = std.fs;
|
||||
const codegen = @import("../codegen/c.zig");
|
||||
const link = @import("../link.zig");
|
||||
const trace = @import("../tracy.zig").trace;
|
||||
const File = link.File;
|
||||
const C = @This();
|
||||
|
||||
@ -20,12 +22,15 @@ main: std.ArrayList(u8),
|
||||
called: std.StringHashMap(void),
|
||||
need_stddef: bool = false,
|
||||
need_stdint: bool = false,
|
||||
error_msg: *Module.ErrorMsg = undefined,
|
||||
error_msg: *Compilation.ErrorMsg = undefined,
|
||||
|
||||
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*File {
|
||||
pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*C {
|
||||
assert(options.object_format == .c);
|
||||
|
||||
const file = try dir.createFile(sub_path, .{ .truncate = true, .read = true, .mode = link.determineMode(options) });
|
||||
if (options.use_llvm) return error.LLVMHasNoCBackend;
|
||||
if (options.use_lld) return error.LLDHasNoCBackend;
|
||||
|
||||
const file = try options.emit.?.directory.handle.createFile(sub_path, .{ .truncate = true, .read = true, .mode = link.determineMode(options) });
|
||||
errdefer file.close();
|
||||
|
||||
var c_file = try allocator.create(C);
|
||||
@ -44,11 +49,11 @@ pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, option
|
||||
.called = std.StringHashMap(void).init(allocator),
|
||||
};
|
||||
|
||||
return &c_file.base;
|
||||
return c_file;
|
||||
}
|
||||
|
||||
pub fn fail(self: *C, src: usize, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
|
||||
self.error_msg = try Module.ErrorMsg.create(self.base.allocator, src, format, args);
|
||||
self.error_msg = try Compilation.ErrorMsg.create(self.base.allocator, src, format, args);
|
||||
return error.AnalysisFail;
|
||||
}
|
||||
|
||||
@ -68,7 +73,14 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn flush(self: *C, module: *Module) !void {
|
||||
pub fn flush(self: *C, comp: *Compilation) !void {
|
||||
return self.flushModule(comp);
|
||||
}
|
||||
|
||||
pub fn flushModule(self: *C, comp: *Compilation) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
const writer = self.base.file.?.writer();
|
||||
try writer.writeAll(@embedFile("cbe.h"));
|
||||
var includes = false;
|
||||
1220
src/link/Coff.zig
Normal file
1220
src/link/Coff.zig
Normal file
File diff suppressed because it is too large
Load Diff
@ -1,23 +1,29 @@
|
||||
const Elf = @This();
|
||||
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
const assert = std.debug.assert;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const ir = @import("../ir.zig");
|
||||
const Module = @import("../Module.zig");
|
||||
const fs = std.fs;
|
||||
const elf = std.elf;
|
||||
const codegen = @import("../codegen.zig");
|
||||
const log = std.log.scoped(.link);
|
||||
const DW = std.dwarf;
|
||||
const trace = @import("../tracy.zig").trace;
|
||||
const leb128 = std.debug.leb;
|
||||
|
||||
const ir = @import("../ir.zig");
|
||||
const Module = @import("../Module.zig");
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
const codegen = @import("../codegen.zig");
|
||||
const trace = @import("../tracy.zig").trace;
|
||||
const Package = @import("../Package.zig");
|
||||
const Value = @import("../value.zig").Value;
|
||||
const Type = @import("../type.zig").Type;
|
||||
const link = @import("../link.zig");
|
||||
const File = link.File;
|
||||
const Elf = @This();
|
||||
const build_options = @import("build_options");
|
||||
const target_util = @import("../target.zig");
|
||||
const glibc = @import("../glibc.zig");
|
||||
const Cache = @import("../Cache.zig");
|
||||
|
||||
const default_entry_addr = 0x8000000;
|
||||
|
||||
@ -28,7 +34,7 @@ pub const base_tag: File.Tag = .elf;
|
||||
|
||||
base: File,
|
||||
|
||||
ptr_width: enum { p32, p64 },
|
||||
ptr_width: PtrWidth,
|
||||
|
||||
/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
|
||||
/// Same order as in the file.
|
||||
@ -130,6 +136,8 @@ const alloc_den = 3;
|
||||
const minimum_text_block_size = 64;
|
||||
const min_text_capacity = minimum_text_block_size * alloc_num / alloc_den;
|
||||
|
||||
pub const PtrWidth = enum { p32, p64 };
|
||||
|
||||
pub const TextBlock = struct {
|
||||
/// Each decl always gets a local symbol with the fully qualified name.
|
||||
/// The vaddr and size are found here directly.
|
||||
@ -216,74 +224,23 @@ pub const SrcFn = struct {
|
||||
};
|
||||
};
|
||||
|
||||
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*File {
|
||||
pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Elf {
|
||||
assert(options.object_format == .elf);
|
||||
|
||||
const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = link.determineMode(options) });
|
||||
if (options.use_llvm) return error.LLVMBackendUnimplementedForELF; // TODO
|
||||
|
||||
const file = try options.emit.?.directory.handle.createFile(sub_path, .{
|
||||
.truncate = false,
|
||||
.read = true,
|
||||
.mode = link.determineMode(options),
|
||||
});
|
||||
errdefer file.close();
|
||||
|
||||
var elf_file = try allocator.create(Elf);
|
||||
errdefer allocator.destroy(elf_file);
|
||||
const self = try createEmpty(allocator, options);
|
||||
errdefer self.base.destroy();
|
||||
|
||||
elf_file.* = openFile(allocator, file, options) catch |err| switch (err) {
|
||||
error.IncrFailed => try createFile(allocator, file, options),
|
||||
else => |e| return e,
|
||||
};
|
||||
|
||||
return &elf_file.base;
|
||||
}
|
||||
|
||||
/// Returns error.IncrFailed if incremental update could not be performed.
|
||||
fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !Elf {
|
||||
switch (options.output_mode) {
|
||||
.Exe => {},
|
||||
.Obj => {},
|
||||
.Lib => return error.IncrFailed,
|
||||
}
|
||||
var self: Elf = .{
|
||||
.base = .{
|
||||
.file = file,
|
||||
.tag = .elf,
|
||||
.options = options,
|
||||
.allocator = allocator,
|
||||
},
|
||||
.ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
|
||||
0 ... 32 => .p32,
|
||||
33 ... 64 => .p64,
|
||||
else => return error.UnsupportedELFArchitecture,
|
||||
},
|
||||
};
|
||||
errdefer self.deinit();
|
||||
|
||||
// TODO implement reading the elf file
|
||||
return error.IncrFailed;
|
||||
//try self.populateMissingMetadata();
|
||||
//return self;
|
||||
}
|
||||
|
||||
/// Truncates the existing file contents and overwrites the contents.
|
||||
/// Returns an error if `file` is not already open with +read +write +seek abilities.
|
||||
fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Elf {
|
||||
switch (options.output_mode) {
|
||||
.Exe => {},
|
||||
.Obj => {},
|
||||
.Lib => return error.TODOImplementWritingLibFiles,
|
||||
}
|
||||
var self: Elf = .{
|
||||
.base = .{
|
||||
.tag = .elf,
|
||||
.options = options,
|
||||
.allocator = allocator,
|
||||
.file = file,
|
||||
},
|
||||
.ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
|
||||
0 ... 32 => .p32,
|
||||
33 ... 64 => .p64,
|
||||
else => return error.UnsupportedELFArchitecture,
|
||||
},
|
||||
.shdr_table_dirty = true,
|
||||
};
|
||||
errdefer self.deinit();
|
||||
self.base.file = file;
|
||||
self.shdr_table_dirty = true;
|
||||
|
||||
// Index 0 is always a null symbol.
|
||||
try self.local_symbols.append(allocator, .{
|
||||
@ -314,6 +271,25 @@ fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Elf
|
||||
return self;
|
||||
}
|
||||
|
||||
pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Elf {
|
||||
const ptr_width: PtrWidth = switch (options.target.cpu.arch.ptrBitWidth()) {
|
||||
0 ... 32 => .p32,
|
||||
33 ... 64 => .p64,
|
||||
else => return error.UnsupportedELFArchitecture,
|
||||
};
|
||||
const self = try gpa.create(Elf);
|
||||
self.* = .{
|
||||
.base = .{
|
||||
.tag = .elf,
|
||||
.options = options,
|
||||
.allocator = gpa,
|
||||
.file = null,
|
||||
},
|
||||
.ptr_width = ptr_width,
|
||||
};
|
||||
return self;
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Elf) void {
|
||||
self.sections.deinit(self.base.allocator);
|
||||
self.program_headers.deinit(self.base.allocator);
|
||||
@ -738,8 +714,26 @@ pub const abbrev_base_type = 4;
|
||||
pub const abbrev_pad1 = 5;
|
||||
pub const abbrev_parameter = 6;
|
||||
|
||||
/// Commit pending changes and write headers.
|
||||
pub fn flush(self: *Elf, module: *Module) !void {
|
||||
pub fn flush(self: *Elf, comp: *Compilation) !void {
|
||||
if (build_options.have_llvm and self.base.options.use_lld) {
|
||||
return self.linkWithLLD(comp);
|
||||
} else {
|
||||
switch (self.base.options.effectiveOutputMode()) {
|
||||
.Exe, .Obj => {},
|
||||
.Lib => return error.TODOImplementWritingLibFiles,
|
||||
}
|
||||
return self.flushModule(comp);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn flushModule(self: *Elf, comp: *Compilation) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
// TODO This linker code currently assumes there is only 1 compilation unit and it corresponds to the
|
||||
// Zig source code.
|
||||
const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
|
||||
|
||||
const target_endian = self.base.options.target.cpu.arch.endian();
|
||||
const foreign_endian = target_endian != std.Target.current.cpu.arch.endian();
|
||||
const ptr_width_bytes: u8 = self.ptrWidthBytes();
|
||||
@ -861,8 +855,8 @@ pub fn flush(self: *Elf, module: *Module) !void {
|
||||
},
|
||||
}
|
||||
// Write the form for the compile unit, which must match the abbrev table above.
|
||||
const name_strp = try self.makeDebugString(self.base.options.root_pkg.root_src_path);
|
||||
const comp_dir_strp = try self.makeDebugString(self.base.options.root_pkg.root_src_dir_path);
|
||||
const name_strp = try self.makeDebugString(module.root_pkg.root_src_path);
|
||||
const comp_dir_strp = try self.makeDebugString(module.root_pkg.root_src_directory.path orelse ".");
|
||||
const producer_strp = try self.makeDebugString(link.producer_string);
|
||||
// Currently only one compilation unit is supported, so the address range is simply
|
||||
// identical to the main program header virtual address and memory size.
|
||||
@ -1031,7 +1025,7 @@ pub fn flush(self: *Elf, module: *Module) !void {
|
||||
0, // include_directories (none except the compilation unit cwd)
|
||||
});
|
||||
// file_names[0]
|
||||
di_buf.appendSliceAssumeCapacity(self.base.options.root_pkg.root_src_path); // relative path name
|
||||
di_buf.appendSliceAssumeCapacity(module.root_pkg.root_src_path); // relative path name
|
||||
di_buf.appendSliceAssumeCapacity(&[_]u8{
|
||||
0, // null byte for the relative path name
|
||||
0, // directory_index
|
||||
@ -1195,7 +1189,7 @@ pub fn flush(self: *Elf, module: *Module) !void {
|
||||
}
|
||||
self.shdr_table_dirty = false;
|
||||
}
|
||||
if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
|
||||
if (self.entry_addr == null and self.base.options.effectiveOutputMode() == .Exe) {
|
||||
log.debug("flushing. no_entry_point_found = true\n", .{});
|
||||
self.error_flags.no_entry_point_found = true;
|
||||
} else {
|
||||
@ -1216,6 +1210,449 @@ pub fn flush(self: *Elf, module: *Module) !void {
|
||||
assert(!self.debug_strtab_dirty);
|
||||
}
|
||||
|
||||
fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
|
||||
defer arena_allocator.deinit();
|
||||
const arena = &arena_allocator.allocator;
|
||||
|
||||
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
|
||||
|
||||
// If there is no Zig code to compile, then we should skip flushing the output file because it
|
||||
// will not be part of the linker line anyway.
|
||||
const module_obj_path: ?[]const u8 = if (self.base.options.module) |module| blk: {
|
||||
const use_stage1 = build_options.is_stage1 and self.base.options.use_llvm;
|
||||
if (use_stage1) {
|
||||
const obj_basename = try std.zig.binNameAlloc(arena, .{
|
||||
.root_name = self.base.options.root_name,
|
||||
.target = self.base.options.target,
|
||||
.output_mode = .Obj,
|
||||
});
|
||||
const o_directory = self.base.options.module.?.zig_cache_artifact_directory;
|
||||
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
|
||||
break :blk full_obj_path;
|
||||
}
|
||||
|
||||
try self.flushModule(comp);
|
||||
const obj_basename = self.base.intermediary_basename.?;
|
||||
const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
|
||||
break :blk full_obj_path;
|
||||
} else null;
|
||||
|
||||
const is_obj = self.base.options.output_mode == .Obj;
|
||||
const is_lib = self.base.options.output_mode == .Lib;
|
||||
const is_dyn_lib = self.base.options.link_mode == .Dynamic and is_lib;
|
||||
const is_exe_or_dyn_lib = is_dyn_lib or self.base.options.output_mode == .Exe;
|
||||
const have_dynamic_linker = self.base.options.link_libc and
|
||||
self.base.options.link_mode == .Dynamic and is_exe_or_dyn_lib;
|
||||
const link_in_crt = self.base.options.link_libc and self.base.options.output_mode == .Exe;
|
||||
const target = self.base.options.target;
|
||||
const gc_sections = self.base.options.gc_sections orelse !is_obj;
|
||||
const stack_size = self.base.options.stack_size_override orelse 16777216;
|
||||
const allow_shlib_undefined = self.base.options.allow_shlib_undefined orelse !self.base.options.is_native_os;
|
||||
|
||||
// Here we want to determine whether we can save time by not invoking LLD when the
|
||||
// output is unchanged. None of the linker options or the object files that are being
|
||||
// linked are in the hash that namespaces the directory we are outputting to. Therefore,
|
||||
// we must hash those now, and the resulting digest will form the "id" of the linking
|
||||
// job we are about to perform.
|
||||
// After a successful link, we store the id in the metadata of a symlink named "id.txt" in
|
||||
// the artifact directory. So, now, we check if this symlink exists, and if it matches
|
||||
// our digest. If so, we can skip linking. Otherwise, we proceed with invoking LLD.
|
||||
const id_symlink_basename = "lld.id";
|
||||
|
||||
var man: Cache.Manifest = undefined;
|
||||
defer if (!self.base.options.disable_lld_caching) man.deinit();
|
||||
|
||||
var digest: [Cache.hex_digest_len]u8 = undefined;
|
||||
|
||||
if (!self.base.options.disable_lld_caching) {
|
||||
man = comp.cache_parent.obtain();
|
||||
|
||||
// We are about to obtain this lock, so here we give other processes a chance first.
|
||||
self.base.releaseLock();
|
||||
|
||||
try man.addOptionalFile(self.base.options.linker_script);
|
||||
try man.addOptionalFile(self.base.options.version_script);
|
||||
try man.addListOfFiles(self.base.options.objects);
|
||||
for (comp.c_object_table.items()) |entry| {
|
||||
_ = try man.addFile(entry.key.status.success.object_path, null);
|
||||
}
|
||||
try man.addOptionalFile(module_obj_path);
|
||||
// We can skip hashing libc and libc++ components that we are in charge of building from Zig
|
||||
// installation sources because they are always a product of the compiler version + target information.
|
||||
man.hash.add(stack_size);
|
||||
man.hash.add(gc_sections);
|
||||
man.hash.add(self.base.options.eh_frame_hdr);
|
||||
man.hash.add(self.base.options.rdynamic);
|
||||
man.hash.addListOfBytes(self.base.options.extra_lld_args);
|
||||
man.hash.addListOfBytes(self.base.options.lib_dirs);
|
||||
man.hash.addListOfBytes(self.base.options.rpath_list);
|
||||
man.hash.add(self.base.options.each_lib_rpath);
|
||||
man.hash.add(self.base.options.is_compiler_rt_or_libc);
|
||||
man.hash.add(self.base.options.z_nodelete);
|
||||
man.hash.add(self.base.options.z_defs);
|
||||
if (self.base.options.link_libc) {
|
||||
man.hash.add(self.base.options.libc_installation != null);
|
||||
if (self.base.options.libc_installation) |libc_installation| {
|
||||
man.hash.addBytes(libc_installation.crt_dir.?);
|
||||
}
|
||||
if (have_dynamic_linker) {
|
||||
man.hash.addOptionalBytes(self.base.options.dynamic_linker);
|
||||
}
|
||||
}
|
||||
if (is_dyn_lib) {
|
||||
man.hash.addOptionalBytes(self.base.options.override_soname);
|
||||
man.hash.addOptional(self.base.options.version);
|
||||
}
|
||||
man.hash.addStringSet(self.base.options.system_libs);
|
||||
man.hash.add(allow_shlib_undefined);
|
||||
man.hash.add(self.base.options.bind_global_refs_locally);
|
||||
|
||||
// We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
|
||||
_ = try man.hit();
|
||||
digest = man.final();
|
||||
|
||||
var prev_digest_buf: [digest.len]u8 = undefined;
|
||||
const prev_digest: []u8 = directory.handle.readLink(id_symlink_basename, &prev_digest_buf) catch |err| blk: {
|
||||
log.debug("ELF LLD new_digest={} readlink error: {}", .{digest, @errorName(err)});
|
||||
// Handle this as a cache miss.
|
||||
break :blk prev_digest_buf[0..0];
|
||||
};
|
||||
if (mem.eql(u8, prev_digest, &digest)) {
|
||||
log.debug("ELF LLD digest={} match - skipping invocation", .{digest});
|
||||
// Hot diggity dog! The output binary is already there.
|
||||
self.base.lock = man.toOwnedLock();
|
||||
return;
|
||||
}
|
||||
log.debug("ELF LLD prev_digest={} new_digest={}", .{prev_digest, digest});
|
||||
|
||||
// We are about to change the output file to be different, so we invalidate the build hash now.
|
||||
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
|
||||
error.FileNotFound => {},
|
||||
else => |e| return e,
|
||||
};
|
||||
}
|
||||
|
||||
// Create an LLD command line and invoke it.
|
||||
var argv = std.ArrayList([]const u8).init(self.base.allocator);
|
||||
defer argv.deinit();
|
||||
// Even though we're calling LLD as a library it thinks the first argument is its own exe name.
|
||||
try argv.append("lld");
|
||||
if (is_obj) {
|
||||
try argv.append("-r");
|
||||
}
|
||||
|
||||
try argv.append("-error-limit=0");
|
||||
|
||||
if (self.base.options.output_mode == .Exe) {
|
||||
try argv.append("-z");
|
||||
try argv.append(try std.fmt.allocPrint(arena, "stack-size={}", .{stack_size}));
|
||||
}
|
||||
|
||||
if (self.base.options.linker_script) |linker_script| {
|
||||
try argv.append("-T");
|
||||
try argv.append(linker_script);
|
||||
}
|
||||
|
||||
if (gc_sections) {
|
||||
try argv.append("--gc-sections");
|
||||
}
|
||||
|
||||
if (self.base.options.eh_frame_hdr) {
|
||||
try argv.append("--eh-frame-hdr");
|
||||
}
|
||||
|
||||
if (self.base.options.rdynamic) {
|
||||
try argv.append("--export-dynamic");
|
||||
}
|
||||
|
||||
try argv.appendSlice(self.base.options.extra_lld_args);
|
||||
|
||||
if (self.base.options.z_nodelete) {
|
||||
try argv.append("-z");
|
||||
try argv.append("nodelete");
|
||||
}
|
||||
if (self.base.options.z_defs) {
|
||||
try argv.append("-z");
|
||||
try argv.append("defs");
|
||||
}
|
||||
|
||||
if (getLDMOption(target)) |ldm| {
|
||||
// Any target ELF will use the freebsd osabi if suffixed with "_fbsd".
|
||||
const arg = if (target.os.tag == .freebsd)
|
||||
try std.fmt.allocPrint(arena, "{}_fbsd", .{ldm})
|
||||
else
|
||||
ldm;
|
||||
try argv.append("-m");
|
||||
try argv.append(arg);
|
||||
}
|
||||
|
||||
if (self.base.options.link_mode == .Static) {
|
||||
if (target.cpu.arch.isARM() or target.cpu.arch.isThumb()) {
|
||||
try argv.append("-Bstatic");
|
||||
} else {
|
||||
try argv.append("-static");
|
||||
}
|
||||
} else if (is_dyn_lib) {
|
||||
try argv.append("-shared");
|
||||
}
|
||||
|
||||
if (target_util.requiresPIE(target) and self.base.options.output_mode == .Exe) {
|
||||
try argv.append("-pie");
|
||||
}
|
||||
|
||||
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
|
||||
try argv.append("-o");
|
||||
try argv.append(full_out_path);
|
||||
|
||||
if (link_in_crt) {
|
||||
const crt1o: []const u8 = o: {
|
||||
if (target.os.tag == .netbsd) {
|
||||
break :o "crt0.o";
|
||||
} else if (target.isAndroid()) {
|
||||
if (self.base.options.link_mode == .Dynamic) {
|
||||
break :o "crtbegin_dynamic.o";
|
||||
} else {
|
||||
break :o "crtbegin_static.o";
|
||||
}
|
||||
} else if (self.base.options.link_mode == .Static) {
|
||||
break :o "crt1.o";
|
||||
} else {
|
||||
break :o "Scrt1.o";
|
||||
}
|
||||
};
|
||||
try argv.append(try comp.get_libc_crt_file(arena, crt1o));
|
||||
if (target_util.libc_needs_crti_crtn(target)) {
|
||||
try argv.append(try comp.get_libc_crt_file(arena, "crti.o"));
|
||||
}
|
||||
}
|
||||
|
||||
// rpaths
|
||||
var rpath_table = std.StringHashMap(void).init(self.base.allocator);
|
||||
defer rpath_table.deinit();
|
||||
for (self.base.options.rpath_list) |rpath| {
|
||||
if ((try rpath_table.fetchPut(rpath, {})) == null) {
|
||||
try argv.append("-rpath");
|
||||
try argv.append(rpath);
|
||||
}
|
||||
}
|
||||
if (self.base.options.each_lib_rpath) {
|
||||
var test_path = std.ArrayList(u8).init(self.base.allocator);
|
||||
defer test_path.deinit();
|
||||
for (self.base.options.lib_dirs) |lib_dir_path| {
|
||||
for (self.base.options.system_libs.items()) |link_lib| {
|
||||
test_path.shrinkRetainingCapacity(0);
|
||||
const sep = fs.path.sep_str;
|
||||
try test_path.writer().print("{}" ++ sep ++ "lib{}.so", .{ lib_dir_path, link_lib });
|
||||
fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => continue,
|
||||
else => |e| return e,
|
||||
};
|
||||
if ((try rpath_table.fetchPut(lib_dir_path, {})) == null) {
|
||||
try argv.append("-rpath");
|
||||
try argv.append(lib_dir_path);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (self.base.options.lib_dirs) |lib_dir| {
|
||||
try argv.append("-L");
|
||||
try argv.append(lib_dir);
|
||||
}
|
||||
|
||||
if (self.base.options.link_libc) {
|
||||
if (self.base.options.libc_installation) |libc_installation| {
|
||||
try argv.append("-L");
|
||||
try argv.append(libc_installation.crt_dir.?);
|
||||
}
|
||||
|
||||
if (have_dynamic_linker) {
|
||||
if (self.base.options.dynamic_linker) |dynamic_linker| {
|
||||
try argv.append("-dynamic-linker");
|
||||
try argv.append(dynamic_linker);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (is_dyn_lib) {
|
||||
const soname = self.base.options.override_soname orelse if (self.base.options.version) |ver|
|
||||
try std.fmt.allocPrint(arena, "lib{}.so.{}", .{self.base.options.root_name, ver.major})
|
||||
else
|
||||
try std.fmt.allocPrint(arena, "lib{}.so", .{self.base.options.root_name});
|
||||
try argv.append("-soname");
|
||||
try argv.append(soname);
|
||||
|
||||
if (self.base.options.version_script) |version_script| {
|
||||
try argv.append("-version-script");
|
||||
try argv.append(version_script);
|
||||
}
|
||||
}
|
||||
|
||||
// Positional arguments to the linker such as object files.
|
||||
try argv.appendSlice(self.base.options.objects);
|
||||
|
||||
for (comp.c_object_table.items()) |entry| {
|
||||
try argv.append(entry.key.status.success.object_path);
|
||||
}
|
||||
|
||||
if (module_obj_path) |p| {
|
||||
try argv.append(p);
|
||||
}
|
||||
|
||||
// compiler-rt and libc
|
||||
if (is_exe_or_dyn_lib and !self.base.options.is_compiler_rt_or_libc) {
|
||||
if (!self.base.options.link_libc) {
|
||||
try argv.append(comp.libc_static_lib.?.full_object_path);
|
||||
}
|
||||
try argv.append(comp.compiler_rt_static_lib.?.full_object_path);
|
||||
}
|
||||
|
||||
// Shared libraries.
|
||||
const system_libs = self.base.options.system_libs.items();
|
||||
try argv.ensureCapacity(argv.items.len + system_libs.len);
|
||||
for (system_libs) |entry| {
|
||||
const link_lib = entry.key;
|
||||
// By this time, we depend on these libs being dynamically linked libraries and not static libraries
|
||||
// (the check for that needs to be earlier), but they could be full paths to .so files, in which
|
||||
// case we want to avoid prepending "-l".
|
||||
const ext = Compilation.classifyFileExt(link_lib);
|
||||
const arg = if (ext == .shared_library) link_lib else try std.fmt.allocPrint(arena, "-l{}", .{link_lib});
|
||||
argv.appendAssumeCapacity(arg);
|
||||
}
|
||||
|
||||
if (!is_obj) {
|
||||
// libc++ dep
|
||||
if (self.base.options.link_libcpp) {
|
||||
try argv.append(comp.libcxxabi_static_lib.?.full_object_path);
|
||||
try argv.append(comp.libcxx_static_lib.?.full_object_path);
|
||||
}
|
||||
|
||||
// libc dep
|
||||
if (self.base.options.link_libc) {
|
||||
if (self.base.options.libc_installation != null) {
|
||||
if (self.base.options.link_mode == .Static) {
|
||||
try argv.append("--start-group");
|
||||
try argv.append("-lc");
|
||||
try argv.append("-lm");
|
||||
try argv.append("--end-group");
|
||||
} else {
|
||||
try argv.append("-lc");
|
||||
try argv.append("-lm");
|
||||
}
|
||||
|
||||
if (target.os.tag == .freebsd or target.os.tag == .netbsd) {
|
||||
try argv.append("-lpthread");
|
||||
}
|
||||
} else if (target.isGnuLibC()) {
|
||||
try argv.append(comp.libunwind_static_lib.?.full_object_path);
|
||||
for (glibc.libs) |lib| {
|
||||
const lib_path = try std.fmt.allocPrint(arena, "{s}{c}lib{s}.so.{d}", .{
|
||||
comp.glibc_so_files.?.dir_path, fs.path.sep, lib.name, lib.sover,
|
||||
});
|
||||
try argv.append(lib_path);
|
||||
}
|
||||
try argv.append(try comp.get_libc_crt_file(arena, "libc_nonshared.a"));
|
||||
} else if (target.isMusl()) {
|
||||
try argv.append(comp.libunwind_static_lib.?.full_object_path);
|
||||
try argv.append(try comp.get_libc_crt_file(arena, "libc.a"));
|
||||
} else if (self.base.options.link_libcpp) {
|
||||
try argv.append(comp.libunwind_static_lib.?.full_object_path);
|
||||
} else {
|
||||
unreachable; // Compiler was supposed to emit an error for not being able to provide libc.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// crt end
|
||||
if (link_in_crt) {
|
||||
if (target.isAndroid()) {
|
||||
try argv.append(try comp.get_libc_crt_file(arena, "crtend_android.o"));
|
||||
} else if (target_util.libc_needs_crti_crtn(target)) {
|
||||
try argv.append(try comp.get_libc_crt_file(arena, "crtn.o"));
|
||||
}
|
||||
}
|
||||
|
||||
if (allow_shlib_undefined) {
|
||||
try argv.append("--allow-shlib-undefined");
|
||||
}
|
||||
|
||||
if (self.base.options.bind_global_refs_locally) {
|
||||
try argv.append("-Bsymbolic");
|
||||
}
|
||||
|
||||
if (self.base.options.verbose_link) {
|
||||
Compilation.dump_argv(argv.items);
|
||||
}
|
||||
|
||||
// Oh, snapplesauce! We need null terminated argv.
|
||||
const new_argv = try arena.allocSentinel(?[*:0]const u8, argv.items.len, null);
|
||||
for (argv.items) |arg, i| {
|
||||
new_argv[i] = try arena.dupeZ(u8, arg);
|
||||
}
|
||||
|
||||
var stderr_context: LLDContext = .{
|
||||
.elf = self,
|
||||
.data = std.ArrayList(u8).init(self.base.allocator),
|
||||
};
|
||||
defer stderr_context.data.deinit();
|
||||
var stdout_context: LLDContext = .{
|
||||
.elf = self,
|
||||
.data = std.ArrayList(u8).init(self.base.allocator),
|
||||
};
|
||||
defer stdout_context.data.deinit();
|
||||
const llvm = @import("../llvm.zig");
|
||||
const ok = llvm.Link(.ELF, new_argv.ptr, new_argv.len, append_diagnostic,
|
||||
@ptrToInt(&stdout_context),
|
||||
@ptrToInt(&stderr_context),
|
||||
);
|
||||
if (stderr_context.oom or stdout_context.oom) return error.OutOfMemory;
|
||||
if (stdout_context.data.items.len != 0) {
|
||||
std.log.warn("unexpected LLD stdout: {}", .{stdout_context.data.items});
|
||||
}
|
||||
if (!ok) {
|
||||
// TODO parse this output and surface with the Compilation API rather than
|
||||
// directly outputting to stderr here.
|
||||
std.debug.print("{}", .{stderr_context.data.items});
|
||||
return error.LLDReportedFailure;
|
||||
}
|
||||
if (stderr_context.data.items.len != 0) {
|
||||
std.log.warn("unexpected LLD stderr: {}", .{stderr_context.data.items});
|
||||
}
|
||||
|
||||
if (!self.base.options.disable_lld_caching) {
|
||||
// Update the dangling symlink with the digest. If it fails we can continue; it only
|
||||
// means that the next invocation will have an unnecessary cache miss.
|
||||
directory.handle.symLink(&digest, id_symlink_basename, .{}) catch |err| {
|
||||
std.log.warn("failed to save linking hash digest symlink: {}", .{@errorName(err)});
|
||||
};
|
||||
// Again failure here only means an unnecessary cache miss.
|
||||
man.writeManifest() catch |err| {
|
||||
std.log.warn("failed to write cache manifest when linking: {}", .{ @errorName(err) });
|
||||
};
|
||||
// We hang on to this lock so that the output file path can be used without
|
||||
// other processes clobbering it.
|
||||
self.base.lock = man.toOwnedLock();
|
||||
}
|
||||
}
|
||||
|
||||
const LLDContext = struct {
|
||||
data: std.ArrayList(u8),
|
||||
elf: *Elf,
|
||||
oom: bool = false,
|
||||
};
|
||||
|
||||
fn append_diagnostic(context: usize, ptr: [*]const u8, len: usize) callconv(.C) void {
|
||||
const lld_context = @intToPtr(*LLDContext, context);
|
||||
const msg = ptr[0..len];
|
||||
lld_context.data.appendSlice(msg) catch |err| switch (err) {
|
||||
error.OutOfMemory => lld_context.oom = true,
|
||||
};
|
||||
}
|
||||
|
||||
fn writeDwarfAddrAssumeCapacity(self: *Elf, buf: *std.ArrayList(u8), addr: u64) void {
|
||||
const target_endian = self.base.options.target.cpu.arch.endian();
|
||||
switch (self.ptr_width) {
|
||||
@ -1255,7 +1692,7 @@ fn writeElfHeader(self: *Elf) !void {
|
||||
|
||||
assert(index == 16);
|
||||
|
||||
const elf_type = switch (self.base.options.output_mode) {
|
||||
const elf_type = switch (self.base.options.effectiveOutputMode()) {
|
||||
.Exe => elf.ET.EXEC,
|
||||
.Obj => elf.ET.REL,
|
||||
.Lib => switch (self.base.options.link_mode) {
|
||||
@ -2104,7 +2541,7 @@ pub fn updateDeclExports(
|
||||
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
|
||||
module.failed_exports.putAssumeCapacityNoClobber(
|
||||
exp,
|
||||
try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
|
||||
try Compilation.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
@ -2122,7 +2559,7 @@ pub fn updateDeclExports(
|
||||
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
|
||||
module.failed_exports.putAssumeCapacityNoClobber(
|
||||
exp,
|
||||
try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: GlobalLinkage.LinkOnce", .{}),
|
||||
try Compilation.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: GlobalLinkage.LinkOnce", .{}),
|
||||
);
|
||||
continue;
|
||||
},
|
||||
@ -2426,12 +2863,13 @@ fn dbgLineNeededHeaderBytes(self: Elf) u32 {
|
||||
const file_name_entry_format_count = 1;
|
||||
const directory_count = 1;
|
||||
const file_name_count = 1;
|
||||
const root_src_dir_path_len = if (self.base.options.module.?.root_pkg.root_src_directory.path) |p| p.len else 1; // "."
|
||||
return @intCast(u32, 53 + directory_entry_format_count * 2 + file_name_entry_format_count * 2 +
|
||||
directory_count * 8 + file_name_count * 8 +
|
||||
// These are encoded as DW.FORM_string rather than DW.FORM_strp as we would like
|
||||
// because of a workaround for readelf and gdb failing to understand DWARFv5 correctly.
|
||||
self.base.options.root_pkg.root_src_dir_path.len +
|
||||
self.base.options.root_pkg.root_src_path.len);
|
||||
root_src_dir_path_len +
|
||||
self.base.options.module.?.root_pkg.root_src_path.len);
|
||||
}
|
||||
|
||||
fn dbgInfoNeededHeaderBytes(self: Elf) u32 {
|
||||
@ -2630,3 +3068,33 @@ fn sectHeaderTo32(shdr: elf.Elf64_Shdr) elf.Elf32_Shdr {
|
||||
.sh_entsize = @intCast(u32, shdr.sh_entsize),
|
||||
};
|
||||
}
|
||||
|
||||
fn getLDMOption(target: std.Target) ?[]const u8 {
|
||||
switch (target.cpu.arch) {
|
||||
.i386 => return "elf_i386",
|
||||
.aarch64 => return "aarch64linux",
|
||||
.aarch64_be => return "aarch64_be_linux",
|
||||
.arm, .thumb => return "armelf_linux_eabi",
|
||||
.armeb, .thumbeb => return "armebelf_linux_eabi",
|
||||
.powerpc => return "elf32ppclinux",
|
||||
.powerpc64 => return "elf64ppc",
|
||||
.powerpc64le => return "elf64lppc",
|
||||
.sparc, .sparcel => return "elf32_sparc",
|
||||
.sparcv9 => return "elf64_sparc",
|
||||
.mips => return "elf32btsmip",
|
||||
.mipsel => return "elf32ltsmip",
|
||||
.mips64 => return "elf64btsmip",
|
||||
.mips64el => return "elf64ltsmip",
|
||||
.s390x => return "elf64_s390",
|
||||
.x86_64 => {
|
||||
if (target.abi == .gnux32) {
|
||||
return "elf32_x86_64";
|
||||
} else {
|
||||
return "elf_x86_64";
|
||||
}
|
||||
},
|
||||
.riscv32 => return "elf32lriscv",
|
||||
.riscv64 => return "elf64lriscv",
|
||||
else => return null,
|
||||
}
|
||||
}
|
||||
@ -9,12 +9,16 @@ const macho = std.macho;
|
||||
const codegen = @import("../codegen.zig");
|
||||
const math = std.math;
|
||||
const mem = std.mem;
|
||||
|
||||
const trace = @import("../tracy.zig").trace;
|
||||
const Type = @import("../type.zig").Type;
|
||||
|
||||
const build_options = @import("build_options");
|
||||
const Module = @import("../Module.zig");
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
const link = @import("../link.zig");
|
||||
const File = link.File;
|
||||
const Cache = @import("../Cache.zig");
|
||||
const target_util = @import("../target.zig");
|
||||
|
||||
pub const base_tag: File.Tag = File.Tag.macho;
|
||||
|
||||
@ -134,71 +138,64 @@ pub const SrcFn = struct {
|
||||
pub const empty = SrcFn{};
|
||||
};
|
||||
|
||||
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*File {
|
||||
pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*MachO {
|
||||
assert(options.object_format == .macho);
|
||||
|
||||
const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = link.determineMode(options) });
|
||||
if (options.use_llvm) return error.LLVM_BackendIsTODO_ForMachO; // TODO
|
||||
if (options.use_lld) return error.LLD_LinkingIsTODO_ForMachO; // TODO
|
||||
|
||||
const file = try options.emit.?.directory.handle.createFile(sub_path, .{
|
||||
.truncate = false,
|
||||
.read = true,
|
||||
.mode = link.determineMode(options),
|
||||
});
|
||||
errdefer file.close();
|
||||
|
||||
var macho_file = try allocator.create(MachO);
|
||||
errdefer allocator.destroy(macho_file);
|
||||
const self = try createEmpty(allocator, options);
|
||||
errdefer self.base.destroy();
|
||||
|
||||
macho_file.* = openFile(allocator, file, options) catch |err| switch (err) {
|
||||
error.IncrFailed => try createFile(allocator, file, options),
|
||||
else => |e| return e,
|
||||
};
|
||||
self.base.file = file;
|
||||
|
||||
return &macho_file.base;
|
||||
}
|
||||
|
||||
/// Returns error.IncrFailed if incremental update could not be performed.
|
||||
fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !MachO {
|
||||
switch (options.output_mode) {
|
||||
.Exe => {},
|
||||
.Obj => {},
|
||||
.Lib => return error.IncrFailed,
|
||||
}
|
||||
var self: MachO = .{
|
||||
.base = .{
|
||||
.file = file,
|
||||
.tag = .macho,
|
||||
.options = options,
|
||||
.allocator = allocator,
|
||||
},
|
||||
};
|
||||
errdefer self.deinit();
|
||||
|
||||
// TODO implement reading the macho file
|
||||
return error.IncrFailed;
|
||||
//try self.populateMissingMetadata();
|
||||
//return self;
|
||||
}
|
||||
|
||||
/// Truncates the existing file contents and overwrites the contents.
|
||||
/// Returns an error if `file` is not already open with +read +write +seek abilities.
|
||||
fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !MachO {
|
||||
switch (options.output_mode) {
|
||||
.Exe => {},
|
||||
.Obj => {},
|
||||
.Lib => return error.TODOImplementWritingLibFiles,
|
||||
}
|
||||
|
||||
var self: MachO = .{
|
||||
.base = .{
|
||||
.file = file,
|
||||
.tag = .macho,
|
||||
.options = options,
|
||||
.allocator = allocator,
|
||||
},
|
||||
};
|
||||
errdefer self.deinit();
|
||||
|
||||
try self.populateMissingMetadata();
|
||||
|
||||
return self;
|
||||
}
|
||||
|
||||
pub fn flush(self: *MachO, module: *Module) !void {
|
||||
pub fn createEmpty(gpa: *Allocator, options: link.Options) !*MachO {
|
||||
const self = try gpa.create(MachO);
|
||||
self.* = .{
|
||||
.base = .{
|
||||
.tag = .macho,
|
||||
.options = options,
|
||||
.allocator = gpa,
|
||||
.file = null,
|
||||
},
|
||||
};
|
||||
return self;
|
||||
}
|
||||
|
||||
pub fn flush(self: *MachO, comp: *Compilation) !void {
|
||||
if (build_options.have_llvm and self.base.options.use_lld) {
|
||||
return self.linkWithLLD(comp);
|
||||
} else {
|
||||
switch (self.base.options.effectiveOutputMode()) {
|
||||
.Exe, .Obj => {},
|
||||
.Lib => return error.TODOImplementWritingLibFiles,
|
||||
}
|
||||
return self.flushModule(comp);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn flushModule(self: *MachO, comp: *Compilation) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
switch (self.base.options.output_mode) {
|
||||
.Exe => {
|
||||
var last_cmd_offset: usize = @sizeOf(macho.mach_header_64);
|
||||
@ -291,6 +288,384 @@ pub fn flush(self: *MachO, module: *Module) !void {
|
||||
}
|
||||
}
|
||||
|
||||
fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
|
||||
defer arena_allocator.deinit();
|
||||
const arena = &arena_allocator.allocator;
|
||||
|
||||
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
|
||||
|
||||
// If there is no Zig code to compile, then we should skip flushing the output file because it
|
||||
// will not be part of the linker line anyway.
|
||||
const module_obj_path: ?[]const u8 = if (self.base.options.module) |module| blk: {
|
||||
const use_stage1 = build_options.is_stage1 and self.base.options.use_llvm;
|
||||
if (use_stage1) {
|
||||
const obj_basename = try std.zig.binNameAlloc(arena, .{
|
||||
.root_name = self.base.options.root_name,
|
||||
.target = self.base.options.target,
|
||||
.output_mode = .Obj,
|
||||
});
|
||||
const o_directory = self.base.options.module.?.zig_cache_artifact_directory;
|
||||
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
|
||||
break :blk full_obj_path;
|
||||
}
|
||||
|
||||
try self.flushModule(comp);
|
||||
const obj_basename = self.base.intermediary_basename.?;
|
||||
const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
|
||||
break :blk full_obj_path;
|
||||
} else null;
|
||||
|
||||
const is_lib = self.base.options.output_mode == .Lib;
|
||||
const is_dyn_lib = self.base.options.link_mode == .Dynamic and is_lib;
|
||||
const is_exe_or_dyn_lib = is_dyn_lib or self.base.options.output_mode == .Exe;
|
||||
const target = self.base.options.target;
|
||||
const stack_size = self.base.options.stack_size_override orelse 16777216;
|
||||
const allow_shlib_undefined = self.base.options.allow_shlib_undefined orelse !self.base.options.is_native_os;
|
||||
|
||||
const id_symlink_basename = "lld.id";
|
||||
|
||||
var man: Cache.Manifest = undefined;
|
||||
defer if (!self.base.options.disable_lld_caching) man.deinit();
|
||||
|
||||
var digest: [Cache.hex_digest_len]u8 = undefined;
|
||||
|
||||
if (!self.base.options.disable_lld_caching) {
|
||||
man = comp.cache_parent.obtain();
|
||||
|
||||
// We are about to obtain this lock, so here we give other processes a chance first.
|
||||
self.base.releaseLock();
|
||||
|
||||
try man.addOptionalFile(self.base.options.linker_script);
|
||||
try man.addOptionalFile(self.base.options.version_script);
|
||||
try man.addListOfFiles(self.base.options.objects);
|
||||
for (comp.c_object_table.items()) |entry| {
|
||||
_ = try man.addFile(entry.key.status.success.object_path, null);
|
||||
}
|
||||
try man.addOptionalFile(module_obj_path);
|
||||
// We can skip hashing libc and libc++ components that we are in charge of building from Zig
|
||||
// installation sources because they are always a product of the compiler version + target information.
|
||||
man.hash.add(stack_size);
|
||||
man.hash.add(self.base.options.rdynamic);
|
||||
man.hash.addListOfBytes(self.base.options.extra_lld_args);
|
||||
man.hash.addListOfBytes(self.base.options.lib_dirs);
|
||||
man.hash.addListOfBytes(self.base.options.framework_dirs);
|
||||
man.hash.addListOfBytes(self.base.options.frameworks);
|
||||
man.hash.addListOfBytes(self.base.options.rpath_list);
|
||||
man.hash.add(self.base.options.is_compiler_rt_or_libc);
|
||||
man.hash.add(self.base.options.z_nodelete);
|
||||
man.hash.add(self.base.options.z_defs);
|
||||
if (is_dyn_lib) {
|
||||
man.hash.addOptional(self.base.options.version);
|
||||
}
|
||||
man.hash.addStringSet(self.base.options.system_libs);
|
||||
man.hash.add(allow_shlib_undefined);
|
||||
man.hash.add(self.base.options.bind_global_refs_locally);
|
||||
|
||||
// We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
|
||||
_ = try man.hit();
|
||||
digest = man.final();
|
||||
|
||||
var prev_digest_buf: [digest.len]u8 = undefined;
|
||||
const prev_digest: []u8 = directory.handle.readLink(id_symlink_basename, &prev_digest_buf) catch |err| blk: {
|
||||
log.debug("MachO LLD new_digest={} readlink error: {}", .{ digest, @errorName(err) });
|
||||
// Handle this as a cache miss.
|
||||
break :blk prev_digest_buf[0..0];
|
||||
};
|
||||
if (mem.eql(u8, prev_digest, &digest)) {
|
||||
log.debug("MachO LLD digest={} match - skipping invocation", .{digest});
|
||||
// Hot diggity dog! The output binary is already there.
|
||||
self.base.lock = man.toOwnedLock();
|
||||
return;
|
||||
}
|
||||
log.debug("MachO LLD prev_digest={} new_digest={}", .{ prev_digest, digest });
|
||||
|
||||
// We are about to change the output file to be different, so we invalidate the build hash now.
|
||||
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
|
||||
error.FileNotFound => {},
|
||||
else => |e| return e,
|
||||
};
|
||||
}
|
||||
|
||||
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
|
||||
|
||||
if (self.base.options.output_mode == .Obj) {
|
||||
// LLD's MachO driver does not support the equvialent of `-r` so we do a simple file copy
|
||||
// here. TODO: think carefully about how we can avoid this redundant operation when doing
|
||||
// build-obj. See also the corresponding TODO in linkAsArchive.
|
||||
const the_object_path = blk: {
|
||||
if (self.base.options.objects.len != 0)
|
||||
break :blk self.base.options.objects[0];
|
||||
|
||||
if (comp.c_object_table.count() != 0)
|
||||
break :blk comp.c_object_table.items()[0].key.status.success.object_path;
|
||||
|
||||
if (module_obj_path) |p|
|
||||
break :blk p;
|
||||
|
||||
// TODO I think this is unreachable. Audit this situation when solving the above TODO
|
||||
// regarding eliding redundant object -> object transformations.
|
||||
return error.NoObjectsToLink;
|
||||
};
|
||||
// This can happen when using --enable-cache and using the stage1 backend. In this case
|
||||
// we can skip the file copy.
|
||||
if (!mem.eql(u8, the_object_path, full_out_path)) {
|
||||
try fs.cwd().copyFile(the_object_path, fs.cwd(), full_out_path, .{});
|
||||
}
|
||||
} else {
|
||||
// Create an LLD command line and invoke it.
|
||||
var argv = std.ArrayList([]const u8).init(self.base.allocator);
|
||||
defer argv.deinit();
|
||||
// Even though we're calling LLD as a library it thinks the first argument is its own exe name.
|
||||
try argv.append("lld");
|
||||
|
||||
try argv.append("-error-limit");
|
||||
try argv.append("0");
|
||||
|
||||
try argv.append("-demangle");
|
||||
|
||||
if (self.base.options.rdynamic) {
|
||||
try argv.append("--export-dynamic");
|
||||
}
|
||||
|
||||
try argv.appendSlice(self.base.options.extra_lld_args);
|
||||
|
||||
if (self.base.options.z_nodelete) {
|
||||
try argv.append("-z");
|
||||
try argv.append("nodelete");
|
||||
}
|
||||
if (self.base.options.z_defs) {
|
||||
try argv.append("-z");
|
||||
try argv.append("defs");
|
||||
}
|
||||
|
||||
if (is_dyn_lib) {
|
||||
try argv.append("-static");
|
||||
} else {
|
||||
try argv.append("-dynamic");
|
||||
}
|
||||
|
||||
if (is_dyn_lib) {
|
||||
try argv.append("-dylib");
|
||||
|
||||
if (self.base.options.version) |ver| {
|
||||
const compat_vers = try std.fmt.allocPrint(arena, "{d}.0.0", .{ver.major});
|
||||
try argv.append("-compatibility_version");
|
||||
try argv.append(compat_vers);
|
||||
|
||||
const cur_vers = try std.fmt.allocPrint(arena, "{d}.{d}.{d}", .{ ver.major, ver.minor, ver.patch });
|
||||
try argv.append("-current_version");
|
||||
try argv.append(cur_vers);
|
||||
}
|
||||
|
||||
// TODO getting an error when running an executable when doing this rpath thing
|
||||
//Buf *dylib_install_name = buf_sprintf("@rpath/lib%s.%" ZIG_PRI_usize ".dylib",
|
||||
// buf_ptr(g->root_out_name), g->version_major);
|
||||
//try argv.append("-install_name");
|
||||
//try argv.append(buf_ptr(dylib_install_name));
|
||||
}
|
||||
|
||||
try argv.append("-arch");
|
||||
try argv.append(darwinArchString(target.cpu.arch));
|
||||
|
||||
switch (target.os.tag) {
|
||||
.macosx => {
|
||||
try argv.append("-macosx_version_min");
|
||||
},
|
||||
.ios, .tvos, .watchos => switch (target.cpu.arch) {
|
||||
.i386, .x86_64 => {
|
||||
try argv.append("-ios_simulator_version_min");
|
||||
},
|
||||
else => {
|
||||
try argv.append("-iphoneos_version_min");
|
||||
},
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
const ver = target.os.version_range.semver.min;
|
||||
const version_string = try std.fmt.allocPrint(arena, "{d}.{d}.{d}", .{ ver.major, ver.minor, ver.patch });
|
||||
try argv.append(version_string);
|
||||
|
||||
try argv.append("-sdk_version");
|
||||
try argv.append(version_string);
|
||||
|
||||
if (target_util.requiresPIE(target) and self.base.options.output_mode == .Exe) {
|
||||
try argv.append("-pie");
|
||||
}
|
||||
|
||||
try argv.append("-o");
|
||||
try argv.append(full_out_path);
|
||||
|
||||
// rpaths
|
||||
var rpath_table = std.StringHashMap(void).init(self.base.allocator);
|
||||
defer rpath_table.deinit();
|
||||
for (self.base.options.rpath_list) |rpath| {
|
||||
if ((try rpath_table.fetchPut(rpath, {})) == null) {
|
||||
try argv.append("-rpath");
|
||||
try argv.append(rpath);
|
||||
}
|
||||
}
|
||||
if (is_dyn_lib) {
|
||||
if ((try rpath_table.fetchPut(full_out_path, {})) == null) {
|
||||
try argv.append("-rpath");
|
||||
try argv.append(full_out_path);
|
||||
}
|
||||
}
|
||||
|
||||
for (self.base.options.lib_dirs) |lib_dir| {
|
||||
try argv.append("-L");
|
||||
try argv.append(lib_dir);
|
||||
}
|
||||
|
||||
// Positional arguments to the linker such as object files.
|
||||
try argv.appendSlice(self.base.options.objects);
|
||||
|
||||
for (comp.c_object_table.items()) |entry| {
|
||||
try argv.append(entry.key.status.success.object_path);
|
||||
}
|
||||
if (module_obj_path) |p| {
|
||||
try argv.append(p);
|
||||
}
|
||||
|
||||
// compiler_rt on darwin is missing some stuff, so we still build it and rely on LinkOnce
|
||||
if (is_exe_or_dyn_lib and !self.base.options.is_compiler_rt_or_libc) {
|
||||
try argv.append(comp.compiler_rt_static_lib.?.full_object_path);
|
||||
}
|
||||
|
||||
// Shared libraries.
|
||||
const system_libs = self.base.options.system_libs.items();
|
||||
try argv.ensureCapacity(argv.items.len + system_libs.len);
|
||||
for (system_libs) |entry| {
|
||||
const link_lib = entry.key;
|
||||
// By this time, we depend on these libs being dynamically linked libraries and not static libraries
|
||||
// (the check for that needs to be earlier), but they could be full paths to .dylib files, in which
|
||||
// case we want to avoid prepending "-l".
|
||||
const ext = Compilation.classifyFileExt(link_lib);
|
||||
const arg = if (ext == .shared_library) link_lib else try std.fmt.allocPrint(arena, "-l{}", .{link_lib});
|
||||
argv.appendAssumeCapacity(arg);
|
||||
}
|
||||
|
||||
// libc++ dep
|
||||
if (self.base.options.link_libcpp) {
|
||||
try argv.append(comp.libcxxabi_static_lib.?.full_object_path);
|
||||
try argv.append(comp.libcxx_static_lib.?.full_object_path);
|
||||
}
|
||||
|
||||
// On Darwin, libSystem has libc in it, but also you have to use it
|
||||
// to make syscalls because the syscall numbers are not documented
|
||||
// and change between versions. So we always link against libSystem.
|
||||
// LLD craps out if you do -lSystem cross compiling, so until that
|
||||
// codebase gets some love from the new maintainers we're left with
|
||||
// this dirty hack.
|
||||
if (self.base.options.is_native_os) {
|
||||
try argv.append("-lSystem");
|
||||
}
|
||||
|
||||
for (self.base.options.framework_dirs) |framework_dir| {
|
||||
try argv.append("-F");
|
||||
try argv.append(framework_dir);
|
||||
}
|
||||
for (self.base.options.frameworks) |framework| {
|
||||
try argv.append("-framework");
|
||||
try argv.append(framework);
|
||||
}
|
||||
|
||||
if (allow_shlib_undefined) {
|
||||
try argv.append("-undefined");
|
||||
try argv.append("dynamic_lookup");
|
||||
}
|
||||
if (self.base.options.bind_global_refs_locally) {
|
||||
try argv.append("-Bsymbolic");
|
||||
}
|
||||
|
||||
if (self.base.options.verbose_link) {
|
||||
Compilation.dump_argv(argv.items);
|
||||
}
|
||||
|
||||
const new_argv = try arena.allocSentinel(?[*:0]const u8, argv.items.len, null);
|
||||
for (argv.items) |arg, i| {
|
||||
new_argv[i] = try arena.dupeZ(u8, arg);
|
||||
}
|
||||
|
||||
var stderr_context: LLDContext = .{
|
||||
.macho = self,
|
||||
.data = std.ArrayList(u8).init(self.base.allocator),
|
||||
};
|
||||
defer stderr_context.data.deinit();
|
||||
var stdout_context: LLDContext = .{
|
||||
.macho = self,
|
||||
.data = std.ArrayList(u8).init(self.base.allocator),
|
||||
};
|
||||
defer stdout_context.data.deinit();
|
||||
const llvm = @import("../llvm.zig");
|
||||
const ok = llvm.Link(
|
||||
.MachO,
|
||||
new_argv.ptr,
|
||||
new_argv.len,
|
||||
append_diagnostic,
|
||||
@ptrToInt(&stdout_context),
|
||||
@ptrToInt(&stderr_context),
|
||||
);
|
||||
if (stderr_context.oom or stdout_context.oom) return error.OutOfMemory;
|
||||
if (stdout_context.data.items.len != 0) {
|
||||
std.log.warn("unexpected LLD stdout: {}", .{stdout_context.data.items});
|
||||
}
|
||||
if (!ok) {
|
||||
// TODO parse this output and surface with the Compilation API rather than
|
||||
// directly outputting to stderr here.
|
||||
std.debug.print("{}", .{stderr_context.data.items});
|
||||
return error.LLDReportedFailure;
|
||||
}
|
||||
if (stderr_context.data.items.len != 0) {
|
||||
std.log.warn("unexpected LLD stderr: {}", .{stderr_context.data.items});
|
||||
}
|
||||
}
|
||||
|
||||
if (!self.base.options.disable_lld_caching) {
|
||||
// Update the dangling symlink with the digest. If it fails we can continue; it only
|
||||
// means that the next invocation will have an unnecessary cache miss.
|
||||
directory.handle.symLink(&digest, id_symlink_basename, .{}) catch |err| {
|
||||
std.log.warn("failed to save linking hash digest symlink: {}", .{@errorName(err)});
|
||||
};
|
||||
// Again failure here only means an unnecessary cache miss.
|
||||
man.writeManifest() catch |err| {
|
||||
std.log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)});
|
||||
};
|
||||
// We hang on to this lock so that the output file path can be used without
|
||||
// other processes clobbering it.
|
||||
self.base.lock = man.toOwnedLock();
|
||||
}
|
||||
}
|
||||
|
||||
const LLDContext = struct {
|
||||
data: std.ArrayList(u8),
|
||||
macho: *MachO,
|
||||
oom: bool = false,
|
||||
};
|
||||
|
||||
fn append_diagnostic(context: usize, ptr: [*]const u8, len: usize) callconv(.C) void {
|
||||
const lld_context = @intToPtr(*LLDContext, context);
|
||||
const msg = ptr[0..len];
|
||||
lld_context.data.appendSlice(msg) catch |err| switch (err) {
|
||||
error.OutOfMemory => lld_context.oom = true,
|
||||
};
|
||||
}
|
||||
|
||||
fn darwinArchString(arch: std.Target.Cpu.Arch) []const u8 {
|
||||
return switch (arch) {
|
||||
.aarch64, .aarch64_be, .aarch64_32 => "arm64",
|
||||
.thumb, .arm => "arm",
|
||||
.thumbeb, .armeb => "armeb",
|
||||
.powerpc => "ppc",
|
||||
.powerpc64 => "ppc64",
|
||||
.powerpc64le => "ppc64le",
|
||||
else => @tagName(arch),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(self: *MachO) void {
|
||||
self.offset_table.deinit(self.base.allocator);
|
||||
self.string_table.deinit(self.base.allocator);
|
||||
479
src/link/Wasm.zig
Normal file
479
src/link/Wasm.zig
Normal file
@ -0,0 +1,479 @@
|
||||
const Wasm = @This();
|
||||
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
const fs = std.fs;
|
||||
const leb = std.debug.leb;
|
||||
const log = std.log.scoped(.link);
|
||||
|
||||
const Module = @import("../Module.zig");
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
const codegen = @import("../codegen/wasm.zig");
|
||||
const link = @import("../link.zig");
|
||||
const trace = @import("../tracy.zig").trace;
|
||||
const build_options = @import("build_options");
|
||||
const Cache = @import("../Cache.zig");
|
||||
|
||||
/// Various magic numbers defined by the wasm spec
|
||||
const spec = struct {
|
||||
const magic = [_]u8{ 0x00, 0x61, 0x73, 0x6D }; // \0asm
|
||||
const version = [_]u8{ 0x01, 0x00, 0x00, 0x00 }; // version 1
|
||||
|
||||
const custom_id = 0;
|
||||
const types_id = 1;
|
||||
const imports_id = 2;
|
||||
const funcs_id = 3;
|
||||
const tables_id = 4;
|
||||
const memories_id = 5;
|
||||
const globals_id = 6;
|
||||
const exports_id = 7;
|
||||
const start_id = 8;
|
||||
const elements_id = 9;
|
||||
const code_id = 10;
|
||||
const data_id = 11;
|
||||
};
|
||||
|
||||
pub const base_tag = link.File.Tag.wasm;
|
||||
|
||||
pub const FnData = struct {
|
||||
/// Generated code for the type of the function
|
||||
functype: std.ArrayListUnmanaged(u8) = .{},
|
||||
/// Generated code for the body of the function
|
||||
code: std.ArrayListUnmanaged(u8) = .{},
|
||||
/// Locations in the generated code where function indexes must be filled in.
|
||||
/// This must be kept ordered by offset.
|
||||
idx_refs: std.ArrayListUnmanaged(struct { offset: u32, decl: *Module.Decl }) = .{},
|
||||
};
|
||||
|
||||
base: link.File,
|
||||
|
||||
/// List of all function Decls to be written to the output file. The index of
|
||||
/// each Decl in this list at the time of writing the binary is used as the
|
||||
/// function index.
|
||||
/// TODO: can/should we access some data structure in Module directly?
|
||||
funcs: std.ArrayListUnmanaged(*Module.Decl) = .{},
|
||||
|
||||
pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Wasm {
|
||||
assert(options.object_format == .wasm);
|
||||
|
||||
if (options.use_llvm) return error.LLVM_BackendIsTODO_ForWasm; // TODO
|
||||
if (options.use_lld) return error.LLD_LinkingIsTODO_ForWasm; // TODO
|
||||
|
||||
// TODO: read the file and keep vaild parts instead of truncating
|
||||
const file = try options.emit.?.directory.handle.createFile(sub_path, .{ .truncate = true, .read = true });
|
||||
errdefer file.close();
|
||||
|
||||
const wasm = try createEmpty(allocator, options);
|
||||
errdefer wasm.base.destroy();
|
||||
|
||||
wasm.base.file = file;
|
||||
|
||||
try file.writeAll(&(spec.magic ++ spec.version));
|
||||
|
||||
return wasm;
|
||||
}
|
||||
|
||||
pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Wasm {
|
||||
const wasm = try gpa.create(Wasm);
|
||||
wasm.* = .{
|
||||
.base = .{
|
||||
.tag = .wasm,
|
||||
.options = options,
|
||||
.file = null,
|
||||
.allocator = gpa,
|
||||
},
|
||||
};
|
||||
return wasm;
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Wasm) void {
|
||||
for (self.funcs.items) |decl| {
|
||||
decl.fn_link.wasm.?.functype.deinit(self.base.allocator);
|
||||
decl.fn_link.wasm.?.code.deinit(self.base.allocator);
|
||||
decl.fn_link.wasm.?.idx_refs.deinit(self.base.allocator);
|
||||
}
|
||||
self.funcs.deinit(self.base.allocator);
|
||||
}
|
||||
|
||||
// Generate code for the Decl, storing it in memory to be later written to
|
||||
// the file on flush().
|
||||
pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
|
||||
if (decl.typed_value.most_recent.typed_value.ty.zigTypeTag() != .Fn)
|
||||
return error.TODOImplementNonFnDeclsForWasm;
|
||||
|
||||
if (decl.fn_link.wasm) |*fn_data| {
|
||||
fn_data.functype.items.len = 0;
|
||||
fn_data.code.items.len = 0;
|
||||
fn_data.idx_refs.items.len = 0;
|
||||
} else {
|
||||
decl.fn_link.wasm = .{};
|
||||
try self.funcs.append(self.base.allocator, decl);
|
||||
}
|
||||
const fn_data = &decl.fn_link.wasm.?;
|
||||
|
||||
var managed_functype = fn_data.functype.toManaged(self.base.allocator);
|
||||
var managed_code = fn_data.code.toManaged(self.base.allocator);
|
||||
try codegen.genFunctype(&managed_functype, decl);
|
||||
try codegen.genCode(&managed_code, decl);
|
||||
fn_data.functype = managed_functype.toUnmanaged();
|
||||
fn_data.code = managed_code.toUnmanaged();
|
||||
}
|
||||
|
||||
pub fn updateDeclExports(
|
||||
self: *Wasm,
|
||||
module: *Module,
|
||||
decl: *const Module.Decl,
|
||||
exports: []const *Module.Export,
|
||||
) !void {}
|
||||
|
||||
pub fn freeDecl(self: *Wasm, decl: *Module.Decl) void {
|
||||
// TODO: remove this assert when non-function Decls are implemented
|
||||
assert(decl.typed_value.most_recent.typed_value.ty.zigTypeTag() == .Fn);
|
||||
_ = self.funcs.swapRemove(self.getFuncidx(decl).?);
|
||||
decl.fn_link.wasm.?.functype.deinit(self.base.allocator);
|
||||
decl.fn_link.wasm.?.code.deinit(self.base.allocator);
|
||||
decl.fn_link.wasm.?.idx_refs.deinit(self.base.allocator);
|
||||
decl.fn_link.wasm = null;
|
||||
}
|
||||
|
||||
pub fn flush(self: *Wasm, comp: *Compilation) !void {
|
||||
if (build_options.have_llvm and self.base.options.use_lld) {
|
||||
return self.linkWithLLD(comp);
|
||||
} else {
|
||||
return self.flushModule(comp);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
const file = self.base.file.?;
|
||||
const header_size = 5 + 1;
|
||||
|
||||
// No need to rewrite the magic/version header
|
||||
try file.setEndPos(@sizeOf(@TypeOf(spec.magic ++ spec.version)));
|
||||
try file.seekTo(@sizeOf(@TypeOf(spec.magic ++ spec.version)));
|
||||
|
||||
// Type section
|
||||
{
|
||||
const header_offset = try reserveVecSectionHeader(file);
|
||||
for (self.funcs.items) |decl| {
|
||||
try file.writeAll(decl.fn_link.wasm.?.functype.items);
|
||||
}
|
||||
try writeVecSectionHeader(
|
||||
file,
|
||||
header_offset,
|
||||
spec.types_id,
|
||||
@intCast(u32, (try file.getPos()) - header_offset - header_size),
|
||||
@intCast(u32, self.funcs.items.len),
|
||||
);
|
||||
}
|
||||
|
||||
// Function section
|
||||
{
|
||||
const header_offset = try reserveVecSectionHeader(file);
|
||||
const writer = file.writer();
|
||||
for (self.funcs.items) |_, typeidx| try leb.writeULEB128(writer, @intCast(u32, typeidx));
|
||||
try writeVecSectionHeader(
|
||||
file,
|
||||
header_offset,
|
||||
spec.funcs_id,
|
||||
@intCast(u32, (try file.getPos()) - header_offset - header_size),
|
||||
@intCast(u32, self.funcs.items.len),
|
||||
);
|
||||
}
|
||||
|
||||
// Export section
|
||||
if (self.base.options.module) |module| {
|
||||
const header_offset = try reserveVecSectionHeader(file);
|
||||
const writer = file.writer();
|
||||
var count: u32 = 0;
|
||||
for (module.decl_exports.entries.items) |entry| {
|
||||
for (entry.value) |exprt| {
|
||||
// Export name length + name
|
||||
try leb.writeULEB128(writer, @intCast(u32, exprt.options.name.len));
|
||||
try writer.writeAll(exprt.options.name);
|
||||
|
||||
switch (exprt.exported_decl.typed_value.most_recent.typed_value.ty.zigTypeTag()) {
|
||||
.Fn => {
|
||||
// Type of the export
|
||||
try writer.writeByte(0x00);
|
||||
// Exported function index
|
||||
try leb.writeULEB128(writer, self.getFuncidx(exprt.exported_decl).?);
|
||||
},
|
||||
else => return error.TODOImplementNonFnDeclsForWasm,
|
||||
}
|
||||
|
||||
count += 1;
|
||||
}
|
||||
}
|
||||
try writeVecSectionHeader(
|
||||
file,
|
||||
header_offset,
|
||||
spec.exports_id,
|
||||
@intCast(u32, (try file.getPos()) - header_offset - header_size),
|
||||
count,
|
||||
);
|
||||
}
|
||||
|
||||
// Code section
|
||||
{
|
||||
const header_offset = try reserveVecSectionHeader(file);
|
||||
const writer = file.writer();
|
||||
for (self.funcs.items) |decl| {
|
||||
const fn_data = &decl.fn_link.wasm.?;
|
||||
|
||||
// Write the already generated code to the file, inserting
|
||||
// function indexes where required.
|
||||
var current: u32 = 0;
|
||||
for (fn_data.idx_refs.items) |idx_ref| {
|
||||
try writer.writeAll(fn_data.code.items[current..idx_ref.offset]);
|
||||
current = idx_ref.offset;
|
||||
// Use a fixed width here to make calculating the code size
|
||||
// in codegen.wasm.genCode() simpler.
|
||||
var buf: [5]u8 = undefined;
|
||||
leb.writeUnsignedFixed(5, &buf, self.getFuncidx(idx_ref.decl).?);
|
||||
try writer.writeAll(&buf);
|
||||
}
|
||||
|
||||
try writer.writeAll(fn_data.code.items[current..]);
|
||||
}
|
||||
try writeVecSectionHeader(
|
||||
file,
|
||||
header_offset,
|
||||
spec.code_id,
|
||||
@intCast(u32, (try file.getPos()) - header_offset - header_size),
|
||||
@intCast(u32, self.funcs.items.len),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
|
||||
defer arena_allocator.deinit();
|
||||
const arena = &arena_allocator.allocator;
|
||||
|
||||
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
|
||||
|
||||
// If there is no Zig code to compile, then we should skip flushing the output file because it
|
||||
// will not be part of the linker line anyway.
|
||||
const module_obj_path: ?[]const u8 = if (self.base.options.module) |module| blk: {
|
||||
const use_stage1 = build_options.is_stage1 and self.base.options.use_llvm;
|
||||
if (use_stage1) {
|
||||
const obj_basename = try std.zig.binNameAlloc(arena, .{
|
||||
.root_name = self.base.options.root_name,
|
||||
.target = self.base.options.target,
|
||||
.output_mode = .Obj,
|
||||
});
|
||||
const o_directory = self.base.options.module.?.zig_cache_artifact_directory;
|
||||
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
|
||||
break :blk full_obj_path;
|
||||
}
|
||||
|
||||
try self.flushModule(comp);
|
||||
const obj_basename = self.base.intermediary_basename.?;
|
||||
const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
|
||||
break :blk full_obj_path;
|
||||
} else null;
|
||||
|
||||
const target = self.base.options.target;
|
||||
|
||||
const id_symlink_basename = "lld.id";
|
||||
|
||||
var man: Cache.Manifest = undefined;
|
||||
defer if (!self.base.options.disable_lld_caching) man.deinit();
|
||||
|
||||
var digest: [Cache.hex_digest_len]u8 = undefined;
|
||||
|
||||
if (!self.base.options.disable_lld_caching) {
|
||||
man = comp.cache_parent.obtain();
|
||||
|
||||
// We are about to obtain this lock, so here we give other processes a chance first.
|
||||
self.base.releaseLock();
|
||||
|
||||
try man.addListOfFiles(self.base.options.objects);
|
||||
for (comp.c_object_table.items()) |entry| {
|
||||
_ = try man.addFile(entry.key.status.success.object_path, null);
|
||||
}
|
||||
try man.addOptionalFile(module_obj_path);
|
||||
man.hash.addOptional(self.base.options.stack_size_override);
|
||||
man.hash.addListOfBytes(self.base.options.extra_lld_args);
|
||||
|
||||
// We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
|
||||
_ = try man.hit();
|
||||
digest = man.final();
|
||||
|
||||
var prev_digest_buf: [digest.len]u8 = undefined;
|
||||
const prev_digest: []u8 = directory.handle.readLink(id_symlink_basename, &prev_digest_buf) catch |err| blk: {
|
||||
log.debug("WASM LLD new_digest={} readlink error: {}", .{ digest, @errorName(err) });
|
||||
// Handle this as a cache miss.
|
||||
break :blk prev_digest_buf[0..0];
|
||||
};
|
||||
if (mem.eql(u8, prev_digest, &digest)) {
|
||||
log.debug("WASM LLD digest={} match - skipping invocation", .{digest});
|
||||
// Hot diggity dog! The output binary is already there.
|
||||
self.base.lock = man.toOwnedLock();
|
||||
return;
|
||||
}
|
||||
log.debug("WASM LLD prev_digest={} new_digest={}", .{ prev_digest, digest });
|
||||
|
||||
// We are about to change the output file to be different, so we invalidate the build hash now.
|
||||
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
|
||||
error.FileNotFound => {},
|
||||
else => |e| return e,
|
||||
};
|
||||
}
|
||||
|
||||
const is_obj = self.base.options.output_mode == .Obj;
|
||||
|
||||
// Create an LLD command line and invoke it.
|
||||
var argv = std.ArrayList([]const u8).init(self.base.allocator);
|
||||
defer argv.deinit();
|
||||
// Even though we're calling LLD as a library it thinks the first argument is its own exe name.
|
||||
try argv.append("lld");
|
||||
if (is_obj) {
|
||||
try argv.append("-r");
|
||||
}
|
||||
|
||||
try argv.append("-error-limit=0");
|
||||
|
||||
if (self.base.options.output_mode == .Exe) {
|
||||
// Increase the default stack size to a more reasonable value of 1MB instead of
|
||||
// the default of 1 Wasm page being 64KB, unless overriden by the user.
|
||||
try argv.append("-z");
|
||||
const stack_size = self.base.options.stack_size_override orelse 1048576;
|
||||
const arg = try std.fmt.allocPrint(arena, "stack-size={d}", .{stack_size});
|
||||
try argv.append(arg);
|
||||
|
||||
// Put stack before globals so that stack overflow results in segfault immediately
|
||||
// before corrupting globals. See https://github.com/ziglang/zig/issues/4496
|
||||
try argv.append("--stack-first");
|
||||
} else {
|
||||
try argv.append("--no-entry"); // So lld doesn't look for _start.
|
||||
try argv.append("--export-all");
|
||||
}
|
||||
try argv.appendSlice(&[_][]const u8{
|
||||
"--allow-undefined",
|
||||
"-o",
|
||||
try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path}),
|
||||
});
|
||||
|
||||
// Positional arguments to the linker such as object files.
|
||||
try argv.appendSlice(self.base.options.objects);
|
||||
|
||||
for (comp.c_object_table.items()) |entry| {
|
||||
try argv.append(entry.key.status.success.object_path);
|
||||
}
|
||||
if (module_obj_path) |p| {
|
||||
try argv.append(p);
|
||||
}
|
||||
|
||||
if (self.base.options.output_mode == .Exe and !self.base.options.is_compiler_rt_or_libc) {
|
||||
if (!self.base.options.link_libc) {
|
||||
try argv.append(comp.libc_static_lib.?.full_object_path);
|
||||
}
|
||||
try argv.append(comp.compiler_rt_static_lib.?.full_object_path);
|
||||
}
|
||||
|
||||
if (self.base.options.verbose_link) {
|
||||
Compilation.dump_argv(argv.items);
|
||||
}
|
||||
|
||||
const new_argv = try arena.allocSentinel(?[*:0]const u8, argv.items.len, null);
|
||||
for (argv.items) |arg, i| {
|
||||
new_argv[i] = try arena.dupeZ(u8, arg);
|
||||
}
|
||||
|
||||
var stderr_context: LLDContext = .{
|
||||
.wasm = self,
|
||||
.data = std.ArrayList(u8).init(self.base.allocator),
|
||||
};
|
||||
defer stderr_context.data.deinit();
|
||||
var stdout_context: LLDContext = .{
|
||||
.wasm = self,
|
||||
.data = std.ArrayList(u8).init(self.base.allocator),
|
||||
};
|
||||
defer stdout_context.data.deinit();
|
||||
const llvm = @import("../llvm.zig");
|
||||
const ok = llvm.Link(
|
||||
.Wasm,
|
||||
new_argv.ptr,
|
||||
new_argv.len,
|
||||
append_diagnostic,
|
||||
@ptrToInt(&stdout_context),
|
||||
@ptrToInt(&stderr_context),
|
||||
);
|
||||
if (stderr_context.oom or stdout_context.oom) return error.OutOfMemory;
|
||||
if (stdout_context.data.items.len != 0) {
|
||||
std.log.warn("unexpected LLD stdout: {}", .{stdout_context.data.items});
|
||||
}
|
||||
if (!ok) {
|
||||
// TODO parse this output and surface with the Compilation API rather than
|
||||
// directly outputting to stderr here.
|
||||
std.debug.print("{}", .{stderr_context.data.items});
|
||||
return error.LLDReportedFailure;
|
||||
}
|
||||
if (stderr_context.data.items.len != 0) {
|
||||
std.log.warn("unexpected LLD stderr: {}", .{stderr_context.data.items});
|
||||
}
|
||||
|
||||
if (!self.base.options.disable_lld_caching) {
|
||||
// Update the dangling symlink with the digest. If it fails we can continue; it only
|
||||
// means that the next invocation will have an unnecessary cache miss.
|
||||
directory.handle.symLink(&digest, id_symlink_basename, .{}) catch |err| {
|
||||
std.log.warn("failed to save linking hash digest symlink: {}", .{@errorName(err)});
|
||||
};
|
||||
// Again failure here only means an unnecessary cache miss.
|
||||
man.writeManifest() catch |err| {
|
||||
std.log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)});
|
||||
};
|
||||
// We hang on to this lock so that the output file path can be used without
|
||||
// other processes clobbering it.
|
||||
self.base.lock = man.toOwnedLock();
|
||||
}
|
||||
}
|
||||
|
||||
const LLDContext = struct {
|
||||
data: std.ArrayList(u8),
|
||||
wasm: *Wasm,
|
||||
oom: bool = false,
|
||||
};
|
||||
|
||||
fn append_diagnostic(context: usize, ptr: [*]const u8, len: usize) callconv(.C) void {
|
||||
const lld_context = @intToPtr(*LLDContext, context);
|
||||
const msg = ptr[0..len];
|
||||
lld_context.data.appendSlice(msg) catch |err| switch (err) {
|
||||
error.OutOfMemory => lld_context.oom = true,
|
||||
};
|
||||
}
|
||||
|
||||
/// Get the current index of a given Decl in the function list
|
||||
/// TODO: we could maintain a hash map to potentially make this
|
||||
fn getFuncidx(self: Wasm, decl: *Module.Decl) ?u32 {
|
||||
return for (self.funcs.items) |func, idx| {
|
||||
if (func == decl) break @intCast(u32, idx);
|
||||
} else null;
|
||||
}
|
||||
|
||||
fn reserveVecSectionHeader(file: fs.File) !u64 {
|
||||
// section id + fixed leb contents size + fixed leb vector length
|
||||
const header_size = 1 + 5 + 5;
|
||||
// TODO: this should be a single lseek(2) call, but fs.File does not
|
||||
// currently provide a way to do this.
|
||||
try file.seekBy(header_size);
|
||||
return (try file.getPos()) - header_size;
|
||||
}
|
||||
|
||||
fn writeVecSectionHeader(file: fs.File, offset: u64, section: u8, size: u32, items: u32) !void {
|
||||
var buf: [1 + 5 + 5]u8 = undefined;
|
||||
buf[0] = section;
|
||||
leb.writeUnsignedFixed(5, buf[1..6], size);
|
||||
leb.writeUnsignedFixed(5, buf[6..], items);
|
||||
try file.pwriteAll(&buf, offset);
|
||||
}
|
||||
140
src/llvm.zig
Normal file
140
src/llvm.zig
Normal file
@ -0,0 +1,140 @@
|
||||
//! We do this instead of @cImport because the self-hosted compiler is easier
|
||||
//! to bootstrap if it does not depend on translate-c.
|
||||
|
||||
pub const Link = ZigLLDLink;
|
||||
extern fn ZigLLDLink(
|
||||
oformat: ObjectFormatType,
|
||||
args: [*:null]const ?[*:0]const u8,
|
||||
arg_count: usize,
|
||||
append_diagnostic: fn (context: usize, ptr: [*]const u8, len: usize) callconv(.C) void,
|
||||
context_stdout: usize,
|
||||
context_stderr: usize,
|
||||
) bool;
|
||||
|
||||
pub const ObjectFormatType = extern enum(c_int) {
|
||||
Unknown,
|
||||
COFF,
|
||||
ELF,
|
||||
MachO,
|
||||
Wasm,
|
||||
XCOFF,
|
||||
};
|
||||
|
||||
pub const GetHostCPUName = LLVMGetHostCPUName;
|
||||
extern fn LLVMGetHostCPUName() ?[*:0]u8;
|
||||
|
||||
pub const GetNativeFeatures = ZigLLVMGetNativeFeatures;
|
||||
extern fn ZigLLVMGetNativeFeatures() ?[*:0]u8;
|
||||
|
||||
pub const WriteArchive = ZigLLVMWriteArchive;
|
||||
extern fn ZigLLVMWriteArchive(
|
||||
archive_name: [*:0]const u8,
|
||||
file_names_ptr: [*]const [*:0]const u8,
|
||||
file_names_len: usize,
|
||||
os_type: OSType,
|
||||
) bool;
|
||||
|
||||
pub const OSType = extern enum(c_int) {
|
||||
UnknownOS = 0,
|
||||
Ananas = 1,
|
||||
CloudABI = 2,
|
||||
Darwin = 3,
|
||||
DragonFly = 4,
|
||||
FreeBSD = 5,
|
||||
Fuchsia = 6,
|
||||
IOS = 7,
|
||||
KFreeBSD = 8,
|
||||
Linux = 9,
|
||||
Lv2 = 10,
|
||||
MacOSX = 11,
|
||||
NetBSD = 12,
|
||||
OpenBSD = 13,
|
||||
Solaris = 14,
|
||||
Win32 = 15,
|
||||
Haiku = 16,
|
||||
Minix = 17,
|
||||
RTEMS = 18,
|
||||
NaCl = 19,
|
||||
CNK = 20,
|
||||
AIX = 21,
|
||||
CUDA = 22,
|
||||
NVCL = 23,
|
||||
AMDHSA = 24,
|
||||
PS4 = 25,
|
||||
ELFIAMCU = 26,
|
||||
TvOS = 27,
|
||||
WatchOS = 28,
|
||||
Mesa3D = 29,
|
||||
Contiki = 30,
|
||||
AMDPAL = 31,
|
||||
HermitCore = 32,
|
||||
Hurd = 33,
|
||||
WASI = 34,
|
||||
Emscripten = 35,
|
||||
};
|
||||
|
||||
pub const ArchType = extern enum(c_int) {
|
||||
UnknownArch = 0,
|
||||
arm = 1,
|
||||
armeb = 2,
|
||||
aarch64 = 3,
|
||||
aarch64_be = 4,
|
||||
aarch64_32 = 5,
|
||||
arc = 6,
|
||||
avr = 7,
|
||||
bpfel = 8,
|
||||
bpfeb = 9,
|
||||
hexagon = 10,
|
||||
mips = 11,
|
||||
mipsel = 12,
|
||||
mips64 = 13,
|
||||
mips64el = 14,
|
||||
msp430 = 15,
|
||||
ppc = 16,
|
||||
ppc64 = 17,
|
||||
ppc64le = 18,
|
||||
r600 = 19,
|
||||
amdgcn = 20,
|
||||
riscv32 = 21,
|
||||
riscv64 = 22,
|
||||
sparc = 23,
|
||||
sparcv9 = 24,
|
||||
sparcel = 25,
|
||||
systemz = 26,
|
||||
tce = 27,
|
||||
tcele = 28,
|
||||
thumb = 29,
|
||||
thumbeb = 30,
|
||||
x86 = 31,
|
||||
x86_64 = 32,
|
||||
xcore = 33,
|
||||
nvptx = 34,
|
||||
nvptx64 = 35,
|
||||
le32 = 36,
|
||||
le64 = 37,
|
||||
amdil = 38,
|
||||
amdil64 = 39,
|
||||
hsail = 40,
|
||||
hsail64 = 41,
|
||||
spir = 42,
|
||||
spir64 = 43,
|
||||
kalimba = 44,
|
||||
shave = 45,
|
||||
lanai = 46,
|
||||
wasm32 = 47,
|
||||
wasm64 = 48,
|
||||
renderscript32 = 49,
|
||||
renderscript64 = 50,
|
||||
ve = 51,
|
||||
};
|
||||
|
||||
pub const ParseCommandLineOptions = ZigLLVMParseCommandLineOptions;
|
||||
extern fn ZigLLVMParseCommandLineOptions(argc: usize, argv: [*]const [*:0]const u8) void;
|
||||
|
||||
pub const WriteImportLibrary = ZigLLVMWriteImportLibrary;
|
||||
extern fn ZigLLVMWriteImportLibrary(
|
||||
def_path: [*:0]const u8,
|
||||
arch: ArchType,
|
||||
output_lib_path: [*c]const u8,
|
||||
kill_at: bool,
|
||||
) bool;
|
||||
1878
src/main.cpp
1878
src/main.cpp
File diff suppressed because it is too large
Load Diff
3039
src/main.zig
Normal file
3039
src/main.zig
Normal file
File diff suppressed because it is too large
Load Diff
@ -1,181 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2020 Andrew Kelley
|
||||
*
|
||||
* This file is part of zig, which is MIT licensed.
|
||||
* See http://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
|
||||
#include "mem.hpp"
|
||||
#include "mem_list.hpp"
|
||||
#include "mem_profile.hpp"
|
||||
#include "heap.hpp"
|
||||
|
||||
namespace mem {
|
||||
|
||||
void Profile::init(const char *name, const char *kind) {
|
||||
this->name = name;
|
||||
this->kind = kind;
|
||||
this->usage_table.init(heap::bootstrap_allocator, 1024);
|
||||
}
|
||||
|
||||
void Profile::deinit() {
|
||||
assert(this->name != nullptr);
|
||||
if (mem::report_print)
|
||||
this->print_report();
|
||||
this->usage_table.deinit(heap::bootstrap_allocator);
|
||||
this->name = nullptr;
|
||||
}
|
||||
|
||||
void Profile::record_alloc(const TypeInfo &info, size_t count) {
|
||||
if (count == 0) return;
|
||||
auto existing_entry = this->usage_table.put_unique(
|
||||
heap::bootstrap_allocator,
|
||||
UsageKey{info.name_ptr, info.name_len},
|
||||
Entry{info, 1, count, 0, 0} );
|
||||
if (existing_entry != nullptr) {
|
||||
assert(existing_entry->value.info.size == info.size); // allocated name does not match type
|
||||
existing_entry->value.alloc.calls += 1;
|
||||
existing_entry->value.alloc.objects += count;
|
||||
}
|
||||
}
|
||||
|
||||
void Profile::record_dealloc(const TypeInfo &info, size_t count) {
|
||||
if (count == 0) return;
|
||||
auto existing_entry = this->usage_table.maybe_get(UsageKey{info.name_ptr, info.name_len});
|
||||
if (existing_entry == nullptr) {
|
||||
fprintf(stderr, "deallocated name '");
|
||||
for (size_t i = 0; i < info.name_len; ++i)
|
||||
fputc(info.name_ptr[i], stderr);
|
||||
zig_panic("' (size %zu) not found in allocated table; compromised memory usage stats", info.size);
|
||||
}
|
||||
if (existing_entry->value.info.size != info.size) {
|
||||
fprintf(stderr, "deallocated name '");
|
||||
for (size_t i = 0; i < info.name_len; ++i)
|
||||
fputc(info.name_ptr[i], stderr);
|
||||
zig_panic("' does not match expected type size %zu", info.size);
|
||||
}
|
||||
assert(existing_entry->value.alloc.calls - existing_entry->value.dealloc.calls > 0);
|
||||
assert(existing_entry->value.alloc.objects - existing_entry->value.dealloc.objects >= count);
|
||||
existing_entry->value.dealloc.calls += 1;
|
||||
existing_entry->value.dealloc.objects += count;
|
||||
}
|
||||
|
||||
static size_t entry_remain_total_bytes(const Profile::Entry *entry) {
|
||||
return (entry->alloc.objects - entry->dealloc.objects) * entry->info.size;
|
||||
}
|
||||
|
||||
static int entry_compare(const void *a, const void *b) {
|
||||
size_t total_a = entry_remain_total_bytes(*reinterpret_cast<Profile::Entry *const *>(a));
|
||||
size_t total_b = entry_remain_total_bytes(*reinterpret_cast<Profile::Entry *const *>(b));
|
||||
if (total_a > total_b)
|
||||
return -1;
|
||||
if (total_a < total_b)
|
||||
return 1;
|
||||
return 0;
|
||||
};
|
||||
|
||||
void Profile::print_report(FILE *file) {
|
||||
if (!file) {
|
||||
file = report_file;
|
||||
if (!file)
|
||||
file = stderr;
|
||||
}
|
||||
fprintf(file, "\n--- MEMORY PROFILE REPORT [%s]: %s ---\n", this->kind, this->name);
|
||||
|
||||
List<const Entry *> list;
|
||||
auto it = this->usage_table.entry_iterator();
|
||||
for (;;) {
|
||||
auto entry = it.next();
|
||||
if (!entry)
|
||||
break;
|
||||
list.append(&heap::bootstrap_allocator, &entry->value);
|
||||
}
|
||||
|
||||
qsort(list.items, list.length, sizeof(const Entry *), entry_compare);
|
||||
|
||||
size_t total_bytes_alloc = 0;
|
||||
size_t total_bytes_dealloc = 0;
|
||||
|
||||
size_t total_calls_alloc = 0;
|
||||
size_t total_calls_dealloc = 0;
|
||||
|
||||
for (size_t i = 0; i < list.length; i += 1) {
|
||||
const Entry *entry = list.at(i);
|
||||
fprintf(file, " ");
|
||||
for (size_t j = 0; j < entry->info.name_len; ++j)
|
||||
fputc(entry->info.name_ptr[j], file);
|
||||
fprintf(file, ": %zu bytes each", entry->info.size);
|
||||
|
||||
fprintf(file, ", alloc{ %zu calls, %zu objects, total ", entry->alloc.calls, entry->alloc.objects);
|
||||
const auto alloc_num_bytes = entry->alloc.objects * entry->info.size;
|
||||
zig_pretty_print_bytes(file, alloc_num_bytes);
|
||||
|
||||
fprintf(file, " }, dealloc{ %zu calls, %zu objects, total ", entry->dealloc.calls, entry->dealloc.objects);
|
||||
const auto dealloc_num_bytes = entry->dealloc.objects * entry->info.size;
|
||||
zig_pretty_print_bytes(file, dealloc_num_bytes);
|
||||
|
||||
fprintf(file, " }, remain{ %zu calls, %zu objects, total ",
|
||||
entry->alloc.calls - entry->dealloc.calls,
|
||||
entry->alloc.objects - entry->dealloc.objects );
|
||||
const auto remain_num_bytes = alloc_num_bytes - dealloc_num_bytes;
|
||||
zig_pretty_print_bytes(file, remain_num_bytes);
|
||||
|
||||
fprintf(file, " }\n");
|
||||
|
||||
total_bytes_alloc += alloc_num_bytes;
|
||||
total_bytes_dealloc += dealloc_num_bytes;
|
||||
|
||||
total_calls_alloc += entry->alloc.calls;
|
||||
total_calls_dealloc += entry->dealloc.calls;
|
||||
}
|
||||
|
||||
fprintf(file, "\n Total bytes allocated: ");
|
||||
zig_pretty_print_bytes(file, total_bytes_alloc);
|
||||
fprintf(file, ", deallocated: ");
|
||||
zig_pretty_print_bytes(file, total_bytes_dealloc);
|
||||
fprintf(file, ", remaining: ");
|
||||
zig_pretty_print_bytes(file, total_bytes_alloc - total_bytes_dealloc);
|
||||
|
||||
fprintf(file, "\n Total calls alloc: %zu, dealloc: %zu, remain: %zu\n",
|
||||
total_calls_alloc, total_calls_dealloc, (total_calls_alloc - total_calls_dealloc));
|
||||
|
||||
list.deinit(&heap::bootstrap_allocator);
|
||||
}
|
||||
|
||||
uint32_t Profile::usage_hash(UsageKey key) {
|
||||
// FNV 32-bit hash
|
||||
uint32_t h = 2166136261;
|
||||
for (size_t i = 0; i < key.name_len; ++i) {
|
||||
h = h ^ key.name_ptr[i];
|
||||
h = h * 16777619;
|
||||
}
|
||||
return h;
|
||||
}
|
||||
|
||||
bool Profile::usage_equal(UsageKey a, UsageKey b) {
|
||||
return memcmp(a.name_ptr, b.name_ptr, a.name_len > b.name_len ? a.name_len : b.name_len) == 0;
|
||||
}
|
||||
|
||||
void InternCounters::print_report(FILE *file) {
|
||||
if (!file) {
|
||||
file = report_file;
|
||||
if (!file)
|
||||
file = stderr;
|
||||
}
|
||||
fprintf(file, "\n--- IR INTERNING REPORT ---\n");
|
||||
fprintf(file, " undefined: interned %zu times\n", intern_counters.x_undefined);
|
||||
fprintf(file, " void: interned %zu times\n", intern_counters.x_void);
|
||||
fprintf(file, " null: interned %zu times\n", intern_counters.x_null);
|
||||
fprintf(file, " unreachable: interned %zu times\n", intern_counters.x_unreachable);
|
||||
fprintf(file, " zero_byte: interned %zu times\n", intern_counters.zero_byte);
|
||||
}
|
||||
|
||||
InternCounters intern_counters;
|
||||
|
||||
} // namespace mem
|
||||
|
||||
#endif
|
||||
@ -1,71 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2020 Andrew Kelley
|
||||
*
|
||||
* This file is part of zig, which is MIT licensed.
|
||||
* See http://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
#ifndef ZIG_MEM_PROFILE_HPP
|
||||
#define ZIG_MEM_PROFILE_HPP
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include "mem.hpp"
|
||||
#include "mem_hash_map.hpp"
|
||||
#include "util.hpp"
|
||||
|
||||
namespace mem {
|
||||
|
||||
struct Profile {
|
||||
void init(const char *name, const char *kind);
|
||||
void deinit();
|
||||
|
||||
void record_alloc(const TypeInfo &info, size_t count);
|
||||
void record_dealloc(const TypeInfo &info, size_t count);
|
||||
|
||||
void print_report(FILE *file = nullptr);
|
||||
|
||||
struct Entry {
|
||||
TypeInfo info;
|
||||
|
||||
struct Use {
|
||||
size_t calls;
|
||||
size_t objects;
|
||||
} alloc, dealloc;
|
||||
};
|
||||
|
||||
private:
|
||||
const char *name;
|
||||
const char *kind;
|
||||
|
||||
struct UsageKey {
|
||||
const char *name_ptr;
|
||||
size_t name_len;
|
||||
};
|
||||
|
||||
static uint32_t usage_hash(UsageKey key);
|
||||
static bool usage_equal(UsageKey a, UsageKey b);
|
||||
|
||||
HashMap<UsageKey, Entry, usage_hash, usage_equal> usage_table;
|
||||
};
|
||||
|
||||
struct InternCounters {
|
||||
size_t x_undefined;
|
||||
size_t x_void;
|
||||
size_t x_null;
|
||||
size_t x_unreachable;
|
||||
size_t zero_byte;
|
||||
|
||||
void print_report(FILE *file = nullptr);
|
||||
};
|
||||
|
||||
extern InternCounters intern_counters;
|
||||
|
||||
} // namespace mem
|
||||
|
||||
#endif
|
||||
#endif
|
||||
@ -1,136 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2020 Andrew Kelley
|
||||
*
|
||||
* This file is part of zig, which is MIT licensed.
|
||||
* See http://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
#ifndef ZIG_MEM_TYPE_INFO_HPP
|
||||
#define ZIG_MEM_TYPE_INFO_HPP
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#ifndef ZIG_TYPE_INFO_IMPLEMENTATION
|
||||
# ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
# define ZIG_TYPE_INFO_IMPLEMENTATION 1
|
||||
# else
|
||||
# define ZIG_TYPE_INFO_IMPLEMENTATION 0
|
||||
# endif
|
||||
#endif
|
||||
|
||||
namespace mem {
|
||||
|
||||
#if ZIG_TYPE_INFO_IMPLEMENTATION == 0
|
||||
|
||||
struct TypeInfo {
|
||||
size_t size;
|
||||
size_t alignment;
|
||||
|
||||
template <typename T>
|
||||
static constexpr TypeInfo make() {
|
||||
return {sizeof(T), alignof(T)};
|
||||
}
|
||||
};
|
||||
|
||||
#elif ZIG_TYPE_INFO_IMPLEMENTATION == 1
|
||||
|
||||
//
|
||||
// A non-portable way to get a human-readable type-name compatible with
|
||||
// non-RTTI C++ compiler mode; eg. `-fno-rtti`.
|
||||
//
|
||||
// Minimum requirements are c++11 and a compiler that has a constant for the
|
||||
// current function's decorated name whereby a template-type name can be
|
||||
// computed. eg. `__PRETTY_FUNCTION__` or `__FUNCSIG__`.
|
||||
//
|
||||
// given the following snippet:
|
||||
//
|
||||
// | #include <stdio.h>
|
||||
// |
|
||||
// | struct Top {};
|
||||
// | namespace mynamespace {
|
||||
// | using custom = unsigned int;
|
||||
// | struct Foo {
|
||||
// | struct Bar {};
|
||||
// | };
|
||||
// | };
|
||||
// |
|
||||
// | template <typename T>
|
||||
// | void foobar() {
|
||||
// | #ifdef _MSC_VER
|
||||
// | fprintf(stderr, "--> %s\n", __FUNCSIG__);
|
||||
// | #else
|
||||
// | fprintf(stderr, "--> %s\n", __PRETTY_FUNCTION__);
|
||||
// | #endif
|
||||
// | }
|
||||
// |
|
||||
// | int main() {
|
||||
// | foobar<Top>();
|
||||
// | foobar<unsigned int>();
|
||||
// | foobar<mynamespace::custom>();
|
||||
// | foobar<mynamespace::Foo*>();
|
||||
// | foobar<mynamespace::Foo::Bar*>();
|
||||
// | }
|
||||
//
|
||||
// gcc 9.2.0 produces:
|
||||
// --> void foobar() [with T = Top]
|
||||
// --> void foobar() [with T = unsigned int]
|
||||
// --> void foobar() [with T = unsigned int]
|
||||
// --> void foobar() [with T = mynamespace::Foo*]
|
||||
// --> void foobar() [with T = mynamespace::Foo::Bar*]
|
||||
//
|
||||
// xcode 11.3.1/clang produces:
|
||||
// --> void foobar() [T = Top]
|
||||
// --> void foobar() [T = unsigned int]
|
||||
// --> void foobar() [T = unsigned int]
|
||||
// --> void foobar() [T = mynamespace::Foo *]
|
||||
// --> void foobar() [T = mynamespace::Foo::Bar *]
|
||||
//
|
||||
// VStudio 2019 16.5.0/msvc produces:
|
||||
// --> void __cdecl foobar<struct Top>(void)
|
||||
// --> void __cdecl foobar<unsigned int>(void)
|
||||
// --> void __cdecl foobar<unsigned int>(void)
|
||||
// --> void __cdecl foobar<structmynamespace::Foo*>(void)
|
||||
// --> void __cdecl foobar<structmynamespace::Foo::Bar*>(void)
|
||||
//
|
||||
struct TypeInfo {
|
||||
const char *name_ptr;
|
||||
size_t name_len;
|
||||
size_t size;
|
||||
size_t alignment;
|
||||
|
||||
static constexpr TypeInfo to_type_info(const char *str, size_t start, size_t end, size_t size, size_t alignment) {
|
||||
return TypeInfo{str + start, end - start, size, alignment};
|
||||
}
|
||||
|
||||
static constexpr size_t index_of(const char *str, char c) {
|
||||
return *str == c ? 0 : 1 + index_of(str + 1, c);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static constexpr const char *decorated_name() {
|
||||
#ifdef _MSC_VER
|
||||
return __FUNCSIG__;
|
||||
#else
|
||||
return __PRETTY_FUNCTION__;
|
||||
#endif
|
||||
}
|
||||
|
||||
static constexpr TypeInfo extract(const char *decorated, size_t size, size_t alignment) {
|
||||
#ifdef _MSC_VER
|
||||
return to_type_info(decorated, index_of(decorated, '<') + 1, index_of(decorated, '>'), size, alignment);
|
||||
#else
|
||||
return to_type_info(decorated, index_of(decorated, '=') + 2, index_of(decorated, ']'), size, alignment);
|
||||
#endif
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static constexpr TypeInfo make() {
|
||||
return TypeInfo::extract(TypeInfo::decorated_name<T>(), sizeof(T), alignof(T));
|
||||
}
|
||||
};
|
||||
|
||||
#endif // ZIG_TYPE_INFO_IMPLEMENTATION
|
||||
|
||||
} // namespace mem
|
||||
|
||||
#endif
|
||||
1092
src/mingw.zig
Normal file
1092
src/mingw.zig
Normal file
File diff suppressed because it is too large
Load Diff
2142
src/musl.zig
Normal file
2142
src/musl.zig
Normal file
File diff suppressed because it is too large
Load Diff
@ -2,39 +2,39 @@ const std = @import("std");
|
||||
const build_options = @import("build_options");
|
||||
const introspect = @import("introspect.zig");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const fatal = @import("main.zig").fatal;
|
||||
|
||||
pub fn cmdEnv(gpa: *Allocator, args: []const []const u8, stdout: anytype) !void {
|
||||
const zig_lib_dir = introspect.resolveZigLibDir(gpa) catch |err| {
|
||||
std.debug.print("unable to find zig installation directory: {}\n", .{@errorName(err)});
|
||||
std.process.exit(1);
|
||||
};
|
||||
defer gpa.free(zig_lib_dir);
|
||||
const self_exe_path = try std.fs.selfExePathAlloc(gpa);
|
||||
defer gpa.free(self_exe_path);
|
||||
|
||||
const zig_std_dir = try std.fs.path.join(gpa, &[_][]const u8{ zig_lib_dir, "std" });
|
||||
var zig_lib_directory = introspect.findZigLibDirFromSelfExe(gpa, self_exe_path) catch |err| {
|
||||
fatal("unable to find zig installation directory: {}\n", .{@errorName(err)});
|
||||
};
|
||||
defer gpa.free(zig_lib_directory.path.?);
|
||||
defer zig_lib_directory.handle.close();
|
||||
|
||||
const zig_std_dir = try std.fs.path.join(gpa, &[_][]const u8{ zig_lib_directory.path.?, "std" });
|
||||
defer gpa.free(zig_std_dir);
|
||||
|
||||
const global_cache_dir = try introspect.resolveGlobalCacheDir(gpa);
|
||||
defer gpa.free(global_cache_dir);
|
||||
|
||||
const compiler_id_digest = try introspect.resolveCompilerId(gpa);
|
||||
var compiler_id_buf: [compiler_id_digest.len * 2]u8 = undefined;
|
||||
const compiler_id = std.fmt.bufPrint(&compiler_id_buf, "{x}", .{compiler_id_digest}) catch unreachable;
|
||||
|
||||
var bos = std.io.bufferedOutStream(stdout);
|
||||
const bos_stream = bos.outStream();
|
||||
|
||||
var jws = std.json.WriteStream(@TypeOf(bos_stream), 6).init(bos_stream);
|
||||
try jws.beginObject();
|
||||
|
||||
try jws.objectField("zig_exe");
|
||||
try jws.emitString(self_exe_path);
|
||||
|
||||
try jws.objectField("lib_dir");
|
||||
try jws.emitString(zig_lib_dir);
|
||||
try jws.emitString(zig_lib_directory.path.?);
|
||||
|
||||
try jws.objectField("std_dir");
|
||||
try jws.emitString(zig_std_dir);
|
||||
|
||||
try jws.objectField("id");
|
||||
try jws.emitString(compiler_id);
|
||||
|
||||
try jws.objectField("global_cache_dir");
|
||||
try jws.emitString(global_cache_dir);
|
||||
|
||||
@ -4,59 +4,11 @@ const io = std.io;
|
||||
const mem = std.mem;
|
||||
const Allocator = mem.Allocator;
|
||||
const Target = std.Target;
|
||||
const target = @import("target.zig");
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const glibc = @import("glibc.zig");
|
||||
const introspect = @import("introspect.zig");
|
||||
|
||||
// TODO this is hard-coded until self-hosted gains this information canonically
|
||||
const available_libcs = [_][]const u8{
|
||||
"aarch64_be-linux-gnu",
|
||||
"aarch64_be-linux-musl",
|
||||
"aarch64_be-windows-gnu",
|
||||
"aarch64-linux-gnu",
|
||||
"aarch64-linux-musl",
|
||||
"aarch64-windows-gnu",
|
||||
"armeb-linux-gnueabi",
|
||||
"armeb-linux-gnueabihf",
|
||||
"armeb-linux-musleabi",
|
||||
"armeb-linux-musleabihf",
|
||||
"armeb-windows-gnu",
|
||||
"arm-linux-gnueabi",
|
||||
"arm-linux-gnueabihf",
|
||||
"arm-linux-musleabi",
|
||||
"arm-linux-musleabihf",
|
||||
"arm-windows-gnu",
|
||||
"i386-linux-gnu",
|
||||
"i386-linux-musl",
|
||||
"i386-windows-gnu",
|
||||
"mips64el-linux-gnuabi64",
|
||||
"mips64el-linux-gnuabin32",
|
||||
"mips64el-linux-musl",
|
||||
"mips64-linux-gnuabi64",
|
||||
"mips64-linux-gnuabin32",
|
||||
"mips64-linux-musl",
|
||||
"mipsel-linux-gnu",
|
||||
"mipsel-linux-musl",
|
||||
"mips-linux-gnu",
|
||||
"mips-linux-musl",
|
||||
"powerpc64le-linux-gnu",
|
||||
"powerpc64le-linux-musl",
|
||||
"powerpc64-linux-gnu",
|
||||
"powerpc64-linux-musl",
|
||||
"powerpc-linux-gnu",
|
||||
"powerpc-linux-musl",
|
||||
"riscv64-linux-gnu",
|
||||
"riscv64-linux-musl",
|
||||
"s390x-linux-gnu",
|
||||
"s390x-linux-musl",
|
||||
"sparc-linux-gnu",
|
||||
"sparcv9-linux-gnu",
|
||||
"wasm32-freestanding-musl",
|
||||
"x86_64-linux-gnu",
|
||||
"x86_64-linux-gnux32",
|
||||
"x86_64-linux-musl",
|
||||
"x86_64-windows-gnu",
|
||||
};
|
||||
const fatal = @import("main.zig").fatal;
|
||||
|
||||
pub fn cmdTargets(
|
||||
allocator: *Allocator,
|
||||
@ -65,33 +17,14 @@ pub fn cmdTargets(
|
||||
stdout: anytype,
|
||||
native_target: Target,
|
||||
) !void {
|
||||
const available_glibcs = blk: {
|
||||
const zig_lib_dir = introspect.resolveZigLibDir(allocator) catch |err| {
|
||||
std.debug.print("unable to find zig installation directory: {}\n", .{@errorName(err)});
|
||||
std.process.exit(1);
|
||||
var zig_lib_directory = introspect.findZigLibDir(allocator) catch |err| {
|
||||
fatal("unable to find zig installation directory: {}\n", .{@errorName(err)});
|
||||
};
|
||||
defer allocator.free(zig_lib_dir);
|
||||
defer zig_lib_directory.handle.close();
|
||||
defer allocator.free(zig_lib_directory.path.?);
|
||||
|
||||
var dir = try std.fs.cwd().openDir(zig_lib_dir, .{});
|
||||
defer dir.close();
|
||||
|
||||
const vers_txt = try dir.readFileAlloc(allocator, "libc" ++ std.fs.path.sep_str ++ "glibc" ++ std.fs.path.sep_str ++ "vers.txt", 10 * 1024);
|
||||
defer allocator.free(vers_txt);
|
||||
|
||||
var list = std.ArrayList(std.builtin.Version).init(allocator);
|
||||
defer list.deinit();
|
||||
|
||||
var it = mem.tokenize(vers_txt, "\r\n");
|
||||
while (it.next()) |line| {
|
||||
const prefix = "GLIBC_";
|
||||
assert(mem.startsWith(u8, line, prefix));
|
||||
const adjusted_line = line[prefix.len..];
|
||||
const ver = try std.builtin.Version.parse(adjusted_line);
|
||||
try list.append(ver);
|
||||
}
|
||||
break :blk list.toOwnedSlice();
|
||||
};
|
||||
defer allocator.free(available_glibcs);
|
||||
const glibc_abi = try glibc.loadMetaData(allocator, zig_lib_directory.handle);
|
||||
defer glibc_abi.destroy(allocator);
|
||||
|
||||
var bos = io.bufferedOutStream(stdout);
|
||||
const bos_stream = bos.outStream();
|
||||
@ -127,18 +60,22 @@ pub fn cmdTargets(
|
||||
|
||||
try jws.objectField("libc");
|
||||
try jws.beginArray();
|
||||
for (available_libcs) |libc| {
|
||||
for (target.available_libcs) |libc| {
|
||||
const tmp = try std.fmt.allocPrint(allocator, "{}-{}-{}", .{
|
||||
@tagName(libc.arch), @tagName(libc.os), @tagName(libc.abi),
|
||||
});
|
||||
defer allocator.free(tmp);
|
||||
try jws.arrayElem();
|
||||
try jws.emitString(libc);
|
||||
try jws.emitString(tmp);
|
||||
}
|
||||
try jws.endArray();
|
||||
|
||||
try jws.objectField("glibc");
|
||||
try jws.beginArray();
|
||||
for (available_glibcs) |glibc| {
|
||||
for (glibc_abi.all_versions) |ver| {
|
||||
try jws.arrayElem();
|
||||
|
||||
const tmp = try std.fmt.allocPrint(allocator, "{}", .{glibc});
|
||||
const tmp = try std.fmt.allocPrint(allocator, "{}", .{ver});
|
||||
defer allocator.free(tmp);
|
||||
try jws.emitString(tmp);
|
||||
}
|
||||
@ -215,7 +152,6 @@ pub fn cmdTargets(
|
||||
try jws.emitString(@tagName(native_target.os.tag));
|
||||
try jws.objectField("abi");
|
||||
try jws.emitString(@tagName(native_target.abi));
|
||||
// TODO implement native glibc version detection in self-hosted
|
||||
try jws.endObject();
|
||||
|
||||
try jws.endObject();
|
||||
426
src/stage1.zig
Normal file
426
src/stage1.zig
Normal file
@ -0,0 +1,426 @@
|
||||
//! This is the main entry point for the Zig/C++ hybrid compiler (stage1).
|
||||
//! It has the functions exported from Zig, called in C++, and bindings for
|
||||
//! the functions exported from C++, called from Zig.
|
||||
|
||||
const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
const mem = std.mem;
|
||||
const CrossTarget = std.zig.CrossTarget;
|
||||
const Target = std.Target;
|
||||
|
||||
const build_options = @import("build_options");
|
||||
const stage2 = @import("main.zig");
|
||||
const fatal = stage2.fatal;
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const translate_c = @import("translate_c.zig");
|
||||
const target_util = @import("target.zig");
|
||||
|
||||
comptime {
|
||||
assert(std.builtin.link_libc);
|
||||
assert(build_options.is_stage1);
|
||||
assert(build_options.have_llvm);
|
||||
_ = @import("compiler_rt");
|
||||
}
|
||||
|
||||
pub const log = stage2.log;
|
||||
pub const log_level = stage2.log_level;
|
||||
|
||||
pub export fn main(argc: c_int, argv: [*]const [*:0]const u8) c_int {
|
||||
std.debug.maybeEnableSegfaultHandler();
|
||||
|
||||
zig_stage1_os_init();
|
||||
|
||||
const gpa = std.heap.c_allocator;
|
||||
var arena_instance = std.heap.ArenaAllocator.init(gpa);
|
||||
defer arena_instance.deinit();
|
||||
const arena = &arena_instance.allocator;
|
||||
|
||||
const args = arena.alloc([]const u8, @intCast(usize, argc)) catch fatal("{}", .{"OutOfMemory"});
|
||||
for (args) |*arg, i| {
|
||||
arg.* = mem.spanZ(argv[i]);
|
||||
}
|
||||
stage2.mainArgs(gpa, arena, args) catch |err| fatal("{}", .{@errorName(err)});
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// Matches stage2.Color;
|
||||
pub const ErrColor = c_int;
|
||||
/// Matches std.builtin.CodeModel
|
||||
pub const CodeModel = c_int;
|
||||
/// Matches std.Target.Os.Tag
|
||||
pub const OS = c_int;
|
||||
/// Matches std.builtin.BuildMode
|
||||
pub const BuildMode = c_int;
|
||||
|
||||
pub const TargetSubsystem = extern enum(c_int) {
|
||||
Console,
|
||||
Windows,
|
||||
Posix,
|
||||
Native,
|
||||
EfiApplication,
|
||||
EfiBootServiceDriver,
|
||||
EfiRom,
|
||||
EfiRuntimeDriver,
|
||||
Auto,
|
||||
};
|
||||
|
||||
pub const Pkg = extern struct {
|
||||
name_ptr: [*]const u8,
|
||||
name_len: usize,
|
||||
path_ptr: [*]const u8,
|
||||
path_len: usize,
|
||||
children_ptr: [*]*Pkg,
|
||||
children_len: usize,
|
||||
parent: ?*Pkg,
|
||||
};
|
||||
|
||||
pub const Module = extern struct {
|
||||
root_name_ptr: [*]const u8,
|
||||
root_name_len: usize,
|
||||
emit_o_ptr: [*]const u8,
|
||||
emit_o_len: usize,
|
||||
emit_h_ptr: [*]const u8,
|
||||
emit_h_len: usize,
|
||||
emit_asm_ptr: [*]const u8,
|
||||
emit_asm_len: usize,
|
||||
emit_llvm_ir_ptr: [*]const u8,
|
||||
emit_llvm_ir_len: usize,
|
||||
emit_analysis_json_ptr: [*]const u8,
|
||||
emit_analysis_json_len: usize,
|
||||
emit_docs_ptr: [*]const u8,
|
||||
emit_docs_len: usize,
|
||||
builtin_zig_path_ptr: [*]const u8,
|
||||
builtin_zig_path_len: usize,
|
||||
test_filter_ptr: [*]const u8,
|
||||
test_filter_len: usize,
|
||||
test_name_prefix_ptr: [*]const u8,
|
||||
test_name_prefix_len: usize,
|
||||
userdata: usize,
|
||||
root_pkg: *Pkg,
|
||||
main_progress_node: ?*std.Progress.Node,
|
||||
code_model: CodeModel,
|
||||
subsystem: TargetSubsystem,
|
||||
err_color: ErrColor,
|
||||
pic: bool,
|
||||
link_libc: bool,
|
||||
link_libcpp: bool,
|
||||
strip: bool,
|
||||
is_single_threaded: bool,
|
||||
dll_export_fns: bool,
|
||||
link_mode_dynamic: bool,
|
||||
valgrind_enabled: bool,
|
||||
function_sections: bool,
|
||||
enable_stack_probing: bool,
|
||||
enable_time_report: bool,
|
||||
enable_stack_report: bool,
|
||||
test_is_evented: bool,
|
||||
verbose_tokenize: bool,
|
||||
verbose_ast: bool,
|
||||
verbose_ir: bool,
|
||||
verbose_llvm_ir: bool,
|
||||
verbose_cimport: bool,
|
||||
verbose_llvm_cpu_features: bool,
|
||||
|
||||
// Set by stage1
|
||||
have_c_main: bool,
|
||||
have_winmain: bool,
|
||||
have_wwinmain: bool,
|
||||
have_winmain_crt_startup: bool,
|
||||
have_wwinmain_crt_startup: bool,
|
||||
have_dllmain_crt_startup: bool,
|
||||
|
||||
pub fn build_object(mod: *Module) void {
|
||||
zig_stage1_build_object(mod);
|
||||
}
|
||||
|
||||
pub fn destroy(mod: *Module) void {
|
||||
zig_stage1_destroy(mod);
|
||||
}
|
||||
};
|
||||
|
||||
extern fn zig_stage1_os_init() void;
|
||||
|
||||
pub const create = zig_stage1_create;
|
||||
extern fn zig_stage1_create(
|
||||
optimize_mode: BuildMode,
|
||||
main_pkg_path_ptr: [*]const u8,
|
||||
main_pkg_path_len: usize,
|
||||
root_src_path_ptr: [*]const u8,
|
||||
root_src_path_len: usize,
|
||||
zig_lib_dir_ptr: [*c]const u8,
|
||||
zig_lib_dir_len: usize,
|
||||
target: [*c]const Stage2Target,
|
||||
is_test_build: bool,
|
||||
) ?*Module;
|
||||
|
||||
extern fn zig_stage1_build_object(*Module) void;
|
||||
extern fn zig_stage1_destroy(*Module) void;
|
||||
|
||||
// ABI warning
|
||||
export fn stage2_panic(ptr: [*]const u8, len: usize) void {
|
||||
@panic(ptr[0..len]);
|
||||
}
|
||||
|
||||
// ABI warning
|
||||
const Error = extern enum {
|
||||
None,
|
||||
OutOfMemory,
|
||||
InvalidFormat,
|
||||
SemanticAnalyzeFail,
|
||||
AccessDenied,
|
||||
Interrupted,
|
||||
SystemResources,
|
||||
FileNotFound,
|
||||
FileSystem,
|
||||
FileTooBig,
|
||||
DivByZero,
|
||||
Overflow,
|
||||
PathAlreadyExists,
|
||||
Unexpected,
|
||||
ExactDivRemainder,
|
||||
NegativeDenominator,
|
||||
ShiftedOutOneBits,
|
||||
CCompileErrors,
|
||||
EndOfFile,
|
||||
IsDir,
|
||||
NotDir,
|
||||
UnsupportedOperatingSystem,
|
||||
SharingViolation,
|
||||
PipeBusy,
|
||||
PrimitiveTypeNotFound,
|
||||
CacheUnavailable,
|
||||
PathTooLong,
|
||||
CCompilerCannotFindFile,
|
||||
NoCCompilerInstalled,
|
||||
ReadingDepFile,
|
||||
InvalidDepFile,
|
||||
MissingArchitecture,
|
||||
MissingOperatingSystem,
|
||||
UnknownArchitecture,
|
||||
UnknownOperatingSystem,
|
||||
UnknownABI,
|
||||
InvalidFilename,
|
||||
DiskQuota,
|
||||
DiskSpace,
|
||||
UnexpectedWriteFailure,
|
||||
UnexpectedSeekFailure,
|
||||
UnexpectedFileTruncationFailure,
|
||||
Unimplemented,
|
||||
OperationAborted,
|
||||
BrokenPipe,
|
||||
NoSpaceLeft,
|
||||
NotLazy,
|
||||
IsAsync,
|
||||
ImportOutsidePkgPath,
|
||||
UnknownCpuModel,
|
||||
UnknownCpuFeature,
|
||||
InvalidCpuFeatures,
|
||||
InvalidLlvmCpuFeaturesFormat,
|
||||
UnknownApplicationBinaryInterface,
|
||||
ASTUnitFailure,
|
||||
BadPathName,
|
||||
SymLinkLoop,
|
||||
ProcessFdQuotaExceeded,
|
||||
SystemFdQuotaExceeded,
|
||||
NoDevice,
|
||||
DeviceBusy,
|
||||
UnableToSpawnCCompiler,
|
||||
CCompilerExitCode,
|
||||
CCompilerCrashed,
|
||||
CCompilerCannotFindHeaders,
|
||||
LibCRuntimeNotFound,
|
||||
LibCStdLibHeaderNotFound,
|
||||
LibCKernel32LibNotFound,
|
||||
UnsupportedArchitecture,
|
||||
WindowsSdkNotFound,
|
||||
UnknownDynamicLinkerPath,
|
||||
TargetHasNoDynamicLinker,
|
||||
InvalidAbiVersion,
|
||||
InvalidOperatingSystemVersion,
|
||||
UnknownClangOption,
|
||||
NestedResponseFile,
|
||||
ZigIsTheCCompiler,
|
||||
FileBusy,
|
||||
Locked,
|
||||
};
|
||||
|
||||
// ABI warning
|
||||
export fn stage2_attach_segfault_handler() void {
|
||||
if (std.debug.runtime_safety and std.debug.have_segfault_handling_support) {
|
||||
std.debug.attachSegfaultHandler();
|
||||
}
|
||||
}
|
||||
|
||||
// ABI warning
|
||||
export fn stage2_progress_create() *std.Progress {
|
||||
const ptr = std.heap.c_allocator.create(std.Progress) catch @panic("out of memory");
|
||||
ptr.* = std.Progress{};
|
||||
return ptr;
|
||||
}
|
||||
|
||||
// ABI warning
|
||||
export fn stage2_progress_destroy(progress: *std.Progress) void {
|
||||
std.heap.c_allocator.destroy(progress);
|
||||
}
|
||||
|
||||
// ABI warning
|
||||
export fn stage2_progress_start_root(
|
||||
progress: *std.Progress,
|
||||
name_ptr: [*]const u8,
|
||||
name_len: usize,
|
||||
estimated_total_items: usize,
|
||||
) *std.Progress.Node {
|
||||
return progress.start(
|
||||
name_ptr[0..name_len],
|
||||
if (estimated_total_items == 0) null else estimated_total_items,
|
||||
) catch @panic("timer unsupported");
|
||||
}
|
||||
|
||||
// ABI warning
|
||||
export fn stage2_progress_disable_tty(progress: *std.Progress) void {
|
||||
progress.terminal = null;
|
||||
}
|
||||
|
||||
// ABI warning
|
||||
export fn stage2_progress_start(
|
||||
node: *std.Progress.Node,
|
||||
name_ptr: [*]const u8,
|
||||
name_len: usize,
|
||||
estimated_total_items: usize,
|
||||
) *std.Progress.Node {
|
||||
const child_node = std.heap.c_allocator.create(std.Progress.Node) catch @panic("out of memory");
|
||||
child_node.* = node.start(
|
||||
name_ptr[0..name_len],
|
||||
if (estimated_total_items == 0) null else estimated_total_items,
|
||||
);
|
||||
child_node.activate();
|
||||
return child_node;
|
||||
}
|
||||
|
||||
// ABI warning
|
||||
export fn stage2_progress_end(node: *std.Progress.Node) void {
|
||||
node.end();
|
||||
if (&node.context.root != node) {
|
||||
std.heap.c_allocator.destroy(node);
|
||||
}
|
||||
}
|
||||
|
||||
// ABI warning
|
||||
export fn stage2_progress_complete_one(node: *std.Progress.Node) void {
|
||||
node.completeOne();
|
||||
}
|
||||
|
||||
// ABI warning
|
||||
export fn stage2_progress_update_node(node: *std.Progress.Node, done_count: usize, total_count: usize) void {
|
||||
node.completed_items = done_count;
|
||||
node.estimated_total_items = total_count;
|
||||
node.activate();
|
||||
node.context.maybeRefresh();
|
||||
}
|
||||
|
||||
// ABI warning
|
||||
pub const Stage2Target = extern struct {
|
||||
arch: c_int,
|
||||
os: OS,
|
||||
abi: c_int,
|
||||
|
||||
is_native_os: bool,
|
||||
is_native_cpu: bool,
|
||||
|
||||
llvm_cpu_name: ?[*:0]const u8,
|
||||
llvm_cpu_features: ?[*:0]const u8,
|
||||
};
|
||||
|
||||
// ABI warning
|
||||
const Stage2SemVer = extern struct {
|
||||
major: u32,
|
||||
minor: u32,
|
||||
patch: u32,
|
||||
};
|
||||
|
||||
// ABI warning
|
||||
export fn stage2_cimport(
|
||||
stage1: *Module,
|
||||
c_src_ptr: [*]const u8,
|
||||
c_src_len: usize,
|
||||
out_zig_path_ptr: *[*]const u8,
|
||||
out_zig_path_len: *usize,
|
||||
out_errors_ptr: *[*]translate_c.ClangErrMsg,
|
||||
out_errors_len: *usize,
|
||||
) Error {
|
||||
const comp = @intToPtr(*Compilation, stage1.userdata);
|
||||
const c_src = c_src_ptr[0..c_src_len];
|
||||
const result = comp.cImport(c_src) catch |err| switch (err) {
|
||||
error.SystemResources => return .SystemResources,
|
||||
error.OperationAborted => return .OperationAborted,
|
||||
error.BrokenPipe => return .BrokenPipe,
|
||||
error.DiskQuota => return .DiskQuota,
|
||||
error.FileTooBig => return .FileTooBig,
|
||||
error.NoSpaceLeft => return .NoSpaceLeft,
|
||||
error.AccessDenied => return .AccessDenied,
|
||||
error.OutOfMemory => return .OutOfMemory,
|
||||
error.Unexpected => return .Unexpected,
|
||||
error.InputOutput => return .FileSystem,
|
||||
error.ASTUnitFailure => return .ASTUnitFailure,
|
||||
error.CacheUnavailable => return .CacheUnavailable,
|
||||
else => return .Unexpected,
|
||||
};
|
||||
out_zig_path_ptr.* = result.out_zig_path.ptr;
|
||||
out_zig_path_len.* = result.out_zig_path.len;
|
||||
out_errors_ptr.* = result.errors.ptr;
|
||||
out_errors_len.* = result.errors.len;
|
||||
if (result.errors.len != 0) return .CCompileErrors;
|
||||
return Error.None;
|
||||
}
|
||||
|
||||
export fn stage2_add_link_lib(
|
||||
stage1: *Module,
|
||||
lib_name_ptr: [*c]const u8,
|
||||
lib_name_len: usize,
|
||||
symbol_name_ptr: [*c]const u8,
|
||||
symbol_name_len: usize,
|
||||
) ?[*:0]const u8 {
|
||||
const comp = @intToPtr(*Compilation, stage1.userdata);
|
||||
const lib_name = std.ascii.allocLowerString(comp.gpa, lib_name_ptr[0..lib_name_len]) catch return "out of memory";
|
||||
const target = comp.getTarget();
|
||||
const is_libc = target_util.is_libc_lib_name(target, lib_name);
|
||||
if (is_libc) {
|
||||
if (!comp.bin_file.options.link_libc) {
|
||||
return "dependency on libc must be explicitly specified in the build command";
|
||||
}
|
||||
return null;
|
||||
}
|
||||
if (target_util.is_libcpp_lib_name(target, lib_name)) {
|
||||
if (!comp.bin_file.options.link_libcpp) {
|
||||
return "dependency on libc++ must be explicitly specified in the build command";
|
||||
}
|
||||
return null;
|
||||
}
|
||||
if (!target.isWasm() and !comp.bin_file.options.pic) {
|
||||
return std.fmt.allocPrint0(
|
||||
comp.gpa,
|
||||
"dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by `-l{s}` or `-fPIC`.",
|
||||
.{ lib_name, lib_name },
|
||||
) catch "out of memory";
|
||||
}
|
||||
comp.stage1AddLinkLib(lib_name) catch |err| {
|
||||
return std.fmt.allocPrint0(comp.gpa, "unable to add link lib '{s}': {s}", .{
|
||||
lib_name, @errorName(err),
|
||||
}) catch "out of memory";
|
||||
};
|
||||
return null;
|
||||
}
|
||||
|
||||
export fn stage2_fetch_file(
|
||||
stage1: *Module,
|
||||
path_ptr: [*]const u8,
|
||||
path_len: usize,
|
||||
result_len: *usize,
|
||||
) ?[*]const u8 {
|
||||
const comp = @intToPtr(*Compilation, stage1.userdata);
|
||||
const file_path = path_ptr[0..path_len];
|
||||
const max_file_size = std.math.maxInt(u32);
|
||||
const contents = comp.stage1_cache_manifest.addFilePostFetch(file_path, max_file_size) catch return null;
|
||||
result_len.* = contents.len;
|
||||
return contents.ptr;
|
||||
}
|
||||
@ -10,7 +10,6 @@
|
||||
|
||||
#include "list.hpp"
|
||||
#include "buffer.hpp"
|
||||
#include "cache_hash.hpp"
|
||||
#include "zig_llvm.h"
|
||||
#include "hash_map.hpp"
|
||||
#include "errmsg.hpp"
|
||||
@ -1639,8 +1638,6 @@ struct ZigType {
|
||||
size_t abi_size;
|
||||
// Number of bits of information in this type. Known after ResolveStatusSizeKnown.
|
||||
size_t size_in_bits;
|
||||
|
||||
bool gen_h_loop_flag;
|
||||
};
|
||||
|
||||
enum FnAnalState {
|
||||
@ -1976,67 +1973,16 @@ struct TimeEvent {
|
||||
const char *name;
|
||||
};
|
||||
|
||||
enum BuildMode {
|
||||
BuildModeDebug,
|
||||
BuildModeFastRelease,
|
||||
BuildModeSafeRelease,
|
||||
BuildModeSmallRelease,
|
||||
};
|
||||
|
||||
enum CodeModel {
|
||||
CodeModelDefault,
|
||||
CodeModelTiny,
|
||||
CodeModelSmall,
|
||||
CodeModelKernel,
|
||||
CodeModelMedium,
|
||||
CodeModelLarge,
|
||||
};
|
||||
|
||||
struct LinkLib {
|
||||
Buf *name;
|
||||
Buf *path;
|
||||
ZigList<Buf *> symbols; // the list of symbols that we depend on from this lib
|
||||
bool provided_explicitly;
|
||||
};
|
||||
|
||||
enum ValgrindSupport {
|
||||
ValgrindSupportAuto,
|
||||
ValgrindSupportDisabled,
|
||||
ValgrindSupportEnabled,
|
||||
};
|
||||
|
||||
enum WantPIC {
|
||||
WantPICAuto,
|
||||
WantPICDisabled,
|
||||
WantPICEnabled,
|
||||
};
|
||||
|
||||
enum WantStackCheck {
|
||||
WantStackCheckAuto,
|
||||
WantStackCheckDisabled,
|
||||
WantStackCheckEnabled,
|
||||
};
|
||||
|
||||
enum WantCSanitize {
|
||||
WantCSanitizeAuto,
|
||||
WantCSanitizeDisabled,
|
||||
WantCSanitizeEnabled,
|
||||
};
|
||||
|
||||
enum OptionalBool {
|
||||
OptionalBoolNull,
|
||||
OptionalBoolFalse,
|
||||
OptionalBoolTrue,
|
||||
};
|
||||
|
||||
struct CFile {
|
||||
ZigList<const char *> args;
|
||||
const char *source_path;
|
||||
const char *preprocessor_only_basename;
|
||||
};
|
||||
|
||||
// When adding fields, check if they should be added to the hash computation in build_with_cache
|
||||
struct CodeGen {
|
||||
// Other code depends on this being first.
|
||||
ZigStage1 stage1;
|
||||
|
||||
// arena allocator destroyed just prior to codegen emit
|
||||
heap::ArenaAllocator *pass1_arena;
|
||||
|
||||
@ -2048,8 +1994,6 @@ struct CodeGen {
|
||||
ZigLLVMDIBuilder *dbuilder;
|
||||
ZigLLVMDICompileUnit *compile_unit;
|
||||
ZigLLVMDIFile *compile_unit_file;
|
||||
LinkLib *libc_link_lib;
|
||||
LinkLib *libcpp_link_lib;
|
||||
LLVMTargetDataRef target_data_ref;
|
||||
LLVMTargetMachineRef target_machine;
|
||||
ZigLLVMDIFile *dummy_di_file;
|
||||
@ -2104,7 +2048,6 @@ struct CodeGen {
|
||||
ZigList<ZigFn *> inline_fns;
|
||||
ZigList<ZigFn *> test_fns;
|
||||
ZigList<ErrorTableEntry *> errors_by_index;
|
||||
ZigList<CacheHash *> caches_to_release;
|
||||
size_t largest_err_name_len;
|
||||
ZigList<ZigType *> type_resolve_stack;
|
||||
|
||||
@ -2173,18 +2116,17 @@ struct CodeGen {
|
||||
Buf llvm_triple_str;
|
||||
Buf global_asm;
|
||||
Buf o_file_output_path;
|
||||
Buf bin_file_output_path;
|
||||
Buf h_file_output_path;
|
||||
Buf asm_file_output_path;
|
||||
Buf llvm_ir_file_output_path;
|
||||
Buf analysis_json_output_path;
|
||||
Buf docs_output_path;
|
||||
Buf *cache_dir;
|
||||
// As an input parameter, mutually exclusive with enable_cache. But it gets
|
||||
// populated in codegen_build_and_link.
|
||||
Buf *output_dir;
|
||||
Buf *c_artifact_dir;
|
||||
const char **libc_include_dir_list;
|
||||
size_t libc_include_dir_len;
|
||||
|
||||
Buf *zig_c_headers_dir; // Cannot be overridden; derived from zig_lib_dir.
|
||||
Buf *builtin_zig_path;
|
||||
Buf *zig_std_special_dir; // Cannot be overridden; derived from zig_lib_dir.
|
||||
|
||||
IrInstSrc *invalid_inst_src;
|
||||
@ -2206,118 +2148,46 @@ struct CodeGen {
|
||||
Stage2ProgressNode *main_progress_node;
|
||||
Stage2ProgressNode *sub_progress_node;
|
||||
|
||||
WantPIC want_pic;
|
||||
WantStackCheck want_stack_check;
|
||||
WantCSanitize want_sanitize_c;
|
||||
CacheHash cache_hash;
|
||||
ErrColor err_color;
|
||||
uint32_t next_unresolved_index;
|
||||
unsigned pointer_size_bytes;
|
||||
uint32_t target_os_index;
|
||||
uint32_t target_arch_index;
|
||||
uint32_t target_sub_arch_index;
|
||||
uint32_t target_abi_index;
|
||||
uint32_t target_oformat_index;
|
||||
bool is_big_endian;
|
||||
bool have_c_main;
|
||||
bool have_winmain;
|
||||
bool have_wwinmain;
|
||||
bool have_winmain_crt_startup;
|
||||
bool have_wwinmain_crt_startup;
|
||||
bool have_dllmain_crt_startup;
|
||||
bool have_err_ret_tracing;
|
||||
bool link_eh_frame_hdr;
|
||||
bool c_want_stdint;
|
||||
bool c_want_stdbool;
|
||||
bool verbose_tokenize;
|
||||
bool verbose_ast;
|
||||
bool verbose_link;
|
||||
bool verbose_ir;
|
||||
bool verbose_llvm_ir;
|
||||
bool verbose_cimport;
|
||||
bool verbose_cc;
|
||||
bool verbose_llvm_cpu_features;
|
||||
bool error_during_imports;
|
||||
bool generate_error_name_table;
|
||||
bool enable_cache; // mutually exclusive with output_dir
|
||||
bool enable_time_report;
|
||||
bool enable_stack_report;
|
||||
bool system_linker_hack;
|
||||
bool reported_bad_link_libc_error;
|
||||
bool is_dynamic; // shared library rather than static library. dynamic musl rather than static musl.
|
||||
bool need_frame_size_prefix_data;
|
||||
bool disable_c_depfile;
|
||||
|
||||
//////////////////////////// Participates in Input Parameter Cache Hash
|
||||
/////// Note: there is a separate cache hash for builtin.zig, when adding fields,
|
||||
/////// consider if they need to go into both.
|
||||
ZigList<LinkLib *> link_libs_list;
|
||||
// add -framework [name] args to linker
|
||||
ZigList<Buf *> darwin_frameworks;
|
||||
// add -rpath [name] args to linker
|
||||
ZigList<Buf *> rpath_list;
|
||||
ZigList<Buf *> forbidden_libs;
|
||||
ZigList<Buf *> link_objects;
|
||||
ZigList<Buf *> assembly_files;
|
||||
ZigList<CFile *> c_source_files;
|
||||
ZigList<const char *> lib_dirs;
|
||||
ZigList<const char *> framework_dirs;
|
||||
|
||||
Stage2LibCInstallation *libc;
|
||||
|
||||
bool is_versioned;
|
||||
size_t version_major;
|
||||
size_t version_minor;
|
||||
size_t version_patch;
|
||||
const char *linker_script;
|
||||
size_t stack_size_override;
|
||||
bool link_libc;
|
||||
bool link_libcpp;
|
||||
|
||||
BuildMode build_mode;
|
||||
OutType out_type;
|
||||
const ZigTarget *zig_target;
|
||||
TargetSubsystem subsystem; // careful using this directly; see detect_subsystem
|
||||
ValgrindSupport valgrind_support;
|
||||
CodeModel code_model;
|
||||
OptionalBool linker_gc_sections;
|
||||
OptionalBool linker_allow_shlib_undefined;
|
||||
OptionalBool linker_bind_global_refs_locally;
|
||||
bool strip_debug_symbols;
|
||||
bool is_test_build;
|
||||
bool is_single_threaded;
|
||||
bool want_single_threaded;
|
||||
bool linker_rdynamic;
|
||||
bool each_lib_rpath;
|
||||
bool is_dummy_so;
|
||||
bool disable_gen_h;
|
||||
bool bundle_compiler_rt;
|
||||
bool have_pic;
|
||||
bool have_dynamic_link; // this is whether the final thing will be dynamically linked. see also is_dynamic
|
||||
bool link_mode_dynamic;
|
||||
bool dll_export_fns;
|
||||
bool have_stack_probing;
|
||||
bool have_sanitize_c;
|
||||
bool function_sections;
|
||||
bool enable_dump_analysis;
|
||||
bool enable_doc_generation;
|
||||
bool emit_bin;
|
||||
bool emit_asm;
|
||||
bool emit_llvm_ir;
|
||||
bool test_is_evented;
|
||||
bool linker_z_nodelete;
|
||||
bool linker_z_defs;
|
||||
bool valgrind_enabled;
|
||||
|
||||
Buf *root_out_name;
|
||||
Buf *test_filter;
|
||||
Buf *test_name_prefix;
|
||||
Buf *zig_lib_dir;
|
||||
Buf *zig_std_dir;
|
||||
Buf *version_script_path;
|
||||
Buf *override_soname;
|
||||
Buf *linker_optimization;
|
||||
|
||||
const char **llvm_argv;
|
||||
size_t llvm_argv_len;
|
||||
|
||||
const char **clang_argv;
|
||||
size_t clang_argv_len;
|
||||
};
|
||||
|
||||
struct ZigVar {
|
||||
@ -825,6 +825,7 @@ ZigType *get_array_type(CodeGen *g, ZigType *child_type, uint64_t array_size, Zi
|
||||
}
|
||||
|
||||
ZigType *get_slice_type(CodeGen *g, ZigType *ptr_type) {
|
||||
Error err;
|
||||
assert(ptr_type->id == ZigTypeIdPointer);
|
||||
assert(ptr_type->data.pointer.ptr_len == PtrLenUnknown);
|
||||
|
||||
@ -833,6 +834,11 @@ ZigType *get_slice_type(CodeGen *g, ZigType *ptr_type) {
|
||||
return *parent_pointer;
|
||||
}
|
||||
|
||||
// We use the pointer type's abi size below, so we have to resolve it now.
|
||||
if ((err = type_resolve(g, ptr_type, ResolveStatusSizeKnown))) {
|
||||
codegen_report_errors_and_exit(g);
|
||||
}
|
||||
|
||||
ZigType *entry = new_type_table_entry(ZigTypeIdStruct);
|
||||
|
||||
buf_resize(&entry->name, 0);
|
||||
@ -3489,19 +3495,19 @@ void add_var_export(CodeGen *g, ZigVar *var, const char *symbol_name, GlobalLink
|
||||
}
|
||||
|
||||
void add_fn_export(CodeGen *g, ZigFn *fn_table_entry, const char *symbol_name, GlobalLinkageId linkage, CallingConvention cc) {
|
||||
if (cc == CallingConventionC && strcmp(symbol_name, "main") == 0 && g->libc_link_lib != nullptr) {
|
||||
g->have_c_main = true;
|
||||
if (cc == CallingConventionC && strcmp(symbol_name, "main") == 0 && g->link_libc) {
|
||||
g->stage1.have_c_main = true;
|
||||
} else if (cc == CallingConventionStdcall && g->zig_target->os == OsWindows) {
|
||||
if (strcmp(symbol_name, "WinMain") == 0) {
|
||||
g->have_winmain = true;
|
||||
g->stage1.have_winmain = true;
|
||||
} else if (strcmp(symbol_name, "wWinMain") == 0) {
|
||||
g->have_wwinmain = true;
|
||||
g->stage1.have_wwinmain = true;
|
||||
} else if (strcmp(symbol_name, "WinMainCRTStartup") == 0) {
|
||||
g->have_winmain_crt_startup = true;
|
||||
g->stage1.have_winmain_crt_startup = true;
|
||||
} else if (strcmp(symbol_name, "wWinMainCRTStartup") == 0) {
|
||||
g->have_wwinmain_crt_startup = true;
|
||||
g->stage1.have_wwinmain_crt_startup = true;
|
||||
} else if (strcmp(symbol_name, "DllMainCRTStartup") == 0) {
|
||||
g->have_dllmain_crt_startup = true;
|
||||
g->stage1.have_dllmain_crt_startup = true;
|
||||
}
|
||||
}
|
||||
|
||||
@ -7856,40 +7862,6 @@ const char *type_id_name(ZigTypeId id) {
|
||||
zig_unreachable();
|
||||
}
|
||||
|
||||
LinkLib *create_link_lib(Buf *name) {
|
||||
LinkLib *link_lib = heap::c_allocator.create<LinkLib>();
|
||||
link_lib->name = name;
|
||||
return link_lib;
|
||||
}
|
||||
|
||||
LinkLib *add_link_lib(CodeGen *g, Buf *name) {
|
||||
bool is_libc = buf_eql_str(name, "c");
|
||||
bool is_libcpp = buf_eql_str(name, "c++") || buf_eql_str(name, "c++abi");
|
||||
|
||||
if (is_libc && g->libc_link_lib != nullptr)
|
||||
return g->libc_link_lib;
|
||||
|
||||
if (is_libcpp && g->libcpp_link_lib != nullptr)
|
||||
return g->libcpp_link_lib;
|
||||
|
||||
for (size_t i = 0; i < g->link_libs_list.length; i += 1) {
|
||||
LinkLib *existing_lib = g->link_libs_list.at(i);
|
||||
if (buf_eql_buf(existing_lib->name, name)) {
|
||||
return existing_lib;
|
||||
}
|
||||
}
|
||||
|
||||
LinkLib *link_lib = create_link_lib(name);
|
||||
g->link_libs_list.append(link_lib);
|
||||
|
||||
if (is_libc)
|
||||
g->libc_link_lib = link_lib;
|
||||
if (is_libcpp)
|
||||
g->libcpp_link_lib = link_lib;
|
||||
|
||||
return link_lib;
|
||||
}
|
||||
|
||||
ZigType *get_align_amt_type(CodeGen *g) {
|
||||
if (g->align_amt_type == nullptr) {
|
||||
// according to LLVM the maximum alignment is 1 << 29.
|
||||
@ -8010,12 +7982,13 @@ not_integer:
|
||||
return ErrorNone;
|
||||
}
|
||||
|
||||
Error file_fetch(CodeGen *g, Buf *resolved_path, Buf *contents) {
|
||||
if (g->enable_cache) {
|
||||
return cache_add_file_fetch(&g->cache_hash, resolved_path, contents);
|
||||
} else {
|
||||
return os_fetch_file_path(resolved_path, contents);
|
||||
}
|
||||
Error file_fetch(CodeGen *g, Buf *resolved_path, Buf *contents_buf) {
|
||||
size_t len;
|
||||
const char *contents = stage2_fetch_file(&g->stage1, buf_ptr(resolved_path), buf_len(resolved_path), &len);
|
||||
if (contents == nullptr)
|
||||
return ErrorFileNotFound;
|
||||
buf_init_from_mem(contents_buf, contents, len);
|
||||
return ErrorNone;
|
||||
}
|
||||
|
||||
static X64CABIClass type_windows_abi_x86_64_class(CodeGen *g, ZigType *ty, size_t ty_size) {
|
||||
@ -200,8 +200,6 @@ ZigTypeId type_id_at_index(size_t index);
|
||||
size_t type_id_len();
|
||||
size_t type_id_index(ZigType *entry);
|
||||
ZigType *get_generic_fn_type(CodeGen *g, FnTypeId *fn_type_id);
|
||||
LinkLib *create_link_lib(Buf *name);
|
||||
LinkLib *add_link_lib(CodeGen *codegen, Buf *lib);
|
||||
bool optional_value_is_null(ZigValue *val);
|
||||
|
||||
uint32_t get_abi_alignment(CodeGen *g, ZigType *type_entry);
|
||||
@ -256,7 +254,6 @@ Error ensure_const_val_repr(IrAnalyze *ira, CodeGen *codegen, AstNode *source_no
|
||||
void typecheck_panic_fn(CodeGen *g, TldFn *tld_fn, ZigFn *panic_fn);
|
||||
Buf *type_bare_name(ZigType *t);
|
||||
Buf *type_h_name(ZigType *t);
|
||||
Error create_c_object_cache(CodeGen *g, CacheHash **out_cache_hash, bool verbose);
|
||||
|
||||
LLVMTypeRef get_llvm_type(CodeGen *g, ZigType *type);
|
||||
ZigLLVMDIType *get_llvm_di_type(CodeGen *g, ZigType *type);
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user